Lines Matching defs:sched_domain in block/kyber-iosched.c

211 unsigned int sched_domain, unsigned int type)
213 unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
214 atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type];
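
Lines 211-214 are the body of flush_latency_buckets(): each CPU keeps its own latency histogram per (domain, type) pair, and the timer drains them into a global histogram. A minimal userspace sketch of that drain, using C11 atomics in place of the kernel's atomic_t; the names and bucket count are illustrative, not the kernel's:

    #include <stdatomic.h>

    #define NBUCKETS 8 /* stand-in for KYBER_LATENCY_BUCKETS */

    /* Atomically swap each per-CPU counter with zero and add the
     * old value into the global histogram. */
    static void flush_buckets(unsigned int *global, atomic_uint *per_cpu)
    {
            for (unsigned int b = 0; b < NBUCKETS; b++)
                    global[b] += atomic_exchange(&per_cpu[b], 0);
    }
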
226 unsigned int sched_domain, unsigned int type,
229 unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
242 if (!kqd->latency_timeout[sched_domain])
243 kqd->latency_timeout[sched_domain] = max(jiffies + HZ, 1UL);
245 time_is_after_jiffies(kqd->latency_timeout[sched_domain])) {
248 kqd->latency_timeout[sched_domain] = 0;
256 memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));
258 trace_kyber_latency(kqd->q, kyber_domain_names[sched_domain],
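
Lines 226-258 belong to calculate_percentile(), which reads a latency percentile straight off the histogram: sum the samples, gate on either enough samples or the one-second timeout kept in latency_timeout (lines 242-248), walk the buckets until the cumulative count reaches the requested rank, then reset the histogram (line 256) and emit a tracepoint. A userspace sketch of the bucket walk; the sample/timeout gating is omitted and the names are illustrative:

    /* Return the first bucket whose cumulative count reaches the
     * requested percentile rank, or -1 if there are no samples. */
    static int percentile_bucket(const unsigned int *buckets,
                                 unsigned int nbuckets, unsigned int pct)
    {
            unsigned int samples = 0, needed, b;

            for (b = 0; b < nbuckets; b++)
                    samples += buckets[b];
            if (!samples)
                    return -1;

            needed = (samples * pct + 99) / 100; /* DIV_ROUND_UP */
            for (b = 0; b < nbuckets - 1; b++) {
                    if (buckets[b] >= needed)
                            break;
                    needed -= buckets[b];
            }
            return (int)b;
    }
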
266 unsigned int sched_domain, unsigned int depth)
268 depth = clamp(depth, 1U, kyber_depth[sched_domain]);
269 if (depth != kqd->domain_tokens[sched_domain].sb.depth) {
270 sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
271 trace_kyber_adjust(kqd->q, kyber_domain_names[sched_domain],
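
Lines 266-271 show kyber_resize_domain()'s guard: the requested depth is clamped to [1, kyber_depth[sched_domain]], and the domain's token sbitmap is resized (with a tracepoint) only when the clamped value differs from the current depth. The clamp by itself, as a sketch with an illustrative max rather than the kernel's kyber_depth[] table:

    /* Clamp the requested depth to [1, max]; the caller resizes
     * only when the result differs from the current depth. */
    static unsigned int clamp_depth(unsigned int depth, unsigned int max)
    {
            if (depth < 1)
                    return 1;
            return depth > max ? max : depth;
    }
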
279 unsigned int sched_domain;
288 for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
289 flush_latency_buckets(kqd, cpu_latency, sched_domain,
291 flush_latency_buckets(kqd, cpu_latency, sched_domain,
301 for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
304 p90 = calculate_percentile(kqd, sched_domain, KYBER_IO_LATENCY,
315 for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
319 p99 = calculate_percentile(kqd, sched_domain,
331 p99 = kqd->domain_p99[sched_domain];
332 kqd->domain_p99[sched_domain] = -1;
334 kqd->domain_p99[sched_domain] = p99;
349 orig_depth = kqd->domain_tokens[sched_domain].sb.depth;
351 kyber_resize_domain(kqd, sched_domain, depth);
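
Lines 279-351 are kyber_timer_fn(): flush every CPU's buckets (288-291), use each domain's I/O-latency p90 to detect device congestion (301-304, the p90 rather than the p99 so a few outliers don't trip it), then use the total-latency p99 to rescale each domain's queue depth (315-351). The scaling line itself doesn't mention sched_domain, so it isn't matched above; in the kernel it computes depth = (orig_depth * (p99 + 1)) >> KYBER_LATENCY_SHIFT, i.e. depth scales linearly with the p99 bucket relative to the latency target. A sketch, assuming the kernel's shift of 2 (eight buckets, target at the bucket-3/4 boundary):

    #define LATENCY_SHIFT 2 /* KYBER_LATENCY_SHIFT */

    /* p99 at the target boundary (bucket 3) keeps the depth; p99 in
     * bucket 7 (~2x the target) doubles it; bucket 1 halves it. */
    static unsigned int scale_depth(unsigned int orig_depth, int p99)
    {
            return (orig_depth * (unsigned int)(p99 + 1)) >> LATENCY_SHIFT;
    }
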
537 unsigned int sched_domain;
542 sched_domain = kyber_sched_domain(rq->cmd_flags);
543 sbitmap_queue_clear(&kqd->domain_tokens[sched_domain], nr,
568 unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
569 struct list_head *rq_list = &kcq->rq_list[sched_domain];
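
Lines 542, 568, and 591 all start from kyber_sched_domain(), which buckets an operation into a scheduling domain by opcode: reads, writes, discards, and a catch-all for everything else. A sketch of that mapping, with illustrative op values standing in for the kernel's REQ_OP encoding:

    enum domain { DOM_READ, DOM_WRITE, DOM_DISCARD, DOM_OTHER };

    /* Classify an I/O by opcode; the case values are illustrative
     * stand-ins for REQ_OP_READ/REQ_OP_WRITE/REQ_OP_DISCARD. */
    static enum domain classify(int op)
    {
            switch (op) {
            case 0:  return DOM_READ;
            case 1:  return DOM_WRITE;
            case 3:  return DOM_DISCARD;
            default: return DOM_OTHER;
            }
    }
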
591 unsigned int sched_domain = kyber_sched_domain(rq->cmd_flags);
593 struct list_head *head = &kcq->rq_list[sched_domain];
600 sbitmap_set_bit(&khd->kcq_map[sched_domain],
615 unsigned int sched_domain, unsigned int type,
629 atomic_inc(&cpu_latency->buckets[sched_domain][type][bucket]);
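
Lines 615-629 are add_latency_sample(), which maps a measured latency onto a histogram bucket before the atomic_inc at line 629. The bucket width is the domain's latency target shifted down by KYBER_LATENCY_SHIFT, so the lower half of the buckets lies below the target and the upper half above it. A userspace sketch of the mapping; the kernel does the same arithmetic with max_t and div64_u64:

    #include <stdint.h>

    #define NBUCKETS      8 /* KYBER_LATENCY_BUCKETS */
    #define LATENCY_SHIFT 2 /* KYBER_LATENCY_SHIFT */

    /* Buckets 0..3 sit below the latency target, 4..7 above it. */
    static unsigned int latency_bucket(uint64_t target, uint64_t latency)
    {
            uint64_t width = target >> LATENCY_SHIFT;
            uint64_t b;

            if (!latency)
                    return 0;
            if (!width)
                    width = 1;
            b = (latency - 1) / width;
            return b >= NBUCKETS - 1 ? NBUCKETS - 1 : (unsigned int)b;
    }
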
636 unsigned int sched_domain;
639 sched_domain = kyber_sched_domain(rq->cmd_flags);
640 if (sched_domain == KYBER_OTHER)
644 target = kqd->latency_targets[sched_domain];
645 add_latency_sample(cpu_latency, sched_domain, KYBER_TOTAL_LATENCY,
647 add_latency_sample(cpu_latency, sched_domain, KYBER_IO_LATENCY, target,
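
Lines 636-647 run at completion time: requests in the catch-all KYBER_OTHER domain are skipped (line 640), then two samples are recorded against the domain's target, total latency since the request was created and I/O latency since it was issued to the device. Continuing the sketch above (reusing latency_bucket(); the kernel increments per-CPU atomic counters rather than plain arrays):

    /* Record one completion into both histograms. */
    static void record_completion(uint64_t now, uint64_t start_ns,
                                  uint64_t io_start_ns, uint64_t target,
                                  unsigned int total_hist[NBUCKETS],
                                  unsigned int io_hist[NBUCKETS])
    {
            total_hist[latency_bucket(target, now - start_ns)]++;
            io_hist[latency_bucket(target, now - io_start_ns)]++;
    }
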
656 unsigned int sched_domain;
666 list_splice_tail_init(&kcq->rq_list[flush_data->sched_domain],
675 unsigned int sched_domain,
680 .sched_domain = sched_domain,
684 sbitmap_for_each_set(&khd->kcq_map[sched_domain],
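
Lines 656-684 are the flush side of the bit set at line 600: insertion marks a software queue's bit in kcq_map, and dispatch walks only the set bits, splicing each queue's per-domain FIFO into a single list. A sketch of the walk over a word-sized map, using the GCC/Clang count-trailing-zeros builtin in place of sbitmap_for_each_set():

    /* Visit each set bit (each busy software queue), lowest first. */
    static void for_each_set(unsigned long map, void (*visit)(unsigned int))
    {
            while (map) {
                    visit((unsigned int)__builtin_ctzl(map));
                    map &= map - 1; /* clear the lowest set bit */
            }
    }
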
703 unsigned int sched_domain = khd->cur_domain;
704 struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
705 struct sbq_wait *wait = &khd->domain_wait[sched_domain];
718 &khd->wait_index[sched_domain]);
719 khd->domain_ws[sched_domain] = ws;
737 ws = khd->domain_ws[sched_domain];
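
Lines 703-737 come from kyber_get_domain_token(), whose structure exists to avoid a lost wakeup: if no token is free, it registers the hctx on the domain's wait queue (lines 718-719) and then must try again, because a token may have been freed between the failed grab and the enqueue; if the retry succeeds it takes itself back off the wait queue (line 737). A self-contained model of that try/enqueue/retry shape, with a C11 atomic counter standing in for the sbitmap; everything here is illustrative, not the sbq API:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int tokens = 4;      /* illustrative domain depth */
    static atomic_bool queued = false; /* stand-in for the sbq wait entry */

    /* Take one token if any are free; -1 otherwise. */
    static int try_get(void)
    {
            int old = atomic_load(&tokens);

            while (old > 0)
                    if (atomic_compare_exchange_weak(&tokens, &old, old - 1))
                            return old - 1;
            return -1;
    }

    static int get_token(void)
    {
            int nr = try_get();

            if (nr < 0) {
                    atomic_store(&queued, true);  /* register as a waiter */
                    nr = try_get();               /* close the race */
                    if (nr >= 0)
                            atomic_store(&queued, false); /* got one after all */
            }
            return nr;
    }
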