Lines matching refs: khd
138 * There is the same mapping between ctx & hctx and kcq & khd;
139 * we use request->mq_ctx->index_hw to index the kcq in khd.
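
These references appear to come from the Kyber I/O scheduler (block/kyber-iosched.c in the Linux kernel). The comment above says that software queues (ctx) map onto per-hardware-queue kcq slots the same way ctx maps onto hctx, so a request's kcq is looked up with rq->mq_ctx->index_hw[hctx->type] (see lines 567 and 592 below). A minimal stand-alone C sketch of that relationship, using stand-in types and an assumed domain count rather than the kernel definitions:

/* Illustrative stand-ins only, not the kernel types. */
struct list_head { struct list_head *next, *prev; };

#define NUM_DOMAINS 4                      /* assumption; the domain count depends on kernel version */

struct kcq_sketch {                        /* "kcq": one per software queue (ctx) */
        struct list_head rq_list[NUM_DOMAINS];  /* pending requests, per domain */
};

struct khd_sketch {                        /* "khd": one per hardware queue (hctx) */
        struct kcq_sketch *kcqs;           /* nr_ctx entries; a request's kcq is
                                            * kcqs[rq->mq_ctx->index_hw[hctx->type]] */
        struct list_head rqs[NUM_DOMAINS]; /* per-domain dispatch lists */
        unsigned int cur_domain;           /* domain currently being served */
        unsigned int batching;             /* requests dispatched in this batch */
};
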
461 struct kyber_hctx_data *khd;
464 khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node);
465 if (!khd)
468 khd->kcqs = kmalloc_array_node(hctx->nr_ctx,
471 if (!khd->kcqs)
475 kyber_ctx_queue_init(&khd->kcqs[i]);
478 if (sbitmap_init_node(&khd->kcq_map[i], hctx->nr_ctx,
481 sbitmap_free(&khd->kcq_map[i]);
486 spin_lock_init(&khd->lock);
489 INIT_LIST_HEAD(&khd->rqs[i]);
490 khd->domain_wait[i].sbq = NULL;
491 init_waitqueue_func_entry(&khd->domain_wait[i].wait,
493 khd->domain_wait[i].wait.private = hctx;
494 INIT_LIST_HEAD(&khd->domain_wait[i].wait.entry);
495 atomic_set(&khd->wait_index[i], 0);
498 khd->cur_domain = 0;
499 khd->batching = 0;
501 hctx->sched_data = khd;
507 kfree(khd->kcqs);
509 kfree(khd);
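
Lines 461-509 are from the hctx init path: khd is allocated on the hctx's NUMA node, the kcqs array gets one entry per software queue, each per-domain kcq_map bitmap is set up for nr_ctx bits, and the error path frees whatever was already initialised in reverse order. A self-contained sketch of that allocate-then-unwind shape, using plain calloc()/free() and a word array in place of the kernel's kmalloc_node()/sbitmap APIs:

#include <stdlib.h>

#define NUM_DOMAINS 4                            /* illustrative domain count */

struct kcq_sketch { int pending; };              /* stand-in for the real kcq */

struct khd_sketch {
        struct kcq_sketch *kcqs;                 /* one entry per software queue */
        unsigned long *kcq_map[NUM_DOMAINS];     /* stand-in for struct sbitmap */
};

static struct khd_sketch *init_hctx_sketch(unsigned int nr_ctx)
{
        unsigned int words = (nr_ctx + 63) / 64; /* one bit per kcq */
        struct khd_sketch *khd;
        int i;

        khd = calloc(1, sizeof(*khd));
        if (!khd)
                return NULL;

        khd->kcqs = calloc(nr_ctx, sizeof(*khd->kcqs));
        if (!khd->kcqs)
                goto err_khd;

        /* one "busy kcq" bitmap per scheduling domain */
        for (i = 0; i < NUM_DOMAINS; i++) {
                khd->kcq_map[i] = calloc(words, sizeof(unsigned long));
                if (!khd->kcq_map[i])
                        goto err_kcq_map;
        }
        return khd;

err_kcq_map:
        /* free only what was initialised, newest first, like the error path above */
        while (--i >= 0)
                free(khd->kcq_map[i]);
        free(khd->kcqs);
err_khd:
        free(khd);
        return NULL;
}

The exit path at lines 515-520 then frees the same bitmaps and the kcqs array unconditionally.
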
515 struct kyber_hctx_data *khd = hctx->sched_data;
519 sbitmap_free(&khd->kcq_map[i]);
520 kfree(khd->kcqs);
566 struct kyber_hctx_data *khd = hctx->sched_data;
567 struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
587 struct kyber_hctx_data *khd = hctx->sched_data;
592 struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw[hctx->type]];
600 sbitmap_set_bit(&khd->kcq_map[sched_domain],
655 struct kyber_hctx_data *khd;
663 struct kyber_ctx_queue *kcq = &flush_data->khd->kcqs[bitnr];
674 static void kyber_flush_busy_kcqs(struct kyber_hctx_data *khd,
679 .khd = khd,
684 sbitmap_for_each_set(&khd->kcq_map[sched_domain],
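
Lines 566-600 and 655-684 show the two halves of the kcq_map handshake: on insert, a request is queued on its ctx's kcq and that kcq's bit is set in khd->kcq_map[sched_domain]; on dispatch, kyber_flush_busy_kcqs() walks only the set bits with sbitmap_for_each_set() and moves those kcqs' pending requests onto a dispatch list. A runnable, simplified sketch of that set-a-bit / walk-the-set-bits pattern, with a single machine word standing in for struct sbitmap:

#include <stdio.h>

/* mark kcq `idx` as having pending work for one scheduling domain */
static void mark_kcq_busy(unsigned long *kcq_map, unsigned int idx)
{
        *kcq_map |= 1UL << idx;
}

/*
 * Visit every busy kcq once and clear its bit -- roughly what
 * sbitmap_for_each_set() plus the flush callback do in the kernel.
 */
static void flush_busy_kcqs(unsigned long *kcq_map,
                            void (*flush)(unsigned int idx, void *data),
                            void *data)
{
        while (*kcq_map) {
                unsigned int idx = __builtin_ctzl(*kcq_map); /* lowest set bit */

                *kcq_map &= ~(1UL << idx);
                flush(idx, data);        /* splice this kcq's requests */
        }
}

static void print_kcq(unsigned int idx, void *data)
{
        (void)data;
        printf("flushing kcq %u\n", idx);
}

int main(void)
{
        unsigned long kcq_map = 0;

        mark_kcq_busy(&kcq_map, 3);      /* e.g. two ctxs saw inserts */
        mark_kcq_busy(&kcq_map, 17);
        flush_busy_kcqs(&kcq_map, print_kcq, NULL);
        return 0;
}
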
700 struct kyber_hctx_data *khd,
703 unsigned int sched_domain = khd->cur_domain;
705 struct sbq_wait *wait = &khd->domain_wait[sched_domain];
714 * khd->lock, but we still need to be careful about the waker.
718 &khd->wait_index[sched_domain]);
719 khd->domain_ws[sched_domain] = ws;
737 ws = khd->domain_ws[sched_domain];
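
Lines 700-737 are the domain-token path: the dispatcher first tries to take a token for khd->cur_domain; only if that fails does it pick a wait queue via khd->wait_index, record it in khd->domain_ws, add the domain's sbq_wait entry, and then try the token once more, because a token may have been freed between the failed attempt and the registration (the comment around line 714 notes this is serialized on khd->lock but still has to mind the waker). A much-simplified, single-threaded sketch of that try / register / retry-once shape, with stand-in types instead of sbitmap_queue and sbq_wait:

#include <stdbool.h>

struct token_pool_sketch {
        int free_tokens;                  /* tokens left for this domain */
};

struct waiter_sketch {
        bool queued;                      /* already on a wait queue? */
};

static int try_get_token(struct token_pool_sketch *pool)
{
        if (pool->free_tokens > 0)
                return --pool->free_tokens;  /* "token number" */
        return -1;
}

/*
 * Same shape as kyber_get_domain_token(): try, and only on failure register
 * a waiter and try again, since a token may have been freed in between.
 */
static int get_domain_token_sketch(struct token_pool_sketch *pool,
                                   struct waiter_sketch *wait)
{
        int nr = try_get_token(pool);

        if (nr < 0 && !wait->queued) {
                wait->queued = true;      /* add the sbq_wait entry in the kernel */
                nr = try_get_token(pool); /* retry to close the race window */
                if (nr >= 0)
                        wait->queued = false; /* got one after all: drop the wait
                                               * entry, minding a concurrent waker */
        }
        return nr;
}
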
748 struct kyber_hctx_data *khd,
755 rqs = &khd->rqs[khd->cur_domain];
762 * khd->lock serializes the flushes, so if we observed any bit set in
767 nr = kyber_get_domain_token(kqd, khd, hctx);
769 khd->batching++;
775 kyber_domain_names[khd->cur_domain]);
777 } else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) {
778 nr = kyber_get_domain_token(kqd, khd, hctx);
780 kyber_flush_busy_kcqs(khd, khd->cur_domain, rqs);
782 khd->batching++;
788 kyber_domain_names[khd->cur_domain]);
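
Lines 748-788 are kyber_dispatch_cur_domain(): requests already moved onto khd->rqs[cur_domain] are served first; otherwise, if any bit is set in the domain's kcq_map, a token is taken, the busy kcqs are flushed into the list and a request is dispatched from there, bumping khd->batching; when no token is available the domain is reported as throttled. A compact sketch of that decision order, with stand-in fields instead of real request lists, sbitmaps and tokens:

#include <stdbool.h>

struct cur_domain_sketch {
        int queued_rqs;                   /* requests already on khd->rqs[domain] */
        bool kcq_busy;                    /* any bit set in khd->kcq_map[domain]? */
        int free_tokens;                  /* domain tokens left */
};

/*
 * Decision order of kyber_dispatch_cur_domain(): dispatch list first, and
 * only if it is empty flush the busy kcqs; either way a domain token is
 * required before a request goes out.
 */
static bool dispatch_cur_domain_sketch(struct cur_domain_sketch *d,
                                       unsigned int *batching)
{
        if (d->queued_rqs > 0) {
                if (d->free_tokens <= 0)
                        return false;     /* throttled: no token for this domain */
                d->free_tokens--;
                d->queued_rqs--;
                (*batching)++;
                return true;
        }

        if (d->kcq_busy) {
                if (d->free_tokens <= 0)
                        return false;     /* throttled */
                d->free_tokens--;
                /* kyber_flush_busy_kcqs() would splice the kcq requests here */
                d->kcq_busy = false;
                (*batching)++;
                return true;
        }

        return false;                     /* nothing pending in this domain */
}
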
799 struct kyber_hctx_data *khd = hctx->sched_data;
803 spin_lock(&khd->lock);
809 if (khd->batching < kyber_batch_size[khd->cur_domain]) {
810 rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
824 khd->batching = 0;
826 if (khd->cur_domain == KYBER_NUM_DOMAINS - 1)
827 khd->cur_domain = 0;
829 khd->cur_domain++;
831 rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
838 spin_unlock(&khd->lock);
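
Lines 799-838 are kyber_dispatch_request(): under khd->lock it keeps serving the current domain until that domain's batch quota is used up, then resets batching and advances cur_domain round-robin (wrapping after the last domain), giving every domain one chance per call. A self-contained sketch of that batching/round-robin loop; the domain count and batch limits are illustrative, and batching is advanced here rather than inside the per-domain helper as the kernel does:

#include <stdbool.h>

#define NUM_DOMAINS 4                       /* illustrative */

/* hypothetical per-domain batch limits in the spirit of kyber_batch_size[] */
static const unsigned int batch_size_sketch[NUM_DOMAINS] = { 16, 8, 1, 1 };

struct rr_state_sketch {
        unsigned int cur_domain;
        unsigned int batching;
};

/* `try_dispatch(domain)` stands in for kyber_dispatch_cur_domain(). */
static bool dispatch_request_sketch(struct rr_state_sketch *khd,
                                    bool (*try_dispatch)(unsigned int domain))
{
        unsigned int i;

        /* Stay on the current domain while its batch quota lasts. */
        if (khd->batching < batch_size_sketch[khd->cur_domain] &&
            try_dispatch(khd->cur_domain)) {
                khd->batching++;
                return true;
        }

        /* Quota exhausted or nothing dispatchable: try each other domain once. */
        for (i = 0; i < NUM_DOMAINS; i++) {
                khd->batching = 0;
                khd->cur_domain = (khd->cur_domain + 1) % NUM_DOMAINS;
                if (try_dispatch(khd->cur_domain)) {
                        khd->batching++;
                        return true;
                }
        }
        return false;
}

kyber_has_work() (lines 844-849) is the cheap counterpart: it only checks whether any rqs list is non-empty or any kcq_map bit is set, without touching tokens.
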
844 struct kyber_hctx_data *khd = hctx->sched_data;
848 if (!list_empty_careful(&khd->rqs[i]) ||
849 sbitmap_any_bit_set(&khd->kcq_map[i]))
904 __acquires(&khd->lock) \
907 struct kyber_hctx_data *khd = hctx->sched_data; \
909 spin_lock(&khd->lock); \
910 return seq_list_start(&khd->rqs[domain], *pos); \
917 struct kyber_hctx_data *khd = hctx->sched_data; \
919 return seq_list_next(v, &khd->rqs[domain], pos); \
923 __releases(&khd->lock) \
926 struct kyber_hctx_data *khd = hctx->sched_data; \
928 spin_unlock(&khd->lock); \
941 struct kyber_hctx_data *khd = hctx->sched_data; \
942 wait_queue_entry_t *wait = &khd->domain_wait[domain].wait; \
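
Lines 904-942 belong to the per-domain debugfs helpers, which are generated by a macro: each expansion produces seq_file start/next/stop callbacks that walk khd->rqs[domain] under khd->lock, plus a show callback that reports the state of that domain's wait entry. A small stand-alone illustration of the token-pasting pattern such a macro relies on, with hypothetical names and printf in place of seq_file output:

#include <stdio.h>

#define NUM_DOMAINS 4                         /* illustrative */

struct khd_dbg_sketch {
        int rqs_len[NUM_DOMAINS];             /* stand-in for the rqs lists */
};

/*
 * One macro expansion per domain generates a helper bound to that domain's
 * index, the same trick used to stamp out the per-domain debugfs ops.
 */
#define DEFINE_DOMAIN_RQS_SHOW_SKETCH(domain, name)                      \
static void name##_rqs_show(const struct khd_dbg_sketch *khd)            \
{                                                                        \
        printf(#name ": %d queued\n", khd->rqs_len[domain]);             \
}

DEFINE_DOMAIN_RQS_SHOW_SKETCH(0, read)
DEFINE_DOMAIN_RQS_SHOW_SKETCH(1, write)

int main(void)
{
        struct khd_dbg_sketch khd = { .rqs_len = { 3, 1, 0, 0 } };

        read_rqs_show(&khd);
        write_rqs_show(&khd);
        return 0;
}
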
965 struct kyber_hctx_data *khd = hctx->sched_data;
967 seq_printf(m, "%s\n", kyber_domain_names[khd->cur_domain]);
974 struct kyber_hctx_data *khd = hctx->sched_data;
976 seq_printf(m, "%u\n", khd->batching);