Lines matching refs:sbq (from lib/sbitmap.c)

324 static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
344 * Each word can be limited to sbq->min_shallow_depth bits.
346 shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
347 depth = ((depth >> sbq->sb.shift) * shallow_depth +
348 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
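A worked example of the arithmetic at 346-348 (not part of the listing): with sb.shift = 6 (64-bit words), depth = 256 and min_shallow_depth = 16, shallow_depth becomes min(64, 16) = 16 and depth is reduced to (256 >> 6) * 16 + min(256 & 63, 16) = 64, so the wake batch is sized as if only 64 bits were usable.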
355 int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
361 ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node);
365 sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
366 if (!sbq->alloc_hint) {
367 sbitmap_free(&sbq->sb);
373 *per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth;
376 sbq->min_shallow_depth = UINT_MAX;
377 sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
378 atomic_set(&sbq->wake_index, 0);
379 atomic_set(&sbq->ws_active, 0);
381 sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
382 if (!sbq->ws) {
383 free_percpu(sbq->alloc_hint);
384 sbitmap_free(&sbq->sb);
389 init_waitqueue_head(&sbq->ws[i].wait);
390 atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
393 sbq->round_robin = round_robin;
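The lines above are the queue setup in sbitmap_queue_init_node(). A minimal usage sketch (not from the listing; the depth, shift and node choices are illustrative) of creating and destroying a queue with the matching helpers declared in include/linux/sbitmap.h:

	#include <linux/sbitmap.h>
	#include <linux/gfp.h>
	#include <linux/numa.h>

	static int example_setup(void)
	{
		struct sbitmap_queue sbq;
		int ret;

		/* depth 128; shift -1 lets the library pick the word size; no round-robin */
		ret = sbitmap_queue_init_node(&sbq, 128, -1, false, GFP_KERNEL,
					      NUMA_NO_NODE);
		if (ret)
			return ret;

		/* ... allocate and free bits here ... */

		sbitmap_queue_free(&sbq);
		return 0;
	}
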
398 static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
401 unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
404 if (sbq->wake_batch != wake_batch) {
405 WRITE_ONCE(sbq->wake_batch, wake_batch);
413 atomic_set(&sbq->ws[i].wait_cnt, 1);
417 void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
419 sbitmap_queue_update_wake_batch(sbq, depth);
420 sbitmap_resize(&sbq->sb, depth);
424 int __sbitmap_queue_get(struct sbitmap_queue *sbq)
429 hint = this_cpu_read(*sbq->alloc_hint);
430 depth = READ_ONCE(sbq->sb.depth);
433 this_cpu_write(*sbq->alloc_hint, hint);
435 nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);
439 this_cpu_write(*sbq->alloc_hint, 0);
440 } else if (nr == hint || unlikely(sbq->round_robin)) {
445 this_cpu_write(*sbq->alloc_hint, hint);
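A hedged sketch of the usual fast-path pairing of __sbitmap_queue_get() with sbitmap_queue_clear() (shown further down at 562): a return of -1 means no bit is free, and the cpu argument only feeds the per-cpu allocation hint, so raw_smp_processor_id() here is an illustrative choice:

	#include <linux/sbitmap.h>
	#include <linux/smp.h>
	#include <linux/errno.h>

	static int example_get_put(struct sbitmap_queue *sbq)
	{
		int nr;

		nr = __sbitmap_queue_get(sbq);
		if (nr < 0)
			return -EBUSY;	/* exhausted; a caller that must not fail sleeps instead */

		/* nr now owns one bit, e.g. usable as a tag index */

		/* clearing the bit updates the per-cpu hint and wakes batched waiters */
		sbitmap_queue_clear(sbq, nr, raw_smp_processor_id());
		return 0;
	}
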
452 int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
458 WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);
460 hint = this_cpu_read(*sbq->alloc_hint);
461 depth = READ_ONCE(sbq->sb.depth);
464 this_cpu_write(*sbq->alloc_hint, hint);
466 nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth);
470 this_cpu_write(*sbq->alloc_hint, 0);
471 } else if (nr == hint || unlikely(sbq->round_robin)) {
476 this_cpu_write(*sbq->alloc_hint, hint);
483 void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
486 sbq->min_shallow_depth = min_shallow_depth;
487 sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
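The WARN_ON_ONCE() at 458 ties the two functions above together: a caller must announce its smallest shallow depth via sbitmap_queue_min_shallow_depth() before the first shallow allocation, so the wake batch is recomputed for the reduced depth. A sketch under that assumption (the depth of 4 and the cpu choice are arbitrary):

	#include <linux/sbitmap.h>
	#include <linux/smp.h>
	#include <linux/errno.h>

	static int example_get_shallow(struct sbitmap_queue *sbq)
	{
		int nr;

		/* announced once, before any shallow allocation is attempted */
		sbitmap_queue_min_shallow_depth(sbq, 4);

		/* use at most 4 bits of each word for this allocation */
		nr = __sbitmap_queue_get_shallow(sbq, 4);
		if (nr < 0)
			return -EBUSY;

		sbitmap_queue_clear(sbq, nr, raw_smp_processor_id());
		return 0;
	}
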
491 static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
495 if (!atomic_read(&sbq->ws_active))
498 wake_index = atomic_read(&sbq->wake_index);
500 struct sbq_wait_state *ws = &sbq->ws[wake_index];
503 if (wake_index != atomic_read(&sbq->wake_index))
504 atomic_set(&sbq->wake_index, wake_index);
514 static bool __sbq_wake_up(struct sbitmap_queue *sbq)
520 ws = sbq_wake_ptr(sbq);
528 wake_batch = READ_ONCE(sbq->wake_batch);
544 sbq_index_atomic_inc(&sbq->wake_index);
555 void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
557 while (__sbq_wake_up(sbq))
562 void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
576 sbitmap_deferred_clear_bit(&sbq->sb, nr);
585 sbitmap_queue_wake_up(sbq);
587 if (likely(!sbq->round_robin && nr < sbq->sb.depth))
588 *per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
592 void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
601 wake_index = atomic_read(&sbq->wake_index);
603 struct sbq_wait_state *ws = &sbq->ws[wake_index];
613 void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
618 sbitmap_show(&sbq->sb, m);
626 seq_printf(m, "%u", *per_cpu_ptr(sbq->alloc_hint, i));
630 seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
631 seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
632 seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));
636 struct sbq_wait_state *ws = &sbq->ws[i];
644 seq_printf(m, "round_robin=%d\n", sbq->round_robin);
645 seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
649 void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
653 if (!sbq_wait->sbq) {
654 sbq_wait->sbq = sbq;
655 atomic_inc(&sbq->ws_active);
664 if (sbq_wait->sbq) {
665 atomic_dec(&sbq_wait->sbq->ws_active);
666 sbq_wait->sbq = NULL;
671 void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
675 if (!sbq_wait->sbq) {
676 atomic_inc(&sbq->ws_active);
677 sbq_wait->sbq = sbq;
683 void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
687 if (sbq_wait->sbq) {
688 atomic_dec(&sbq->ws_active);
689 sbq_wait->sbq = NULL;
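These last helpers keep ws_active in step with the number of registered waiters, which is what the early return at 495 checks so that a clear can skip the wakeup path when nobody is waiting. A sketch of a blocking allocation loop, loosely modeled on how blk-mq waits for a tag; the static wait_index, the io_schedule() call and TASK_UNINTERRUPTIBLE are illustrative choices, not part of the listing:

	#include <linux/sbitmap.h>
	#include <linux/sched.h>
	#include <linux/wait.h>

	static int example_get_blocking(struct sbitmap_queue *sbq)
	{
		static atomic_t wait_index = ATOMIC_INIT(0);	/* illustrative */
		struct sbq_wait_state *ws;
		DEFINE_SBQ_WAIT(wait);
		int nr;

		/* fast path first */
		nr = __sbitmap_queue_get(sbq);
		if (nr >= 0)
			return nr;

		ws = sbq_wait_ptr(sbq, &wait_index);
		do {
			sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
			nr = __sbitmap_queue_get(sbq);
			if (nr >= 0)
				break;
			/* sleep until sbitmap_queue_clear() wakes a batch of waiters */
			io_schedule();
			sbitmap_finish_wait(sbq, ws, &wait);
			ws = sbq_wait_ptr(sbq, &wait_index);
		} while (1);
		sbitmap_finish_wait(sbq, ws, &wait);

		return nr;
	}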