Lines Matching defs:queue

58  * from the queue itself.
245 * The stochastic-multi-queue is a set of lru lists stacked into levels.
252 struct queue {
269 static void q_init(struct queue *q, struct entry_space *es, unsigned nr_levels)
287 static unsigned q_size(struct queue *q)
295 static void q_push(struct queue *q, struct entry *e)
305 static void q_push_front(struct queue *q, struct entry *e)
315 static void q_push_before(struct queue *q, struct entry *old, struct entry *e)
325 static void q_del(struct queue *q, struct entry *e)
335 static struct entry *q_peek(struct queue *q, unsigned max_level, bool can_cross_sentinel)
357 static struct entry *q_pop(struct queue *q)
372 static struct entry *__redist_pop_from(struct queue *q, unsigned level)
386 static void q_set_targets_subrange_(struct queue *q, unsigned nr_elts, unsigned lbegin, unsigned lend)
405 static void q_set_targets(struct queue *q)
427 static void q_redistribute(struct queue *q)
470 static void q_requeue(struct queue *q, struct entry *e, unsigned extra_levels,
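
To make the shape of this interface concrete, below is a minimal userspace sketch of a "set of lru lists stacked into levels", mirroring a subset of the q_* operations listed above (q_init, q_push, q_del, q_peek, q_pop). It is an illustration, not the kernel code: the real queue stores entries in an entry_space, links them through indexed lists, honours sentinel entries (the can_cross_sentinel argument), and maintains per-level targets via q_set_targets/q_redistribute, all of which are omitted here; NR_LEVELS stands in for the kernel's level count.

    #include <stddef.h>
    #include <stdio.h>

    #define NR_LEVELS 4                     /* stand-in for the kernel's level count */

    struct entry {
            unsigned level;                 /* which lru list the entry sits in */
            struct entry *prev, *next;
    };

    struct queue {
            unsigned nr_elts;
            struct entry *head[NR_LEVELS];  /* least recently used end */
            struct entry *tail[NR_LEVELS];  /* most recently used end */
    };

    static void q_init(struct queue *q)
    {
            unsigned level;

            q->nr_elts = 0;
            for (level = 0; level < NR_LEVELS; level++)
                    q->head[level] = q->tail[level] = NULL;
    }

    /* Append e at the MRU end of its level. */
    static void q_push(struct queue *q, struct entry *e)
    {
            e->next = NULL;
            e->prev = q->tail[e->level];
            if (q->tail[e->level])
                    q->tail[e->level]->next = e;
            else
                    q->head[e->level] = e;
            q->tail[e->level] = e;
            q->nr_elts++;
    }

    static void q_del(struct queue *q, struct entry *e)
    {
            if (e->prev)
                    e->prev->next = e->next;
            else
                    q->head[e->level] = e->next;
            if (e->next)
                    e->next->prev = e->prev;
            else
                    q->tail[e->level] = e->prev;
            q->nr_elts--;
    }

    /* Return the LRU entry in the lowest populated level, up to max_level. */
    static struct entry *q_peek(struct queue *q, unsigned max_level)
    {
            unsigned level;

            for (level = 0; level <= max_level && level < NR_LEVELS; level++)
                    if (q->head[level])
                            return q->head[level];

            return NULL;
    }

    static struct entry *q_pop(struct queue *q)
    {
            struct entry *e = q_peek(q, NR_LEVELS - 1);

            if (e)
                    q_del(q, e);

            return e;
    }

    int main(void)
    {
            struct queue q;
            struct entry e1 = { .level = 0 }, e2 = { .level = 2 };

            q_init(&q);
            q_push(&q, &e1);
            q_push(&q, &e2);

            /* Eviction candidates come from the lowest populated level first. */
            printf("victim level = %u\n", q_pop(&q)->level);
            return 0;
    }
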
552 * There are times when we don't have any confidence in the hotspot queue.
555 * seeing how often a lookup is in the top levels of the hotspot queue.
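
The two comment fragments above (552/555) describe how confidence in the hotspot queue is judged: by how often a lookup lands in its top levels. A hypothetical, self-contained sketch of that idea follows; the field names and the one-in-four threshold are assumptions for illustration, not values taken from the source.

    #include <stdbool.h>
    #include <stdio.h>

    struct lookup_stats {
            unsigned hit_threshold;         /* lowest level counted as "top" */
            unsigned hits;                  /* lookups at or above that level */
            unsigned misses;                /* lookups below it */
    };

    static void stats_record(struct lookup_stats *s, unsigned level)
    {
            if (level >= s->hit_threshold)
                    s->hits++;
            else
                    s->misses++;
    }

    /* Trust the queue while a reasonable share of lookups hit the top levels. */
    static bool queue_performing_well(const struct lookup_stats *s)
    {
            unsigned total = s->hits + s->misses;

            return total == 0 || s->hits * 4 >= total;
    }

    int main(void)
    {
            struct lookup_stats s = { .hit_threshold = 48 };

            stats_record(&s, 60);   /* lands in the top levels */
            stats_record(&s, 10);   /* lands low */
            stats_record(&s, 5);
            stats_record(&s, 3);

            printf("trust hotspot queue: %s\n",
                   queue_performing_well(&s) ? "yes" : "no");
            return 0;
    }
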
818 * consisting of a clean and dirty queue, containing the currently
819 * active mappings. The hotspot queue uses a larger block size to
823 struct queue hotspot;
824 struct queue clean;
825 struct queue dirty;
886 struct queue *q = &mq->dirty;
899 struct queue *q = &mq->clean;
1055 * If the hotspot queue is performing badly then we have little
1077 * If the hotspot queue is performing badly, then we try and move entries
1140 * size of the clean queue.
1392 * The hotspot queue only gets updated with misses.
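
Taken together, lines 818-825 and 1392 describe the division of labour between the three queues: clean and dirty hold the currently active (cached) mappings, while the hotspot queue works at a larger block size and is only updated on misses. The sketch below reduces each queue to a counter so that this control flow stands out; everything except the hotspot/clean/dirty names is a hypothetical simplification, not the policy's actual code.

    #include <stdbool.h>
    #include <stdio.h>

    struct mini_smq {
            unsigned hotspot;       /* coarser-grained queue, larger block size */
            unsigned clean;         /* cached mappings with no pending writeback */
            unsigned dirty;         /* cached mappings that still need writeback */
    };

    /* The hotspot queue only gets updated with misses (cf. line 1392). */
    static void record_access(struct mini_smq *mq, bool hit, bool is_write)
    {
            if (!hit) {
                    mq->hotspot++;          /* stands in for requeueing the region's hotspot entry */
                    return;
            }

            if (is_write && mq->clean) {    /* a write dirties a clean mapping */
                    mq->clean--;
                    mq->dirty++;
            }
    }

    int main(void)
    {
            struct mini_smq mq = { .clean = 4 };

            record_access(&mq, false, false);       /* miss: bumps the hotspot counter */
            record_access(&mq, true, true);         /* write hit: clean -> dirty */

            printf("hotspot=%u clean=%u dirty=%u\n",
                   mq.hotspot, mq.clean, mq.dirty);
            return 0;
    }
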