xref: /kernel/linux/linux-5.10/block/bfq-wf2q.c (revision 8c2ecf20)
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Hierarchical Budget Worst-case Fair Weighted Fair Queueing
4 * (B-WF2Q+): hierarchical scheduling algorithm by which the BFQ I/O
5 * scheduler schedules generic entities. The latter can represent
6 * either single bfq queues (associated with processes) or groups of
7 * bfq queues (associated with cgroups).
8 */
9#include "bfq-iosched.h"
10
11/**
12 * bfq_gt - compare two timestamps.
13 * @a: first ts.
14 * @b: second ts.
15 *
16 * Return @a > @b, dealing with wrapping correctly.
17 */
18static int bfq_gt(u64 a, u64 b)
19{
20	return (s64)(a - b) > 0;
21}
22
23static struct bfq_entity *bfq_root_active_entity(struct rb_root *tree)
24{
25	struct rb_node *node = tree->rb_node;
26
27	return rb_entry(node, struct bfq_entity, rb_node);
28}
29
30static unsigned int bfq_class_idx(struct bfq_entity *entity)
31{
32	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
33
34	return bfqq ? bfqq->ioprio_class - 1 :
35		BFQ_DEFAULT_GRP_CLASS - 1;
36}
37
38unsigned int bfq_tot_busy_queues(struct bfq_data *bfqd)
39{
40	return bfqd->busy_queues[0] + bfqd->busy_queues[1] +
41		bfqd->busy_queues[2];
42}
43
44static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
45						 bool expiration);
46
47static bool bfq_update_parent_budget(struct bfq_entity *next_in_service);
48
49/**
50 * bfq_update_next_in_service - update sd->next_in_service
51 * @sd: sched_data for which to perform the update.
52 * @new_entity: if not NULL, pointer to the entity whose activation,
53 *		requeueing or repositioning triggered the invocation of
54 *		this function.
55 * @expiration: id true, this function is being invoked after the
56 *             expiration of the in-service entity
57 *
58 * This function is called to update sd->next_in_service, which, in
59 * its turn, may change as a consequence of the insertion or
60 * extraction of an entity into/from one of the active trees of
61 * sd. These insertions/extractions occur as a consequence of
62 * activations/deactivations of entities, with some activations being
63 * 'true' activations, and other activations being requeueings (i.e.,
64 * implementing the second, requeueing phase of the mechanism used to
65 * reposition an entity in its active tree; see comments on
66 * __bfq_activate_entity and __bfq_requeue_entity for details). In
67 * both the last two activation sub-cases, new_entity points to the
68 * just activated or requeued entity.
69 *
70 * Returns true if sd->next_in_service changes in such a way that
71 * entity->parent may become the next_in_service for its parent
72 * entity.
73 */
74static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
75				       struct bfq_entity *new_entity,
76				       bool expiration)
77{
78	struct bfq_entity *next_in_service = sd->next_in_service;
79	bool parent_sched_may_change = false;
80	bool change_without_lookup = false;
81
82	/*
83	 * If this update is triggered by the activation, requeueing
84	 * or repositioning of an entity that does not coincide with
85	 * sd->next_in_service, then a full lookup in the active tree
86	 * can be avoided. In fact, it is enough to check whether the
87	 * just-modified entity has the same priority as
88	 * sd->next_in_service, is eligible and has a lower virtual
89	 * finish time than sd->next_in_service. If this compound
90	 * condition holds, then the new entity becomes the new
91	 * next_in_service. Otherwise no change is needed.
92	 */
93	if (new_entity && new_entity != sd->next_in_service) {
94		/*
95		 * Flag used to decide whether to replace
96		 * sd->next_in_service with new_entity. Tentatively
97		 * set to true, and left as true if
98		 * sd->next_in_service is NULL.
99		 */
100		change_without_lookup = true;
101
102		/*
103		 * If there is already a next_in_service candidate
104		 * entity, then compare timestamps to decide whether
105		 * to replace sd->service_tree with new_entity.
106		 */
107		if (next_in_service) {
108			unsigned int new_entity_class_idx =
109				bfq_class_idx(new_entity);
110			struct bfq_service_tree *st =
111				sd->service_tree + new_entity_class_idx;
112
113			change_without_lookup =
114				(new_entity_class_idx ==
115				 bfq_class_idx(next_in_service)
116				 &&
117				 !bfq_gt(new_entity->start, st->vtime)
118				 &&
119				 bfq_gt(next_in_service->finish,
120					new_entity->finish));
121		}
122
123		if (change_without_lookup)
124			next_in_service = new_entity;
125	}
126
127	if (!change_without_lookup) /* lookup needed */
128		next_in_service = bfq_lookup_next_entity(sd, expiration);
129
130	if (next_in_service) {
131		bool new_budget_triggers_change =
132			bfq_update_parent_budget(next_in_service);
133
134		parent_sched_may_change = !sd->next_in_service ||
135			new_budget_triggers_change;
136	}
137
138	sd->next_in_service = next_in_service;
139
140	if (!next_in_service)
141		return parent_sched_may_change;
142
143	return parent_sched_may_change;
144}
145
146#ifdef CONFIG_BFQ_GROUP_IOSCHED
147
148struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq)
149{
150	struct bfq_entity *group_entity = bfqq->entity.parent;
151
152	if (!group_entity)
153		group_entity = &bfqq->bfqd->root_group->entity;
154
155	return container_of(group_entity, struct bfq_group, entity);
156}
157
158/*
159 * Returns true if this budget changes may let next_in_service->parent
160 * become the next_in_service entity for its parent entity.
161 */
162static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
163{
164	struct bfq_entity *bfqg_entity;
165	struct bfq_group *bfqg;
166	struct bfq_sched_data *group_sd;
167	bool ret = false;
168
169	group_sd = next_in_service->sched_data;
170
171	bfqg = container_of(group_sd, struct bfq_group, sched_data);
172	/*
173	 * bfq_group's my_entity field is not NULL only if the group
174	 * is not the root group. We must not touch the root entity
175	 * as it must never become an in-service entity.
176	 */
177	bfqg_entity = bfqg->my_entity;
178	if (bfqg_entity) {
179		if (bfqg_entity->budget > next_in_service->budget)
180			ret = true;
181		bfqg_entity->budget = next_in_service->budget;
182	}
183
184	return ret;
185}
186
187/*
188 * This function tells whether entity stops being a candidate for next
189 * service, according to the restrictive definition of the field
190 * next_in_service. In particular, this function is invoked for an
191 * entity that is about to be set in service.
192 *
193 * If entity is a queue, then the entity is no longer a candidate for
194 * next service according to the that definition, because entity is
195 * about to become the in-service queue. This function then returns
196 * true if entity is a queue.
197 *
198 * In contrast, entity could still be a candidate for next service if
199 * it is not a queue, and has more than one active child. In fact,
200 * even if one of its children is about to be set in service, other
201 * active children may still be the next to serve, for the parent
202 * entity, even according to the above definition. As a consequence, a
203 * non-queue entity is not a candidate for next-service only if it has
204 * only one active child. And only if this condition holds, then this
205 * function returns true for a non-queue entity.
206 */
207static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
208{
209	struct bfq_group *bfqg;
210
211	if (bfq_entity_to_bfqq(entity))
212		return true;
213
214	bfqg = container_of(entity, struct bfq_group, entity);
215
216	/*
217	 * The field active_entities does not always contain the
218	 * actual number of active children entities: it happens to
219	 * not account for the in-service entity in case the latter is
220	 * removed from its active tree (which may get done after
221	 * invoking the function bfq_no_longer_next_in_service in
222	 * bfq_get_next_queue). Fortunately, here, i.e., while
223	 * bfq_no_longer_next_in_service is not yet completed in
224	 * bfq_get_next_queue, bfq_active_extract has not yet been
225	 * invoked, and thus active_entities still coincides with the
226	 * actual number of active entities.
227	 */
228	if (bfqg->active_entities == 1)
229		return true;
230
231	return false;
232}
233
234#else /* CONFIG_BFQ_GROUP_IOSCHED */
235
236struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq)
237{
238	return bfqq->bfqd->root_group;
239}
240
241static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
242{
243	return false;
244}
245
246static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
247{
248	return true;
249}
250
251#endif /* CONFIG_BFQ_GROUP_IOSCHED */
252
253/*
254 * Shift for timestamp calculations.  This actually limits the maximum
255 * service allowed in one timestamp delta (small shift values increase it),
256 * the maximum total weight that can be used for the queues in the system
257 * (big shift values increase it), and the period of virtual time
258 * wraparounds.
259 */
260#define WFQ_SERVICE_SHIFT	22
261
262struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity)
263{
264	struct bfq_queue *bfqq = NULL;
265
266	if (!entity->my_sched_data)
267		bfqq = container_of(entity, struct bfq_queue, entity);
268
269	return bfqq;
270}
271
272
273/**
274 * bfq_delta - map service into the virtual time domain.
275 * @service: amount of service.
276 * @weight: scale factor (weight of an entity or weight sum).
277 */
278static u64 bfq_delta(unsigned long service, unsigned long weight)
279{
280	return div64_ul((u64)service << WFQ_SERVICE_SHIFT, weight);
281}
282
283/**
284 * bfq_calc_finish - assign the finish time to an entity.
285 * @entity: the entity to act upon.
286 * @service: the service to be charged to the entity.
287 */
288static void bfq_calc_finish(struct bfq_entity *entity, unsigned long service)
289{
290	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
291
292	entity->finish = entity->start +
293		bfq_delta(service, entity->weight);
294
295	if (bfqq) {
296		bfq_log_bfqq(bfqq->bfqd, bfqq,
297			"calc_finish: serv %lu, w %d",
298			service, entity->weight);
299		bfq_log_bfqq(bfqq->bfqd, bfqq,
300			"calc_finish: start %llu, finish %llu, delta %llu",
301			entity->start, entity->finish,
302			bfq_delta(service, entity->weight));
303	}
304}
305
306/**
307 * bfq_entity_of - get an entity from a node.
308 * @node: the node field of the entity.
309 *
310 * Convert a node pointer to the relative entity.  This is used only
311 * to simplify the logic of some functions and not as the generic
312 * conversion mechanism because, e.g., in the tree walking functions,
313 * the check for a %NULL value would be redundant.
314 */
315struct bfq_entity *bfq_entity_of(struct rb_node *node)
316{
317	struct bfq_entity *entity = NULL;
318
319	if (node)
320		entity = rb_entry(node, struct bfq_entity, rb_node);
321
322	return entity;
323}
324
325/**
326 * bfq_extract - remove an entity from a tree.
327 * @root: the tree root.
328 * @entity: the entity to remove.
329 */
330static void bfq_extract(struct rb_root *root, struct bfq_entity *entity)
331{
332	entity->tree = NULL;
333	rb_erase(&entity->rb_node, root);
334}
335
336/**
337 * bfq_idle_extract - extract an entity from the idle tree.
338 * @st: the service tree of the owning @entity.
339 * @entity: the entity being removed.
340 */
341static void bfq_idle_extract(struct bfq_service_tree *st,
342			     struct bfq_entity *entity)
343{
344	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
345	struct rb_node *next;
346
347	if (entity == st->first_idle) {
348		next = rb_next(&entity->rb_node);
349		st->first_idle = bfq_entity_of(next);
350	}
351
352	if (entity == st->last_idle) {
353		next = rb_prev(&entity->rb_node);
354		st->last_idle = bfq_entity_of(next);
355	}
356
357	bfq_extract(&st->idle, entity);
358
359	if (bfqq)
360		list_del(&bfqq->bfqq_list);
361}
362
363/**
364 * bfq_insert - generic tree insertion.
365 * @root: tree root.
366 * @entity: entity to insert.
367 *
368 * This is used for the idle and the active tree, since they are both
369 * ordered by finish time.
370 */
371static void bfq_insert(struct rb_root *root, struct bfq_entity *entity)
372{
373	struct bfq_entity *entry;
374	struct rb_node **node = &root->rb_node;
375	struct rb_node *parent = NULL;
376
377	while (*node) {
378		parent = *node;
379		entry = rb_entry(parent, struct bfq_entity, rb_node);
380
381		if (bfq_gt(entry->finish, entity->finish))
382			node = &parent->rb_left;
383		else
384			node = &parent->rb_right;
385	}
386
387	rb_link_node(&entity->rb_node, parent, node);
388	rb_insert_color(&entity->rb_node, root);
389
390	entity->tree = root;
391}
392
393/**
394 * bfq_update_min - update the min_start field of a entity.
395 * @entity: the entity to update.
396 * @node: one of its children.
397 *
398 * This function is called when @entity may store an invalid value for
399 * min_start due to updates to the active tree.  The function  assumes
400 * that the subtree rooted at @node (which may be its left or its right
401 * child) has a valid min_start value.
402 */
403static void bfq_update_min(struct bfq_entity *entity, struct rb_node *node)
404{
405	struct bfq_entity *child;
406
407	if (node) {
408		child = rb_entry(node, struct bfq_entity, rb_node);
409		if (bfq_gt(entity->min_start, child->min_start))
410			entity->min_start = child->min_start;
411	}
412}
413
414/**
415 * bfq_update_active_node - recalculate min_start.
416 * @node: the node to update.
417 *
418 * @node may have changed position or one of its children may have moved,
419 * this function updates its min_start value.  The left and right subtrees
420 * are assumed to hold a correct min_start value.
421 */
422static void bfq_update_active_node(struct rb_node *node)
423{
424	struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node);
425
426	entity->min_start = entity->start;
427	bfq_update_min(entity, node->rb_right);
428	bfq_update_min(entity, node->rb_left);
429}
430
431/**
432 * bfq_update_active_tree - update min_start for the whole active tree.
433 * @node: the starting node.
434 *
435 * @node must be the deepest modified node after an update.  This function
436 * updates its min_start using the values held by its children, assuming
437 * that they did not change, and then updates all the nodes that may have
438 * changed in the path to the root.  The only nodes that may have changed
439 * are the ones in the path or their siblings.
440 */
441static void bfq_update_active_tree(struct rb_node *node)
442{
443	struct rb_node *parent;
444
445up:
446	bfq_update_active_node(node);
447
448	parent = rb_parent(node);
449	if (!parent)
450		return;
451
452	if (node == parent->rb_left && parent->rb_right)
453		bfq_update_active_node(parent->rb_right);
454	else if (parent->rb_left)
455		bfq_update_active_node(parent->rb_left);
456
457	node = parent;
458	goto up;
459}
460
461/**
462 * bfq_active_insert - insert an entity in the active tree of its
463 *                     group/device.
464 * @st: the service tree of the entity.
465 * @entity: the entity being inserted.
466 *
467 * The active tree is ordered by finish time, but an extra key is kept
468 * per each node, containing the minimum value for the start times of
469 * its children (and the node itself), so it's possible to search for
470 * the eligible node with the lowest finish time in logarithmic time.
471 */
472static void bfq_active_insert(struct bfq_service_tree *st,
473			      struct bfq_entity *entity)
474{
475	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
476	struct rb_node *node = &entity->rb_node;
477#ifdef CONFIG_BFQ_GROUP_IOSCHED
478	struct bfq_sched_data *sd = NULL;
479	struct bfq_group *bfqg = NULL;
480	struct bfq_data *bfqd = NULL;
481#endif
482
483	bfq_insert(&st->active, entity);
484
485	if (node->rb_left)
486		node = node->rb_left;
487	else if (node->rb_right)
488		node = node->rb_right;
489
490	bfq_update_active_tree(node);
491
492#ifdef CONFIG_BFQ_GROUP_IOSCHED
493	sd = entity->sched_data;
494	bfqg = container_of(sd, struct bfq_group, sched_data);
495	bfqd = (struct bfq_data *)bfqg->bfqd;
496#endif
497	if (bfqq)
498		list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list);
499#ifdef CONFIG_BFQ_GROUP_IOSCHED
500	if (bfqg != bfqd->root_group)
501		bfqg->active_entities++;
502#endif
503}
504
505/**
506 * bfq_ioprio_to_weight - calc a weight from an ioprio.
507 * @ioprio: the ioprio value to convert.
508 */
509unsigned short bfq_ioprio_to_weight(int ioprio)
510{
511	return (IOPRIO_BE_NR - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF;
512}
513
514/**
515 * bfq_weight_to_ioprio - calc an ioprio from a weight.
516 * @weight: the weight value to convert.
517 *
518 * To preserve as much as possible the old only-ioprio user interface,
519 * 0 is used as an escape ioprio value for weights (numerically) equal or
520 * larger than IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF.
521 */
522static unsigned short bfq_weight_to_ioprio(int weight)
523{
524	return max_t(int, 0,
525		     IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF - weight);
526}
527
528static void bfq_get_entity(struct bfq_entity *entity)
529{
530	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
531
532	if (bfqq) {
533		bfqq->ref++;
534		bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
535			     bfqq, bfqq->ref);
536	}
537}
538
539/**
540 * bfq_find_deepest - find the deepest node that an extraction can modify.
541 * @node: the node being removed.
542 *
543 * Do the first step of an extraction in an rb tree, looking for the
544 * node that will replace @node, and returning the deepest node that
545 * the following modifications to the tree can touch.  If @node is the
546 * last node in the tree return %NULL.
547 */
548static struct rb_node *bfq_find_deepest(struct rb_node *node)
549{
550	struct rb_node *deepest;
551
552	if (!node->rb_right && !node->rb_left)
553		deepest = rb_parent(node);
554	else if (!node->rb_right)
555		deepest = node->rb_left;
556	else if (!node->rb_left)
557		deepest = node->rb_right;
558	else {
559		deepest = rb_next(node);
560		if (deepest->rb_right)
561			deepest = deepest->rb_right;
562		else if (rb_parent(deepest) != node)
563			deepest = rb_parent(deepest);
564	}
565
566	return deepest;
567}
568
569/**
570 * bfq_active_extract - remove an entity from the active tree.
571 * @st: the service_tree containing the tree.
572 * @entity: the entity being removed.
573 */
574static void bfq_active_extract(struct bfq_service_tree *st,
575			       struct bfq_entity *entity)
576{
577	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
578	struct rb_node *node;
579#ifdef CONFIG_BFQ_GROUP_IOSCHED
580	struct bfq_sched_data *sd = NULL;
581	struct bfq_group *bfqg = NULL;
582	struct bfq_data *bfqd = NULL;
583#endif
584
585	node = bfq_find_deepest(&entity->rb_node);
586	bfq_extract(&st->active, entity);
587
588	if (node)
589		bfq_update_active_tree(node);
590
591#ifdef CONFIG_BFQ_GROUP_IOSCHED
592	sd = entity->sched_data;
593	bfqg = container_of(sd, struct bfq_group, sched_data);
594	bfqd = (struct bfq_data *)bfqg->bfqd;
595#endif
596	if (bfqq)
597		list_del(&bfqq->bfqq_list);
598#ifdef CONFIG_BFQ_GROUP_IOSCHED
599	if (bfqg != bfqd->root_group)
600		bfqg->active_entities--;
601#endif
602}
603
604/**
605 * bfq_idle_insert - insert an entity into the idle tree.
606 * @st: the service tree containing the tree.
607 * @entity: the entity to insert.
608 */
609static void bfq_idle_insert(struct bfq_service_tree *st,
610			    struct bfq_entity *entity)
611{
612	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
613	struct bfq_entity *first_idle = st->first_idle;
614	struct bfq_entity *last_idle = st->last_idle;
615
616	if (!first_idle || bfq_gt(first_idle->finish, entity->finish))
617		st->first_idle = entity;
618	if (!last_idle || bfq_gt(entity->finish, last_idle->finish))
619		st->last_idle = entity;
620
621	bfq_insert(&st->idle, entity);
622
623	if (bfqq)
624		list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list);
625}
626
627/**
628 * bfq_forget_entity - do not consider entity any longer for scheduling
629 * @st: the service tree.
630 * @entity: the entity being removed.
631 * @is_in_service: true if entity is currently the in-service entity.
632 *
633 * Forget everything about @entity. In addition, if entity represents
634 * a queue, and the latter is not in service, then release the service
635 * reference to the queue (the one taken through bfq_get_entity). In
636 * fact, in this case, there is really no more service reference to
637 * the queue, as the latter is also outside any service tree. If,
638 * instead, the queue is in service, then __bfq_bfqd_reset_in_service
639 * will take care of putting the reference when the queue finally
640 * stops being served.
641 */
642static void bfq_forget_entity(struct bfq_service_tree *st,
643			      struct bfq_entity *entity,
644			      bool is_in_service)
645{
646	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
647
648	entity->on_st_or_in_serv = false;
649	st->wsum -= entity->weight;
650	if (bfqq && !is_in_service)
651		bfq_put_queue(bfqq);
652}
653
654/**
655 * bfq_put_idle_entity - release the idle tree ref of an entity.
656 * @st: service tree for the entity.
657 * @entity: the entity being released.
658 */
659void bfq_put_idle_entity(struct bfq_service_tree *st, struct bfq_entity *entity)
660{
661	bfq_idle_extract(st, entity);
662	bfq_forget_entity(st, entity,
663			  entity == entity->sched_data->in_service_entity);
664}
665
666/**
667 * bfq_forget_idle - update the idle tree if necessary.
668 * @st: the service tree to act upon.
669 *
670 * To preserve the global O(log N) complexity we only remove one entry here;
671 * as the idle tree will not grow indefinitely this can be done safely.
672 */
673static void bfq_forget_idle(struct bfq_service_tree *st)
674{
675	struct bfq_entity *first_idle = st->first_idle;
676	struct bfq_entity *last_idle = st->last_idle;
677
678	if (RB_EMPTY_ROOT(&st->active) && last_idle &&
679	    !bfq_gt(last_idle->finish, st->vtime)) {
680		/*
681		 * Forget the whole idle tree, increasing the vtime past
682		 * the last finish time of idle entities.
683		 */
684		st->vtime = last_idle->finish;
685	}
686
687	if (first_idle && !bfq_gt(first_idle->finish, st->vtime))
688		bfq_put_idle_entity(st, first_idle);
689}
690
691struct bfq_service_tree *bfq_entity_service_tree(struct bfq_entity *entity)
692{
693	struct bfq_sched_data *sched_data = entity->sched_data;
694	unsigned int idx = bfq_class_idx(entity);
695
696	return sched_data->service_tree + idx;
697}
698
699/*
700 * Update weight and priority of entity. If update_class_too is true,
701 * then update the ioprio_class of entity too.
702 *
703 * The reason why the update of ioprio_class is controlled through the
704 * last parameter is as follows. Changing the ioprio class of an
705 * entity implies changing the destination service trees for that
706 * entity. If such a change occurred when the entity is already on one
707 * of the service trees for its previous class, then the state of the
708 * entity would become more complex: none of the new possible service
709 * trees for the entity, according to bfq_entity_service_tree(), would
710 * match any of the possible service trees on which the entity
711 * is. Complex operations involving these trees, such as entity
712 * activations and deactivations, should take into account this
713 * additional complexity.  To avoid this issue, this function is
714 * invoked with update_class_too unset in the points in the code where
715 * entity may happen to be on some tree.
716 */
717struct bfq_service_tree *
718__bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
719				struct bfq_entity *entity,
720				bool update_class_too)
721{
722	struct bfq_service_tree *new_st = old_st;
723
724	if (entity->prio_changed) {
725		struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
726		unsigned int prev_weight, new_weight;
727		struct bfq_data *bfqd = NULL;
728		struct rb_root_cached *root;
729#ifdef CONFIG_BFQ_GROUP_IOSCHED
730		struct bfq_sched_data *sd;
731		struct bfq_group *bfqg;
732#endif
733
734		if (bfqq)
735			bfqd = bfqq->bfqd;
736#ifdef CONFIG_BFQ_GROUP_IOSCHED
737		else {
738			sd = entity->my_sched_data;
739			bfqg = container_of(sd, struct bfq_group, sched_data);
740			bfqd = (struct bfq_data *)bfqg->bfqd;
741		}
742#endif
743
744		/* Matches the smp_wmb() in bfq_group_set_weight. */
745		smp_rmb();
746		old_st->wsum -= entity->weight;
747
748		if (entity->new_weight != entity->orig_weight) {
749			if (entity->new_weight < BFQ_MIN_WEIGHT ||
750			    entity->new_weight > BFQ_MAX_WEIGHT) {
751				pr_crit("update_weight_prio: new_weight %d\n",
752					entity->new_weight);
753				if (entity->new_weight < BFQ_MIN_WEIGHT)
754					entity->new_weight = BFQ_MIN_WEIGHT;
755				else
756					entity->new_weight = BFQ_MAX_WEIGHT;
757			}
758			entity->orig_weight = entity->new_weight;
759			if (bfqq)
760				bfqq->ioprio =
761				  bfq_weight_to_ioprio(entity->orig_weight);
762		}
763
764		if (bfqq && update_class_too)
765			bfqq->ioprio_class = bfqq->new_ioprio_class;
766
767		/*
768		 * Reset prio_changed only if the ioprio_class change
769		 * is not pending any longer.
770		 */
771		if (!bfqq || bfqq->ioprio_class == bfqq->new_ioprio_class)
772			entity->prio_changed = 0;
773
774		/*
775		 * NOTE: here we may be changing the weight too early,
776		 * this will cause unfairness.  The correct approach
777		 * would have required additional complexity to defer
778		 * weight changes to the proper time instants (i.e.,
779		 * when entity->finish <= old_st->vtime).
780		 */
781		new_st = bfq_entity_service_tree(entity);
782
783		prev_weight = entity->weight;
784		new_weight = entity->orig_weight *
785			     (bfqq ? bfqq->wr_coeff : 1);
786		/*
787		 * If the weight of the entity changes, and the entity is a
788		 * queue, remove the entity from its old weight counter (if
789		 * there is a counter associated with the entity).
790		 */
791		if (prev_weight != new_weight && bfqq) {
792			root = &bfqd->queue_weights_tree;
793			__bfq_weights_tree_remove(bfqd, bfqq, root);
794		}
795		entity->weight = new_weight;
796		/*
797		 * Add the entity, if it is not a weight-raised queue,
798		 * to the counter associated with its new weight.
799		 */
800		if (prev_weight != new_weight && bfqq && bfqq->wr_coeff == 1) {
801			/* If we get here, root has been initialized. */
802			bfq_weights_tree_add(bfqd, bfqq, root);
803		}
804
805		new_st->wsum += entity->weight;
806
807		if (new_st != old_st)
808			entity->start = new_st->vtime;
809	}
810
811	return new_st;
812}
813
814/**
815 * bfq_bfqq_served - update the scheduler status after selection for
816 *                   service.
817 * @bfqq: the queue being served.
818 * @served: bytes to transfer.
819 *
820 * NOTE: this can be optimized, as the timestamps of upper level entities
821 * are synchronized every time a new bfqq is selected for service.  By now,
822 * we keep it to better check consistency.
823 */
824void bfq_bfqq_served(struct bfq_queue *bfqq, int served)
825{
826	struct bfq_entity *entity = &bfqq->entity;
827	struct bfq_service_tree *st;
828
829	if (!bfqq->service_from_backlogged)
830		bfqq->first_IO_time = jiffies;
831
832	if (bfqq->wr_coeff > 1)
833		bfqq->service_from_wr += served;
834
835	bfqq->service_from_backlogged += served;
836	for_each_entity(entity) {
837		st = bfq_entity_service_tree(entity);
838
839		entity->service += served;
840
841		st->vtime += bfq_delta(served, st->wsum);
842		bfq_forget_idle(st);
843	}
844	bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %d secs", served);
845}
846
847/**
848 * bfq_bfqq_charge_time - charge an amount of service equivalent to the length
849 *			  of the time interval during which bfqq has been in
850 *			  service.
851 * @bfqd: the device
852 * @bfqq: the queue that needs a service update.
853 * @time_ms: the amount of time during which the queue has received service
854 *
855 * If a queue does not consume its budget fast enough, then providing
856 * the queue with service fairness may impair throughput, more or less
857 * severely. For this reason, queues that consume their budget slowly
858 * are provided with time fairness instead of service fairness. This
859 * goal is achieved through the BFQ scheduling engine, even if such an
860 * engine works in the service, and not in the time domain. The trick
861 * is charging these queues with an inflated amount of service, equal
862 * to the amount of service that they would have received during their
863 * service slot if they had been fast, i.e., if their requests had
864 * been dispatched at a rate equal to the estimated peak rate.
865 *
866 * It is worth noting that time fairness can cause important
867 * distortions in terms of bandwidth distribution, on devices with
868 * internal queueing. The reason is that I/O requests dispatched
869 * during the service slot of a queue may be served after that service
870 * slot is finished, and may have a total processing time loosely
871 * correlated with the duration of the service slot. This is
872 * especially true for short service slots.
873 */
874void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq,
875			  unsigned long time_ms)
876{
877	struct bfq_entity *entity = &bfqq->entity;
878	unsigned long timeout_ms = jiffies_to_msecs(bfq_timeout);
879	unsigned long bounded_time_ms = min(time_ms, timeout_ms);
880	int serv_to_charge_for_time =
881		(bfqd->bfq_max_budget * bounded_time_ms) / timeout_ms;
882	int tot_serv_to_charge = max(serv_to_charge_for_time, entity->service);
883
884	/* Increase budget to avoid inconsistencies */
885	if (tot_serv_to_charge > entity->budget)
886		entity->budget = tot_serv_to_charge;
887
888	bfq_bfqq_served(bfqq,
889			max_t(int, 0, tot_serv_to_charge - entity->service));
890}
891
892static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
893					struct bfq_service_tree *st,
894					bool backshifted)
895{
896	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
897
898	/*
899	 * When this function is invoked, entity is not in any service
900	 * tree, then it is safe to invoke next function with the last
901	 * parameter set (see the comments on the function).
902	 */
903	st = __bfq_entity_update_weight_prio(st, entity, true);
904	bfq_calc_finish(entity, entity->budget);
905
906	/*
907	 * If some queues enjoy backshifting for a while, then their
908	 * (virtual) finish timestamps may happen to become lower and
909	 * lower than the system virtual time.	In particular, if
910	 * these queues often happen to be idle for short time
911	 * periods, and during such time periods other queues with
912	 * higher timestamps happen to be busy, then the backshifted
913	 * timestamps of the former queues can become much lower than
914	 * the system virtual time. In fact, to serve the queues with
915	 * higher timestamps while the ones with lower timestamps are
916	 * idle, the system virtual time may be pushed-up to much
917	 * higher values than the finish timestamps of the idle
918	 * queues. As a consequence, the finish timestamps of all new
919	 * or newly activated queues may end up being much larger than
920	 * those of lucky queues with backshifted timestamps. The
921	 * latter queues may then monopolize the device for a lot of
922	 * time. This would simply break service guarantees.
923	 *
924	 * To reduce this problem, push up a little bit the
925	 * backshifted timestamps of the queue associated with this
926	 * entity (only a queue can happen to have the backshifted
927	 * flag set): just enough to let the finish timestamp of the
928	 * queue be equal to the current value of the system virtual
929	 * time. This may introduce a little unfairness among queues
930	 * with backshifted timestamps, but it does not break
931	 * worst-case fairness guarantees.
932	 *
933	 * As a special case, if bfqq is weight-raised, push up
934	 * timestamps much less, to keep very low the probability that
935	 * this push up causes the backshifted finish timestamps of
936	 * weight-raised queues to become higher than the backshifted
937	 * finish timestamps of non weight-raised queues.
938	 */
939	if (backshifted && bfq_gt(st->vtime, entity->finish)) {
940		unsigned long delta = st->vtime - entity->finish;
941
942		if (bfqq)
943			delta /= bfqq->wr_coeff;
944
945		entity->start += delta;
946		entity->finish += delta;
947	}
948
949	bfq_active_insert(st, entity);
950}
951
952/**
953 * __bfq_activate_entity - handle activation of entity.
954 * @entity: the entity being activated.
955 * @non_blocking_wait_rq: true if entity was waiting for a request
956 *
957 * Called for a 'true' activation, i.e., if entity is not active and
958 * one of its children receives a new request.
959 *
960 * Basically, this function updates the timestamps of entity and
961 * inserts entity into its active tree, after possibly extracting it
962 * from its idle tree.
963 */
964static void __bfq_activate_entity(struct bfq_entity *entity,
965				  bool non_blocking_wait_rq)
966{
967	struct bfq_service_tree *st = bfq_entity_service_tree(entity);
968	bool backshifted = false;
969	unsigned long long min_vstart;
970
971	/* See comments on bfq_fqq_update_budg_for_activation */
972	if (non_blocking_wait_rq && bfq_gt(st->vtime, entity->finish)) {
973		backshifted = true;
974		min_vstart = entity->finish;
975	} else
976		min_vstart = st->vtime;
977
978	if (entity->tree == &st->idle) {
979		/*
980		 * Must be on the idle tree, bfq_idle_extract() will
981		 * check for that.
982		 */
983		bfq_idle_extract(st, entity);
984		entity->start = bfq_gt(min_vstart, entity->finish) ?
985			min_vstart : entity->finish;
986	} else {
987		/*
988		 * The finish time of the entity may be invalid, and
989		 * it is in the past for sure, otherwise the queue
990		 * would have been on the idle tree.
991		 */
992		entity->start = min_vstart;
993		st->wsum += entity->weight;
994		/*
995		 * entity is about to be inserted into a service tree,
996		 * and then set in service: get a reference to make
997		 * sure entity does not disappear until it is no
998		 * longer in service or scheduled for service.
999		 */
1000		bfq_get_entity(entity);
1001
1002		entity->on_st_or_in_serv = true;
1003	}
1004
1005#ifdef CONFIG_BFQ_GROUP_IOSCHED
1006	if (!bfq_entity_to_bfqq(entity)) { /* bfq_group */
1007		struct bfq_group *bfqg =
1008			container_of(entity, struct bfq_group, entity);
1009		struct bfq_data *bfqd = bfqg->bfqd;
1010
1011		if (!entity->in_groups_with_pending_reqs) {
1012			entity->in_groups_with_pending_reqs = true;
1013			bfqd->num_groups_with_pending_reqs++;
1014		}
1015	}
1016#endif
1017
1018	bfq_update_fin_time_enqueue(entity, st, backshifted);
1019}
1020
1021/**
1022 * __bfq_requeue_entity - handle requeueing or repositioning of an entity.
1023 * @entity: the entity being requeued or repositioned.
1024 *
1025 * Requeueing is needed if this entity stops being served, which
1026 * happens if a leaf descendant entity has expired. On the other hand,
1027 * repositioning is needed if the next_inservice_entity for the child
1028 * entity has changed. See the comments inside the function for
1029 * details.
1030 *
1031 * Basically, this function: 1) removes entity from its active tree if
1032 * present there, 2) updates the timestamps of entity and 3) inserts
1033 * entity back into its active tree (in the new, right position for
1034 * the new values of the timestamps).
1035 */
1036static void __bfq_requeue_entity(struct bfq_entity *entity)
1037{
1038	struct bfq_sched_data *sd = entity->sched_data;
1039	struct bfq_service_tree *st = bfq_entity_service_tree(entity);
1040
1041	if (entity == sd->in_service_entity) {
1042		/*
1043		 * We are requeueing the current in-service entity,
1044		 * which may have to be done for one of the following
1045		 * reasons:
1046		 * - entity represents the in-service queue, and the
1047		 *   in-service queue is being requeued after an
1048		 *   expiration;
1049		 * - entity represents a group, and its budget has
1050		 *   changed because one of its child entities has
1051		 *   just been either activated or requeued for some
1052		 *   reason; the timestamps of the entity need then to
1053		 *   be updated, and the entity needs to be enqueued
1054		 *   or repositioned accordingly.
1055		 *
1056		 * In particular, before requeueing, the start time of
1057		 * the entity must be moved forward to account for the
1058		 * service that the entity has received while in
1059		 * service. This is done by the next instructions. The
1060		 * finish time will then be updated according to this
1061		 * new value of the start time, and to the budget of
1062		 * the entity.
1063		 */
1064		bfq_calc_finish(entity, entity->service);
1065		entity->start = entity->finish;
1066		/*
1067		 * In addition, if the entity had more than one child
1068		 * when set in service, then it was not extracted from
1069		 * the active tree. This implies that the position of
1070		 * the entity in the active tree may need to be
1071		 * changed now, because we have just updated the start
1072		 * time of the entity, and we will update its finish
1073		 * time in a moment (the requeueing is then, more
1074		 * precisely, a repositioning in this case). To
1075		 * implement this repositioning, we: 1) dequeue the
1076		 * entity here, 2) update the finish time and requeue
1077		 * the entity according to the new timestamps below.
1078		 */
1079		if (entity->tree)
1080			bfq_active_extract(st, entity);
1081	} else { /* The entity is already active, and not in service */
1082		/*
1083		 * In this case, this function gets called only if the
1084		 * next_in_service entity below this entity has
1085		 * changed, and this change has caused the budget of
1086		 * this entity to change, which, finally implies that
1087		 * the finish time of this entity must be
1088		 * updated. Such an update may cause the scheduling,
1089		 * i.e., the position in the active tree, of this
1090		 * entity to change. We handle this change by: 1)
1091		 * dequeueing the entity here, 2) updating the finish
1092		 * time and requeueing the entity according to the new
1093		 * timestamps below. This is the same approach as the
1094		 * non-extracted-entity sub-case above.
1095		 */
1096		bfq_active_extract(st, entity);
1097	}
1098
1099	bfq_update_fin_time_enqueue(entity, st, false);
1100}
1101
1102static void __bfq_activate_requeue_entity(struct bfq_entity *entity,
1103					  struct bfq_sched_data *sd,
1104					  bool non_blocking_wait_rq)
1105{
1106	struct bfq_service_tree *st = bfq_entity_service_tree(entity);
1107
1108	if (sd->in_service_entity == entity || entity->tree == &st->active)
1109		 /*
1110		  * in service or already queued on the active tree,
1111		  * requeue or reposition
1112		  */
1113		__bfq_requeue_entity(entity);
1114	else
1115		/*
1116		 * Not in service and not queued on its active tree:
1117		 * the activity is idle and this is a true activation.
1118		 */
1119		__bfq_activate_entity(entity, non_blocking_wait_rq);
1120}
1121
1122
1123/**
1124 * bfq_activate_requeue_entity - activate or requeue an entity representing a
1125 *				 bfq_queue, and activate, requeue or reposition
1126 *				 all ancestors for which such an update becomes
1127 *				 necessary.
1128 * @entity: the entity to activate.
1129 * @non_blocking_wait_rq: true if this entity was waiting for a request
1130 * @requeue: true if this is a requeue, which implies that bfqq is
1131 *	     being expired; thus ALL its ancestors stop being served and must
1132 *	     therefore be requeued
1133 * @expiration: true if this function is being invoked in the expiration path
1134 *             of the in-service queue
1135 */
1136static void bfq_activate_requeue_entity(struct bfq_entity *entity,
1137					bool non_blocking_wait_rq,
1138					bool requeue, bool expiration)
1139{
1140	struct bfq_sched_data *sd;
1141
1142	for_each_entity(entity) {
1143		sd = entity->sched_data;
1144		__bfq_activate_requeue_entity(entity, sd, non_blocking_wait_rq);
1145
1146		if (!bfq_update_next_in_service(sd, entity, expiration) &&
1147		    !requeue)
1148			break;
1149	}
1150}
1151
1152/**
1153 * __bfq_deactivate_entity - update sched_data and service trees for
1154 * entity, so as to represent entity as inactive
1155 * @entity: the entity being deactivated.
1156 * @ins_into_idle_tree: if false, the entity will not be put into the
1157 *			idle tree.
1158 *
1159 * If necessary and allowed, puts entity into the idle tree. NOTE:
1160 * entity may be on no tree if in service.
1161 */
1162bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
1163{
1164	struct bfq_sched_data *sd = entity->sched_data;
1165	struct bfq_service_tree *st;
1166	bool is_in_service;
1167
1168	if (!entity->on_st_or_in_serv) /*
1169					* entity never activated, or
1170					* already inactive
1171					*/
1172		return false;
1173
1174	/*
1175	 * If we get here, then entity is active, which implies that
1176	 * bfq_group_set_parent has already been invoked for the group
1177	 * represented by entity. Therefore, the field
1178	 * entity->sched_data has been set, and we can safely use it.
1179	 */
1180	st = bfq_entity_service_tree(entity);
1181	is_in_service = entity == sd->in_service_entity;
1182
1183	bfq_calc_finish(entity, entity->service);
1184
1185	if (is_in_service)
1186		sd->in_service_entity = NULL;
1187	else
1188		/*
1189		 * Non in-service entity: nobody will take care of
1190		 * resetting its service counter on expiration. Do it
1191		 * now.
1192		 */
1193		entity->service = 0;
1194
1195	if (entity->tree == &st->active)
1196		bfq_active_extract(st, entity);
1197	else if (!is_in_service && entity->tree == &st->idle)
1198		bfq_idle_extract(st, entity);
1199
1200	if (!ins_into_idle_tree || !bfq_gt(entity->finish, st->vtime))
1201		bfq_forget_entity(st, entity, is_in_service);
1202	else
1203		bfq_idle_insert(st, entity);
1204
1205	return true;
1206}
1207
1208/**
1209 * bfq_deactivate_entity - deactivate an entity representing a bfq_queue.
1210 * @entity: the entity to deactivate.
1211 * @ins_into_idle_tree: true if the entity can be put into the idle tree
1212 * @expiration: true if this function is being invoked in the expiration path
1213 *             of the in-service queue
1214 */
1215static void bfq_deactivate_entity(struct bfq_entity *entity,
1216				  bool ins_into_idle_tree,
1217				  bool expiration)
1218{
1219	struct bfq_sched_data *sd;
1220	struct bfq_entity *parent = NULL;
1221
1222	for_each_entity_safe(entity, parent) {
1223		sd = entity->sched_data;
1224
1225		if (!__bfq_deactivate_entity(entity, ins_into_idle_tree)) {
1226			/*
1227			 * entity is not in any tree any more, so
1228			 * this deactivation is a no-op, and there is
1229			 * nothing to change for upper-level entities
1230			 * (in case of expiration, this can never
1231			 * happen).
1232			 */
1233			return;
1234		}
1235
1236		if (sd->next_in_service == entity)
1237			/*
1238			 * entity was the next_in_service entity,
1239			 * then, since entity has just been
1240			 * deactivated, a new one must be found.
1241			 */
1242			bfq_update_next_in_service(sd, NULL, expiration);
1243
1244		if (sd->next_in_service || sd->in_service_entity) {
1245			/*
1246			 * The parent entity is still active, because
1247			 * either next_in_service or in_service_entity
1248			 * is not NULL. So, no further upwards
1249			 * deactivation must be performed.  Yet,
1250			 * next_in_service has changed.	Then the
1251			 * schedule does need to be updated upwards.
1252			 *
1253			 * NOTE If in_service_entity is not NULL, then
1254			 * next_in_service may happen to be NULL,
1255			 * although the parent entity is evidently
1256			 * active. This happens if 1) the entity
1257			 * pointed by in_service_entity is the only
1258			 * active entity in the parent entity, and 2)
1259			 * according to the definition of
1260			 * next_in_service, the in_service_entity
1261			 * cannot be considered as
1262			 * next_in_service. See the comments on the
1263			 * definition of next_in_service for details.
1264			 */
1265			break;
1266		}
1267
1268		/*
1269		 * If we get here, then the parent is no more
1270		 * backlogged and we need to propagate the
1271		 * deactivation upwards. Thus let the loop go on.
1272		 */
1273
1274		/*
1275		 * Also let parent be queued into the idle tree on
1276		 * deactivation, to preserve service guarantees, and
1277		 * assuming that who invoked this function does not
1278		 * need parent entities too to be removed completely.
1279		 */
1280		ins_into_idle_tree = true;
1281	}
1282
1283	/*
1284	 * If the deactivation loop is fully executed, then there are
1285	 * no more entities to touch and next loop is not executed at
1286	 * all. Otherwise, requeue remaining entities if they are
1287	 * about to stop receiving service, or reposition them if this
1288	 * is not the case.
1289	 */
1290	entity = parent;
1291	for_each_entity(entity) {
1292		/*
1293		 * Invoke __bfq_requeue_entity on entity, even if
1294		 * already active, to requeue/reposition it in the
1295		 * active tree (because sd->next_in_service has
1296		 * changed)
1297		 */
1298		__bfq_requeue_entity(entity);
1299
1300		sd = entity->sched_data;
1301		if (!bfq_update_next_in_service(sd, entity, expiration) &&
1302		    !expiration)
1303			/*
1304			 * next_in_service unchanged or not causing
1305			 * any change in entity->parent->sd, and no
1306			 * requeueing needed for expiration: stop
1307			 * here.
1308			 */
1309			break;
1310	}
1311}
1312
1313/**
1314 * bfq_calc_vtime_jump - compute the value to which the vtime should jump,
1315 *                       if needed, to have at least one entity eligible.
1316 * @st: the service tree to act upon.
1317 *
1318 * Assumes that st is not empty.
1319 */
1320static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st)
1321{
1322	struct bfq_entity *root_entity = bfq_root_active_entity(&st->active);
1323
1324	if (bfq_gt(root_entity->min_start, st->vtime))
1325		return root_entity->min_start;
1326
1327	return st->vtime;
1328}
1329
1330static void bfq_update_vtime(struct bfq_service_tree *st, u64 new_value)
1331{
1332	if (new_value > st->vtime) {
1333		st->vtime = new_value;
1334		bfq_forget_idle(st);
1335	}
1336}
1337
1338/**
1339 * bfq_first_active_entity - find the eligible entity with
1340 *                           the smallest finish time
1341 * @st: the service tree to select from.
1342 * @vtime: the system virtual to use as a reference for eligibility
1343 *
1344 * This function searches the first schedulable entity, starting from the
1345 * root of the tree and going on the left every time on this side there is
1346 * a subtree with at least one eligible (start <= vtime) entity. The path on
1347 * the right is followed only if a) the left subtree contains no eligible
1348 * entities and b) no eligible entity has been found yet.
1349 */
1350static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st,
1351						  u64 vtime)
1352{
1353	struct bfq_entity *entry, *first = NULL;
1354	struct rb_node *node = st->active.rb_node;
1355
1356	while (node) {
1357		entry = rb_entry(node, struct bfq_entity, rb_node);
1358left:
1359		if (!bfq_gt(entry->start, vtime))
1360			first = entry;
1361
1362		if (node->rb_left) {
1363			entry = rb_entry(node->rb_left,
1364					 struct bfq_entity, rb_node);
1365			if (!bfq_gt(entry->min_start, vtime)) {
1366				node = node->rb_left;
1367				goto left;
1368			}
1369		}
1370		if (first)
1371			break;
1372		node = node->rb_right;
1373	}
1374
1375	return first;
1376}
1377
1378/**
1379 * __bfq_lookup_next_entity - return the first eligible entity in @st.
1380 * @st: the service tree.
1381 *
1382 * If there is no in-service entity for the sched_data st belongs to,
1383 * then return the entity that will be set in service if:
1384 * 1) the parent entity this st belongs to is set in service;
1385 * 2) no entity belonging to such parent entity undergoes a state change
1386 * that would influence the timestamps of the entity (e.g., becomes idle,
1387 * becomes backlogged, changes its budget, ...).
1388 *
1389 * In this first case, update the virtual time in @st too (see the
1390 * comments on this update inside the function).
1391 *
1392 * In contrast, if there is an in-service entity, then return the
1393 * entity that would be set in service if not only the above
1394 * conditions, but also the next one held true: the currently
1395 * in-service entity, on expiration,
1396 * 1) gets a finish time equal to the current one, or
1397 * 2) is not eligible any more, or
1398 * 3) is idle.
1399 */
1400static struct bfq_entity *
1401__bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service)
1402{
1403	struct bfq_entity *entity;
1404	u64 new_vtime;
1405
1406	if (RB_EMPTY_ROOT(&st->active))
1407		return NULL;
1408
1409	/*
1410	 * Get the value of the system virtual time for which at
1411	 * least one entity is eligible.
1412	 */
1413	new_vtime = bfq_calc_vtime_jump(st);
1414
1415	/*
1416	 * If there is no in-service entity for the sched_data this
1417	 * active tree belongs to, then push the system virtual time
1418	 * up to the value that guarantees that at least one entity is
1419	 * eligible. If, instead, there is an in-service entity, then
1420	 * do not make any such update, because there is already an
1421	 * eligible entity, namely the in-service one (even if the
1422	 * entity is not on st, because it was extracted when set in
1423	 * service).
1424	 */
1425	if (!in_service)
1426		bfq_update_vtime(st, new_vtime);
1427
1428	entity = bfq_first_active_entity(st, new_vtime);
1429
1430	return entity;
1431}
1432
1433/**
1434 * bfq_lookup_next_entity - return the first eligible entity in @sd.
1435 * @sd: the sched_data.
1436 * @expiration: true if we are on the expiration path of the in-service queue
1437 *
1438 * This function is invoked when there has been a change in the trees
1439 * for sd, and we need to know what is the new next entity to serve
1440 * after this change.
1441 */
1442static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
1443						 bool expiration)
1444{
1445	struct bfq_service_tree *st = sd->service_tree;
1446	struct bfq_service_tree *idle_class_st = st + (BFQ_IOPRIO_CLASSES - 1);
1447	struct bfq_entity *entity = NULL;
1448	int class_idx = 0;
1449
1450	/*
1451	 * Choose from idle class, if needed to guarantee a minimum
1452	 * bandwidth to this class (and if there is some active entity
1453	 * in idle class). This should also mitigate
1454	 * priority-inversion problems in case a low priority task is
1455	 * holding file system resources.
1456	 */
1457	if (time_is_before_jiffies(sd->bfq_class_idle_last_service +
1458				   BFQ_CL_IDLE_TIMEOUT)) {
1459		if (!RB_EMPTY_ROOT(&idle_class_st->active))
1460			class_idx = BFQ_IOPRIO_CLASSES - 1;
1461		/* About to be served if backlogged, or not yet backlogged */
1462		sd->bfq_class_idle_last_service = jiffies;
1463	}
1464
1465	/*
1466	 * Find the next entity to serve for the highest-priority
1467	 * class, unless the idle class needs to be served.
1468	 */
1469	for (; class_idx < BFQ_IOPRIO_CLASSES; class_idx++) {
1470		/*
1471		 * If expiration is true, then bfq_lookup_next_entity
1472		 * is being invoked as a part of the expiration path
1473		 * of the in-service queue. In this case, even if
1474		 * sd->in_service_entity is not NULL,
1475		 * sd->in_service_entity at this point is actually not
1476		 * in service any more, and, if needed, has already
1477		 * been properly queued or requeued into the right
1478		 * tree. The reason why sd->in_service_entity is still
1479		 * not NULL here, even if expiration is true, is that
1480		 * sd->in_service_entity is reset as a last step in the
1481		 * expiration path. So, if expiration is true, tell
1482		 * __bfq_lookup_next_entity that there is no
1483		 * sd->in_service_entity.
1484		 */
1485		entity = __bfq_lookup_next_entity(st + class_idx,
1486						  sd->in_service_entity &&
1487						  !expiration);
1488
1489		if (entity)
1490			break;
1491	}
1492
1493	if (!entity)
1494		return NULL;
1495
1496	return entity;
1497}
1498
1499bool next_queue_may_preempt(struct bfq_data *bfqd)
1500{
1501	struct bfq_sched_data *sd = &bfqd->root_group->sched_data;
1502
1503	return sd->next_in_service != sd->in_service_entity;
1504}
1505
1506/*
1507 * Get next queue for service.
1508 */
1509struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
1510{
1511	struct bfq_entity *entity = NULL;
1512	struct bfq_sched_data *sd;
1513	struct bfq_queue *bfqq;
1514
1515	if (bfq_tot_busy_queues(bfqd) == 0)
1516		return NULL;
1517
1518	/*
1519	 * Traverse the path from the root to the leaf entity to
1520	 * serve. Set in service all the entities visited along the
1521	 * way.
1522	 */
1523	sd = &bfqd->root_group->sched_data;
1524	for (; sd ; sd = entity->my_sched_data) {
1525		/*
1526		 * WARNING. We are about to set the in-service entity
1527		 * to sd->next_in_service, i.e., to the (cached) value
1528		 * returned by bfq_lookup_next_entity(sd) the last
1529		 * time it was invoked, i.e., the last time when the
1530		 * service order in sd changed as a consequence of the
1531		 * activation or deactivation of an entity. In this
1532		 * respect, if we execute bfq_lookup_next_entity(sd)
1533		 * in this very moment, it may, although with low
1534		 * probability, yield a different entity than that
1535		 * pointed to by sd->next_in_service. This rare event
1536		 * happens in case there was no CLASS_IDLE entity to
1537		 * serve for sd when bfq_lookup_next_entity(sd) was
1538		 * invoked for the last time, while there is now one
1539		 * such entity.
1540		 *
1541		 * If the above event happens, then the scheduling of
1542		 * such entity in CLASS_IDLE is postponed until the
1543		 * service of the sd->next_in_service entity
1544		 * finishes. In fact, when the latter is expired,
1545		 * bfq_lookup_next_entity(sd) gets called again,
1546		 * exactly to update sd->next_in_service.
1547		 */
1548
1549		/* Make next_in_service entity become in_service_entity */
1550		entity = sd->next_in_service;
1551		sd->in_service_entity = entity;
1552
1553		/*
1554		 * If entity is no longer a candidate for next
1555		 * service, then it must be extracted from its active
1556		 * tree, so as to make sure that it won't be
1557		 * considered when computing next_in_service. See the
1558		 * comments on the function
1559		 * bfq_no_longer_next_in_service() for details.
1560		 */
1561		if (bfq_no_longer_next_in_service(entity))
1562			bfq_active_extract(bfq_entity_service_tree(entity),
1563					   entity);
1564
1565		/*
1566		 * Even if entity is not to be extracted according to
1567		 * the above check, a descendant entity may get
1568		 * extracted in one of the next iterations of this
1569		 * loop. Such an event could cause a change in
1570		 * next_in_service for the level of the descendant
1571		 * entity, and thus possibly back to this level.
1572		 *
1573		 * However, we cannot perform the resulting needed
1574		 * update of next_in_service for this level before the
1575		 * end of the whole loop, because, to know which is
1576		 * the correct next-to-serve candidate entity for each
1577		 * level, we need first to find the leaf entity to set
1578		 * in service. In fact, only after we know which is
1579		 * the next-to-serve leaf entity, we can discover
1580		 * whether the parent entity of the leaf entity
1581		 * becomes the next-to-serve, and so on.
1582		 */
1583	}
1584
1585	bfqq = bfq_entity_to_bfqq(entity);
1586
1587	/*
1588	 * We can finally update all next-to-serve entities along the
1589	 * path from the leaf entity just set in service to the root.
1590	 */
1591	for_each_entity(entity) {
1592		struct bfq_sched_data *sd = entity->sched_data;
1593
1594		if (!bfq_update_next_in_service(sd, NULL, false))
1595			break;
1596	}
1597
1598	return bfqq;
1599}
1600
1601/* returns true if the in-service queue gets freed */
1602bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
1603{
1604	struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue;
1605	struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity;
1606	struct bfq_entity *entity = in_serv_entity;
1607
1608	bfq_clear_bfqq_wait_request(in_serv_bfqq);
1609	hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
1610	bfqd->in_service_queue = NULL;
1611
1612	/*
1613	 * When this function is called, all in-service entities have
1614	 * been properly deactivated or requeued, so we can safely
1615	 * execute the final step: reset in_service_entity along the
1616	 * path from entity to the root.
1617	 */
1618	for_each_entity(entity)
1619		entity->sched_data->in_service_entity = NULL;
1620
1621	/*
1622	 * in_serv_entity is no longer in service, so, if it is in no
1623	 * service tree either, then release the service reference to
1624	 * the queue it represents (taken with bfq_get_entity).
1625	 */
1626	if (!in_serv_entity->on_st_or_in_serv) {
1627		/*
1628		 * If no process is referencing in_serv_bfqq any
1629		 * longer, then the service reference may be the only
1630		 * reference to the queue. If this is the case, then
1631		 * bfqq gets freed here.
1632		 */
1633		int ref = in_serv_bfqq->ref;
1634		bfq_put_queue(in_serv_bfqq);
1635		if (ref == 1)
1636			return true;
1637	}
1638
1639	return false;
1640}
1641
1642void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1643			 bool ins_into_idle_tree, bool expiration)
1644{
1645	struct bfq_entity *entity = &bfqq->entity;
1646
1647	bfq_deactivate_entity(entity, ins_into_idle_tree, expiration);
1648}
1649
1650void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1651{
1652	struct bfq_entity *entity = &bfqq->entity;
1653
1654	bfq_activate_requeue_entity(entity, bfq_bfqq_non_blocking_wait_rq(bfqq),
1655				    false, false);
1656	bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
1657}
1658
1659void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1660		      bool expiration)
1661{
1662	struct bfq_entity *entity = &bfqq->entity;
1663
1664	bfq_activate_requeue_entity(entity, false,
1665				    bfqq == bfqd->in_service_queue, expiration);
1666}
1667
1668/*
1669 * Called when the bfqq no longer has requests pending, remove it from
1670 * the service tree. As a special case, it can be invoked during an
1671 * expiration.
1672 */
1673void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1674		       bool expiration)
1675{
1676	bfq_log_bfqq(bfqd, bfqq, "del from busy");
1677
1678	bfq_clear_bfqq_busy(bfqq);
1679
1680	bfqd->busy_queues[bfqq->ioprio_class - 1]--;
1681
1682	if (bfqq->wr_coeff > 1)
1683		bfqd->wr_busy_queues--;
1684
1685	bfqg_stats_update_dequeue(bfqq_group(bfqq));
1686
1687	bfq_deactivate_bfqq(bfqd, bfqq, true, expiration);
1688
1689	if (!bfqq->dispatched)
1690		bfq_weights_tree_remove(bfqd, bfqq);
1691}
1692
1693/*
1694 * Called when an inactive queue receives a new request.
1695 */
1696void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1697{
1698	bfq_log_bfqq(bfqd, bfqq, "add to busy");
1699
1700	bfq_activate_bfqq(bfqd, bfqq);
1701
1702	bfq_mark_bfqq_busy(bfqq);
1703	bfqd->busy_queues[bfqq->ioprio_class - 1]++;
1704
1705	if (!bfqq->dispatched)
1706		if (bfqq->wr_coeff == 1)
1707			bfq_weights_tree_add(bfqd, bfqq,
1708					     &bfqd->queue_weights_tree);
1709
1710	if (bfqq->wr_coeff > 1)
1711		bfqd->wr_busy_queues++;
1712}
1713