xref: /kernel/linux/linux-5.10/block/bfq-cgroup.c (revision 8c2ecf20)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * cgroups support for the BFQ I/O scheduler.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "bfq-iosched.h"

#ifdef CONFIG_BFQ_CGROUP_DEBUG
static int bfq_stat_init(struct bfq_stat *stat, gfp_t gfp)
{
	int ret;

	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
	if (ret)
		return ret;

	atomic64_set(&stat->aux_cnt, 0);
	return 0;
}

static void bfq_stat_exit(struct bfq_stat *stat)
{
	percpu_counter_destroy(&stat->cpu_cnt);
}

/**
 * bfq_stat_add - add a value to a bfq_stat
 * @stat: target bfq_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller must ensure that IRQs on the same CPU
 * do not re-enter this function for the same counter.
 */
static inline void bfq_stat_add(struct bfq_stat *stat, uint64_t val)
{
	percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * bfq_stat_read - read the current value of a bfq_stat
 * @stat: bfq_stat to read
 */
static inline uint64_t bfq_stat_read(struct bfq_stat *stat)
{
	return percpu_counter_sum_positive(&stat->cpu_cnt);
}

/**
 * bfq_stat_reset - reset a bfq_stat
 * @stat: bfq_stat to reset
 */
static inline void bfq_stat_reset(struct bfq_stat *stat)
{
	percpu_counter_set(&stat->cpu_cnt, 0);
	atomic64_set(&stat->aux_cnt, 0);
}

/**
 * bfq_stat_add_aux - add a bfq_stat into another's aux count
 * @to: the destination bfq_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void bfq_stat_add_aux(struct bfq_stat *to,
				     struct bfq_stat *from)
{
	atomic64_add(bfq_stat_read(from) + atomic64_read(&from->aux_cnt),
		     &to->aux_cnt);
}
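
/*
 * For example: if @from's per-CPU counters sum to 100 and its aux_cnt
 * already holds 20 folded in from dead descendants, bfq_stat_add_aux()
 * adds 120 to @to->aux_cnt. A recursive reader that computes
 * bfq_stat_read() + aux_cnt therefore keeps seeing the full amount
 * even after @from is reset or freed.
 */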

/**
 * blkg_prfill_stat - prfill callback for bfq_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the bfq_stat in @pd
 *
 * prfill callback for printing a bfq_stat.
 */
static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
		int off)
{
	return __blkg_prfill_u64(sf, pd, bfq_stat_read((void *)pd + off));
}

/* bfqg stats flags */
enum bfqg_stats_flags {
	BFQG_stats_waiting = 0,
	BFQG_stats_idling,
	BFQG_stats_empty,
};

#define BFQG_FLAG_FNS(name)						\
static void bfqg_stats_mark_##name(struct bfqg_stats *stats)	\
{									\
	stats->flags |= (1 << BFQG_stats_##name);			\
}									\
static void bfqg_stats_clear_##name(struct bfqg_stats *stats)	\
{									\
	stats->flags &= ~(1 << BFQG_stats_##name);			\
}									\
static int bfqg_stats_##name(struct bfqg_stats *stats)		\
{									\
	return (stats->flags & (1 << BFQG_stats_##name)) != 0;		\
}									\

BFQG_FLAG_FNS(waiting)
BFQG_FLAG_FNS(idling)
BFQG_FLAG_FNS(empty)
#undef BFQG_FLAG_FNS
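
/*
 * For reference, BFQG_FLAG_FNS(waiting) expands to:
 *
 *	static void bfqg_stats_mark_waiting(struct bfqg_stats *stats)
 *	{
 *		stats->flags |= (1 << BFQG_stats_waiting);
 *	}
 *	static void bfqg_stats_clear_waiting(struct bfqg_stats *stats)
 *	{
 *		stats->flags &= ~(1 << BFQG_stats_waiting);
 *	}
 *	static int bfqg_stats_waiting(struct bfqg_stats *stats)
 *	{
 *		return (stats->flags & (1 << BFQG_stats_waiting)) != 0;
 *	}
 */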

/* This should be called with the scheduler lock held. */
static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
{
	u64 now;

	if (!bfqg_stats_waiting(stats))
		return;

	now = ktime_get_ns();
	if (now > stats->start_group_wait_time)
		bfq_stat_add(&stats->group_wait_time,
			      now - stats->start_group_wait_time);
	bfqg_stats_clear_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
						 struct bfq_group *curr_bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_waiting(stats))
		return;
	if (bfqg == curr_bfqg)
		return;
	stats->start_group_wait_time = ktime_get_ns();
	bfqg_stats_mark_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
{
	u64 now;

	if (!bfqg_stats_empty(stats))
		return;

	now = ktime_get_ns();
	if (now > stats->start_empty_time)
		bfq_stat_add(&stats->empty_time,
			      now - stats->start_empty_time);
	bfqg_stats_clear_empty(stats);
}

void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
{
	bfq_stat_add(&bfqg->stats.dequeue, 1);
}

void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (blkg_rwstat_total(&stats->queued))
		return;

	/*
	 * The group is already marked empty. This can happen if bfqq got a
	 * new request in its parent group and was moved to this group while
	 * being added to the service tree. Just ignore the event and move on.
	 */
	if (bfqg_stats_empty(stats))
		return;

	stats->start_empty_time = ktime_get_ns();
	bfqg_stats_mark_empty(stats);
}

void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_idling(stats)) {
		u64 now = ktime_get_ns();

		if (now > stats->start_idle_time)
			bfq_stat_add(&stats->idle_time,
				      now - stats->start_idle_time);
		bfqg_stats_clear_idling(stats);
	}
}

void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	stats->start_idle_time = ktime_get_ns();
	bfqg_stats_mark_idling(stats);
}

void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	bfq_stat_add(&stats->avg_queue_size_sum,
		      blkg_rwstat_total(&stats->queued));
	bfq_stat_add(&stats->avg_queue_size_samples, 1);
	bfqg_stats_update_group_wait_time(stats);
}

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.queued, op, 1);
	bfqg_stats_end_empty_time(&bfqg->stats);
	if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
		bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
}
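
/*
 * How group_wait_time accumulates, in short: when a request is queued
 * for a bfqq whose group is not the one currently in service,
 * bfqg_stats_set_start_group_wait_time() stamps start_group_wait_time
 * and marks the group as waiting. Once the scheduler later puts one of
 * the group's queues in service, bfqg_stats_update_avg_queue_size()
 * calls bfqg_stats_update_group_wait_time(), which folds the elapsed
 * time into group_wait_time and clears the waiting flag.
 */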

void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.queued, op, -1);
}

void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.merged, op, 1);
}

void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, unsigned int op)
{
	struct bfqg_stats *stats = &bfqg->stats;
	u64 now = ktime_get_ns();

	if (now > io_start_time_ns)
		blkg_rwstat_add(&stats->service_time, op,
				now - io_start_time_ns);
	if (io_start_time_ns > start_time_ns)
		blkg_rwstat_add(&stats->wait_time, op,
				io_start_time_ns - start_time_ns);
}

#else /* CONFIG_BFQ_CGROUP_DEBUG */

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      unsigned int op) { }
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, unsigned int op) { }
void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }

#endif /* CONFIG_BFQ_CGROUP_DEBUG */

#ifdef CONFIG_BFQ_GROUP_IOSCHED

/*
 * blk-cgroup policy-related handlers
 * The following functions help in converting between blk-cgroup
 * internal structures and BFQ-specific structures.
 */

static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct bfq_group, pd) : NULL;
}

struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
{
	return pd_to_blkg(&bfqg->pd);
}

static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
{
	return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
}

/*
 * bfq_group handlers
 * The following functions help in navigating the bfq_group hierarchy,
 * by making it possible to find the parent of a bfq_group, or the
 * bfq_group associated with a bfq_queue.
 */

static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
{
	struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;

	return pblkg ? blkg_to_bfqg(pblkg) : NULL;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	struct bfq_entity *group_entity = bfqq->entity.parent;

	return group_entity ? container_of(group_entity, struct bfq_group,
					   entity) :
			      bfqq->bfqd->root_group;
}

/*
 * The following two functions handle get and put of a bfq_group by
 * wrapping the related blk-cgroup hooks.
 */

static void bfqg_get(struct bfq_group *bfqg)
{
	bfqg->ref++;
}

static void bfqg_put(struct bfq_group *bfqg)
{
	bfqg->ref--;

	if (bfqg->ref == 0)
		kfree(bfqg);
}

static void bfqg_and_blkg_get(struct bfq_group *bfqg)
{
	/* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
	bfqg_get(bfqg);

	blkg_get(bfqg_to_blkg(bfqg));
}

void bfqg_and_blkg_put(struct bfq_group *bfqg)
{
	blkg_put(bfqg_to_blkg(bfqg));

	bfqg_put(bfqg);
}

void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq)
{
	struct bfq_group *bfqg = blkg_to_bfqg(rq->bio->bi_blkg);

	if (!bfqg)
		return;

	blkg_rwstat_add(&bfqg->stats.bytes, rq->cmd_flags, blk_rq_bytes(rq));
	blkg_rwstat_add(&bfqg->stats.ios, rq->cmd_flags, 1);
}

/* @stats = 0 */
static void bfqg_stats_reset(struct bfqg_stats *stats)
{
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	bfq_stat_reset(&stats->time);
	bfq_stat_reset(&stats->avg_queue_size_sum);
	bfq_stat_reset(&stats->avg_queue_size_samples);
	bfq_stat_reset(&stats->dequeue);
	bfq_stat_reset(&stats->group_wait_time);
	bfq_stat_reset(&stats->idle_time);
	bfq_stat_reset(&stats->empty_time);
#endif
}

/* @to += @from */
static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
{
	if (!to || !from)
		return;

#ifdef CONFIG_BFQ_CGROUP_DEBUG
	/* queued stats shouldn't be cleared */
	blkg_rwstat_add_aux(&to->merged, &from->merged);
	blkg_rwstat_add_aux(&to->service_time, &from->service_time);
	blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
	bfq_stat_add_aux(&to->time, &from->time);
	bfq_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
	bfq_stat_add_aux(&to->avg_queue_size_samples,
			  &from->avg_queue_size_samples);
	bfq_stat_add_aux(&to->dequeue, &from->dequeue);
	bfq_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
	bfq_stat_add_aux(&to->idle_time, &from->idle_time);
	bfq_stat_add_aux(&to->empty_time, &from->empty_time);
#endif
}

/*
 * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
 * recursive stats can still account for the amount used by this bfqg after
 * it's gone.
 */
static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
{
	struct bfq_group *parent;

	if (!bfqg) /* root_group */
		return;

	parent = bfqg_parent(bfqg);

	lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);

	if (unlikely(!parent))
		return;

	bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
	bfqg_stats_reset(&bfqg->stats);
}
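
/*
 * For example: if a group that accumulated 5 ms in its time stat is
 * removed, those 5 ms are folded into the parent's aux counters here,
 * so a recursive read on the parent (bfq_stat_read() + aux_cnt, as in
 * bfqg_prfill_stat_recursive()) still accounts for them.
 */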

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
		/*
		 * Make sure that bfqg and its associated blkg do not
		 * disappear before entity.
		 */
		bfqg_and_blkg_get(bfqg);
	}
	entity->parent = bfqg->my_entity; /* NULL for root group */
	entity->sched_data = &bfqg->sched_data;
}

static void bfqg_stats_exit(struct bfqg_stats *stats)
{
	blkg_rwstat_exit(&stats->bytes);
	blkg_rwstat_exit(&stats->ios);
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	blkg_rwstat_exit(&stats->merged);
	blkg_rwstat_exit(&stats->service_time);
	blkg_rwstat_exit(&stats->wait_time);
	blkg_rwstat_exit(&stats->queued);
	bfq_stat_exit(&stats->time);
	bfq_stat_exit(&stats->avg_queue_size_sum);
	bfq_stat_exit(&stats->avg_queue_size_samples);
	bfq_stat_exit(&stats->dequeue);
	bfq_stat_exit(&stats->group_wait_time);
	bfq_stat_exit(&stats->idle_time);
	bfq_stat_exit(&stats->empty_time);
#endif
}

static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
{
	if (blkg_rwstat_init(&stats->bytes, gfp) ||
	    blkg_rwstat_init(&stats->ios, gfp))
		goto error;

#ifdef CONFIG_BFQ_CGROUP_DEBUG
	if (blkg_rwstat_init(&stats->merged, gfp) ||
	    blkg_rwstat_init(&stats->service_time, gfp) ||
	    blkg_rwstat_init(&stats->wait_time, gfp) ||
	    blkg_rwstat_init(&stats->queued, gfp) ||
	    bfq_stat_init(&stats->time, gfp) ||
	    bfq_stat_init(&stats->avg_queue_size_sum, gfp) ||
	    bfq_stat_init(&stats->avg_queue_size_samples, gfp) ||
	    bfq_stat_init(&stats->dequeue, gfp) ||
	    bfq_stat_init(&stats->group_wait_time, gfp) ||
	    bfq_stat_init(&stats->idle_time, gfp) ||
	    bfq_stat_init(&stats->empty_time, gfp))
		goto error;
#endif

	return 0;

error:
	bfqg_stats_exit(stats);
	return -ENOMEM;
}
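
/*
 * Note on the error path above: bfqg_stats_exit() is safe to call on a
 * partially-initialized bfqg_stats, because the structure is zeroed on
 * allocation (see the kzalloc in bfq_pd_alloc()) and the underlying
 * percpu_counter_destroy() is a no-op for counters that were never
 * initialized.
 */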

static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
{
	return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
}

static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
{
	return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
}

static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
{
	struct bfq_group_data *bgd;

	bgd = kzalloc(sizeof(*bgd), gfp);
	if (!bgd)
		return NULL;
	return &bgd->pd;
}

static void bfq_cpd_init(struct blkcg_policy_data *cpd)
{
	struct bfq_group_data *d = cpd_to_bfqgd(cpd);

	d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
		CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL;
}

static void bfq_cpd_free(struct blkcg_policy_data *cpd)
{
	kfree(cpd_to_bfqgd(cpd));
}

static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, struct request_queue *q,
					     struct blkcg *blkcg)
{
	struct bfq_group *bfqg;

	bfqg = kzalloc_node(sizeof(*bfqg), gfp, q->node);
	if (!bfqg)
		return NULL;

	if (bfqg_stats_init(&bfqg->stats, gfp)) {
		kfree(bfqg);
		return NULL;
	}

	/* see comments in bfq_bic_update_cgroup for why refcounting */
	bfqg_get(bfqg);
	return &bfqg->pd;
}

static void bfq_pd_init(struct blkg_policy_data *pd)
{
	struct blkcg_gq *blkg = pd_to_blkg(pd);
	struct bfq_group *bfqg = blkg_to_bfqg(blkg);
	struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
	struct bfq_entity *entity = &bfqg->entity;
	struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);

	entity->orig_weight = entity->weight = entity->new_weight = d->weight;
	entity->my_sched_data = &bfqg->sched_data;
	bfqg->my_entity = entity; /*
				   * the root_group's will be set to NULL
				   * in bfq_init_queue()
				   */
	bfqg->bfqd = bfqd;
	bfqg->active_entities = 0;
	bfqg->online = true;
	bfqg->rq_pos_tree = RB_ROOT;
}

static void bfq_pd_free(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_exit(&bfqg->stats);
	bfqg_put(bfqg);
}

static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_reset(&bfqg->stats);
}

static void bfq_group_set_parent(struct bfq_group *bfqg,
					struct bfq_group *parent)
{
	struct bfq_entity *entity;

	entity = &bfqg->entity;
	entity->parent = parent->my_entity;
	entity->sched_data = &parent->sched_data;
}

static void bfq_link_bfqg(struct bfq_data *bfqd, struct bfq_group *bfqg)
{
	struct bfq_group *parent;
	struct bfq_entity *entity;

	/*
	 * Update chain of bfq_groups as we might be handling a leaf group
	 * which, along with some of its relatives, has not been hooked yet
	 * to the private hierarchy of BFQ.
	 */
	entity = &bfqg->entity;
	for_each_entity(entity) {
		struct bfq_group *curr_bfqg = container_of(entity,
						struct bfq_group, entity);
		if (curr_bfqg != bfqd->root_group) {
			parent = bfqg_parent(curr_bfqg);
			if (!parent)
				parent = bfqd->root_group;
			bfq_group_set_parent(curr_bfqg, parent);
		}
	}
}

struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
{
	struct blkcg_gq *blkg = bio->bi_blkg;
	struct bfq_group *bfqg;

	while (blkg) {
		if (!blkg->online) {
			blkg = blkg->parent;
			continue;
		}
		bfqg = blkg_to_bfqg(blkg);
		if (bfqg->online) {
			bio_associate_blkg_from_css(bio, &blkg->blkcg->css);
			return bfqg;
		}
		blkg = blkg->parent;
	}
	bio_associate_blkg_from_css(bio,
				&bfqg_to_blkg(bfqd->root_group)->blkcg->css);
	return bfqd->root_group;
}
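
/*
 * For example: if the cgroup that issued the bio has just been removed,
 * its blkg/bfqg pair is already marked !online; the loop above then
 * climbs towards the root until it finds an online ancestor (in the
 * worst case the root group) and re-associates the bio with that
 * ancestor's css, so the I/O is charged to a group that remains valid.
 */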

/**
 * bfq_bfqq_move - migrate @bfqq to @bfqg.
 * @bfqd: queue descriptor.
 * @bfqq: the queue to move.
 * @bfqg: the group to move to.
 *
 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
 * it on the new one.  Avoid putting the entity on the old group idle tree.
 *
 * Must be called under the scheduler lock, to make sure that the blkg
 * owning @bfqg does not disappear (see comments in
 * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
 * objects).
 */
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg)
{
	struct bfq_entity *entity = &bfqq->entity;

	/*
	 * oom_bfqq is not allowed to move: it holds a reference to
	 * root_group until elevator exit.
	 */
	if (bfqq == &bfqd->oom_bfqq)
		return;
	/*
	 * Get extra reference to prevent bfqq from being freed in
	 * next possible expire or deactivate.
	 */
	bfqq->ref++;

	/* If bfqq is empty, then bfq_bfqq_expire also invokes
	 * bfq_del_bfqq_busy, thereby removing bfqq and its entity
	 * from data structures related to current group. Otherwise we
	 * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
	 * we do below.
	 */
	if (bfqq == bfqd->in_service_queue)
		bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
				false, BFQQE_PREEMPTED);

	if (bfq_bfqq_busy(bfqq))
		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
	else if (entity->on_st_or_in_serv)
		bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
	bfqg_and_blkg_put(bfqq_group(bfqq));

	entity->parent = bfqg->my_entity;
	entity->sched_data = &bfqg->sched_data;
	/* pin down bfqg and its associated blkg */
	bfqg_and_blkg_get(bfqg);

	if (bfq_bfqq_busy(bfqq)) {
		if (unlikely(!bfqd->nonrot_with_queueing))
			bfq_pos_tree_add_move(bfqd, bfqq);
		bfq_activate_bfqq(bfqd, bfqq);
	}

	if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
		bfq_schedule_dispatch(bfqd);
	/* release extra ref taken above, bfqq may happen to be freed now */
	bfq_put_queue(bfqq);
}

/**
 * __bfq_bic_change_cgroup - move @bic to @bfqg.
 * @bfqd: the queue descriptor.
 * @bic: the bic to move.
 * @bfqg: the group to move to.
 *
 * Move bic to blkcg, assuming that bfqd->lock is held, which makes
 * sure that the reference to the cgroup is valid across the call (see
 * comments in bfq_bic_update_cgroup on this issue).
 */
static void *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
				     struct bfq_io_cq *bic,
				     struct bfq_group *bfqg)
{
	struct bfq_queue *async_bfqq = bic_to_bfqq(bic, false);
	struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, true);
	struct bfq_entity *entity;

	if (async_bfqq) {
		entity = &async_bfqq->entity;

		if (entity->sched_data != &bfqg->sched_data) {
			bic_set_bfqq(bic, NULL, false);
			bfq_release_process_ref(bfqd, async_bfqq);
		}
	}

	if (sync_bfqq) {
		if (!sync_bfqq->new_bfqq && !bfq_bfqq_coop(sync_bfqq)) {
			/* We are the only user of this bfqq, just move it */
			if (sync_bfqq->entity.sched_data != &bfqg->sched_data)
				bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
		} else {
			struct bfq_queue *bfqq;

			/*
			 * The queue was merged to a different queue. Check
			 * that the merge chain still belongs to the same
			 * cgroup.
			 */
			for (bfqq = sync_bfqq; bfqq; bfqq = bfqq->new_bfqq)
				if (bfqq->entity.sched_data !=
				    &bfqg->sched_data)
					break;
			if (bfqq) {
				/*
				 * Some queue changed cgroup so the merge is
				 * not valid anymore. We cannot easily just
				 * cancel the merge (by clearing new_bfqq) as
				 * there may be other processes using this
				 * queue and holding refs to all queues below
				 * sync_bfqq->new_bfqq. Similarly if the merge
				 * already happened, we need to detach from
				 * bfqq now so that we cannot merge bio to a
				 * request from the old cgroup.
				 */
				bfq_put_cooperator(sync_bfqq);
				bic_set_bfqq(bic, NULL, true);
				bfq_release_process_ref(bfqd, sync_bfqq);
			}
		}
	}

	return bfqg;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
{
	struct bfq_data *bfqd = bic_to_bfqd(bic);
	struct bfq_group *bfqg = bfq_bio_bfqg(bfqd, bio);
	uint64_t serial_nr;

	serial_nr = bfqg_to_blkg(bfqg)->blkcg->css.serial_nr;

	/*
	 * Check whether blkcg has changed.  The condition may trigger
	 * spuriously on a newly created cic but there's no harm.
	 */
	if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
		return;

	/*
	 * New cgroup for this process. Make sure it is linked to bfq internal
	 * cgroup hierarchy.
	 */
	bfq_link_bfqg(bfqd, bfqg);
	__bfq_bic_change_cgroup(bfqd, bic, bfqg);
	/*
	 * Update blkg_path for bfq_log_* functions. We cache this
	 * path, and update it here, for the following
	 * reasons. Operations on blkg objects in blk-cgroup are
	 * protected with the request_queue lock, and not with the
	 * lock that protects the instances of this scheduler
	 * (bfqd->lock). This exposes BFQ to the following sort of
	 * race.
	 *
	 * The blkg_lookup performed in bfq_get_queue, protected
	 * through rcu, may happen to return the address of a copy of
	 * the original blkg. If this is the case, then the
	 * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
	 * the blkg, is useless: it does not prevent blk-cgroup code
	 * from destroying both the original blkg and all objects
	 * directly or indirectly referred by the copy of the
	 * blkg.
	 *
	 * On the bright side, destroy operations on a blkg invoke, as
	 * a first step, hooks of the scheduler associated with the
	 * blkg. And these hooks are executed with bfqd->lock held for
	 * BFQ. As a consequence, for any blkg associated with the
	 * request queue this instance of the scheduler is attached
	 * to, we are guaranteed that such a blkg is not destroyed, and
	 * that all the pointers it contains are consistent, while we
	 * are holding bfqd->lock. A blkg_lookup performed with
	 * bfqd->lock held then returns a fully consistent blkg, which
	 * remains consistent for as long as this lock is held.
	 *
	 * Thanks to the last fact, and to the fact that: (1) bfqg has
	 * been obtained through a blkg_lookup in the above
	 * assignment, and (2) bfqd->lock is being held, here we can
	 * safely use the policy data for the involved blkg (i.e., the
	 * field bfqg->pd) to get to the blkg associated with bfqg,
	 * and then we can safely use any field of blkg. After we
	 * release bfqd->lock, even just getting blkg through this
	 * bfqg may cause dangling references to be traversed, as
	 * bfqg->pd may not exist any more.
	 *
	 * In view of the above facts, here we cache, in the bfqg, any
	 * blkg data we may need for this bic, and for its associated
	 * bfq_queue. As of now, we need to cache only the path of the
	 * blkg, which is used in the bfq_log_* functions.
	 *
	 * Finally, note that bfqg itself needs to be protected from
	 * destruction on the blkg_free of the original blkg (which
	 * invokes bfq_pd_free). We use an additional private
	 * refcounter for bfqg, to let it disappear only after no
	 * bfq_queue refers to it any longer.
	 */
	blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
	bic->blkcg_serial_nr = serial_nr;
}
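
/*
 * A condensed timeline of the race the comment above describes, were
 * blkg_lookup used without bfqd->lock:
 *
 *	BFQ					blk-cgroup
 *	blkg_lookup() returns a copy
 *	of the original blkg
 *						destroys the original blkg
 *						and the objects it refers to
 *	dereferences fields of the
 *	copy -> dangling pointers
 *
 * Holding bfqd->lock closes the window: blkg destruction first invokes
 * the scheduler's hooks (e.g. bfq_pd_offline()), and those run with
 * bfqd->lock held.
 */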

/**
 * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
 * @st: the service tree being flushed.
 */
static void bfq_flush_idle_tree(struct bfq_service_tree *st)
{
	struct bfq_entity *entity = st->first_idle;

	for (; entity ; entity = st->first_idle)
		__bfq_deactivate_entity(entity, false);
}

/**
 * bfq_reparent_leaf_entity - move leaf entity to the root_group.
 * @bfqd: the device data structure with the root group.
 * @entity: the entity to move, if entity is a leaf; or the parent entity
 *	    of an active leaf entity to move, if entity is not a leaf.
 * @ioprio_class: the ioprio_class of the service trees to walk.
 */
static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
				     struct bfq_entity *entity,
				     int ioprio_class)
{
	struct bfq_queue *bfqq;
	struct bfq_entity *child_entity = entity;

	while (child_entity->my_sched_data) { /* leaf not reached yet */
		struct bfq_sched_data *child_sd = child_entity->my_sched_data;
		struct bfq_service_tree *child_st = child_sd->service_tree +
			ioprio_class;
		struct rb_root *child_active = &child_st->active;

		child_entity = bfq_entity_of(rb_first(child_active));

		if (!child_entity)
			child_entity = child_sd->in_service_entity;
	}

	bfqq = bfq_entity_to_bfqq(child_entity);
	bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
}

/**
 * bfq_reparent_active_queues - move to the root group all active queues.
 * @bfqd: the device data structure with the root group.
 * @bfqg: the group to move from.
 * @st: the service tree to start the search from.
 * @ioprio_class: the ioprio_class of @st.
 */
static void bfq_reparent_active_queues(struct bfq_data *bfqd,
				       struct bfq_group *bfqg,
				       struct bfq_service_tree *st,
				       int ioprio_class)
{
	struct rb_root *active = &st->active;
	struct bfq_entity *entity;

	while ((entity = bfq_entity_of(rb_first(active))))
		bfq_reparent_leaf_entity(bfqd, entity, ioprio_class);

	if (bfqg->sched_data.in_service_entity)
		bfq_reparent_leaf_entity(bfqd,
					 bfqg->sched_data.in_service_entity,
					 ioprio_class);
}

/**
 * bfq_pd_offline - deactivate the entity associated with @pd,
 *		    and reparent its child entities.
 * @pd: descriptor of the policy going offline.
 *
 * blkio already grabs the queue_lock for us, so no need to use
 * RCU-based magic.
 */
static void bfq_pd_offline(struct blkg_policy_data *pd)
{
	struct bfq_service_tree *st;
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	struct bfq_data *bfqd = bfqg->bfqd;
	struct bfq_entity *entity = bfqg->my_entity;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&bfqd->lock, flags);

	if (!entity) /* root group */
		goto put_async_queues;

	/*
	 * Empty all service_trees belonging to this group before
	 * deactivating the group itself.
	 */
	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
		st = bfqg->sched_data.service_tree + i;

		/*
		 * It may happen that some queues are still active
		 * (busy) upon group destruction (if the corresponding
		 * processes have been forced to terminate). We move
		 * all the leaf entities corresponding to these queues
		 * to the root_group.
		 * Also, it may happen that the group has an entity
		 * in service, which is disconnected from the active
		 * tree: it must be moved, too.
		 * There is no need to put the sync queues, as the
		 * scheduler has taken no reference.
		 */
		bfq_reparent_active_queues(bfqd, bfqg, st, i);

		/*
		 * The idle tree may still contain bfq_queues
		 * belonging to exited task because they never
		 * migrated to a different cgroup from the one being
		 * destroyed now. In addition, even
		 * bfq_reparent_active_queues() may happen to add some
		 * entities to the idle tree. It happens if, in some
		 * of the calls to bfq_bfqq_move() performed by
		 * bfq_reparent_active_queues(), the queue to move is
		 * empty and gets expired.
		 */
		bfq_flush_idle_tree(st);
	}

	__bfq_deactivate_entity(entity, false);

put_async_queues:
	bfq_put_async_queues(bfqd, bfqg);
	bfqg->online = false;

	spin_unlock_irqrestore(&bfqd->lock, flags);
	/*
	 * @blkg is going offline and will be ignored by
	 * blkg_[rw]stat_recursive_sum().  Transfer stats to the parent so
	 * that they don't get lost.  If IOs complete after this point, the
	 * stats for them will be lost.  Oh well...
	 */
	bfqg_stats_xfer_dead(bfqg);
}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	struct blkcg_gq *blkg;

	list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		bfq_end_wr_async_queues(bfqd, bfqg);
	}
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

static int bfq_io_show_weight_legacy(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	unsigned int val = 0;

	if (bfqgd)
		val = bfqgd->weight;

	seq_printf(sf, "%u\n", val);

	return 0;
}

static u64 bfqg_prfill_weight_device(struct seq_file *sf,
				     struct blkg_policy_data *pd, int off)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	if (!bfqg->entity.dev_weight)
		return 0;
	return __blkg_prfill_u64(sf, pd, bfqg->entity.dev_weight);
}

static int bfq_io_show_weight(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);

	seq_printf(sf, "default %u\n", bfqgd->weight);
	blkcg_print_blkgs(sf, blkcg, bfqg_prfill_weight_device,
			  &blkcg_policy_bfq, 0, false);
	return 0;
}

static void bfq_group_set_weight(struct bfq_group *bfqg, u64 weight, u64 dev_weight)
{
	weight = dev_weight ?: weight;

	bfqg->entity.dev_weight = dev_weight;
	/*
	 * Setting the prio_changed flag of the entity
	 * to 1 with new_weight == weight would re-set
	 * the value of the weight to its ioprio mapping.
	 * Set the flag only if necessary.
	 */
	if ((unsigned short)weight != bfqg->entity.new_weight) {
		bfqg->entity.new_weight = (unsigned short)weight;
		/*
		 * Make sure that the above new value has been
		 * stored in bfqg->entity.new_weight before
		 * setting the prio_changed flag. In fact,
		 * this flag may be read asynchronously (in
		 * critical sections protected by a different
		 * lock than that held here), and finding this
		 * flag set may cause the execution of the code
		 * for updating parameters whose value may
		 * depend also on bfqg->entity.new_weight (in
		 * __bfq_entity_update_weight_prio).
		 * This barrier makes sure that the new value
		 * of bfqg->entity.new_weight is correctly
		 * seen in that code.
		 */
		smp_wmb();
		bfqg->entity.prio_changed = 1;
	}
}
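
/*
 * A schematic view of the ordering the smp_wmb() above provides, with
 * the reader side living in __bfq_entity_update_weight_prio():
 *
 *	writer (here)			reader
 *	entity->new_weight = w;
 *	smp_wmb();
 *	entity->prio_changed = 1;
 *					sees prio_changed == 1
 *					reads entity->new_weight == w
 *
 * The barrier guarantees that a reader observing prio_changed set
 * cannot observe a stale new_weight (assuming the read side orders
 * its two loads correspondingly).
 */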

static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
				    struct cftype *cftype,
				    u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	struct blkcg_gq *blkg;
	int ret = -ERANGE;

	if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
		return ret;

	ret = 0;
	spin_lock_irq(&blkcg->lock);
	bfqgd->weight = (unsigned short)val;
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		if (bfqg)
			bfq_group_set_weight(bfqg, val, 0);
	}
	spin_unlock_irq(&blkcg->lock);

	return ret;
}

static ssize_t bfq_io_set_device_weight(struct kernfs_open_file *of,
					char *buf, size_t nbytes,
					loff_t off)
{
	int ret;
	struct blkg_conf_ctx ctx;
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct bfq_group *bfqg;
	u64 v;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_bfq, buf, &ctx);
	if (ret)
		return ret;

	if (sscanf(ctx.body, "%llu", &v) == 1) {
		/* require "default" on dfl */
		ret = -ERANGE;
		if (!v)
			goto out;
	} else if (!strcmp(strim(ctx.body), "default")) {
		v = 0;
	} else {
		ret = -EINVAL;
		goto out;
	}

	bfqg = blkg_to_bfqg(ctx.blkg);

	ret = -ERANGE;
	if (!v || (v >= BFQ_MIN_WEIGHT && v <= BFQ_MAX_WEIGHT)) {
		bfq_group_set_weight(bfqg, bfqg->entity.weight, v);
		ret = 0;
	}
out:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}

static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
				 char *buf, size_t nbytes,
				 loff_t off)
{
	char *endp;
	int ret;
	u64 v;

	buf = strim(buf);

	/* "WEIGHT" or "default WEIGHT" sets the default weight */
	v = simple_strtoull(buf, &endp, 0);
	if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
		ret = bfq_io_set_weight_legacy(of_css(of), NULL, v);
		return ret ?: nbytes;
	}

	return bfq_io_set_device_weight(of, buf, nbytes, off);
}
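
/*
 * Example usage from userspace (cgroup v2, assuming /sys/fs/cgroup/grp
 * exists and bfq is the active scheduler on device 8:16):
 *
 *	# set the per-cgroup default weight
 *	echo 200 > /sys/fs/cgroup/grp/io.bfq.weight
 *	# equivalent explicit form
 *	echo "default 200" > /sys/fs/cgroup/grp/io.bfq.weight
 *	# set a per-device weight, parsed by bfq_io_set_device_weight()
 *	echo "8:16 300" > /sys/fs/cgroup/grp/io.bfq.weight
 *	# clear the per-device override again
 *	echo "8:16 default" > /sys/fs/cgroup/grp/io.bfq.weight
 */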

static int bfqg_print_rwstat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, true);
	return 0;
}

static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
					struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat_sample sum;

	blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_bfq, off, &sum);
	return __blkg_prfill_rwstat(sf, pd, &sum);
}

static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, true);
	return 0;
}

#ifdef CONFIG_BFQ_CGROUP_DEBUG
static int bfqg_print_stat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, false);
	return 0;
}

static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct blkcg_gq *blkg = pd_to_blkg(pd);
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	u64 sum = 0;

	lockdep_assert_held(&blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct bfq_stat *stat;

		if (!pos_blkg->online)
			continue;

		stat = (void *)blkg_to_pd(pos_blkg, &blkcg_policy_bfq) + off;
		sum += bfq_stat_read(stat) + atomic64_read(&stat->aux_cnt);
	}
	rcu_read_unlock();

	return __blkg_prfill_u64(sf, pd, sum);
}

static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, false);
	return 0;
}

static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	struct bfq_group *bfqg = blkg_to_bfqg(pd->blkg);
	u64 sum = blkg_rwstat_total(&bfqg->stats.bytes);

	return __blkg_prfill_u64(sf, pd, sum >> 9);
}

static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
	return 0;
}

static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
					 struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat_sample tmp;

	blkg_rwstat_recursive_sum(pd->blkg, &blkcg_policy_bfq,
			offsetof(struct bfq_group, stats.bytes), &tmp);

	return __blkg_prfill_u64(sf, pd,
		(tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]) >> 9);
}

static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
			  false);
	return 0;
}

static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	u64 samples = bfq_stat_read(&bfqg->stats.avg_queue_size_samples);
	u64 v = 0;

	if (samples) {
		v = bfq_stat_read(&bfqg->stats.avg_queue_size_sum);
		v = div64_u64(v, samples);
	}
	__blkg_prfill_u64(sf, pd, v);
	return 0;
}

/* print avg_queue_size */
static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
			  0, false);
	return 0;
}
#endif /* CONFIG_BFQ_CGROUP_DEBUG */

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	int ret;

	ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq);
	if (ret)
		return NULL;

	return blkg_to_bfqg(bfqd->queue->root_blkg);
}

struct blkcg_policy blkcg_policy_bfq = {
	.dfl_cftypes		= bfq_blkg_files,
	.legacy_cftypes		= bfq_blkcg_legacy_files,

	.cpd_alloc_fn		= bfq_cpd_alloc,
	.cpd_init_fn		= bfq_cpd_init,
	.cpd_bind_fn		= bfq_cpd_init,
	.cpd_free_fn		= bfq_cpd_free,

	.pd_alloc_fn		= bfq_pd_alloc,
	.pd_init_fn		= bfq_pd_init,
	.pd_offline_fn		= bfq_pd_offline,
	.pd_free_fn		= bfq_pd_free,
	.pd_reset_stats_fn	= bfq_pd_reset_stats,
};
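
/*
 * Rough lifecycle of the hooks registered above, as driven by blk-cgroup:
 * cpd_alloc/cpd_init create the per-blkcg bfq_group_data when the policy
 * is registered or a new blkcg appears; pd_alloc/pd_init create the
 * per-(blkcg, request_queue) bfq_group when the corresponding blkg is
 * instantiated; pd_offline runs when the blkg is being torn down
 * (transferring its stats to the parent); pd_free drops the bfq_group
 * reference taken in bfq_pd_alloc().
 */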

struct cftype bfq_blkcg_legacy_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight_legacy,
		.write_u64 = bfq_io_set_weight_legacy,
	},
	{
		.name = "bfq.weight_device",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write = bfq_io_set_weight,
	},

	/* statistics, covers only the tasks in the bfqg */
	{
		.name = "bfq.io_service_bytes",
		.private = offsetof(struct bfq_group, stats.bytes),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_serviced",
		.private = offsetof(struct bfq_group, stats.ios),
		.seq_show = bfqg_print_rwstat,
	},
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	{
		.name = "bfq.time",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.sectors",
		.seq_show = bfqg_print_stat_sectors,
	},
	{
		.name = "bfq.io_service_time",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_wait_time",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_merged",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_queued",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat,
	},
#endif /* CONFIG_BFQ_CGROUP_DEBUG */

	/* the same statistics which cover the bfqg and its descendants */
	{
		.name = "bfq.io_service_bytes_recursive",
		.private = offsetof(struct bfq_group, stats.bytes),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_serviced_recursive",
		.private = offsetof(struct bfq_group, stats.ios),
		.seq_show = bfqg_print_rwstat_recursive,
	},
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	{
		.name = "bfq.time_recursive",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat_recursive,
	},
	{
		.name = "bfq.sectors_recursive",
		.seq_show = bfqg_print_stat_sectors_recursive,
	},
	{
		.name = "bfq.io_service_time_recursive",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_wait_time_recursive",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_merged_recursive",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_queued_recursive",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.avg_queue_size",
		.seq_show = bfqg_print_avg_queue_size,
	},
	{
		.name = "bfq.group_wait_time",
		.private = offsetof(struct bfq_group, stats.group_wait_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.idle_time",
		.private = offsetof(struct bfq_group, stats.idle_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.empty_time",
		.private = offsetof(struct bfq_group, stats.empty_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.dequeue",
		.private = offsetof(struct bfq_group, stats.dequeue),
		.seq_show = bfqg_print_stat,
	},
#endif	/* CONFIG_BFQ_CGROUP_DEBUG */
	{ }	/* terminate */
};

struct cftype bfq_blkg_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write = bfq_io_set_weight,
	},
	{} /* terminate */
};

#else	/* CONFIG_BFQ_GROUP_IOSCHED */

void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg) {}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
	}
	entity->sched_data = &bfqg->sched_data;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
{
	return bfqd->root_group;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	return bfqq->bfqd->root_group;
}

void bfqg_and_blkg_get(struct bfq_group *bfqg) {}

void bfqg_and_blkg_put(struct bfq_group *bfqg) {}

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	struct bfq_group *bfqg;
	int i;

	bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
	if (!bfqg)
		return NULL;

	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
		bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;

	return bfqg;
}
#endif	/* CONFIG_BFQ_GROUP_IOSCHED */