Lines Matching defs:bfqg (block/bfq-cgroup.c)
96 /* bfqg stats flags */
138 static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
141 struct bfqg_stats *stats = &bfqg->stats;
145 if (bfqg == curr_bfqg)
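Lines 138-145 open the group-wait tracking: a group starts "waiting" only if it is not already marked waiting and is not the group currently being served. A sketch of the whole helper, with the non-matching lines filled in from context (the flag helpers come from the "bfqg stats flags" machinery near line 96 and may differ across kernel versions):

static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
                                                 struct bfq_group *curr_bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        if (bfqg_stats_waiting(stats))
                return;
        /* the group that owns the in-service queue is not waiting */
        if (bfqg == curr_bfqg)
                return;
        stats->start_group_wait_time = ktime_get_ns();
        bfqg_stats_mark_waiting(stats);
}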
166 void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
168 bfq_stat_add(&bfqg->stats.dequeue, 1);
171 void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
173 struct bfqg_stats *stats = &bfqg->stats;
190 void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
192 struct bfqg_stats *stats = &bfqg->stats;
204 void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
206 struct bfqg_stats *stats = &bfqg->stats;
212 void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
214 struct bfqg_stats *stats = &bfqg->stats;
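The idle-time statistics at lines 190-206 follow the same start-timestamp/accumulate-delta pattern. A plausible reconstruction of the pair (again relying on the line-96 flag helpers):

void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        if (bfqg_stats_idling(stats)) {
                u64 now = ktime_get_ns();

                /* accumulate the just-finished idle interval */
                if (now > stats->start_idle_time)
                        bfq_stat_add(&stats->idle_time,
                                     now - stats->start_idle_time);
                bfqg_stats_clear_idling(stats);
        }
}

void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        stats->start_idle_time = ktime_get_ns();
        bfqg_stats_mark_idling(stats);
}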
222 void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
225 blkg_rwstat_add(&bfqg->stats.queued, op, 1);
226 bfqg_stats_end_empty_time(&bfqg->stats);
227 if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
228 bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
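Lines 222-228 belong to a single hook called when a request is queued; reconstructed in full (the cast through the void * bfqd back-pointer matches line 227 above):

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
                              unsigned int op)
{
        /* one more request of this op type queued in the group */
        blkg_rwstat_add(&bfqg->stats.queued, op, 1);
        bfqg_stats_end_empty_time(&bfqg->stats);
        /* a queue that is not in service makes its group wait */
        if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
                bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
}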
231 void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
233 blkg_rwstat_add(&bfqg->stats.queued, op, -1);
236 void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
238 blkg_rwstat_add(&bfqg->stats.merged, op, 1);
241 void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
244 struct bfqg_stats *stats = &bfqg->stats;
257 void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
259 void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
260 void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
261 void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
263 void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
264 void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
265 void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
266 void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
267 void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }
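Lines 257-267 are the counterparts compiled when CONFIG_BFQ_CGROUP_DEBUG is off: every stats hook collapses to an empty body, so call sites need no #ifdef guards. Presumably the block is bracketed like this:

#else /* CONFIG_BFQ_CGROUP_DEBUG */

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
                              unsigned int op) { }
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
/* ... the remaining hooks shown above become empty stubs the same way ... */

#endif /* CONFIG_BFQ_CGROUP_DEBUG */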
284 struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
286 return pd_to_blkg(&bfqg->pd);
301 static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
303 struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;
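The conversions between the three views of a group (pd, bfqg, blkg) are container arithmetic; lines 284-303 expand to roughly:

struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
{
        /* pd is embedded in struct bfq_group, so this is pointer math */
        return pd_to_blkg(&bfqg->pd);
}

static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
{
        struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;

        /* the root blkg has no parent */
        return pblkg ? blkg_to_bfqg(pblkg) : NULL;
}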
322 static void bfqg_get(struct bfq_group *bfqg)
324 bfqg->ref++;
327 static void bfqg_put(struct bfq_group *bfqg)
329 bfqg->ref--;
331 if (bfqg->ref == 0)
332 kfree(bfqg);
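The group's own lifetime is a plain integer refcount, presumably safe without atomics because updates happen under the scheduler's locking; lines 322-332 amount to:

static void bfqg_get(struct bfq_group *bfqg)
{
        bfqg->ref++;
}

static void bfqg_put(struct bfq_group *bfqg)
{
        bfqg->ref--;

        /* last reference gone: free the group itself */
        if (bfqg->ref == 0)
                kfree(bfqg);
}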
335 static void bfqg_and_blkg_get(struct bfq_group *bfqg)
337 /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
338 bfqg_get(bfqg);
340 blkg_get(bfqg_to_blkg(bfqg));
343 void bfqg_and_blkg_put(struct bfq_group *bfqg)
345 blkg_put(bfqg_to_blkg(bfqg));
347 bfqg_put(bfqg);
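Lines 335-347 pair each bfqg reference with a reference on the associated blkg, taken and dropped in opposite orders:

static void bfqg_and_blkg_get(struct bfq_group *bfqg)
{
        /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
        bfqg_get(bfqg);

        blkg_get(bfqg_to_blkg(bfqg));
}

void bfqg_and_blkg_put(struct bfq_group *bfqg)
{
        blkg_put(bfqg_to_blkg(bfqg));

        bfqg_put(bfqg);
}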
352 struct bfq_group *bfqg = blkg_to_bfqg(rq->bio->bi_blkg);
354 if (!bfqg)
357 blkg_rwstat_add(&bfqg->stats.bytes, rq->cmd_flags, blk_rq_bytes(rq));
358 blkg_rwstat_add(&bfqg->stats.ios, rq->cmd_flags, 1);
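Lines 352-358 charge a request's bytes and I/O count to the group owning its bio; a sketch, with the enclosing function name and parameters assumed from context:

void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq)
{
        struct bfq_group *bfqg = blkg_to_bfqg(rq->bio->bi_blkg);

        if (!bfqg)
                return;

        blkg_rwstat_add(&bfqg->stats.bytes, rq->cmd_flags, blk_rq_bytes(rq));
        blkg_rwstat_add(&bfqg->stats.ios, rq->cmd_flags, 1);
}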
402 * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
403 * recursive stats can still account for the amount used by this bfqg after
406 static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
410 if (!bfqg) /* root_group */
413 parent = bfqg_parent(bfqg);
415 lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);
420 bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
421 bfqg_stats_reset(&bfqg->stats);
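Assembled from lines 406-421: when a group dies, its counters are folded into the parent's auxiliary counters and then cleared, so the ancestors' recursive stats stay correct. The early return for a missing parent is assumed:

static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
{
        struct bfq_group *parent;

        if (!bfqg) /* root_group */
                return;

        parent = bfqg_parent(bfqg);

        lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);

        if (unlikely(!parent))
                return;

        /* fold the dead group's stats into the parent's aux counters */
        bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
        bfqg_stats_reset(&bfqg->stats);
}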
424 void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
434 * Make sure that bfqg and its associated blkg do not
437 bfqg_and_blkg_get(bfqg);
439 entity->parent = bfqg->my_entity; /* NULL for root group */
440 entity->sched_data = &bfqg->sched_data;
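bfq_init_entity (lines 424-440) wires an entity into its group's scheduling data and, for queue entities, pins the group first. A sketch; the elided head initializes the entity's weight and the queue's ioprio, and the if (bfqq) guard is inferred from the comment placement:

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
        struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

        /* ... entity weight and bfqq ioprio initialization elided ... */
        if (bfqq) {
                /*
                 * Make sure that bfqg and its associated blkg do not
                 * disappear before entity.
                 */
                bfqg_and_blkg_get(bfqg);
        }
        entity->parent = bfqg->my_entity; /* NULL for root group */
        entity->sched_data = &bfqg->sched_data;
}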
526 struct bfq_group *bfqg;
528 bfqg = kzalloc_node(sizeof(*bfqg), gfp, q->node);
529 if (!bfqg)
532 if (bfqg_stats_init(&bfqg->stats, gfp)) {
533 kfree(bfqg);
538 bfqg_get(bfqg);
539 return &bfqg->pd;
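Lines 526-539 form the policy's pd_alloc hook: allocate the group on the queue's NUMA node, set up its stats, take the initial reference, and return the embedded pd to blk-cgroup. Reconstructed, with the hook signature assumed:

static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, struct request_queue *q,
                                             struct blkcg *blkcg)
{
        struct bfq_group *bfqg;

        bfqg = kzalloc_node(sizeof(*bfqg), gfp, q->node);
        if (!bfqg)
                return NULL;

        if (bfqg_stats_init(&bfqg->stats, gfp)) {
                kfree(bfqg);
                return NULL;
        }

        /* initial reference, dropped again from the pd_free hook */
        bfqg_get(bfqg);
        return &bfqg->pd;
}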
545 struct bfq_group *bfqg = blkg_to_bfqg(blkg);
547 struct bfq_entity *entity = &bfqg->entity;
551 entity->my_sched_data = &bfqg->sched_data;
552 bfqg->my_entity = entity; /*
556 bfqg->bfqd = bfqd;
557 bfqg->active_entities = 0;
558 bfqg->online = true;
559 bfqg->rq_pos_tree = RB_ROOT;
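The pd_init hook (lines 545-559) then links the fresh group into BFQ itself; a sketch with the weight-default lines elided and the bfqd lookup assumed:

static void bfq_pd_init(struct blkg_policy_data *pd)
{
        struct blkcg_gq *blkg = pd_to_blkg(pd);
        struct bfq_group *bfqg = blkg_to_bfqg(blkg);
        struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
        struct bfq_entity *entity = &bfqg->entity;

        /* ... entity weight defaults taken from the blkcg elided ... */
        entity->my_sched_data = &bfqg->sched_data;
        bfqg->my_entity = entity; /*
                                   * the root_group's will be set to NULL
                                   * in bfq_init_queue()
                                   */
        bfqg->bfqd = bfqd;
        bfqg->active_entities = 0;
        bfqg->online = true;
        bfqg->rq_pos_tree = RB_ROOT;
}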
564 struct bfq_group *bfqg = pd_to_bfqg(pd);
566 bfqg_stats_exit(&bfqg->stats);
567 bfqg_put(bfqg);
572 struct bfq_group *bfqg = pd_to_bfqg(pd);
574 bfqg_stats_reset(&bfqg->stats);
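And the matching teardown (lines 564-574): pd_free releases the stats and drops the allocation reference, pd_reset_stats only clears counters:

static void bfq_pd_free(struct blkg_policy_data *pd)
{
        struct bfq_group *bfqg = pd_to_bfqg(pd);

        bfqg_stats_exit(&bfqg->stats);
        bfqg_put(bfqg);         /* may kfree() bfqg, see bfqg_put() above */
}

static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
{
        struct bfq_group *bfqg = pd_to_bfqg(pd);

        bfqg_stats_reset(&bfqg->stats);
}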
577 static void bfq_group_set_parent(struct bfq_group *bfqg,
582 entity = &bfqg->entity;
587 static void bfq_link_bfqg(struct bfq_data *bfqd, struct bfq_group *bfqg)
597 entity = &bfqg->entity;
613 struct bfq_group *bfqg;
620 bfqg = blkg_to_bfqg(blkg);
621 if (bfqg->online) {
623 return bfqg;
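Lines 613-623 resolve a bio to the nearest online group, walking up the blkg tree and falling back to the root group; plausibly:

static struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
{
        struct blkcg_gq *blkg = bio->bi_blkg;
        struct bfq_group *bfqg;

        while (blkg) {
                bfqg = blkg_to_bfqg(blkg);
                if (bfqg->online) {
                        /* re-associate the bio with the group actually used */
                        bio_associate_blkg_from_css(bio, &blkg->blkcg->css);
                        return bfqg;
                }
                blkg = blkg->parent;
        }
        /* no online group on the path: fall back to the root group */
        return bfqd->root_group;
}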
633 * bfq_bfqq_move - migrate @bfqq to @bfqg.
636 * @bfqg: the group to move to.
638 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
642 * owning @bfqg does not disappear (see comments in
647 struct bfq_group *bfqg)
679 entity->parent = bfqg->my_entity;
680 entity->sched_data = &bfqg->sched_data;
681 /* pin down bfqg and its associated blkg */
682 bfqg_and_blkg_get(bfqg);
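The visible heart of bfq_bfqq_move is at lines 679-682: repoint the queue's entity at the destination group, then pin that group and its blkg so they cannot vanish while the queue references them. Skeleton only; the deactivate/reactivate steps around it are summarized in comments:

void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                   struct bfq_group *bfqg)
{
        struct bfq_entity *entity = &bfqq->entity;

        /* ... deactivate bfqq in its old group, drop that group's ref ... */

        entity->parent = bfqg->my_entity;
        entity->sched_data = &bfqg->sched_data;
        /* pin down bfqg and its associated blkg */
        bfqg_and_blkg_get(bfqg);

        /* ... if bfqq was busy, reactivate it in the new group ... */
}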
708 struct bfq_group *bfqg)
717 if (entity->sched_data != &bfqg->sched_data) {
726 if (sync_bfqq->entity.sched_data != &bfqg->sched_data)
727 bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
738 &bfqg->sched_data)
759 return bfqg;
765 struct bfq_group *bfqg = bfq_bio_bfqg(bfqd, bio);
768 serial_nr = bfqg_to_blkg(bfqg)->blkcg->css.serial_nr;
781 bfq_link_bfqg(bfqd, bfqg);
782 __bfq_bic_change_cgroup(bfqd, bic, bfqg);
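Lines 765-782 implement the cheap has-my-cgroup-changed test: the blkcg css serial number is cached in the bic, and only a mismatch triggers relinking and queue migration. A sketch:

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
{
        struct bfq_data *bfqd = bic_to_bfqd(bic);
        struct bfq_group *bfqg = bfq_bio_bfqg(bfqd, bio);
        uint64_t serial_nr;

        serial_nr = bfqg_to_blkg(bfqg)->blkcg->css.serial_nr;

        /* nothing to do while the bic still belongs to the same blkcg */
        if (likely(bic->blkcg_serial_nr == serial_nr))
                return;

        /* new cgroup: link bfqg into the hierarchy and move the queues */
        bfq_link_bfqg(bfqd, bfqg);
        __bfq_bic_change_cgroup(bfqd, bic, bfqg);

        bic->blkcg_serial_nr = serial_nr;
}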
812 * Thanks to the last fact, and to the fact that: (1) bfqg has
816 * field bfqg->pd) to get to the blkg associated with bfqg,
819 * bfqg may cause dangling references to be traversed, as
820 * bfqg->pd may not exist any more.
822 * In view of the above facts, here we cache, in the bfqg, any
827 * Finally, note that bfqg itself needs to be protected from
830 * refcounter for bfqg, to let it disappear only after no
833 blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
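The upshot of that comment: the cgroup path is copied into bfqg->blkg_path once, while bfqg->pd is known to be valid, and later readers use only the cached buffer. Illustrative pattern only; the pr_debug() consumer below is a stand-in for whatever logging the build really does:

        /* cache the path while the blkg is guaranteed alive ... */
        blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));

        /* ... so that later, possibly racy, readers can safely do: */
        pr_debug("bfq: group %s updated\n", bfqg->blkg_path);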
881 * @bfqg: the group to move from.
885 struct bfq_group *bfqg,
895 if (bfqg->sched_data.in_service_entity)
897 bfqg->sched_data.in_service_entity,
912 struct bfq_group *bfqg = pd_to_bfqg(pd);
913 struct bfq_data *bfqd = bfqg->bfqd;
914 struct bfq_entity *entity = bfqg->my_entity;
928 st = bfqg->sched_data.service_tree + i;
942 bfq_reparent_active_queues(bfqd, bfqg, st, i);
961 bfq_put_async_queues(bfqd, bfqg);
962 bfqg->online = false;
971 bfqg_stats_xfer_dead(bfqg);
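A condensed outline of the offline path at lines 912-971: drain every service tree by reparenting still-active queues, release the async queues, mark the group offline, and fold its stats into the parent. Heavily elided, with locking summarized in comments:

static void bfq_pd_offline(struct blkg_policy_data *pd)
{
        struct bfq_group *bfqg = pd_to_bfqg(pd);
        struct bfq_data *bfqd = bfqg->bfqd;
        struct bfq_entity *entity = bfqg->my_entity;
        int i;

        /* (the scheduler lock is held around the reparenting) */

        if (entity) {   /* NULL for the root group */
                for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
                        struct bfq_service_tree *st =
                                bfqg->sched_data.service_tree + i;

                        /* hand still-active queues over to the parent */
                        bfq_reparent_active_queues(bfqd, bfqg, st, i);
                }
                /* ... deactivation of the group's own entity elided ... */
        }

        bfq_put_async_queues(bfqd, bfqg);
        bfqg->online = false;

        bfqg_stats_xfer_dead(bfqg);
}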
979 struct bfq_group *bfqg = blkg_to_bfqg(blkg);
981 bfq_end_wr_async_queues(bfqd, bfqg);
1003 struct bfq_group *bfqg = pd_to_bfqg(pd);
1005 if (!bfqg->entity.dev_weight)
1007 return __blkg_prfill_u64(sf, pd, bfqg->entity.dev_weight);
1021 static void bfq_group_set_weight(struct bfq_group *bfqg, u64 weight, u64 dev_weight)
1025 bfqg->entity.dev_weight = dev_weight;
1032 if ((unsigned short)weight != bfqg->entity.new_weight) {
1033 bfqg->entity.new_weight = (unsigned short)weight;
1036 * stored in bfqg->entity.new_weight before
1043 * depend also on bfqg->entity.new_weight (in
1046 * of bfqg->entity.new_weight is correctly
1050 bfqg->entity.prio_changed = 1;
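The weight update orders its stores explicitly: new_weight must be visible before prio_changed is raised, because the flag is read under a different lock. Lines 1021-1050 condense to:

static void bfq_group_set_weight(struct bfq_group *bfqg, u64 weight, u64 dev_weight)
{
        weight = dev_weight ?: weight;

        bfqg->entity.dev_weight = dev_weight;
        /* set prio_changed only on a real change, see the comment above */
        if ((unsigned short)weight != bfqg->entity.new_weight) {
                bfqg->entity.new_weight = (unsigned short)weight;
                /*
                 * Publish new_weight before the flag: readers of
                 * prio_changed run under other locks.
                 */
                smp_wmb();
                bfqg->entity.prio_changed = 1;
        }
}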
1070 struct bfq_group *bfqg = blkg_to_bfqg(blkg);
1072 if (bfqg)
1073 bfq_group_set_weight(bfqg, val, 0);
1087 struct bfq_group *bfqg;
1106 bfqg = blkg_to_bfqg(ctx.blkg);
1110 bfq_group_set_weight(bfqg, bfqg->entity.weight, v);
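The cgroup-v1 writer glimpsed at lines 1070-1073 applies one value to every group of the blkcg; roughly, with the blkcg-wide default update abbreviated:

static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
                                    struct cftype *cftype, u64 val)
{
        struct blkcg *blkcg = css_to_blkcg(css);
        struct blkcg_gq *blkg;

        if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
                return -ERANGE;

        spin_lock_irq(&blkcg->lock);
        /* ... store val as the blkcg-wide default weight, elided ... */
        hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
                struct bfq_group *bfqg = blkg_to_bfqg(blkg);

                if (bfqg)
                        bfq_group_set_weight(bfqg, val, 0);
        }
        spin_unlock_irq(&blkcg->lock);

        return 0;
}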
1206 struct bfq_group *bfqg = blkg_to_bfqg(pd->blkg);
1207 u64 sum = blkg_rwstat_total(&bfqg->stats.bytes);
1242 struct bfq_group *bfqg = pd_to_bfqg(pd);
1243 u64 samples = bfq_stat_read(&bfqg->stats.avg_queue_size_samples);
1247 v = bfq_stat_read(&bfqg->stats.avg_queue_size_sum);
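Lines 1242-1247 report the average queue size as the accumulated sum divided by the number of samples; with the 64-bit division made explicit:

static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
                                      struct blkg_policy_data *pd, int off)
{
        struct bfq_group *bfqg = pd_to_bfqg(pd);
        u64 samples = bfq_stat_read(&bfqg->stats.avg_queue_size_samples);
        u64 v = 0;

        if (samples) {
                v = bfq_stat_read(&bfqg->stats.avg_queue_size_sum);
                v = div64_u64(v, samples);      /* average = sum / samples */
        }
        __blkg_prfill_u64(sf, pd, v);
        return 0;
}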
1305 /* statistics, covers only the tasks in the bfqg */
1348 /* the same statistics which cover the bfqg and its descendants */
1430 struct bfq_group *bfqg) {}
1432 void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
1442 entity->sched_data = &bfqg->sched_data;
1462 void bfqg_and_blkg_get(struct bfq_group *bfqg) {}
1464 void bfqg_and_blkg_put(struct bfq_group *bfqg) {}
1468 struct bfq_group *bfqg;
1471 bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
1472 if (!bfqg)
1476 bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
1478 return bfqg;
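Lines 1468-1478 come from the build without CONFIG_BFQ_GROUP_IOSCHED: a single group per device, allocated once. Reconstructed, with the enclosing function assumed to be bfq_create_group_hierarchy:

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
        struct bfq_group *bfqg;
        int i;

        bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
        if (!bfqg)
                return NULL;

        /* one service tree per I/O priority class */
        for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
                bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;

        return bfqg;
}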