Lines matching refs: bfq_group
122 * cgroup hierarchy) or a bfq_group into the upper level scheduler. Each
202 struct bfq_group;
253 /* request-position tree member (see bfq_group's @rq_pos_tree) */
255 /* request-position tree root (see bfq_group's @rq_pos_tree) */
451 /* root bfq_group for the device */
452 struct bfq_group *root_group;
854 * @weight: weight of the bfq_group
864 * struct bfq_group - per (device, cgroup) data structure.
886 * Each (device, cgroup) pair has its own bfq_group, i.e., for each cgroup
895 struct bfq_group {
904 /* Is bfq_group still online? */
925 struct bfq_group {
960 void bfq_end_wr_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
963 void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
970 void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
972 void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op);
973 void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op);
974 void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
976 void bfqg_stats_update_dequeue(struct bfq_group *bfqg);
977 void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg);
978 void bfqg_stats_update_idle_time(struct bfq_group *bfqg);
979 void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg);
980 void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg);
982 struct bfq_group *bfqg);
984 void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg);
987 struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio);
988 struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
989 struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
990 struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
991 void bfqg_and_blkg_put(struct bfq_group *bfqg);
1030 struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq);
1071 struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
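
The comments around lines 864-895 say that each (device, cgroup) pair owns its own bfq_group, with a root_group per device (line 451-452) acting as the default. Below is a minimal, self-contained C sketch of that ownership model; it is a conceptual illustration only, not kernel code, and the toy_* names and the list-based lookup are hypothetical simplifications (the kernel uses blkcg/blkg machinery, not a linked list).

/*
 * Hypothetical sketch: one group per (device, cgroup) pair, with the
 * device's root group as a fallback.  Names toy_bfq_data, toy_bfq_group
 * and toy_lookup_group are invented for illustration.
 */
#include <stdio.h>

struct toy_bfq_group {
	int cgroup_id;               /* which cgroup this group serves */
	int weight;                  /* scheduling weight of the group */
	struct toy_bfq_group *next;  /* simple list instead of blkcg lookup */
};

struct toy_bfq_data {
	struct toy_bfq_group *root_group;  /* root group for the device */
	struct toy_bfq_group *groups;      /* all groups on this device */
};

/* Return the group serving @cgroup_id, falling back to the root group. */
static struct toy_bfq_group *
toy_lookup_group(struct toy_bfq_data *bfqd, int cgroup_id)
{
	struct toy_bfq_group *g;

	for (g = bfqd->groups; g; g = g->next)
		if (g->cgroup_id == cgroup_id)
			return g;
	return bfqd->root_group;
}

int main(void)
{
	struct toy_bfq_group root  = { .cgroup_id = 0, .weight = 100 };
	struct toy_bfq_group child = { .cgroup_id = 7, .weight = 50 };
	struct toy_bfq_data bfqd   = { .root_group = &root, .groups = &child };

	printf("cgroup 7 -> weight %d\n", toy_lookup_group(&bfqd, 7)->weight);
	printf("cgroup 3 -> weight %d (root fallback)\n",
	       toy_lookup_group(&bfqd, 3)->weight);
	return 0;
}

In the real header, the per-cgroup group is obtained through helpers listed above such as bfq_bio_bfqg(), bfqq_group() and bfqg_to_blkg(), and groups are created via bfq_create_group_hierarchy(); the sketch only mirrors the ownership relationship their comments describe.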