Lines Matching defs:tg

159  * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
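Line 159 documents the CFS bandwidth design: a task group's period quota lives in a global per-tg pool and each per-CPU cfs_rq pulls runtime from it in fixed slices (5 ms by default, tunable via sched_cfs_bandwidth_slice_us). A minimal userspace sketch of that global-to-local transfer, using hypothetical stand-in types rather than the kernel's structures:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the two pools the comment at line 159 refers to. */
struct global_pool { int64_t runtime_ns; };            /* tg-wide quota left in this period */
struct local_pool  { int64_t runtime_remaining_ns; };  /* per-cfs_rq runtime left */

#define SLICE_NS (5 * 1000 * 1000LL)  /* 5 ms, mirroring the default bandwidth slice */

/* Move up to one slice from the global pool to the local one; returns the grant. */
static int64_t assign_slice(struct global_pool *g, struct local_pool *l)
{
    int64_t grant = g->runtime_ns < SLICE_NS ? g->runtime_ns : SLICE_NS;

    if (grant < 0)
        grant = 0;
    g->runtime_ns -= grant;
    l->runtime_remaining_ns += grant;
    return grant;
}

int main(void)
{
    struct global_pool g = { .runtime_ns = 12 * 1000 * 1000LL };  /* 12 ms of quota */
    struct local_pool l = { 0 };

    while (assign_slice(&g, &l) > 0)  /* grants 5 ms, 5 ms, then the 2 ms remainder */
        printf("local pool: %lld ns\n", (long long)l.runtime_remaining_ns);
    return 0;
}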
332 if (cfs_rq && task_group_is_autogroup(cfs_rq->tg)) {
333 autogroup_path(cfs_rq->tg, path, len);
334 } else if (cfs_rq && cfs_rq->tg->css.cgroup) {
335 cgroup_path(cfs_rq->tg->css.cgroup, path, len);
361 if (cfs_rq->tg->parent && cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
368 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
378 if (!cfs_rq->tg->parent) {
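Lines 361-378 come from the leaf-cfs_rq bookkeeping: when a child runqueue goes on the list while its parent is already there, the list_add_tail_rcu() against the parent's node links the child immediately in front of it, keeping the list ordered child-before-parent so bottom-up load propagation can walk it linearly; a cfs_rq whose tg has no parent takes the other branch. A tiny sketch of that insert-before-the-parent ordering with an ordinary doubly linked list (structures here are hypothetical, not the kernel's list_head):

#include <stdio.h>

/* Hypothetical doubly linked "leaf list" showing the child-before-parent
 * ordering that the list_add_tail_rcu() at line 368 preserves. */
struct node {
    const char *name;
    struct node *prev, *next;
};

/* Insert new immediately before pos (what adding "to the tail of" pos means). */
static void insert_before(struct node *new, struct node *pos)
{
    new->prev = pos->prev;
    new->next = pos;
    pos->prev->next = new;
    pos->prev = new;
}

int main(void)
{
    struct node head = { "head", &head, &head };  /* circular list head */
    struct node root = { "root-grq" }, child = { "child-grq" };

    insert_before(&root, &head);   /* root group runqueue goes on first */
    insert_before(&child, &root);  /* child linked right in front of its parent */

    for (struct node *n = head.next; n != &head; n = n->next)
        printf("%s\n", n->name);   /* prints child-grq, then root-grq */
    return 0;
}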
3252-3314 Group-entity weight approximation (the comment block above the shares calculation):
*
*                      tg->weight * grq->load.weight
*    ge->load.weight = -----------------------------                       (1)
*                        \Sum grq->load.weight
*
* Computing that sum on every update is prohibitively expensive, so
* grq->load.weight is approximated by the slower-moving grq->avg.load_avg  (2)
*
* which yields:
*
*                      tg->weight * grq->avg.load_avg
*    ge->load.weight = -------------------------------                     (3)
*                              tg->load_avg
*
* Where: tg->load_avg ~= \Sum grq->avg.load_avg
*
* In the boundary case where only this runqueue carries weight (all other
* CPUs idle, the UP scenario), the exact form (1) collapses to:
*
*                      tg->weight * grq->load.weight
*    ge->load.weight = ----------------------------- = tg->weight          (4)
*                          grq->load.weight
*
* The compromise actually used modifies (3) so that it approaches (4) in the
* (near) UP case, by keeping this runqueue's own term exact:
*
*                      tg->weight * grq->load.weight
*    ge->load.weight = ---------------------------------------------------- (5)
*                      tg->load_avg - grq->avg.load_avg + grq->load.weight
*
* And, because grq->load.weight can drop to 0, its average is used as a
* lower bound in the denominator:
*
*                      tg->weight * grq->load.weight
*    ge->load.weight = -----------------------------                       (6)
*                              tg_load_avg'
*
* Where: tg_load_avg' = tg->load_avg - grq->avg.load_avg +
*                       max(grq->load.weight, grq->avg.load_avg)
*
* This consistently overestimates ge->load.weight, and therefore:
*
*   \Sum ge->load.weight >= tg->weight
3321 struct task_group *tg = cfs_rq->tg;
3323 tg_shares = READ_ONCE(tg->shares);
3327 tg_weight = atomic_long_read(&tg->load_avg);
3340 * of a group with small tg->shares value. It is a floor value which is
3344 * E.g. on 64-bit for a group with tg->shares of scale_load(15)=15*1024
3374 shares = READ_ONCE(gcfs_rq->tg->shares);
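Lines 3321-3344 (plus the READ_ONCE of gcfs_rq->tg->shares at 3374) are from the shares computation that evaluates approximation (6) above and clamps the result between a small floor and the group's configured shares. A plain-integer model of that arithmetic, with hypothetical parameter names and a stand-in MIN_SHARES floor instead of the kernel's scaled load types:

#include <stdio.h>

#define MIN_SHARES 2  /* stand-in for the floor the comment around line 3340 describes */

/* Hypothetical model of formula (6) plus the clamp; not the kernel function itself. */
static long group_shares(long tg_shares, long tg_load_avg,
                         long grq_load_weight, long grq_load_avg,
                         long grq_contrib)
{
    /* lower-bound the local load so a momentarily empty grq does not zero it */
    long load = grq_load_weight > grq_load_avg ? grq_load_weight : grq_load_avg;

    /* tg_load_avg' = tg->load_avg - (this grq's last published contribution) + load */
    long tg_weight = tg_load_avg - grq_contrib + load;

    long shares = tg_shares * load;
    if (tg_weight)
        shares /= tg_weight;

    /* floor at MIN_SHARES, cap at the group's configured shares */
    if (shares < MIN_SHARES)
        shares = MIN_SHARES;
    if (shares > tg_shares)
        shares = tg_shares;
    return shares;
}

int main(void)
{
    /* one busy runqueue in a group whose other runqueues contribute 3072 load */
    printf("%ld\n", group_shares(1024, 4096, 1024, 1024, 1024));  /* prints 256 */
    return 0;
}

With these numbers the busy runqueue ends up with 256 of the group's 1024 shares, i.e. a quarter, matching its quarter of the group-wide load.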
3417 * update_tg_load_avg - update the tg's load avg
3420 * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load.
3421 * However, because tg->load_avg is a global value there are performance
3428 * Updating tg's load_avg is necessary before update_cfs_share().
3437 if (cfs_rq->tg == &root_task_group) {
3442 atomic_long_add(delta, &cfs_rq->tg->load_avg);
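Lines 3417-3442 maintain the group-wide tg->load_avg as the sum of the per-CPU contributions: the root task group is skipped (its load_avg is never read), and the shared atomic is only touched when this cfs_rq's average has drifted from what it last published, to limit cross-CPU cacheline traffic. A rough userspace model of that delta-batching idea (the 1/64 drift threshold is an assumption here, and the types are stand-ins):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins: one shared per-group counter, one per-CPU runqueue. */
struct group { atomic_long load_avg; };
struct runq  { long avg_load_avg; long tg_load_avg_contrib; };

/* Publish this runqueue's load into the shared counter only when it has
 * drifted noticeably from the last published contribution. */
static void publish_load(struct group *tg, struct runq *cfs_rq, int force)
{
    long delta = cfs_rq->avg_load_avg - cfs_rq->tg_load_avg_contrib;

    if (force || labs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
        atomic_fetch_add(&tg->load_avg, delta);
        cfs_rq->tg_load_avg_contrib = cfs_rq->avg_load_avg;
    }
}

int main(void)
{
    struct group tg = { 0 };
    struct runq cfs_rq = { .avg_load_avg = 700, .tg_load_avg_contrib = 0 };

    publish_load(&tg, &cfs_rq, 0);  /* large drift: 700 gets published */
    cfs_rq.avg_load_avg = 705;
    publish_load(&tg, &cfs_rq, 0);  /* drift of 5 <= 700/64: skipped */
    printf("tg->load_avg = %ld\n", atomic_load(&tg.load_avg));  /* prints 700 */
    return 0;
}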
4848 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
4850 return &tg->cfs_bandwidth;
4883 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
4939 static inline int throttled_lb_pair(struct task_group *tg, int src_cpu, int dest_cpu)
4943 src_cfs_rq = tg->cfs_rq[src_cpu];
4944 dest_cfs_rq = tg->cfs_rq[dest_cpu];
4949 static int tg_unthrottle_up(struct task_group *tg, void *data)
4952 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
4967 static int tg_throttle_down(struct task_group *tg, void *data)
4970 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
4985 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
5010 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
5014 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
5059 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
5064 se = cfs_rq->tg->se[cpu_of(rq)];
5076 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
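Lines 4949-5076 show how throttling and unthrottling traverse the task-group hierarchy: the throttle path walks the subtree with tg_throttle_down as the "down" callback (each group handled before its children), while the unthrottle path runs tg_unthrottle_up on the way back up (children before their parents). A compact sketch of that down/up tree walk, with hypothetical structures in place of walk_tg_tree_from()'s real signature:

#include <stdio.h>

/* Hypothetical miniature of the walk pattern used at lines 5014 and 5076:
 * one callback fires on the way down the subtree, the other on the way up. */
struct tg {
    const char *name;
    struct tg *children[4];
    int nchildren;
};

typedef void (*tg_visit)(struct tg *tg, void *data);

static void tg_nop(struct tg *tg, void *data) { (void)tg; (void)data; }

static void walk_tg_tree_from(struct tg *from, tg_visit down, tg_visit up, void *data)
{
    down(from, data);
    for (int i = 0; i < from->nchildren; i++)
        walk_tg_tree_from(from->children[i], down, up, data);
    up(from, data);
}

static void throttle_down(struct tg *tg, void *data) { (void)data; printf("throttle %s\n", tg->name); }
static void unthrottle_up(struct tg *tg, void *data) { (void)data; printf("unthrottle %s\n", tg->name); }

int main(void)
{
    struct tg leaf = { .name = "leaf" };
    struct tg subtree = { .name = "subtree-root", .children = { &leaf }, .nchildren = 1 };

    walk_tg_tree_from(&subtree, throttle_down, tg_nop, NULL);  /* parent, then child */
    walk_tg_tree_from(&subtree, tg_nop, unthrottle_up, NULL);  /* child, then parent */
    return 0;
}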
5312 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
5409 static void sync_throttle(struct task_group *tg, int cpu)
5417 if (!tg->parent) {
5421 cfs_rq = tg->cfs_rq[cpu];
5422 pcfs_rq = tg->parent->cfs_rq[cpu];
5568 struct task_group *tg;
5573 list_for_each_entry_rcu(tg, &task_groups, list)
5575 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
5576 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
5588 struct task_group *tg;
5593 list_for_each_entry_rcu(tg, &task_groups, list)
5595 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
5636 static inline void sync_throttle(struct task_group *tg, int cpu)
5653 static inline int throttled_lb_pair(struct task_group *tg, int src_cpu, int dest_cpu)
5668 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
8472 se = cfs_rq->tg->se[cpu];
8502 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
11657 * Propagate the changes of the sched_entity across the tg tree to make it
11844 void free_fair_sched_group(struct task_group *tg)
11848 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
11852 if (tg->cfs_rq) {
11853 kfree(tg->cfs_rq[i]);
11855 if (tg->se) {
11856 kfree(tg->se[i]);
11860 kfree(tg->cfs_rq);
11861 kfree(tg->se);
11864 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
11870 tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL);
11871 if (!tg->cfs_rq) {
11874 tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL);
11875 if (!tg->se) {
11879 tg->shares = NICE_0_LOAD;
11881 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
11896 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
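Lines 11844-11896 allocate a group's fair-scheduling state: one nr_cpu_ids-sized pointer array for cfs_rq and one for se, then a runqueue and a group entity per CPU, with everything torn down again on any failure. A userspace skeleton of that allocate/clean-up pattern (types are stand-ins, not the kernel structures):

#include <stdlib.h>

struct cfs_rq { int cpu; };
struct sched_entity { int cpu; };

struct task_group {
    struct cfs_rq **cfs_rq;      /* one runqueue pointer per CPU */
    struct sched_entity **se;    /* one group entity pointer per CPU */
    unsigned long shares;
};

static void free_group(struct task_group *tg, int ncpus)
{
    for (int i = 0; i < ncpus; i++) {
        if (tg->cfs_rq) free(tg->cfs_rq[i]);
        if (tg->se) free(tg->se[i]);
    }
    free(tg->cfs_rq);
    free(tg->se);
}

static int alloc_group(struct task_group *tg, int ncpus)
{
    tg->cfs_rq = calloc(ncpus, sizeof(*tg->cfs_rq));
    tg->se = calloc(ncpus, sizeof(*tg->se));
    if (!tg->cfs_rq || !tg->se)
        goto err;

    tg->shares = 1024;  /* NICE_0_LOAD stand-in */

    for (int i = 0; i < ncpus; i++) {
        tg->cfs_rq[i] = calloc(1, sizeof(**tg->cfs_rq));
        tg->se[i] = calloc(1, sizeof(**tg->se));
        if (!tg->cfs_rq[i] || !tg->se[i])
            goto err;
        tg->cfs_rq[i]->cpu = tg->se[i]->cpu = i;  /* init_tg_cfs_entry() analogue */
    }
    return 0;
err:
    free_group(tg, ncpus);
    return -1;
}

int main(void)
{
    struct task_group tg = { 0 };

    if (alloc_group(&tg, 4) == 0)
        free_group(&tg, 4);
    return 0;
}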
11908 void online_fair_sched_group(struct task_group *tg)
11918 se = tg->se[i];
11922 sync_throttle(tg, i);
11927 void unregister_fair_sched_group(struct task_group *tg)
11935 if (tg->se[cpu]) {
11936 remove_entity_load_avg(tg->se[cpu]);
11943 if (!tg->cfs_rq[cpu]->on_list) {
11950 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
11955 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, struct sched_entity *se, int cpu,
11960 cfs_rq->tg = tg;
11964 tg->cfs_rq[cpu] = cfs_rq;
11965 tg->se[cpu] = se;
11988 int sched_group_set_shares(struct task_group *tg, unsigned long shares)
11995 if (!tg->se[0]) {
12002 if (tg->shares == shares) {
12006 tg->shares = shares;
12009 struct sched_entity *se = tg->se[i];
12028 void free_fair_sched_group(struct task_group *tg)
12032 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
12037 void online_fair_sched_group(struct task_group *tg)
12041 void unregister_fair_sched_group(struct task_group *tg)