Lines matching refs: task_group

543 static int tg_is_idle(struct task_group *tg)
591 static inline int tg_is_idle(struct task_group *tg)
3948 struct task_group *tg = cfs_rq->tg;
5636 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
5725 static inline int throttled_lb_pair(struct task_group *tg,
5737 static int tg_unthrottle_up(struct task_group *tg, void *data)
5766 static int tg_throttle_down(struct task_group *tg, void *data)
6114 * Responsible for refilling a task_group's bandwidth and unthrottling its
6310 static void sync_throttle(struct task_group *tg, int cpu)
6502 struct task_group *tg;
6521 struct task_group *tg;
6608 static inline void sync_throttle(struct task_group *tg, int cpu) {}
6621 static inline int throttled_lb_pair(struct task_group *tg,
6632 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
9137 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
9561 * Iterates the task_group tree in a bottom up fashion, see
13055 cfs_rq = task_group(p)->cfs_rq[cpu];
13300 void free_fair_sched_group(struct task_group *tg)
13315 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
13356 void online_fair_sched_group(struct task_group *tg)
13374 void unregister_fair_sched_group(struct task_group *tg)
13401 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
13434 static int __sched_group_set_shares(struct task_group *tg, unsigned long shares)
13470 int sched_group_set_shares(struct task_group *tg, unsigned long shares)
13484 int sched_group_set_idle(struct task_group *tg, long idle)
13559 void free_fair_sched_group(struct task_group *tg) { }
13561 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
13566 void online_fair_sched_group(struct task_group *tg) { }
13568 void unregister_fair_sched_group(struct task_group *tg) { }