Lines Matching defs:tg (RT group scheduling code in the Linux scheduler, kernel/sched/rt.c)
149 void free_rt_sched_group(struct task_group *tg)
153 if (tg->rt_se) {
154 destroy_rt_bandwidth(&tg->rt_bandwidth);
159 if (tg->rt_rq) {
160 kfree(tg->rt_rq[i]);
162 if (tg->rt_se) {
163 kfree(tg->rt_se[i]);
167 kfree(tg->rt_rq);
168 kfree(tg->rt_se);
171 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int cpu,
179 rt_rq->tg = tg;
181 tg->rt_rq[cpu] = rt_rq;
182 tg->rt_se[cpu] = rt_se;
199 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
205 tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
206 if (!tg->rt_rq) {
209 tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
210 if (!tg->rt_se) {
214 init_rt_bandwidth(&tg->rt_bandwidth, ktime_to_ns(def_rt_bandwidth.rt_period), 0);
229 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
230 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
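
The matches from free_rt_sched_group() (149-168) and alloc_rt_sched_group() (199-230) above show the per-CPU layout of a task group's RT state: two pointer arrays sized by nr_cpu_ids, one rt_rq and one rt_se element per CPU, with the allocator falling back to the free path on failure. A minimal user-space sketch of that alloc/free shape (the struct names, NR_CPUS_SIM and the helpers are illustrative stand-ins, not kernel API):

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative stand-ins for the kernel's per-CPU rt_rq / rt_se objects. */
    struct fake_rt_rq { unsigned long long rt_runtime; };
    struct fake_rt_se { int on_rq; };

    struct fake_task_group {
        struct fake_rt_rq **rt_rq;   /* one pointer per CPU, like tg->rt_rq */
        struct fake_rt_se **rt_se;   /* one pointer per CPU, like tg->rt_se */
    };

    #define NR_CPUS_SIM 4            /* plays the role of nr_cpu_ids */

    /* Same shape as free_rt_sched_group(): free the per-CPU elements first,
     * then the two pointer arrays themselves. free(NULL) is a no-op, so a
     * partially built group can be torn down safely. */
    static void free_group(struct fake_task_group *tg)
    {
        for (int i = 0; i < NR_CPUS_SIM; i++) {
            if (tg->rt_rq)
                free(tg->rt_rq[i]);
            if (tg->rt_se)
                free(tg->rt_se[i]);
        }
        free(tg->rt_rq);
        free(tg->rt_se);
    }

    /* Same shape as alloc_rt_sched_group(): allocate the pointer arrays,
     * then one element per CPU, rolling everything back on failure. */
    static int alloc_group(struct fake_task_group *tg)
    {
        tg->rt_rq = calloc(NR_CPUS_SIM, sizeof(*tg->rt_rq));
        tg->rt_se = calloc(NR_CPUS_SIM, sizeof(*tg->rt_se));
        if (!tg->rt_rq || !tg->rt_se)
            goto err;

        for (int i = 0; i < NR_CPUS_SIM; i++) {
            tg->rt_rq[i] = calloc(1, sizeof(*tg->rt_rq[i]));
            tg->rt_se[i] = calloc(1, sizeof(*tg->rt_se[i]));
            if (!tg->rt_rq[i] || !tg->rt_se[i])
                goto err;
        }
        return 0;
    err:
        free_group(tg);
        return -1;
    }

    int main(void)
    {
        struct fake_task_group tg = { 0 };

        if (alloc_group(&tg) == 0) {
            puts("allocated per-CPU rt_rq/rt_se arrays");
            free_group(&tg);
        }
        return 0;
    }

The real code additionally wires each element into its runqueue via init_tg_rt_entry() (171-182) and inherits the default bandwidth through init_rt_bandwidth() (214), which the sketch leaves out.
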
269 void free_rt_sched_group(struct task_group *tg)
273 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
513 if (!rt_rq->tg) {
522 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
527 static inline struct task_group *next_task_group(struct task_group *tg)
530 tg = list_entry_rcu(tg->list.next, typeof(struct task_group), list);
531 } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
533 if (&tg->list == &task_groups) {
534 tg = NULL;
537 return tg;
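
next_task_group() (527-537) advances through the global task_groups list, skipping autogroup entries and returning NULL once the walk wraps back around to the list head. A small stand-alone sketch of that circular-list walk, using plain pointers instead of the kernel's RCU list helpers (the node layout here is invented for illustration):

    #include <stdio.h>

    /* Minimal circular list, standing in for the kernel's task_groups list. */
    struct node { struct node *next; int is_autogroup; const char *name; };

    static struct node head;   /* sentinel, like the task_groups list head */

    /* Same shape as next_task_group(): advance past autogroup entries and
     * return NULL once the walk reaches the sentinel again. */
    static struct node *next_group(struct node *n)
    {
        do {
            n = n->next;
        } while (n != &head && n->is_autogroup);
        return (n == &head) ? NULL : n;
    }

    int main(void)
    {
        struct node a     = { .is_autogroup = 0, .name = "A" };
        struct node auto1 = { .is_autogroup = 1, .name = "autogroup-1" };
        struct node b     = { .is_autogroup = 0, .name = "B" };

        head.next = &a; a.next = &auto1; auto1.next = &b; b.next = &head;

        for (struct node *n = next_group(&head); n; n = next_group(n))
            printf("visible group: %s\n", n->name);   /* prints A, then B */
        return 0;
    }
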
564 rt_se = rt_rq->tg->rt_se[cpu];
584 rt_se = rt_rq->tg->rt_se[cpu];
632 return &rt_rq->tg->rt_bandwidth;
1247 if (rt_rq->tg) {
1248 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
2882 static inline int tg_has_rt_tasks(struct task_group *tg)
2891 if (task_group_is_autogroup(tg)) {
2895 css_task_iter_start(&tg->css, 0, &it);
2905 struct task_group *tg;
2910 static int tg_rt_schedulable(struct task_group *tg, void *data)
2917 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2918 runtime = tg->rt_bandwidth.rt_runtime;
2920 if (tg == d->tg) {
2935 if (rt_bandwidth_enabled() && !runtime && tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg)) {
2950 list_for_each_entry_rcu(child, &tg->children, siblings)
2955 if (child == d->tg) {
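
tg_rt_schedulable() (2910-2955) is the admission test walked over the group hierarchy whenever bandwidth is changed: each group's runtime/period pair is converted to a fixed-point utilization, the candidate values from the rt_schedulable_data are substituted for the group being modified (2920, 2955), and the children's combined utilization must not exceed the parent's own ratio. A worked user-space example of that arithmetic, assuming the mainline to_ratio() scaling of 2^20; the periods and runtimes are made up for illustration:

    #include <stdio.h>
    #include <stdint.h>

    /* Fixed-point utilization in the style of the kernel's to_ratio():
     * runtime/period scaled by 2^20. */
    #define BW_SHIFT    20
    #define RUNTIME_INF ((uint64_t)~0ULL)

    static uint64_t to_ratio(uint64_t period, uint64_t runtime)
    {
        if (runtime == RUNTIME_INF)
            return 1ULL << BW_SHIFT;
        return (runtime << BW_SHIFT) / period;
    }

    int main(void)
    {
        /* Parent group: 0.95 s of RT runtime every 1 s. */
        uint64_t parent = to_ratio(1000000000ULL, 950000000ULL);

        /* Two children asking for 0.5 s / 1 s each: together they exceed the
         * parent, which is the case tg_rt_schedulable() rejects. */
        uint64_t sum = to_ratio(1000000000ULL, 500000000ULL)
                     + to_ratio(1000000000ULL, 500000000ULL);

        printf("parent=%llu children-sum=%llu -> %s\n",
               (unsigned long long)parent, (unsigned long long)sum,
               sum > parent ? "rejected" : "accepted");
        return 0;
    }

The real function also rejects a runtime longer than its period, refuses to zero the runtime of a group that still contains RT tasks (2935, via tg_has_rt_tasks() at 2882-2895), and caps every group at the global rt_period/rt_runtime limits.
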
2970 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
2975 .tg = tg,
2987 static int tg_set_rt_bandwidth(struct task_group *tg, u64 rt_period, u64 rt_runtime)
2995 if (tg == &root_task_group && rt_runtime == 0) {
3012 err = __rt_schedulable(tg, rt_period, rt_runtime);
3017 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
3018 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
3019 tg->rt_bandwidth.rt_runtime = rt_runtime;
3023 struct rt_rq *rt_rq = tg->rt_rq[i];
3029 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
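
tg_set_rt_bandwidth() (2987-3029) validates the new values with __rt_schedulable() (3012) and then publishes them in two steps: update the group-wide rt_bandwidth under its rt_runtime_lock, then copy the new runtime into every per-CPU rt_rq under that runqueue's own lock (3023-3029). A pthread-based sketch of that "validate, then propagate under nested locks" pattern; the types, locks and the validate() stub are illustrative, not the kernel's:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NR_CPUS_SIM 4

    struct fake_rt_rq {
        pthread_mutex_t lock;    /* stands in for rt_rq->rt_runtime_lock */
        uint64_t rt_runtime;
    };

    struct fake_task_group {
        pthread_mutex_t bw_lock; /* stands in for rt_bandwidth.rt_runtime_lock */
        uint64_t rt_period;
        uint64_t rt_runtime;
        struct fake_rt_rq rt_rq[NR_CPUS_SIM];
    };

    /* Placeholder for the __rt_schedulable() check: reject a runtime that is
     * longer than the period. */
    static int validate(uint64_t period, uint64_t runtime)
    {
        return runtime <= period ? 0 : -1;
    }

    static int set_bandwidth(struct fake_task_group *tg,
                             uint64_t period, uint64_t runtime)
    {
        if (validate(period, runtime))
            return -1;

        /* Publish the group-wide values, then copy the runtime into every
         * per-CPU runqueue under its own lock, mirroring the
         * for_each_possible_cpu() loop in tg_set_rt_bandwidth(). */
        pthread_mutex_lock(&tg->bw_lock);
        tg->rt_period = period;
        tg->rt_runtime = runtime;
        for (int i = 0; i < NR_CPUS_SIM; i++) {
            pthread_mutex_lock(&tg->rt_rq[i].lock);
            tg->rt_rq[i].rt_runtime = runtime;
            pthread_mutex_unlock(&tg->rt_rq[i].lock);
        }
        pthread_mutex_unlock(&tg->bw_lock);
        return 0;
    }

    int main(void)
    {
        struct fake_task_group tg = { .rt_period = 0 };

        pthread_mutex_init(&tg.bw_lock, NULL);
        for (int i = 0; i < NR_CPUS_SIM; i++)
            pthread_mutex_init(&tg.rt_rq[i].lock, NULL);

        if (set_bandwidth(&tg, 1000000000ULL, 950000000ULL) == 0)
            printf("per-CPU runtime now %llu ns\n",
                   (unsigned long long)tg.rt_rq[0].rt_runtime);
        return 0;
    }
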
3036 int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
3040 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
3048 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
3051 long sched_group_rt_runtime(struct task_group *tg)
3055 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF) {
3059 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
3064 int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
3073 rt_runtime = tg->rt_bandwidth.rt_runtime;
3075 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
3078 long sched_group_rt_period(struct task_group *tg)
3082 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
3098 int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
3101 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0) {
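
The remaining matches are the cgroup-facing accessors: sched_group_set_rt_runtime()/sched_group_rt_runtime() (3036-3059) and sched_group_set_rt_period()/sched_group_rt_period() (3064-3082) back the cpu.rt_runtime_us and cpu.rt_period_us files of the cgroup v1 cpu controller when CONFIG_RT_GROUP_SCHED is enabled, converting between microseconds at the interface and nanoseconds internally, while sched_rt_can_attach() (3098-3101) refuses to move a real-time task into a group whose rt_runtime is zero. A hedged user-space example of driving that interface; the cgroup mount point and group name are assumptions about the test system, and the writes need root:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/stat.h>

    /* Assumed cgroup v1 layout; adjust to the cpu controller mount point and
     * group name used on the machine being tested. */
    #define CPU_CG "/sys/fs/cgroup/cpu/rt_demo"

    static int write_attr(const char *attr, const char *value)
    {
        char path[256];
        snprintf(path, sizeof(path), "%s/%s", CPU_CG, attr);

        FILE *f = fopen(path, "w");
        if (!f) {
            fprintf(stderr, "open %s: %s\n", path, strerror(errno));
            return -1;
        }
        int err = (fputs(value, f) == EOF);
        if (fclose(f) != 0)
            err = 1;
        if (err)
            fprintf(stderr, "write %s: %s\n", path, strerror(errno));
        return err ? -1 : 0;
    }

    int main(void)
    {
        /* Create the group; the kernel allocates its per-CPU RT state via
         * alloc_rt_sched_group() at this point. */
        if (mkdir(CPU_CG, 0755) && errno != EEXIST) {
            fprintf(stderr, "mkdir %s: %s\n", CPU_CG, strerror(errno));
            return 1;
        }

        /* Ask for 0.5 s of RT runtime every 1 s. The kernel converts the
         * microsecond values to nanoseconds and runs the tg_rt_schedulable()
         * admission test before accepting them. */
        if (write_attr("cpu.rt_period_us", "1000000"))
            return 1;
        if (write_attr("cpu.rt_runtime_us", "500000"))
            return 1;

        /* Had cpu.rt_runtime_us been left at its default of 0, moving a
         * SCHED_FIFO/SCHED_RR task into this group would be refused via
         * sched_rt_can_attach(). */
        puts("RT bandwidth configured for " CPU_CG);
        return 0;
    }
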