Lines matching refs: cpu (RT scheduler, kernel/sched/rt.c)

171 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int cpu,
174 struct rq *rq = cpu_rq(cpu);
181 tg->rt_rq[cpu] = rt_rq;
182 tg->rt_se[cpu] = rt_se;
286 * Try to pull RT tasks here if we lower this rq's prio and cpu is not
303 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
325 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
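
Lines 303/325 toggle this CPU's bit in the root domain's RT-overload mask. For context, a minimal sketch of the mainline helpers these lines sit in (rt_set_overload()/rt_clear_overload(); the barrier and rto_count bookkeeping follow the upstream pattern and may differ in this tree):

    static inline void rt_set_overload(struct rq *rq)
    {
            if (!rq->online)
                    return;

            cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
            /* Make sure the mask is visible before we raise the count;
             * matched by the barrier in pull_rt_task(). */
            smp_wmb();
            atomic_inc(&rq->rd->rto_count);
    }

    static inline void rt_clear_overload(struct rq *rq)
    {
            if (!rq->online)
                    return;

            /* the order here really doesn't matter */
            atomic_dec(&rq->rd->rto_count);
            cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
    }
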
396 queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
401 queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
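
Lines 396/401 defer push/pull work through balance callbacks instead of running it directly under the rq lock; upstream the enclosing helpers are one-liners (sketch, assuming the upstream rt_push_head/rt_pull_head per-CPU callback heads):

    static inline void rt_queue_push_tasks(struct rq *rq)
    {
            if (!has_pushable_tasks(rq))
                    return;

            queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
    }

    static inline void rt_queue_pull_task(struct rq *rq)
    {
            queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
    }
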
471 * Verify the fitness of task @p to run on @cpu taking into account the uclamp
475 * is higher than the capacity of a @cpu. For non-heterogeneous system this
478 * The function will return true if the capacity of the @cpu is >= the
484 static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
498 cpu_cap = capacity_orig_of(cpu);
503 static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
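
The doc comment at lines 471-478 describes the uclamp-based fitness check, and lines 484/503 are its CONFIG_UCLAMP_TASK and fallback definitions. A sketch of the upstream (v5.6-era) body, for reference; this tree may carry vendor tweaks:

    static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
    {
            unsigned int min_cap;
            unsigned int max_cap;
            unsigned int cpu_cap;

            /* Only heterogeneous systems can benefit from this check */
            if (!static_branch_unlikely(&sched_asym_cpucapacity))
                    return true;

            min_cap = uclamp_eff_value(p, UCLAMP_MIN);
            max_cap = uclamp_eff_value(p, UCLAMP_MAX);

            cpu_cap = capacity_orig_of(cpu);

            return cpu_cap >= min(min_cap, max_cap);
    }

The !CONFIG_UCLAMP_TASK variant at line 503 simply returns true.
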
562 int cpu = cpu_of(rq);
564 rt_se = rt_rq->tg->rt_se[cpu];
582 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
584 rt_se = rt_rq->tg->rt_se[cpu];
625 static inline struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
627 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
685 static inline struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
687 return &cpu_rq(cpu)->rt;
1161 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1178 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1533 static int select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1548 rq = cpu_rq(cpu);
1587 cpu = this_cpu;
1591 if (test || !rt_task_fits_capacity(p, cpu)) {
1606 cpu = target;
1614 return cpu;
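
Lines 1533-1614 are select_task_rq_rt(). Putting the fragments together, the wake-time decision is: stay on the previous CPU unless its current task is a hard-to-move or higher-priority RT task, or @p no longer fits the CPU's capacity; in that case consult find_lowest_rq(). A condensed sketch (RCU locking and the SD_BALANCE_WAKE/FORK early-out elided):

    test = curr && unlikely(rt_task(curr)) &&
           (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio);

    if (test || !rt_task_fits_capacity(p, cpu)) {
            int target = find_lowest_rq(p);

            /* Only move if the target runs a lower-priority task */
            if (target != -1 &&
                p->prio < cpu_rq(target)->rt.highest_prio.curr)
                    cpu = target;
    }

    return cpu;
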
1681 * cpu. If so, we will reschedule to allow the push logic to try
1777 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1779 if (!task_running(rq, p) && cpumask_test_cpu(cpu, p->cpus_ptr)) {
1790 static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1801 if (pick_rt_task(rq, p, cpu)) {
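
Lines 1777-1801: pick_rt_task() decides whether a task is pullable, and pick_highest_pushable_task() walks the priority-sorted pushable list until the first match. The mainline shape, as a sketch:

    static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
    {
            /* pullable: not currently running, and allowed on @cpu */
            if (!task_running(rq, p) && cpumask_test_cpu(cpu, p->cpus_ptr))
                    return 1;

            return 0;
    }

    static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
    {
            struct plist_head *head = &rq->rt.pushable_tasks;
            struct task_struct *p;

            if (!has_pushable_tasks(rq))
                    return NULL;

            /* plist is sorted by prio, so the first fit is the highest */
            plist_for_each_entry(p, head, pushable_tasks) {
                    if (pick_rt_task(rq, p, cpu))
                            return p;
            }

            return NULL;
    }
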
1817 int cpu = -1;
1859 cpu = group_first_cpu(sg);
1863 if (cpumask_test_cpu(cpu, rtg_target)) {
1870 if (capacity_orig_of(cpu) > capacity_orig_of(cpumask_any(rtg_target))) {
1885 if (!rt_task_fits_capacity(task, cpu)) {
1891 cpu_capacity = capacity_orig_of(cpu);
1918 cpu = cpumask_first(&search_cpu);
1920 trace_sched_find_cas_cpu_each(task, cpu, target_cpu, cpu_isolated(cpu), idle_cpu(cpu), boosted_tutil,
1921 cpu_util(cpu), capacity_orig_of(cpu));
1923 if (cpu_isolated(cpu)) {
1927 if (!cpumask_test_cpu(cpu, task->cpus_ptr)) {
1931 /* find best cpu with smallest max_capacity */
1932 if (target_cpu != -1 && capacity_orig_of(cpu) > capacity_orig_of(target_cpu)) {
1936 util = cpu_util(cpu);
1956 target_cpu = cpu;
1957 } while ((cpu = cpumask_next(cpu, &search_cpu)) < nr_cpu_ids);
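
Lines 1817-1957 belong to a vendor capacity-aware selection path (the trace_sched_find_cas_cpu_each hook is not mainline). From the fragments, the loop scans a candidate mask, skips isolated and disallowed CPUs, and keeps the allowed CPU with the smallest original capacity whose utilization still fits. A speculative sketch; the control flow between the listed lines is guessed:

    cpu = cpumask_first(&search_cpu);
    do {
            trace_sched_find_cas_cpu_each(task, cpu, target_cpu,
                                          cpu_isolated(cpu), idle_cpu(cpu),
                                          boosted_tutil, cpu_util(cpu),
                                          capacity_orig_of(cpu));

            if (cpu_isolated(cpu))
                    continue;               /* skip isolated CPUs */

            if (!cpumask_test_cpu(cpu, task->cpus_ptr))
                    continue;               /* respect affinity */

            /* find best cpu with smallest max_capacity */
            if (target_cpu != -1 &&
                capacity_orig_of(cpu) > capacity_orig_of(target_cpu))
                    continue;

            util = cpu_util(cpu);
            /* ... utilization/idle filters elided ... */

            target_cpu = cpu;
    } while ((cpu = cpumask_next(cpu, &search_cpu)) < nr_cpu_ids);

Note that continue in a do/while still evaluates the loop condition, so the cpumask_next() advance is never skipped.
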
1981 int cpu = task_cpu(task);
2025 if (cpumask_test_cpu(cpu, lowest_mask)) {
2026 return cpu;
2038 for_each_domain(cpu, sd)
2070 cpu = cpumask_any(lowest_mask);
2071 if (cpu < nr_cpu_ids) {
2072 return cpu;
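
Lines 1981-2072 are find_lowest_rq(). The preference order visible in the fragments matches upstream: (1) the task's current CPU if it already sits in the cpupri lowest_mask, (2) a CPU picked by walking the sched domains, (3) any CPU left in the mask. A condensed sketch (the capacity-aware tree additionally passes a fitness filter into cpupri_find(), elided here):

    if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
            return -1;              /* no lower-priority CPU exists */

    /* 1st preference: stay put for cache locality */
    if (cpumask_test_cpu(cpu, lowest_mask))
            return cpu;

    /* 2nd: prefer a CPU topologically close to the waker */
    for_each_domain(cpu, sd) {
            if (sd->flags & SD_WAKE_AFFINE) {
                    int best = cpumask_first_and(lowest_mask,
                                                 sched_domain_span(sd));
                    if (best < nr_cpu_ids)
                            return best;
            }
    }

    /* last resort: any CPU from the mask */
    cpu = cpumask_any(lowest_mask);
    if (cpu < nr_cpu_ids)
            return cpu;

    return -1;
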
2088 BUG_ON(rq->cpu != task_cpu(p));
2103 int cpu;
2106 cpu = find_lowest_rq(task);
2107 if ((cpu == -1) || (cpu == rq->cpu)) {
2111 lowest_rq = cpu_rq(cpu);
2130 if (unlikely(next_task != task || !cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr))) {
2225 set_task_cpu(next_task, lowest_rq->cpu);
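
Lines 2088-2130 are find_lock_lowest_rq(): it retries find_lowest_rq() up to RT_MAX_TRIES times because double_lock_balance() may drop the source rq lock, after which the task can have moved or become unpushable; line 2225 is push_rt_task() actually migrating the chosen task. The retry core, upstream-style (sketch):

    for (tries = 0; tries < RT_MAX_TRIES; tries++) {
            cpu = find_lowest_rq(task);
            if ((cpu == -1) || (cpu == rq->cpu))
                    break;

            lowest_rq = cpu_rq(cpu);

            if (double_lock_balance(rq, lowest_rq)) {
                    /*
                     * We dropped rq->lock to take both locks in order;
                     * re-validate that the task is still ours to push.
                     */
                    if (unlikely(task_rq(task) != rq ||
                                 !cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr) ||
                                 task_running(rq, task) ||
                                 !task_on_rq_queued(task))) {
                            double_unlock_balance(rq, lowest_rq);
                            lowest_rq = NULL;
                            break;
                    }
            }

            /* If this rq is still suitable use it. */
            if (lowest_rq->rt.highest_prio.curr > task->prio)
                    break;

            /* try again */
            double_unlock_balance(rq, lowest_rq);
            lowest_rq = NULL;
    }
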
2293 int cpu;
2310 cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
2312 rd->rto_cpu = cpu;
2314 if (cpu < nr_cpu_ids) {
2315 return cpu;
2349 int cpu = -1;
2368 cpu = rto_next_cpu(rq->rd);
2375 if (cpu >= 0) {
2378 irq_work_queue_on(&rq->rd->rto_push_work, cpu);
2387 int cpu;
2403 cpu = rto_next_cpu(rd);
2407 if (cpu < 0) {
2413 irq_work_queue_on(&rd->rto_push_work, cpu);
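
Lines 2293-2413 implement the IPI-based RT push (the rto_* machinery): rto_next_cpu() walks rto_mask starting from rd->rto_cpu, tell_cpu_to_push() fires rto_push_work at the first candidate, and rto_push_irq_work_func() pushes locally and then forwards the IPI to the next overloaded CPU. For reference, upstream rto_next_cpu() as a sketch:

    static int rto_next_cpu(struct root_domain *rd)
    {
            int next;
            int cpu;

            for (;;) {
                    /* rto_cpu starts at -1, so this acts like cpumask_first() */
                    cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);

                    rd->rto_cpu = cpu;

                    if (cpu < nr_cpu_ids)
                            return cpu;

                    rd->rto_cpu = -1;

                    /*
                     * ACQUIRE ensures we see the @rto_mask changes made
                     * prior to the @next value observed; restart the scan
                     * if another loop was requested meanwhile.
                     */
                    next = atomic_read_acquire(&rd->rto_loop_next);
                    if (rd->rto_loop == next)
                            break;

                    rd->rto_loop = next;
            }

            return -1;
    }
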
2419 int this_cpu = this_rq->cpu, cpu;
2435 if (rt_overload_count == 1 && cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask)) {
2446 for_each_cpu(cpu, this_rq->rd->rto_mask)
2448 if (this_cpu == cpu) {
2452 src_rq = cpu_rq(cpu);
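
Lines 2419-2452 are the scan in pull_rt_task(): bail out if no CPU is overloaded (or only this one is), otherwise iterate rto_mask and try to pull the highest pushable task from each peer. The scan skeleton (sketch; per-source locking and the actual pull elided):

    if (rt_overload_count == 1 &&
        cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
            return;         /* we are the only overloaded CPU */

    for_each_cpu(cpu, this_rq->rd->rto_mask) {
            if (this_cpu == cpu)
                    continue;

            src_rq = cpu_rq(cpu);
            /* double_lock_balance(this_rq, src_rq);
             * p = pick_highest_pushable_task(src_rq, this_cpu);
             * pull p over if it beats this_rq's highest prio ... */
    }
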
2540 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2552 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2759 set_task_cpu(next_task, lowest_rq->cpu);
2776 int cpu = task_cpu(p);
2790 cpu_orig_cap = capacity_orig_of(cpu);
2791 /* cpu has max capacity, no need to do balance */
2801 misfit_task = !rt_task_fits_capacity(p, cpu);
2804 misfit_task = !rt_task_fits_capacity(p, cpu);
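
Lines 2776-2804 are again vendor-specific (mainline rt.c has no misfit-style rebalancing for RT). The fragments suggest: read the task CPU's original capacity, skip balancing when that CPU is already the biggest, otherwise flag the task as misfit when it fails rt_task_fits_capacity(). A speculative sketch; the max-capacity test is a hypothetical stand-in:

    int cpu = task_cpu(p);
    unsigned long cpu_orig_cap = capacity_orig_of(cpu);

    /* cpu has max capacity, no need to do balance */
    if (cpu_orig_cap == SCHED_CAPACITY_SCALE)   /* hypothetical check */
            return;

    misfit_task = !rt_task_fits_capacity(p, cpu);
    if (misfit_task) {
            /* ... trigger migration toward a bigger CPU ... */
    }
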
3212 void print_rt_stats(struct seq_file *m, int cpu)
3218 cycle_each_rt_rq(rt_rq, iter, cpu_rq(cpu)) print_rt_rq(m, cpu, rt_rq);