Lines Matching refs:cpu
152 extern void init_sched_groups_capacity(int cpu, struct sched_domain *sd);
348 * - store the maximum -deadline bandwidth of each cpu;
389 * Verify the fitness of task @p to run on @cpu taking into account the
393 * @cpu scaled by SCHED_CAPACITY_SCALE >= runtime/deadline ratio of the
396 static inline bool dl_task_fits_capacity(struct task_struct *p, int cpu)
398 unsigned long cap = arch_scale_cpu_capacity(cpu);
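A hedged completion of the check started above, following the mainline form; cap_scale() ((x * y) >> SCHED_CAPACITY_SHIFT) is assumed to be available as in mainline and may differ in this tree:

static inline bool dl_task_fits_capacity(struct task_struct *p, int cpu)
{
	unsigned long cap = arch_scale_cpu_capacity(cpu);

	/*
	 * cap/SCHED_CAPACITY_SCALE >= dl_runtime/dl_deadline, rewritten
	 * without a division: dl_deadline * cap >> SCHED_CAPACITY_SHIFT
	 * must be at least dl_runtime.
	 */
	return cap_scale(p->dl.dl_deadline, cap) >= p->dl.dl_runtime;
}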
413 extern int dl_cpu_busy(int cpu, struct task_struct *p);
545 extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, struct sched_entity *se, int cpu,
555 extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int cpu,
1082 int cpu;
1189 return rq->cpu;
1213 #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
1216 #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
1448 extern void sched_domains_numa_masks_set(unsigned int cpu);
1449 extern void sched_domains_numa_masks_clear(unsigned int cpu);
1450 extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
1455 static inline void sched_domains_numa_masks_set(unsigned int cpu)
1458 static inline void sched_domains_numa_masks_clear(unsigned int cpu)
1461 static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
1471 extern int migrate_task_to(struct task_struct *p, int cpu);
1472 extern int migrate_swap(struct task_struct *p, struct task_struct *t, int cpu, int scpu);
1504 #define for_each_domain(cpu, __sd) \
1505 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
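A hypothetical usage sketch of for_each_domain(): the walk goes bottom-up from the CPU's base domain to the topmost parent and must run under rcu_read_lock() (or with sched_domains_mutex held), which is what rcu_dereference_check_sched_domain() asserts. dump_domain_spans() is illustrative only, not from this tree:

static void dump_domain_spans(int cpu)
{
	struct sched_domain *sd;

	rcu_read_lock();
	for_each_domain(cpu, sd)
		pr_info("cpu%d: level %d flags 0x%x spans %*pbl\n",
			cpu, sd->level, sd->flags,
			cpumask_pr_args(sched_domain_span(sd)));
	rcu_read_unlock();
}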
1509 * @cpu: The CPU whose highest level of sched domain is to
1516 static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
1520 for (sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); sd; sd = sd->parent) {
1530 static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
1534 for (sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); sd; sd = sd->parent) {
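Hedged completion of the two walks above: highest_flag_domain() remembers the last (highest) level that still carries @flag and stops at the first level without it, while lowest_flag_domain() returns the first level that has it. Sketch of the former, following the loop shape shown; the body is assumed, not quoted:

static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for (sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd);
	     sd; sd = sd->parent) {
		if (!(sd->flags & flag))
			break;
		hsd = sd;	/* highest level seen so far with @flag set */
	}

	return hsd;
}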
1615 void dirty_sched_domain_sysctl(int cpu);
1621 static inline void dirty_sched_domain_sysctl(int cpu)
1661 static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
1668 set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
1669 p->se.cfs_rq = tg->cfs_rq[cpu];
1670 p->se.parent = tg->se[cpu];
1674 p->rt.rt_rq = tg->rt_rq[cpu];
1675 p->rt.parent = tg->rt_se[cpu];
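The fragments above reassemble (mainline-style, hedged; the #ifdef structure is assumed) into the group-scheduling variant of set_task_rq(), which rebinds the task's cfs_rq/rt_rq and parent entity pointers to the target CPU's per-group runqueues:

static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}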
1681 static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
1691 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1693 set_task_rq(p, cpu);
1696 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
1702 WRITE_ONCE(p->cpu, cpu);
1704 WRITE_ONCE(task_thread_info(p)->cpu, cpu);
1706 p->wake_cpu = cpu;
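Mainline-style reassembly of __set_task_cpu() from the fragments above (hedged; the smp_wmb() and #ifdef layering are assumed from mainline): set_task_rq() runs first, the write barrier orders the per-task updates before the new ->cpu value becomes visible, and WRITE_ONCE() pairs with the lockless task_cpu() readers:

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates
	 * of per-task data have been completed by this moment.
	 */
	smp_wmb();
#ifdef CONFIG_THREAD_INFO_IN_TASK
	WRITE_ONCE(p->cpu, cpu);
#else
	WRITE_ONCE(task_thread_info(p)->cpu, cpu);
#endif
	p->wake_cpu = cpu;
#endif
}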
1996 extern void update_group_capacity(struct sched_domain *sd, int cpu);
2040 extern void resched_cpu(int cpu);
2071 int cpu = cpu_of(rq);
2072 if (!tick_nohz_full_cpu(cpu)) {
2077 tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
2079 tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
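The fragments above are the nohz-full tick dependency update: when the runqueue can stop its tick, the SCHED dependency bit is cleared, otherwise it is set so the tick keeps firing. A hedged mainline-style sketch; the enclosing function name and early-return shape are assumed, since only the cpu lines appear in the hits:

static inline void sched_update_tick_dependency(struct rq *rq)
{
	int cpu = cpu_of(rq);

	if (!tick_nohz_full_cpu(cpu))
		return;

	if (sched_can_stop_tick(rq))
		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
	else
		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
}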
2178 * @cpu: the CPU in question.
2186 static __always_inline unsigned long arch_scale_freq_capacity(int cpu)
2192 unsigned long capacity_curr_of(int cpu);
2193 unsigned long cpu_util(int cpu);
2379 extern void print_cfs_stats(struct seq_file *m, int cpu);
2380 extern void print_rt_stats(struct seq_file *m, int cpu);
2381 extern void print_dl_stats(struct seq_file *m, int cpu);
2382 extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
2383 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2384 extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
2408 #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
2455 static inline u64 irq_time_read(int cpu)
2457 struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
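A hedged, mainline-style completion of irq_time_read(): the per-CPU accumulated IRQ/softirq time is read under the u64_stats seqcount so 32-bit readers see a consistent 64-bit total:

static inline u64 irq_time_read(int cpu)
{
	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
	unsigned int seq;
	u64 total;

	do {
		seq = __u64_stats_fetch_begin(&irqtime->sync);
		total = irqtime->total;
	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));

	return total;
}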
2620 static inline unsigned long capacity_of(int cpu)
2622 return cpu_rq(cpu)->cpu_capacity;
2625 static inline unsigned long capacity_orig_of(int cpu)
2627 return cpu_rq(cpu)->cpu_capacity_orig;
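capacity_orig_of() is the CPU's full compute capacity at its highest frequency, while capacity_of() is what remains for CFS once RT/DL/IRQ pressure has been subtracted. A hypothetical fitness check in the usual ~20%-margin style; cpu_has_spare_for() is illustrative, not from this tree:

static inline bool cpu_has_spare_for(int cpu, unsigned long task_util)
{
	/* task_util must stay under ~80% of the pressure-adjusted capacity */
	return task_util * 1280 < capacity_of(cpu) * 1024;
}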
2648 unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs, unsigned long max, enum schedutil_type type,
2677 static inline unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs, unsigned long max,
2777 extern bool task_fits_max(struct task_struct *p, int cpu);
2778 extern unsigned long capacity_spare_without(int cpu, struct task_struct *p);
2835 static inline int is_reserved(int cpu)
2837 struct rq *rq = cpu_rq(cpu);
2842 static inline int mark_reserved(int cpu)
2844 struct rq *rq = cpu_rq(cpu);
2849 static inline void clear_reserved(int cpu)
2851 struct rq *rq = cpu_rq(cpu);
2856 static inline int cpu_capacity(int cpu)
2858 return cpu_rq(cpu)->cluster->capacity;
2861 static inline int cpu_max_possible_capacity(int cpu)
2863 return cpu_rq(cpu)->cluster->max_possible_capacity;
2866 static inline int cpu_load_scale_factor(int cpu)
2868 return cpu_rq(cpu)->cluster->load_scale_factor;
2902 * Return load_scale_factor of a cpu in reference to "most" efficient cpu, so
2903 * that "most" efficient cpu gets a load_scale_factor of 1
2911 * Return load_scale_factor of a cpu in reference to cpu with best max_freq
2926 * is in reference to "best" performing cpu. Task's load will need to be
2928 * (little) cpu.
2939 static inline bool is_max_capacity_cpu(int cpu)
2941 return cpu_max_possible_capacity(cpu) == max_possible_capacity;
2944 static inline bool is_min_capacity_cpu(int cpu)
2946 return cpu_max_possible_capacity(cpu) == min_max_possible_capacity;
2950 * Return 'capacity' of a cpu in reference to "least" efficient cpu, such that
2951 * least efficient cpu gets capacity of 1024
2959 * Return 'capacity' of a cpu in reference to cpu with lowest max_freq
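Read together, the comment pairs above describe two opposite normalizations (hedged interpretation; the formulas themselves live in the WALT/HMP code, not in these hits): per-CPU 'capacity' is expressed relative to the least capable CPU, which reads 1024, whereas load_scale_factor is expressed relative to the most capable CPU, which reads 1024 (1.0). For example, if a big CPU has twice the per-cycle efficiency and 1.5x the max frequency of the little CPU, its capacity comes out at 1024 * 2 * 1.5 = 3072, while the little CPU's load_scale_factor is 3072, inflating a little-CPU load sample by 3x before it is compared against big-CPU capacity.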
2980 static inline unsigned int power_cost(int cpu, u64 demand)
2982 return cpu_max_possible_capacity(cpu);
2985 static inline unsigned long cpu_util_freq_walt(int cpu)
2988 struct rq *rq = cpu_rq(cpu);
2989 unsigned long capacity = capacity_orig_of(cpu);
2992 return cpu_util(cpu);
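A hedged sketch of the visible structure of cpu_util_freq_walt(): fall back to PELT's cpu_util() when WALT is not driving the estimate, otherwise clamp a window-based utilization to capacity_orig_of(). walt_active() and walt_cpu_window_util() are hypothetical stand-ins for this tree's WALT internals:

static inline unsigned long cpu_util_freq_walt_sketch(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long capacity = capacity_orig_of(cpu);
	unsigned long util;

	if (!walt_active(rq))			/* hypothetical WALT gate */
		return cpu_util(cpu);		/* PELT fallback, as in the listing */

	util = walt_cpu_window_util(cpu);	/* hypothetical window-based estimate */

	return min(util, capacity);
}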
3015 static inline int is_reserved(int cpu)
3020 static inline void clear_reserved(int cpu)