
Searched refs:busiest (Results 1 - 4 of 4) sorted by relevance

/kernel/linux/linux-6.6/kernel/sched/
fair.c
8935 * first so the group_type can simply be compared when selecting the busiest
9711 struct sched_group *busiest; /* Busiest group in this sd */ member
9718 struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
9728 * busiest_stat::idle_cpus to the worst busiest group because in init_sd_lb_stats()
9732 .busiest = NULL, in init_sd_lb_stats()
9898 * When this is so detected; this group becomes a candidate for busiest; see
10016 * @sgs: Load-balancing statistics of the candidate busiest group
10017 * @group: The candidate busiest group
10085 struct sg_lb_stats *busiest, in sibling_imbalance()
10091 if (env->idle == CPU_NOT_IDLE || !busiest in sibling_imbalance()
10083 sibling_imbalance(struct lb_env *env, struct sd_lb_stats *sds, struct sg_lb_stats *busiest, struct sg_lb_stats *local) sibling_imbalance() argument
10251 struct sg_lb_stats *busiest = &sds->busiest_stat; update_sd_pick_busiest() local
10921 struct sg_lb_stats *local, *busiest; calculate_imbalance() local
11123 struct sg_lb_stats *local, *busiest; find_busiest_group() local
11265 struct rq *busiest = NULL, *rq; find_busiest_queue() local
11567 struct rq *busiest; load_balance() local
11769 active_load_balance_cpu_stop, busiest, load_balance() local
[all...]
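The fair.c hits above center on update_sd_pick_busiest() and the comment at 8935 about ordering group_type so that the busiest candidate can be chosen with a plain comparison. The following is a minimal userspace sketch of that idea, not the kernel's implementation; the enum values, the struct fields and the pick_busier() helper are illustrative stand-ins.

#include <stdbool.h>
#include <stdio.h>

/* Ascending "badness": a larger value means a worse-loaded group. */
enum group_type {
	GROUP_HAS_SPARE,
	GROUP_FULLY_BUSY,
	GROUP_OVERLOADED,
};

struct group_stats {
	enum group_type type;
	unsigned long avg_load;
};

/* Return true if @candidate should replace the current @busiest pick. */
static bool pick_busier(const struct group_stats *candidate,
			const struct group_stats *busiest)
{
	/* The enum ordering lets the common case be a single comparison. */
	if (candidate->type != busiest->type)
		return candidate->type > busiest->type;

	/* Same class of badness: fall back to a load tie-breaker. */
	return candidate->avg_load > busiest->avg_load;
}

int main(void)
{
	struct group_stats current = { GROUP_FULLY_BUSY, 900 };
	struct group_stats candidate = { GROUP_OVERLOADED, 400 };

	printf("replace pick: %s\n",
	       pick_busier(&candidate, &current) ? "yes" : "no");
	return 0;
}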
sched.h
2856 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
2858 __acquires(busiest->lock)
2862 double_rq_lock(this_rq, busiest);
2875 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
2877 __acquires(busiest->lock)
2880 if (__rq_lockp(this_rq) == __rq_lockp(busiest) ||
2881 likely(raw_spin_rq_trylock(busiest))) {
2882 double_rq_clock_clear_update(this_rq, busiest);
2886 if (rq_order_less(this_rq, busiest)) {
2887 raw_spin_rq_lock_nested(busiest, SINGLE_DEPTH_NESTING);
2903 double_lock_balance(struct rq *this_rq, struct rq *busiest) double_lock_balance() argument
2914 raw_spin_rq_unlock(busiest); global() variable
[all...]
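The sched.h hits at 2875-2887 show the pattern _double_lock_balance() uses when it already holds this_rq's lock: opportunistically trylock the second runqueue, and on failure fall back to taking both locks in one global order so two CPUs balancing toward each other cannot deadlock. Below is a userspace sketch of that pattern using pthread mutexes; it is not the kernel's code, and rq, rq_order_less() and double_lock_balance() here are simplified stand-ins. The 5.10 hits further down apply the same idea, ordering by runqueue address instead.

#include <pthread.h>

struct rq {
	pthread_mutex_t lock;
	int id;                 /* stand-in for the kernel's lock ordering rule */
};

/* Fixed global order: the lower id is always locked first. */
static int rq_order_less(struct rq *a, struct rq *b)
{
	return a->id < b->id;
}

/* Caller already holds this_rq->lock; returns with both locks held. */
static void double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	if (this_rq == busiest)
		return;

	/* Fast path: grab the second lock without blocking if we can. */
	if (pthread_mutex_trylock(&busiest->lock) == 0)
		return;

	if (rq_order_less(this_rq, busiest)) {
		/* Our lock already comes first in the order; just wait. */
		pthread_mutex_lock(&busiest->lock);
	} else {
		/* Wrong order: drop our lock, then retake both in order. */
		pthread_mutex_unlock(&this_rq->lock);
		pthread_mutex_lock(&busiest->lock);
		pthread_mutex_lock(&this_rq->lock);
	}
}

int main(void)
{
	struct rq a = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct rq b = { PTHREAD_MUTEX_INITIALIZER, 1 };

	pthread_mutex_lock(&a.lock);
	double_lock_balance(&a, &b);    /* both runqueues now locked */
	pthread_mutex_unlock(&b.lock);
	pthread_mutex_unlock(&a.lock);
	return 0;
}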
/kernel/linux/linux-5.10/kernel/sched/
fair.c
7815 * first so the group_type can simply be compared when selecting the busiest
8569 struct sched_group *busiest; /* Busiest group in this sd */ member
8576 struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
8586 * busiest_stat::idle_cpus to the worst busiest group because in init_sd_lb_stats()
8590 .busiest = NULL, in init_sd_lb_stats()
8822 * When this is so detected; this group becomes a candidate for busiest; see
9051 * update_sd_pick_busiest - return 1 on busiest group
9054 * @sg: sched_group candidate to be checked for being the busiest
9058 * busiest group.
9061 * busiest group. %false otherwise.
9068 struct sg_lb_stats *busiest = &sds->busiest_stat; update_sd_pick_busiest() local
9627 struct sg_lb_stats *local, *busiest; calculate_imbalance() local
9809 struct sg_lb_stats *local, *busiest; find_busiest_group() local
9939 struct rq *busiest = NULL, *rq; find_busiest_queue() local
10176 struct rq *busiest; load_balance() local
10374 active_load_balance_cpu_stop, busiest, load_balance() local
[all...]
sched.h
2253 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
2255 __acquires(busiest->lock)
2259 double_rq_lock(this_rq, busiest);
2272 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
2274 __acquires(busiest->lock)
2279 if (unlikely(!raw_spin_trylock(&busiest->lock))) {
2280 if (busiest < this_rq) {
2282 raw_spin_lock(&busiest->lock);
2287 raw_spin_lock_nested(&busiest->lock,
2296 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
2298 double_lock_balance(struct rq *this_rq, struct rq *busiest) double_lock_balance() argument
[all...]

Completed in 37 milliseconds