
Searched refs:cfs_rq (Results 1 - 16 of 16) sorted by relevance

/kernel/linux/linux-6.6/kernel/sched/
fair.c
71 static void walt_init_cfs_rq_stats(struct cfs_rq *cfs_rq);
72 static void walt_inc_cfs_rq_stats(struct cfs_rq *cfs_rq,
74 static void walt_dec_cfs_rq_stats(struct cfs_rq *cfs_rq,
77 struct cfs_rq *cfs_rq);
79 struct cfs_rq *cfs_rq);
81 walt_init_cfs_rq_stats(struct cfs_rq *cfs_rq) walt_init_cfs_rq_stats() argument
83 walt_inc_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p) walt_inc_cfs_rq_stats() argument
85 walt_dec_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p) walt_dec_cfs_rq_stats() argument
398 list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) list_add_leaf_cfs_rq() argument
466 list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) list_del_leaf_cfs_rq() argument
548 cfs_rq_is_idle(struct cfs_rq *cfs_rq) cfs_rq_is_idle() argument
565 list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) list_add_leaf_cfs_rq() argument
570 list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) list_del_leaf_cfs_rq() argument
596 cfs_rq_is_idle(struct cfs_rq *cfs_rq) cfs_rq_is_idle() argument
639 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se) entity_key() argument
706 avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se) avg_vruntime_add() argument
716 avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se) avg_vruntime_sub() argument
726 avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta) avg_vruntime_update() argument
738 avg_vruntime(struct cfs_rq *cfs_rq) avg_vruntime() argument
777 update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se) update_entity_lag() argument
805 entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se) entity_eligible() argument
821 __update_min_vruntime(struct cfs_rq *cfs_rq, u64 vruntime) __update_min_vruntime() argument
835 update_min_vruntime(struct cfs_rq *cfs_rq) update_min_vruntime() argument
898 __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) __enqueue_entity() argument
906 __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) __dequeue_entity() argument
913 __pick_first_entity(struct cfs_rq *cfs_rq) __pick_first_entity() argument
942 __pick_eevdf(struct cfs_rq *cfs_rq) __pick_eevdf() argument
1036 pick_eevdf(struct cfs_rq *cfs_rq) pick_eevdf() argument
1052 __pick_last_entity(struct cfs_rq *cfs_rq) __pick_last_entity() argument
1088 update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se) update_deadline() argument
1169 struct cfs_rq *cfs_rq = cfs_rq_of(se); post_init_entity_util_avg() local
1211 update_tg_load_avg(struct cfs_rq *cfs_rq) update_tg_load_avg() argument
1219 update_curr(struct cfs_rq *cfs_rq) update_curr() argument
1266 update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_wait_start_fair() argument
1283 update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_wait_end_fair() argument
1309 update_stats_enqueue_sleeper_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_enqueue_sleeper_fair() argument
1329 update_stats_enqueue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) update_stats_enqueue_fair() argument
1346 update_stats_dequeue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) update_stats_dequeue_fair() argument
1378 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_curr_start() argument
3595 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) account_entity_enqueue() argument
3612 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) account_entity_dequeue() argument
3676 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) enqueue_load_avg() argument
3683 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) dequeue_load_avg() argument
3693 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) enqueue_load_avg() argument
3695 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) dequeue_load_avg() argument
3698 reweight_eevdf(struct cfs_rq *cfs_rq, struct sched_entity *se, unsigned long weight) reweight_eevdf() argument
3805 reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, unsigned long weight) reweight_entity() argument
3860 struct cfs_rq *cfs_rq = cfs_rq_of(se); reweight_task() local
3945 calc_group_shares(struct cfs_rq *cfs_rq) calc_group_shares() argument
4010 cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags) cfs_rq_util_change() argument
4057 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) cfs_rq_last_update_time() argument
4071 child_cfs_rq_on_list(struct cfs_rq *cfs_rq) child_cfs_rq_on_list() argument
4089 cfs_rq_is_decayed(struct cfs_rq *cfs_rq) cfs_rq_is_decayed() argument
4117 update_tg_load_avg(struct cfs_rq *cfs_rq) update_tg_load_avg() argument
4232 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) update_tg_cfs_util() argument
4264 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) update_tg_cfs_runnable() argument
4294 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) update_tg_cfs_load() argument
4361 add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) add_tg_cfs_propagate() argument
4370 struct cfs_rq *cfs_rq, *gcfs_rq; propagate_entity_load_avg() local
4427 update_tg_load_avg(struct cfs_rq *cfs_rq) update_tg_load_avg() argument
4434 add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) add_tg_cfs_propagate() argument
4442 struct cfs_rq *cfs_rq; migrate_se_pelt_lag() local
4537 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) update_cfs_rq_load_avg() argument
4608 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) attach_entity_load_avg() argument
4663 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) detach_entity_load_avg() argument
4694 update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) update_load_avg() argument
4742 struct cfs_rq *cfs_rq = cfs_rq_of(se); sync_entity_load_avg() local
4755 struct cfs_rq *cfs_rq = cfs_rq_of(se); remove_entity_load_avg() local
4774 cfs_rq_runnable_avg(struct cfs_rq *cfs_rq) cfs_rq_runnable_avg() argument
4779 cfs_rq_load_avg(struct cfs_rq *cfs_rq) cfs_rq_load_avg() argument
4839 util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) util_est_enqueue() argument
4855 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) util_est_dequeue() argument
4886 util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep) util_est_update() argument
5155 cfs_rq_is_decayed(struct cfs_rq *cfs_rq) cfs_rq_is_decayed() argument
5165 update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1) update_load_avg() argument
5173 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) attach_entity_load_avg() argument
5175 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) detach_entity_load_avg() argument
5183 util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) util_est_enqueue() argument
5186 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) util_est_dequeue() argument
5189 util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep) util_est_update() argument
5196 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) place_entity() argument
5302 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) enqueue_entity() argument
5372 struct cfs_rq *cfs_rq = cfs_rq_of(se); __clear_buddies_next() local
5380 clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) clear_buddies() argument
5389 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) dequeue_entity() argument
5442 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) set_next_entity() argument
5495 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr) pick_next_entity() argument
5509 put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) put_prev_entity() argument
5532 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) entity_tick() argument
5642 __assign_cfs_rq_runtime(struct cfs_bandwidth *cfs_b, struct cfs_rq *cfs_rq, u64 target_runtime) __assign_cfs_rq_runtime() argument
5670 assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) assign_cfs_rq_runtime() argument
5682 __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) __account_cfs_rq_runtime() argument
5701 account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) account_cfs_rq_runtime() argument
5709 cfs_rq_throttled(struct cfs_rq *cfs_rq) cfs_rq_throttled() argument
5715 throttled_hierarchy(struct cfs_rq *cfs_rq) throttled_hierarchy() argument
5740 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; tg_unthrottle_up() local
5769 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; tg_throttle_down() local
5785 throttle_cfs_rq(struct cfs_rq *cfs_rq) throttle_cfs_rq() argument
5876 unthrottle_cfs_rq(struct cfs_rq *cfs_rq) unthrottle_cfs_rq() argument
6006 __unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq) __unthrottle_cfs_rq_async() argument
6026 __unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq) __unthrottle_cfs_rq_async() argument
6032 unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq) unthrottle_cfs_rq_async() argument
6049 struct cfs_rq *cfs_rq; distribute_cfs_runtime() local
6223 __return_cfs_rq_runtime(struct cfs_rq *cfs_rq) __return_cfs_rq_runtime() argument
6246 return_cfs_rq_runtime(struct cfs_rq *cfs_rq) return_cfs_rq_runtime() argument
6291 check_enqueue_throttle(struct cfs_rq *cfs_rq) check_enqueue_throttle() argument
6312 struct cfs_rq *pcfs_rq, *cfs_rq; sync_throttle() local
6328 check_cfs_rq_runtime(struct cfs_rq *cfs_rq) check_cfs_rq_runtime() argument
6434 init_cfs_rq_runtime(struct cfs_rq *cfs_rq) init_cfs_rq_runtime() argument
6509 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; update_runtime_enabled() local
6534 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; unthrottle_offline_cfs_rqs() local
6560 struct cfs_rq *cfs_rq = task_cfs_rq(p); cfs_task_bw_constrained() local
6605 account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) account_cfs_rq_runtime() argument
6606 check_cfs_rq_runtime(struct cfs_rq *cfs_rq) check_cfs_rq_runtime() argument
6607 check_enqueue_throttle(struct cfs_rq *cfs_rq) check_enqueue_throttle() argument
6609 return_cfs_rq_runtime(struct cfs_rq *cfs_rq) return_cfs_rq_runtime() argument
6611 cfs_rq_throttled(struct cfs_rq *cfs_rq) cfs_rq_throttled() argument
6616 throttled_hierarchy(struct cfs_rq *cfs_rq) throttled_hierarchy() argument
6629 init_cfs_rq_runtime(struct cfs_rq *cfs_rq) init_cfs_rq_runtime() argument
6775 struct cfs_rq *cfs_rq; enqueue_task_fair() local
6873 struct cfs_rq *cfs_rq; dequeue_task_fair() local
6982 struct cfs_rq *cfs_rq; cpu_load_without() local
7005 struct cfs_rq *cfs_rq; cpu_runnable_without() local
7758 struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs; cpu_util() local
8487 struct cfs_rq *cfs_rq = task_cfs_rq(curr); check_preempt_wakeup() local
8567 struct cfs_rq *cfs_rq; pick_task_fair() local
8599 struct cfs_rq *cfs_rq = &rq->cfs; pick_next_task_fair() local
8752 struct cfs_rq *cfs_rq; put_prev_task_fair() local
8766 struct cfs_rq *cfs_rq = task_cfs_rq(curr); yield_task_fair() local
9479 cfs_rq_has_blocked(struct cfs_rq *cfs_rq) cfs_rq_has_blocked() argument
9520 cfs_rq_has_blocked(struct cfs_rq *cfs_rq) cfs_rq_has_blocked() argument
9556 struct cfs_rq *cfs_rq, *pos; __update_blocked_fair() local
9602 update_cfs_rq_h_load(struct cfs_rq *cfs_rq) update_cfs_rq_h_load() argument
9637 struct cfs_rq *cfs_rq = task_cfs_rq(p); task_h_load() local
9646 struct cfs_rq *cfs_rq = &rq->cfs; __update_blocked_fair() local
12980 struct cfs_rq *cfs_rq = cfs_rq_of(se); se_fi_update() local
13052 struct cfs_rq *cfs_rq; task_is_throttled_fair() local
13075 struct cfs_rq *cfs_rq; task_tick_fair() local
13100 struct cfs_rq *cfs_rq; task_fork_fair() local
13147 struct cfs_rq *cfs_rq = cfs_rq_of(se); propagate_entity_cfs_rq() local
13176 struct cfs_rq *cfs_rq = cfs_rq_of(se); detach_entity_cfs_rq() local
13198 struct cfs_rq *cfs_rq = cfs_rq_of(se); attach_entity_cfs_rq() local
13263 struct cfs_rq *cfs_rq = cfs_rq_of(se); set_next_task_fair() local
13271 init_cfs_rq(struct cfs_rq *cfs_rq) init_cfs_rq() argument
13318 struct cfs_rq *cfs_rq; alloc_fair_sched_group() local
13401 init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, struct sched_entity *se, int cpu, struct sched_entity *parent) init_tg_cfs_entry() argument
13531 struct cfs_rq *cfs_rq = cfs_rq_of(se); sched_group_set_idle() local
13650 struct cfs_rq *cfs_rq, *pos; print_cfs_stats() local
13653 for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos) print_cfs_stats() local
13716 walt_init_cfs_rq_stats(struct cfs_rq *cfs_rq) walt_init_cfs_rq_stats() argument
13721 walt_inc_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p) walt_inc_cfs_rq_stats() argument
13727 walt_dec_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p) walt_dec_cfs_rq_stats() argument
13768 struct cfs_rq *cfs_rq; walt_fixup_sched_stats_fair() local
[all...]
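
avg_vruntime(), update_entity_lag(), entity_eligible() and pick_eevdf() in the 6.6 hits above belong to the EEVDF pick path: an entity is eligible when its vruntime is no later than the load-weighted average vruntime of the queue (non-negative lag). A rough, self-contained sketch of that test follows; all names are invented for illustration, and the kernel tracks the average incrementally against min_vruntime rather than rescanning the queue.

#include <stdio.h>
#include <stdint.h>

struct toy_entity {
	uint64_t vruntime;	/* virtual runtime consumed so far */
	unsigned long weight;	/* load weight, e.g. 1024 for nice 0 */
};

/* Load-weighted average vruntime over all queued entities. */
static uint64_t toy_avg_vruntime(const struct toy_entity *se, int nr)
{
	unsigned long long sum = 0;	/* wide enough for this toy range */
	unsigned long total_weight = 0;

	for (int i = 0; i < nr; i++) {
		sum += (unsigned long long)se[i].vruntime * se[i].weight;
		total_weight += se[i].weight;
	}
	return total_weight ? sum / total_weight : 0;
}

/* Eligible == non-negative lag == vruntime no later than the average. */
static int toy_entity_eligible(const struct toy_entity *se, uint64_t avg)
{
	return se->vruntime <= avg;
}

int main(void)
{
	struct toy_entity rq[] = {
		{ .vruntime = 1000, .weight = 1024 },
		{ .vruntime = 1500, .weight = 1024 },
		{ .vruntime =  800, .weight = 2048 },
	};
	uint64_t avg = toy_avg_vruntime(rq, 3);

	for (int i = 0; i < 3; i++)
		printf("se%d: vruntime=%llu eligible=%d\n", i,
		       (unsigned long long)rq[i].vruntime,
		       toy_entity_eligible(&rq[i], avg));
	return 0;
}
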
pelt.h
5 int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
6 int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
156 static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) in update_idle_cfs_rq_clock_pelt() argument
160 if (unlikely(cfs_rq->throttle_count)) in update_idle_cfs_rq_clock_pelt()
163 throttled = cfs_rq->throttled_clock_pelt_time; in update_idle_cfs_rq_clock_pelt()
165 u64_u32_store(cfs_rq->throttled_pelt_idle, throttled); in update_idle_cfs_rq_clock_pelt()
168 /* rq->task_clock normalized against any time this cfs_rq has spent throttled */
169 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) cfs_rq_clock_pelt() argument
177 update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) update_idle_cfs_rq_clock_pelt() argument
178 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) cfs_rq_clock_pelt() argument
187 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) update_cfs_rq_load_avg() argument
232 update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) update_idle_cfs_rq_clock_pelt() argument
[all...]
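
update_idle_cfs_rq_clock_pelt() and cfs_rq_clock_pelt() above keep a per-cfs_rq PELT clock that excludes time spent throttled, so load averages neither decay nor grow while the group cannot run. A minimal sketch of that idea, with invented names and fields rather than the kernel's definitions:

#include <stdint.h>
#include <stdio.h>

struct toy_cfs_rq {
	int throttle_count;		/* > 0 while the group is throttled */
	uint64_t throttled_clock_pelt;	/* PELT clock when throttling began */
	uint64_t throttled_pelt_time;	/* total time spent throttled so far */
};

/* PELT time seen by this cfs_rq: the rq's PELT clock minus throttled time. */
static uint64_t toy_cfs_rq_clock_pelt(const struct toy_cfs_rq *cfs_rq,
				      uint64_t rq_clock_pelt)
{
	/* While throttled, the clock stands still at the throttle point. */
	if (cfs_rq->throttle_count)
		return cfs_rq->throttled_clock_pelt - cfs_rq->throttled_pelt_time;

	return rq_clock_pelt - cfs_rq->throttled_pelt_time;
}

int main(void)
{
	struct toy_cfs_rq cfs_rq = {
		.throttle_count = 0,
		.throttled_clock_pelt = 0,
		.throttled_pelt_time = 250,	/* 250 time units spent throttled */
	};

	/* 1000 units of PELT time elapsed, 250 of which were throttled. */
	printf("pelt clock = %llu\n",
	       (unsigned long long)toy_cfs_rq_clock_pelt(&cfs_rq, 1000)); /* 750 */
	return 0;
}
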
pelt.c
208 * se has been already dequeued but cfs_rq->curr still points to it. in ___update_load_sum()
210 * but also for a cfs_rq if the latter becomes idle. As an example, in ___update_load_sum()
250 * removed from a cfs_rq and we need to update sched_avg. Scheduler entities
253 * the period_contrib of cfs_rq when updating the sched_avg of a sched_entity
306 int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se) in __update_load_avg_se() argument
309 cfs_rq->curr == se)) { in __update_load_avg_se()
320 int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq) in __update_load_avg_cfs_rq() argument
322 if (___update_load_sum(now, &cfs_rq in __update_load_avg_cfs_rq()
[all...]
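
The pelt.c comments excerpted above describe why ___update_load_sum() runs for both sched_entities and cfs_rqs and why their period_contrib must stay in sync. A minimal sketch of the underlying decayed sum, assuming a 32-period half-life and floating-point arithmetic (the kernel uses fixed-point lookup tables, and all names here are invented):

/* build with: cc pelt_decay.c -lm */
#include <math.h>
#include <stdio.h>

#define TOY_PELT_HALFLIFE_PERIODS 32.0	/* contribution halves every 32 ms */

/* Decay an accumulated sum across 'periods' elapsed ~1ms periods. */
static double toy_decay(double sum, unsigned int periods)
{
	double y = pow(0.5, 1.0 / TOY_PELT_HALFLIFE_PERIODS);	/* y^32 == 1/2 */

	return sum * pow(y, periods);
}

int main(void)
{
	double sum = 1000.0;

	printf("after 32ms: %.1f\n", toy_decay(sum, 32));	/* ~500.0 */
	printf("after 64ms: %.1f\n", toy_decay(sum, 64));	/* ~250.0 */
	return 0;
}
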
debug.c
630 void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) in print_cfs_rq() argument
639 SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu); in print_cfs_rq()
642 SEQ_printf(m, "cfs_rq[%d]:\n", cpu); in print_cfs_rq()
645 SPLIT_NS(cfs_rq->exec_clock)); in print_cfs_rq()
648 first = __pick_first_entity(cfs_rq); in print_cfs_rq()
651 last = __pick_last_entity(cfs_rq); in print_cfs_rq()
654 min_vruntime = cfs_rq->min_vruntime; in print_cfs_rq()
662 SPLIT_NS(avg_vruntime(cfs_rq))); in print_cfs_rq()
[all...]
sched.h
416 struct cfs_rq;
455 struct cfs_rq **cfs_rq; member
517 * A weight of a cfs_rq is the sum of weights of which entities
518 * are queued on this cfs_rq, so a weight of a entity should not be
549 extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
556 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
583 struct cfs_rq *pre
641 struct cfs_rq { global() struct
1309 rq_of(struct cfs_rq *cfs_rq) rq_of() argument
1316 rq_of(struct cfs_rq *cfs_rq) rq_of() argument
[all...]
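
The sched.h comment above states that a cfs_rq's weight is the sum of the weights of the entities queued on it, which account_entity_enqueue()/account_entity_dequeue() in the fair.c hits keep up to date. A minimal sketch of that bookkeeping, with simplified, invented names and layout:

#include <stdio.h>

struct toy_se {
	unsigned long weight;
	struct toy_se *next;		/* next entity queued on the same cfs_rq */
};

struct toy_cfs_rq {
	unsigned long load_weight;	/* sum of queued entities' weights */
	struct toy_se *queue;
};

static void toy_account_enqueue(struct toy_cfs_rq *cfs_rq, struct toy_se *se)
{
	se->next = cfs_rq->queue;
	cfs_rq->queue = se;
	cfs_rq->load_weight += se->weight;
}

static void toy_account_dequeue(struct toy_cfs_rq *cfs_rq, struct toy_se *se)
{
	/* Caller guarantees se is queued; the list unlink is elided here. */
	cfs_rq->load_weight -= se->weight;
}

int main(void)
{
	struct toy_cfs_rq cfs_rq = { 0 };
	struct toy_se a = { .weight = 1024 }, b = { .weight = 335 };

	toy_account_enqueue(&cfs_rq, &a);
	toy_account_enqueue(&cfs_rq, &b);
	printf("load_weight=%lu\n", cfs_rq.load_weight);	/* 1359 */
	toy_account_dequeue(&cfs_rq, &a);
	printf("load_weight=%lu\n", cfs_rq.load_weight);	/* 335 */
	return 0;
}
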
core.c
4653 p->se.cfs_rq = NULL; in __sched_fork()
5704 struct sched_entity *curr = (&p->se)->cfs_rq->curr; in prefetch_curr_exec_start()
10624 root_task_group.cfs_rq = (struct cfs_rq **)ptr; in sched_init()
11143 * that tg_unthrottle_up() won't add decayed cfs_rq's to it. in sched_release_group()
11187 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
11608 * Prevent race between setting of cfs_rq->runtime_enabled and in tg_set_cfs_bandwidth()
11643 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; in tg_set_cfs_bandwidth() local
[all...]
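
tg_set_cfs_bandwidth() above installs the quota/period pair that throttle_cfs_rq()/unthrottle_cfs_rq() in fair.c later enforce. A hedged usage sketch from userspace, assuming a cgroup v2 mount at /sys/fs/cgroup and a group named mygroup (neither path is implied by the sources excerpted here):

#include <stdio.h>

int main(void)
{
	/* cgroup v2 cpu.max: "<quota_us> <period_us>" -> 50ms of CPU every 100ms. */
	FILE *f = fopen("/sys/fs/cgroup/mygroup/cpu.max", "w");

	if (!f) {
		perror("cpu.max");
		return 1;
	}
	fprintf(f, "50000 100000\n");
	fclose(f);
	return 0;
}
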
/kernel/linux/linux-5.10/kernel/sched/
fair.c
33 static void walt_init_cfs_rq_stats(struct cfs_rq *cfs_rq);
34 static void walt_inc_cfs_rq_stats(struct cfs_rq *cfs_rq,
36 static void walt_dec_cfs_rq_stats(struct cfs_rq *cfs_rq,
39 struct cfs_rq *cfs_rq);
41 struct cfs_rq *cfs_rq);
43 walt_init_cfs_rq_stats(struct cfs_rq *cfs_rq) walt_init_cfs_rq_stats() argument
45 walt_inc_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p) walt_inc_cfs_rq_stats() argument
47 walt_dec_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p) walt_dec_cfs_rq_stats() argument
317 cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len) cfs_rq_tg_path() argument
330 list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) list_add_leaf_cfs_rq() argument
398 list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) list_del_leaf_cfs_rq() argument
504 cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len) cfs_rq_tg_path() argument
510 list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) list_add_leaf_cfs_rq() argument
515 list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) list_del_leaf_cfs_rq() argument
569 update_min_vruntime(struct cfs_rq *cfs_rq) update_min_vruntime() argument
604 __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) __enqueue_entity() argument
634 __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) __dequeue_entity() argument
639 __pick_first_entity(struct cfs_rq *cfs_rq) __pick_first_entity() argument
660 __pick_last_entity(struct cfs_rq *cfs_rq) __pick_last_entity() argument
730 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) sched_slice() argument
767 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se) sched_vslice() argument
828 struct cfs_rq *cfs_rq = cfs_rq_of(se); post_init_entity_util_avg() local
872 update_tg_load_avg(struct cfs_rq *cfs_rq) update_tg_load_avg() argument
880 update_curr(struct cfs_rq *cfs_rq) update_curr() argument
921 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_wait_start() argument
939 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_wait_end() argument
971 update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_enqueue_sleeper() argument
1042 update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) update_stats_enqueue() argument
1059 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) update_stats_dequeue() argument
1088 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_curr_start() argument
3026 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) account_entity_enqueue() argument
3041 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) account_entity_dequeue() argument
3103 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) enqueue_load_avg() argument
3110 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) dequeue_load_avg() argument
3117 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) enqueue_load_avg() argument
3119 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) dequeue_load_avg() argument
3122 reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, unsigned long weight) reweight_entity() argument
3152 struct cfs_rq *cfs_rq = cfs_rq_of(se); reweight_task() local
3235 calc_group_shares(struct cfs_rq *cfs_rq) calc_group_shares() argument
3305 cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags) cfs_rq_util_change() argument
3344 update_tg_load_avg(struct cfs_rq *cfs_rq) update_tg_load_avg() argument
3477 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) update_tg_cfs_util() argument
3502 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) update_tg_cfs_runnable() argument
3527 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) update_tg_cfs_load() argument
3587 add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) add_tg_cfs_propagate() argument
3596 struct cfs_rq *cfs_rq, *gcfs_rq; propagate_entity_load_avg() local
3653 update_tg_load_avg(struct cfs_rq *cfs_rq) update_tg_load_avg() argument
3660 add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) add_tg_cfs_propagate() argument
3681 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) update_cfs_rq_load_avg() argument
3750 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) attach_entity_load_avg() argument
3805 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) detach_entity_load_avg() argument
3834 update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) update_load_avg() argument
3870 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) cfs_rq_last_update_time() argument
3884 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) cfs_rq_last_update_time() argument
3896 struct cfs_rq *cfs_rq = cfs_rq_of(se); sync_entity_load_avg() local
3909 struct cfs_rq *cfs_rq = cfs_rq_of(se); remove_entity_load_avg() local
3928 cfs_rq_runnable_avg(struct cfs_rq *cfs_rq) cfs_rq_runnable_avg() argument
3933 cfs_rq_load_avg(struct cfs_rq *cfs_rq) cfs_rq_load_avg() argument
3993 util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) util_est_enqueue() argument
4009 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) util_est_dequeue() argument
4040 util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep) util_est_update() argument
4318 update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1) update_load_avg() argument
4326 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) attach_entity_load_avg() argument
4328 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) detach_entity_load_avg() argument
4336 util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) util_est_enqueue() argument
4339 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) util_est_dequeue() argument
4342 util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep) util_est_update() argument
4348 check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) check_spread() argument
4363 struct cfs_rq *cfs_rq; entity_is_long_sleeper() local
4385 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) place_entity() argument
4492 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) enqueue_entity() argument
4556 struct cfs_rq *cfs_rq = cfs_rq_of(se); __clear_buddies_last() local
4567 struct cfs_rq *cfs_rq = cfs_rq_of(se); __clear_buddies_next() local
4578 struct cfs_rq *cfs_rq = cfs_rq_of(se); __clear_buddies_skip() local
4586 clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) clear_buddies() argument
4601 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) dequeue_entity() argument
4656 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) check_preempt_tick() argument
4693 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) set_next_entity() argument
4736 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr) pick_next_entity() argument
4788 put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) put_prev_entity() argument
4813 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) entity_tick() argument
4914 __assign_cfs_rq_runtime(struct cfs_bandwidth *cfs_b, struct cfs_rq *cfs_rq, u64 target_runtime) __assign_cfs_rq_runtime() argument
4942 assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) assign_cfs_rq_runtime() argument
4954 __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) __account_cfs_rq_runtime() argument
4973 account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) account_cfs_rq_runtime() argument
4981 cfs_rq_throttled(struct cfs_rq *cfs_rq) cfs_rq_throttled() argument
4987 throttled_hierarchy(struct cfs_rq *cfs_rq) throttled_hierarchy() argument
5012 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; tg_unthrottle_up() local
5030 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; tg_throttle_down() local
5042 throttle_cfs_rq(struct cfs_rq *cfs_rq) throttle_cfs_rq() argument
5114 unthrottle_cfs_rq(struct cfs_rq *cfs_rq) unthrottle_cfs_rq() argument
5204 struct cfs_rq *cfs_rq; distribute_cfs_runtime() local
5354 __return_cfs_rq_runtime(struct cfs_rq *cfs_rq) __return_cfs_rq_runtime() argument
5377 return_cfs_rq_runtime(struct cfs_rq *cfs_rq) return_cfs_rq_runtime() argument
5425 check_enqueue_throttle(struct cfs_rq *cfs_rq) check_enqueue_throttle() argument
5446 struct cfs_rq *pcfs_rq, *cfs_rq; sync_throttle() local
5462 check_cfs_rq_runtime(struct cfs_rq *cfs_rq) check_cfs_rq_runtime() argument
5561 init_cfs_rq_runtime(struct cfs_rq *cfs_rq) init_cfs_rq_runtime() argument
5607 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; update_runtime_enabled() local
5625 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; unthrottle_offline_cfs_rqs() local
5654 account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) account_cfs_rq_runtime() argument
5655 check_cfs_rq_runtime(struct cfs_rq *cfs_rq) check_cfs_rq_runtime() argument
5656 check_enqueue_throttle(struct cfs_rq *cfs_rq) check_enqueue_throttle() argument
5658 return_cfs_rq_runtime(struct cfs_rq *cfs_rq) return_cfs_rq_runtime() argument
5660 cfs_rq_throttled(struct cfs_rq *cfs_rq) cfs_rq_throttled() argument
5665 throttled_hierarchy(struct cfs_rq *cfs_rq) throttled_hierarchy() argument
5679 init_cfs_rq_runtime(struct cfs_rq *cfs_rq) init_cfs_rq_runtime() argument
5700 struct cfs_rq *cfs_rq = cfs_rq_of(se); hrtick_start_fair() local
5817 struct cfs_rq *cfs_rq; enqueue_task_fair() local
5931 struct cfs_rq *cfs_rq; dequeue_task_fair() local
6034 struct cfs_rq *cfs_rq; cpu_load_without() local
6057 struct cfs_rq *cfs_rq; cpu_runnable_without() local
6716 struct cfs_rq *cfs_rq; cpu_util() local
6753 struct cfs_rq *cfs_rq; cpu_util_without() local
6858 struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs; cpu_util_next() local
7211 struct cfs_rq *cfs_rq = cfs_rq_of(se); migrate_task_rq_fair() local
7394 struct cfs_rq *cfs_rq = task_cfs_rq(curr); check_preempt_wakeup() local
7476 struct cfs_rq *cfs_rq = &rq->cfs; pick_next_task_fair() local
7628 struct cfs_rq *cfs_rq; put_prev_task_fair() local
7644 struct cfs_rq *cfs_rq = task_cfs_rq(curr); yield_task_fair() local
8329 cfs_rq_has_blocked(struct cfs_rq *cfs_rq) cfs_rq_has_blocked() argument
8367 cfs_rq_has_blocked(struct cfs_rq *cfs_rq) cfs_rq_has_blocked() argument
8400 cfs_rq_is_decayed(struct cfs_rq *cfs_rq) cfs_rq_is_decayed() argument
8419 struct cfs_rq *cfs_rq, *pos; __update_blocked_fair() local
8462 update_cfs_rq_h_load(struct cfs_rq *cfs_rq) update_cfs_rq_h_load() argument
8497 struct cfs_rq *cfs_rq = task_cfs_rq(p); task_h_load() local
8506 struct cfs_rq *cfs_rq = &rq->cfs; __update_blocked_fair() local
11510 struct cfs_rq *cfs_rq; task_tick_fair() local
11532 struct cfs_rq *cfs_rq; task_fork_fair() local
11621 struct cfs_rq *cfs_rq; propagate_entity_cfs_rq() local
11647 struct cfs_rq *cfs_rq = cfs_rq_of(se); detach_entity_cfs_rq() local
11658 struct cfs_rq *cfs_rq = cfs_rq_of(se); attach_entity_cfs_rq() local
11678 struct cfs_rq *cfs_rq = cfs_rq_of(se); detach_task_cfs_rq() local
11695 struct cfs_rq *cfs_rq = cfs_rq_of(se); attach_task_cfs_rq() local
11745 struct cfs_rq *cfs_rq = cfs_rq_of(se); set_next_task_fair() local
11753 init_cfs_rq(struct cfs_rq *cfs_rq) init_cfs_rq() argument
11819 struct cfs_rq *cfs_rq; alloc_fair_sched_group() local
11900 init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, struct sched_entity *se, int cpu, struct sched_entity *parent) init_tg_cfs_entry() argument
12057 struct cfs_rq *cfs_rq, *pos; print_cfs_stats() local
12060 for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos) print_cfs_stats() local
12109 walt_init_cfs_rq_stats(struct cfs_rq *cfs_rq) walt_init_cfs_rq_stats() argument
12114 walt_inc_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p) walt_inc_cfs_rq_stats() argument
12120 walt_dec_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p) walt_dec_cfs_rq_stats() argument
12161 struct cfs_rq *cfs_rq; walt_fixup_sched_stats_fair() local
12196 sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq) sched_trace_cfs_rq_avg() argument
12206 sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len) sched_trace_cfs_rq_path() argument
12220 sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq) sched_trace_cfs_rq_cpu() argument
[all...]
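
sched_slice() and sched_vslice() in the 5.10 hits above implement the pre-EEVDF timeslice: each entity gets a share of the scheduling period proportional to its weight within the cfs_rq (the 6.6 code replaced this with per-entity deadlines). A rough sketch, assuming a fixed 6 ms period and invented names; the kernel additionally scales the period with nr_running:

#include <stdio.h>
#include <stdint.h>

#define TOY_SCHED_PERIOD_NS	6000000ULL	/* ~6 ms latency target */

/* Slice proportional to this entity's share of the total queue weight. */
static uint64_t toy_sched_slice(unsigned long se_weight,
				unsigned long cfs_rq_weight)
{
	return TOY_SCHED_PERIOD_NS * se_weight / cfs_rq_weight;
}

int main(void)
{
	/* Two nice-0 tasks (weight 1024) and one heavier task (weight 2048). */
	unsigned long total = 1024 + 1024 + 2048;

	printf("nice-0 slice: %llu ns\n",
	       (unsigned long long)toy_sched_slice(1024, total));	/* 1500000 */
	printf("heavy  slice: %llu ns\n",
	       (unsigned long long)toy_sched_slice(2048, total));	/* 3000000 */
	return 0;
}
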
pelt.h
5 int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
6 int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
144 /* rq->task_clock normalized against any time this cfs_rq has spent throttled */
145 static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) in cfs_rq_clock_pelt() argument
147 if (unlikely(cfs_rq->throttle_count)) in cfs_rq_clock_pelt()
148 return cfs_rq->throttled_clock_pelt - cfs_rq->throttled_clock_pelt_time; in cfs_rq_clock_pelt()
153 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) cfs_rq_clock_pelt() argument
162 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) update_cfs_rq_load_avg() argument
[all...]
pelt.c
212 * se has been already dequeued but cfs_rq->curr still points to it. in ___update_load_sum()
214 * but also for a cfs_rq if the latter becomes idle. As an example, in ___update_load_sum()
254 * removed from a cfs_rq and we need to update sched_avg. Scheduler entities
257 * the period_contrib of cfs_rq when updating the sched_avg of a sched_entity
310 int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se) in __update_load_avg_se() argument
313 cfs_rq->curr == se)) { in __update_load_avg_se()
324 int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq) in __update_load_avg_cfs_rq() argument
326 if (___update_load_sum(now, &cfs_rq in __update_load_avg_cfs_rq()
[all...]
debug.c
555 void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) in print_cfs_rq() argument
565 SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu); in print_cfs_rq()
568 SEQ_printf(m, "cfs_rq[%d]:\n", cpu); in print_cfs_rq()
571 SPLIT_NS(cfs_rq->exec_clock)); in print_cfs_rq()
574 if (rb_first_cached(&cfs_rq->tasks_timeline)) in print_cfs_rq()
575 MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime; in print_cfs_rq()
576 last = __pick_last_entity(cfs_rq); in print_cfs_rq()
579 min_vruntime = cfs_rq->min_vruntime; in print_cfs_rq()
[all...]
sched.h
440 struct cfs_rq;
475 struct cfs_rq **cfs_rq; member
534 * A weight of a cfs_rq is the sum of weights of which entities
535 * are queued on this cfs_rq, so a weight of a entity should not be
566 extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
573 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
599 struct cfs_rq *pre
613 struct cfs_rq { global() struct
1194 rq_of(struct cfs_rq *cfs_rq) rq_of() argument
1201 rq_of(struct cfs_rq *cfs_rq) rq_of() argument
[all...]
core.c
3242 p->se.cfs_rq = NULL; in __sched_fork()
4124 struct sched_entity *curr = (&p->se)->cfs_rq->curr; in prefetch_curr_exec_start()
7680 root_task_group.cfs_rq = (struct cfs_rq **)ptr; in sched_init()
8141 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
8582 * Prevent race between setting of cfs_rq->runtime_enabled and in tg_set_cfs_bandwidth()
8612 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; in tg_set_cfs_bandwidth() local
8613 struct rq *rq = cfs_rq->rq; in tg_set_cfs_bandwidth()
[all...]
/kernel/linux/linux-5.10/include/linux/
sched.h
46 struct cfs_rq;
399 * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU
440 * For cfs_rq, they are the aggregated values of all runnable and blocked
455 * with the highest load (=88761), always runnable on a single cfs_rq,
534 struct cfs_rq *cfs_rq; member
536 struct cfs_rq *my_q;
2235 const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq);
2236 char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len);
[all...]
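
The sched.h comment above defines a cfs_rq's estimated utilization as the sum of util_est.enqueued over its RUNNABLE tasks, which util_est_enqueue()/util_est_dequeue() in the fair.c hits maintain. A minimal sketch of that aggregation, with simplified, invented names; the underflow guard is similar in spirit to the clamping the real helpers perform:

#include <stdio.h>

struct toy_task {
	unsigned int util_est_enqueued;		/* task's estimated utilization */
};

struct toy_cfs_rq {
	unsigned int util_est_enqueued;		/* sum over runnable tasks */
};

static void toy_util_est_enqueue(struct toy_cfs_rq *cfs_rq, struct toy_task *p)
{
	cfs_rq->util_est_enqueued += p->util_est_enqueued;
}

static void toy_util_est_dequeue(struct toy_cfs_rq *cfs_rq, struct toy_task *p)
{
	unsigned int enq = cfs_rq->util_est_enqueued;

	/* Never underflow the aggregate. */
	cfs_rq->util_est_enqueued = enq >= p->util_est_enqueued ?
				    enq - p->util_est_enqueued : 0;
}

int main(void)
{
	struct toy_cfs_rq cfs_rq = { 0 };
	struct toy_task a = { 120 }, b = { 300 };

	toy_util_est_enqueue(&cfs_rq, &a);
	toy_util_est_enqueue(&cfs_rq, &b);
	toy_util_est_dequeue(&cfs_rq, &a);
	printf("cfs_rq util_est = %u\n", cfs_rq.util_est_enqueued);	/* 300 */
	return 0;
}
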
/kernel/linux/linux-5.10/include/trace/events/
sched.h
774 TP_PROTO(struct cfs_rq *cfs_rq),
775 TP_ARGS(cfs_rq));
806 TP_PROTO(struct cfs_rq *cfs_rq),
807 TP_ARGS(cfs_rq));
/kernel/linux/linux-6.6/include/trace/events/
sched.h
859 TP_PROTO(struct cfs_rq *cfs_rq),
860 TP_ARGS(cfs_rq));
891 TP_PROTO(struct cfs_rq *cfs_rq),
892 TP_ARGS(cfs_rq));
/kernel/linux/linux-6.6/include/linux/
sched.h
49 struct cfs_rq;
476 * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU
517 * For cfs_rq, they are the aggregated values of all runnable and blocked
532 * with the highest load (=88761), always runnable on a single cfs_rq,
620 struct cfs_rq *cfs_rq; member
622 struct cfs_rq *my_q;
