
Searched refs:cfs_rq (Results 1 - 7 of 7) sorted by relevance

/device/soc/rockchip/common/sdk_linux/kernel/sched/
fair.c
32 static void walt_init_cfs_rq_stats(struct cfs_rq *cfs_rq);
33 static void walt_inc_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p);
34 static void walt_dec_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p);
35 static void walt_inc_throttled_cfs_rq_stats(struct walt_sched_stats *stats, struct cfs_rq *cfs_rq);
36 static void walt_dec_throttled_cfs_rq_stats(struct walt_sched_stats *stats, struct cfs_rq *cfs_rq);
38 walt_init_cfs_rq_stats(struct cfs_rq *cfs_rq) walt_init_cfs_rq_stats() argument
41 walt_inc_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p) walt_inc_cfs_rq_stats() argument
44 walt_dec_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p) walt_dec_cfs_rq_stats() argument
326 cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len) cfs_rq_tg_path() argument
341 list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) list_add_leaf_cfs_rq() argument
407 list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) list_del_leaf_cfs_rq() argument
511 cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len) cfs_rq_tg_path() argument
518 list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) list_add_leaf_cfs_rq() argument
523 list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) list_del_leaf_cfs_rq() argument
575 update_min_vruntime(struct cfs_rq *cfs_rq) update_min_vruntime() argument
612 fair_enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) fair_enqueue_entity() argument
641 fair_dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) fair_dequeue_entity() argument
646 __pick_first_entity(struct cfs_rq *cfs_rq) __pick_first_entity() argument
669 __pick_last_entity(struct cfs_rq *cfs_rq) __pick_last_entity() argument
740 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) sched_slice() argument
779 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se) sched_vslice() argument
841 struct cfs_rq *cfs_rq = cfs_rq_of(se); post_init_entity_util_avg() local
886 update_tg_load_avg(struct cfs_rq *cfs_rq) update_tg_load_avg() argument
894 update_curr(struct cfs_rq *cfs_rq) update_curr() argument
935 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_wait_start() argument
952 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_wait_end() argument
983 update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_enqueue_sleeper() argument
1055 update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) update_stats_enqueue() argument
1074 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) update_stats_dequeue() argument
1103 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_curr_start() argument
3107 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) account_entity_enqueue() argument
3121 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) account_entity_dequeue() argument
3185 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) enqueue_load_avg() argument
3191 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) dequeue_load_avg() argument
3197 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) enqueue_load_avg() argument
3200 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) dequeue_load_avg() argument
3205 reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, unsigned long weight) reweight_entity() argument
3235 struct cfs_rq *cfs_rq = cfs_rq_of(se); reweight_task() local
3318 calc_group_shares(struct cfs_rq *cfs_rq) calc_group_shares() argument
3391 cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags) cfs_rq_util_change() argument
3430 update_tg_load_avg(struct cfs_rq *cfs_rq) update_tg_load_avg() argument
3563 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) update_tg_cfs_util() argument
3588 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) update_tg_cfs_runnable() argument
3613 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) update_tg_cfs_load() argument
3673 add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) add_tg_cfs_propagate() argument
3682 struct cfs_rq *cfs_rq, *gcfs_rq; propagate_entity_load_avg() local
3743 update_tg_load_avg(struct cfs_rq *cfs_rq) update_tg_load_avg() argument
3752 add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) add_tg_cfs_propagate() argument
3774 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) update_cfs_rq_load_avg() argument
3842 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) attach_entity_load_avg() argument
3897 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) detach_entity_load_avg() argument
3926 update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) update_load_avg() argument
3962 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) cfs_rq_last_update_time() argument
3976 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) cfs_rq_last_update_time() argument
3988 struct cfs_rq *cfs_rq = cfs_rq_of(se); sync_entity_load_avg() local
4001 struct cfs_rq *cfs_rq = cfs_rq_of(se); remove_entity_load_avg() local
4020 cfs_rq_runnable_avg(struct cfs_rq *cfs_rq) cfs_rq_runnable_avg() argument
4025 cfs_rq_load_avg(struct cfs_rq *cfs_rq) cfs_rq_load_avg() argument
4079 util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) util_est_enqueue() argument
4095 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) util_est_dequeue() argument
4126 util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep) util_est_update() argument
4278 update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1) update_load_avg() argument
4287 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) attach_entity_load_avg() argument
4290 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) detach_entity_load_avg() argument
4299 util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) util_est_enqueue() argument
4303 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) util_est_dequeue() argument
4307 util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep) util_est_update() argument
4316 check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) check_spread() argument
4331 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) place_entity() argument
4416 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) enqueue_entity() argument
4483 struct cfs_rq *cfs_rq = cfs_rq_of(se); fair_clear_buddies_last() local
4495 struct cfs_rq *cfs_rq = cfs_rq_of(se); fair_clear_buddies_next() local
4507 struct cfs_rq *cfs_rq = cfs_rq_of(se); fair_clear_buddies_skip() local
4516 clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) clear_buddies() argument
4533 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) dequeue_entity() argument
4590 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) check_preempt_tick() argument
4629 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) set_next_entity() argument
4668 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr) pick_next_entity() argument
4723 put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) put_prev_entity() argument
4748 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) entity_tick() argument
4854 fair_assign_cfs_rq_runtime(struct cfs_bandwidth *cfs_b, struct cfs_rq *cfs_rq, u64 target_runtime) fair_assign_cfs_rq_runtime() argument
4881 assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) assign_cfs_rq_runtime() argument
4893 fair_account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) fair_account_cfs_rq_runtime() argument
4914 account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) account_cfs_rq_runtime() argument
4923 cfs_rq_throttled(struct cfs_rq *cfs_rq) cfs_rq_throttled() argument
4929 throttled_hierarchy(struct cfs_rq *cfs_rq) throttled_hierarchy() argument
4952 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; tg_unthrottle_up() local
4970 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; tg_throttle_down() local
4982 throttle_cfs_rq(struct cfs_rq *cfs_rq) throttle_cfs_rq() argument
5056 unthrottle_cfs_rq(struct cfs_rq *cfs_rq) unthrottle_cfs_rq() argument
5152 struct cfs_rq *cfs_rq; distribute_cfs_runtime() local
5310 fair_return_cfs_rq_runtime(struct cfs_rq *cfs_rq) fair_return_cfs_rq_runtime() argument
5334 return_cfs_rq_runtime(struct cfs_rq *cfs_rq) return_cfs_rq_runtime() argument
5386 check_enqueue_throttle(struct cfs_rq *cfs_rq) check_enqueue_throttle() argument
5411 struct cfs_rq *pcfs_rq, *cfs_rq; sync_throttle() local
5429 check_cfs_rq_runtime(struct cfs_rq *cfs_rq) check_cfs_rq_runtime() argument
5527 init_cfs_rq_runtime(struct cfs_rq *cfs_rq) init_cfs_rq_runtime() argument
5576 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; update_runtime_enabled() local
5595 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; unthrottle_offline_cfs_rqs() local
5626 account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) account_cfs_rq_runtime() argument
5629 check_cfs_rq_runtime(struct cfs_rq *cfs_rq) check_cfs_rq_runtime() argument
5633 check_enqueue_throttle(struct cfs_rq *cfs_rq) check_enqueue_throttle() argument
5639 return_cfs_rq_runtime(struct cfs_rq *cfs_rq) return_cfs_rq_runtime() argument
5643 cfs_rq_throttled(struct cfs_rq *cfs_rq) cfs_rq_throttled() argument
5648 throttled_hierarchy(struct cfs_rq *cfs_rq) throttled_hierarchy() argument
5663 init_cfs_rq_runtime(struct cfs_rq *cfs_rq) init_cfs_rq_runtime() argument
5692 struct cfs_rq *cfs_rq = cfs_rq_of(se); hrtick_start_fair() local
5794 struct cfs_rq *cfs_rq; enqueue_task_fair() local
5914 struct cfs_rq *cfs_rq; dequeue_task_fair() local
6020 struct cfs_rq *cfs_rq; cpu_load_without() local
6044 struct cfs_rq *cfs_rq; cpu_runnable_without() local
6713 struct cfs_rq *cfs_rq; cpu_util() local
6749 struct cfs_rq *cfs_rq; cpu_util_without() local
6855 struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs; cpu_util_next() local
7202 struct cfs_rq *cfs_rq = cfs_rq_of(se); migrate_task_rq_fair() local
7373 struct cfs_rq *cfs_rq = task_cfs_rq(curr); check_preempt_wakeup() local
7461 struct cfs_rq *cfs_rq = &rq->cfs; pick_next_task_fair() local
7623 struct cfs_rq *cfs_rq; put_prev_task_fair() local
7639 struct cfs_rq *cfs_rq = task_cfs_rq(curr); yield_task_fair() local
8343 cfs_rq_has_blocked(struct cfs_rq *cfs_rq) cfs_rq_has_blocked() argument
8388 cfs_rq_has_blocked(struct cfs_rq *cfs_rq) cfs_rq_has_blocked() argument
8429 cfs_rq_is_decayed(struct cfs_rq *cfs_rq) cfs_rq_is_decayed() argument
8452 struct cfs_rq *cfs_rq, *pos; fair_update_blocked_fair() local
8499 update_cfs_rq_h_load(struct cfs_rq *cfs_rq) update_cfs_rq_h_load() argument
8535 struct cfs_rq *cfs_rq = task_cfs_rq(p); task_h_load() local
8543 struct cfs_rq *cfs_rq = &rq->cfs; fair_update_blocked_fair() local
11546 struct cfs_rq *cfs_rq; task_tick_fair() local
11569 struct cfs_rq *cfs_rq; task_fork_fair() local
11662 struct cfs_rq *cfs_rq; propagate_entity_cfs_rq() local
11690 struct cfs_rq *cfs_rq = cfs_rq_of(se); detach_entity_cfs_rq() local
11701 struct cfs_rq *cfs_rq = cfs_rq_of(se); attach_entity_cfs_rq() local
11721 struct cfs_rq *cfs_rq = cfs_rq_of(se); detach_task_cfs_rq() local
11738 struct cfs_rq *cfs_rq = cfs_rq_of(se); attach_task_cfs_rq() local
11790 struct cfs_rq *cfs_rq = cfs_rq_of(se); set_next_task_fair() local
11798 init_cfs_rq(struct cfs_rq *cfs_rq) init_cfs_rq() argument
11867 struct cfs_rq *cfs_rq; alloc_fair_sched_group() local
11955 init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, struct sched_entity *se, int cpu, struct sched_entity *parent) init_tg_cfs_entry() argument
12119 struct cfs_rq *cfs_rq, *pos; print_cfs_stats() local
12122 for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos) print_cfs_rq(m, cpu, cfs_rq); print_cfs_stats() local
12169 walt_init_cfs_rq_stats(struct cfs_rq *cfs_rq) walt_init_cfs_rq_stats() argument
12174 walt_inc_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p) walt_inc_cfs_rq_stats() argument
12179 walt_dec_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p) walt_dec_cfs_rq_stats() argument
12213 struct cfs_rq *cfs_rq; walt_fixup_sched_stats_fair() local
12245 sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq) sched_trace_cfs_rq_avg() argument
12255 sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len) sched_trace_cfs_rq_path() argument
12270 sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq) sched_trace_cfs_rq_cpu() argument
[all...]
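
The fair.c matches above span the core CFS machinery: queued entities live in a red-black tree ordered by vruntime, __pick_first_entity() (line 646) returns the leftmost of them, and update_min_vruntime() (line 575) maintains the monotonic floor that place_entity() (line 4331) positions entities against. For orientation, a sketch of those two helpers after mainline v5.10, the base of this sdk_linux tree; the vendor file wraps them with WALT hooks, and the 32-bit min_vruntime_copy handling is omitted here:

struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
        /* Leftmost rbtree node == smallest vruntime == runs next. */
        struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline);

        if (!left)
                return NULL;

        return rb_entry(left, struct sched_entity, run_node);
}

static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
        struct sched_entity *curr = cfs_rq->curr;
        struct rb_node *leftmost = rb_first_cached(&cfs_rq->tasks_timeline);
        u64 vruntime = cfs_rq->min_vruntime;

        if (curr) {
                if (curr->on_rq)
                        vruntime = curr->vruntime;
                else
                        curr = NULL;
        }

        if (leftmost) {
                struct sched_entity *se = rb_entry(leftmost, struct sched_entity, run_node);

                if (!curr)
                        vruntime = se->vruntime;
                else
                        vruntime = min_vruntime(vruntime, se->vruntime);
        }

        /* Ensure we never gain time by being placed backwards. */
        cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
}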
pelt.c
249 * se has been already dequeued but cfs_rq->curr still points to it. in ___update_load_sum()
251 * but also for a cfs_rq if the latter becomes idle. As an example, in ___update_load_sum()
293 * removed from a cfs_rq and we need to update sched_avg. Scheduler entities
296 * the period_contrib of cfs_rq when updating the sched_avg of a sched_entity
348 int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se) in __update_load_avg_se() argument
350 if (___update_load_sum(now, &se->avg, !!se->on_rq, se_runnable(se), cfs_rq->curr == se)) { in __update_load_avg_se()
360 int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq) in __update_load_avg_cfs_rq() argument
362 if (___update_load_sum(now, &cfs_rq in __update_load_avg_cfs_rq()
[all...]
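
The hit at line 362 is cut off mid-call. As a reference sketch after mainline v5.10 (the tree this vendor kernel tracks), the function feeds the queue's weight, hierarchical nr_running, and running state into the shared PELT accumulator:

int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
{
        /* load / runnable / running inputs for the PELT sums: */
        if (___update_load_sum(now, &cfs_rq->avg,
                               scale_load_down(cfs_rq->load.weight),
                               cfs_rq->h_nr_running,
                               cfs_rq->curr != NULL)) {
                /* A period boundary was crossed: fold sums into averages. */
                ___update_load_avg(&cfs_rq->avg, 1);
                trace_pelt_cfs_tp(cfs_rq);
                return 1;
        }

        return 0;
}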
sched.h
420 struct cfs_rq;
455 struct cfs_rq **cfs_rq; member
514 * A weight of a cfs_rq is the sum of weights of which entities
515 * are queued on this cfs_rq, so a weight of a entity should not be
545 extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, struct sched_entity *se, int cpu,
551 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
574 extern void set_task_rq_fair(struct sched_entity *se, struct cfs_rq *pre
590 struct cfs_rq { global() struct
1173 rq_of(struct cfs_rq *cfs_rq) rq_of() argument
1180 rq_of(struct cfs_rq *cfs_rq) rq_of() argument
[all...]
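
The two rq_of() definitions at lines 1173 and 1180 are the CONFIG_FAIR_GROUP_SCHED and flat variants of the same accessor. Sketched after mainline v5.10:

#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * With group scheduling there are many cfs_rqs per CPU, so each one
 * carries an explicit back-pointer to its runqueue.
 */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return cfs_rq->rq;
}
#else
/* Otherwise the only cfs_rq is the one embedded in struct rq itself. */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return container_of(cfs_rq, struct rq, cfs);
}
#endif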
core.c
3415 p->se.cfs_rq = NULL; in __sched_fork()
4323 struct sched_entity *curr = (&p->se)->cfs_rq->curr; in prefetch_curr_exec_start()
8074 root_task_group.cfs_rq = (struct cfs_rq **)ptr; in sched_init()
8545 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
9000 * Prevent race between setting of cfs_rq->runtime_enabled and in tg_set_cfs_bandwidth()
9034 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; in tg_set_cfs_bandwidth() local
9035 struct rq *rq = cfs_rq in tg_set_cfs_bandwidth()
[all...]
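
The tg_set_cfs_bandwidth() hit at line 9035 is truncated; it is dereferencing the per-CPU array that sched_init() (line 8074) installs, one cfs_rq per task group per CPU. Abridged after the mainline v5.10 body (the vendor file may differ in detail), the quota update walks each CPU's cfs_rq under its rq lock:

for_each_online_cpu(i) {
        struct cfs_rq *cfs_rq = tg->cfs_rq[i];
        struct rq *rq = cfs_rq->rq;
        struct rq_flags rf;

        rq_lock_irq(rq, &rf);
        cfs_rq->runtime_enabled = runtime_enabled;
        cfs_rq->runtime_remaining = 0;

        /* Re-admit a group that was throttled under the old quota. */
        if (cfs_rq->throttled)
                unthrottle_cfs_rq(cfs_rq);
        rq_unlock_irq(rq, &rf);
}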
/device/soc/rockchip/rk3588/kernel/include/trace/hooks/
sched.h
183 TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial, u64 vruntime),
184 TP_ARGS(cfs_rq, se, initial, vruntime), 1);
250 struct cfs_rq;
252 TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *curr,
254 TP_ARGS(cfs_rq, curr, se), 1);
283 unsigned long delta_exec, struct cfs_rq *cfs_rq, struct sched_entity *curr,
285 TP_ARGS(p, ideal_runtime, skip_preempt, delta_exec, cfs_rq, cur
[all...]
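
These TP_PROTO/TP_ARGS fragments declare Android restricted vendor hooks (the trailing ", 1" is the restricted-hook flag); the hook names themselves are truncated out of the results above. A minimal sketch of how a vendor module attaches, assuming the first fragment belongs to the GKI hook android_rvh_place_entity (name assumed, not visible in the snippet):

#include <linux/module.h>
#include <trace/hooks/sched.h>

/* Hypothetical probe; the hook name is an assumption, the prototype
 * follows the TP_PROTO at line 183 above. */
static void my_place_entity(void *unused, struct cfs_rq *cfs_rq,
                            struct sched_entity *se, int initial, u64 vruntime)
{
        /* Vendor placement policy would inspect cfs_rq/se here. */
}

static int __init my_vendor_init(void)
{
        /* Restricted hooks can be registered but never unregistered. */
        return register_trace_android_rvh_place_entity(my_place_entity, NULL);
}
module_init(my_vendor_init);
MODULE_LICENSE("GPL");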
/device/soc/rockchip/common/sdk_linux/include/linux/
sched.h
46 struct cfs_rq;
391 * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU
432 * For cfs_rq, they are the aggregated values of all runnable and blocked
447 * with the highest load (=88761), always runnable on a single cfs_rq,
526 struct cfs_rq *cfs_rq; member
528 struct cfs_rq *my_q;
2236 const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq);
2237 char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_r
[all...]
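
The members at lines 526-528 sit inside struct sched_entity and encode the group-scheduling topology: cfs_rq is the queue this entity is enqueued on, while my_q is the queue the entity itself represents and is non-NULL only for group entities. Abridged after mainline v5.10:

struct sched_entity {
        /* ... load, run_node, vruntime, statistics ... */
#ifdef CONFIG_FAIR_GROUP_SCHED
        int                     depth;
        struct sched_entity     *parent;
        /* rq on which this entity is (to be) queued: */
        struct cfs_rq           *cfs_rq;
        /* rq "owned" by this entity/group: */
        struct cfs_rq           *my_q;
        /* cached value of my_q->h_nr_running: */
        unsigned long           runnable_weight;
#endif
        /* ... PELT state ... */
};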
/device/soc/rockchip/common/sdk_linux/include/trace/events/
sched.h
522 DECLARE_TRACE(pelt_cfs_tp, TP_PROTO(struct cfs_rq *cfs_rq), TP_ARGS(cfs_rq));
538 DECLARE_TRACE(sched_util_est_cfs_tp, TP_PROTO(struct cfs_rq *cfs_rq), TP_ARGS(cfs_rq));
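
DECLARE_TRACE creates a bare tracepoint with no ftrace event, so these fire only for probes registered at runtime. A minimal sketch, after the common out-of-tree pattern (e.g. ARM's sched_tp module), of hooking pelt_cfs_tp via the register_trace_*() helper that DECLARE_TRACE generates, reading state back through the sched_trace_cfs_rq_*() accessors declared in the include/linux/sched.h result above:

#include <linux/module.h>
#include <trace/events/sched.h>

static void probe_pelt_cfs(void *data, struct cfs_rq *cfs_rq)
{
        const struct sched_avg *avg = sched_trace_cfs_rq_avg(cfs_rq);

        if (avg)
                pr_debug("cpu%d cfs util_avg=%lu\n",
                         sched_trace_cfs_rq_cpu(cfs_rq), avg->util_avg);
}

static int __init pelt_probe_init(void)
{
        return register_trace_pelt_cfs_tp(probe_pelt_cfs, NULL);
}

static void __exit pelt_probe_exit(void)
{
        unregister_trace_pelt_cfs_tp(probe_pelt_cfs, NULL);
        /* Wait for in-flight probe calls before the module text goes away. */
        tracepoint_synchronize_unregister();
}

module_init(pelt_probe_init);
module_exit(pelt_probe_exit);
MODULE_LICENSE("GPL");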
