Lines Matching defs:prev
Definition sites and uses of the prev parameter in the CFS scheduler code below (apparently kernel/sched/fair.c in a vendor-patched tree, given helpers such as fair_enqueue_entity() and cpu_isolated()); the leading number on each match is that source file's own line number.

416 * to the prev element but it will point to rq->leaf_cfs_rq_list
420 rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev;
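
The first two matches (lines 416 and 420) come from the leaf-cfs_rq list maintenance: when a cfs_rq is deleted from rq->leaf_cfs_rq_list while the rq->tmp_alone_branch cursor still points at its list node, the cursor is backed up to the prev list element so a later list_add_leaf_cfs_rq() does not splice new entries onto the node being removed; as the quoted comment fragment suggests, if the deleted node was the first in the branch, prev is simply rq->leaf_cfs_rq_list itself. A minimal sketch of that fix-up, modeled on the mainline list_del_leaf_cfs_rq(); the exact function and field layout in this tree is an assumption:

    static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
    {
            if (cfs_rq->on_list) {
                    struct rq *rq = rq_of(cfs_rq);

                    /*
                     * If the branch cursor points at the node being deleted,
                     * back it up to the prev element so the next
                     * list_add_leaf_cfs_rq() still splices into a live node.
                     */
                    if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list)
                            rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev;

                    list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
                    cfs_rq->on_list = 0;
            }
    }
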
3452 void set_task_rq_fair(struct sched_entity *se, struct cfs_rq *prev, struct cfs_rq *next)
3468 if (!(se->avg.last_update_time && prev)) {
3478 p_last_update_time_copy = prev->load_last_update_time_copy;
3483 p_last_update_time = prev->avg.last_update_time;
3488 p_last_update_time = prev->avg.last_update_time;
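
The set_task_rq_fair() matches (lines 3452-3488) cover moving a task's sched_entity from one cfs_rq (prev) to another (next), e.g. on a cgroup move: the entity's PELT clock has to be read from prev and re-based on next. prev->avg.last_update_time appears on two separate lines because 32-bit kernels cannot read the 64-bit timestamp atomically and use a copy field as a retry token. A sketch following the mainline structure; the guard names and helper calls are assumptions about this tree:

    void set_task_rq_fair(struct sched_entity *se,
                          struct cfs_rq *prev, struct cfs_rq *next)
    {
            u64 p_last_update_time, n_last_update_time;

            /*
             * Nothing to re-base if the entity has never accumulated PELT
             * time or there is no previous cfs_rq.
             */
            if (!(se->avg.last_update_time && prev))
                    return;

    #ifndef CONFIG_64BIT
            {
                    u64 p_copy, n_copy;

                    /* Retry until both 64-bit timestamps read consistently. */
                    do {
                            p_copy = prev->load_last_update_time_copy;
                            n_copy = next->load_last_update_time_copy;
                            smp_rmb();
                            p_last_update_time = prev->avg.last_update_time;
                            n_last_update_time = next->avg.last_update_time;
                    } while (p_last_update_time != p_copy ||
                             n_last_update_time != n_copy);
            }
    #else
            p_last_update_time = prev->avg.last_update_time;
            n_last_update_time = next->avg.last_update_time;
    #endif
            /* Age the blocked load up to prev's clock, then adopt next's. */
            __update_load_avg_blocked_se(p_last_update_time, se);
            se->avg.last_update_time = n_last_update_time;
    }
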
4723 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
4729 if (prev->on_rq) {
4736 check_spread(cfs_rq, prev);
4738 if (prev->on_rq) {
4739 update_stats_wait_start(cfs_rq, prev);
4741 fair_enqueue_entity(cfs_rq, prev);
4743 update_load_avg(cfs_rq, prev, 0);
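
put_prev_entity() (lines 4723-4743) is the per-cfs_rq step of putting the previously running entity back: if it is still runnable its runtime is accounted, it is re-inserted into the rbtree, here via fair_enqueue_entity(), apparently this tree's counterpart of mainline's __enqueue_entity(), and its load average is refreshed. A sketch following the mainline shape of the function:

    static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
    {
            /*
             * If the entity is still on the runqueue, dequeue_task() was not
             * called, so its runtime has to be accounted here.
             */
            if (prev->on_rq)
                    update_curr(cfs_rq);

            /* Throttle cfs_rqs that exceeded their runtime. */
            check_cfs_rq_runtime(cfs_rq);

            check_spread(cfs_rq, prev);
            if (prev->on_rq) {
                    update_stats_wait_start(cfs_rq, prev);
                    /* Put 'current' back into the tree. */
                    fair_enqueue_entity(cfs_rq, prev);
                    /* In the !on_rq case the update happened at dequeue time. */
                    update_load_avg(cfs_rq, prev, 0);
            }
            cfs_rq->curr = NULL;
    }
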
6596 static int select_idle_sibling(struct task_struct *p, int prev, int target)
6619 if (prev != target && cpus_share_cache(prev, target) &&
6620 ((available_idle_cpu(prev) || sched_idle_cpu(prev)) && !cpu_isolated(target) &&
6621 asym_fits_capacity(task_util, prev))) {
6622 return prev;
6627 prev == smp_processor_id() &&
6629 asym_fits_capacity(task_util, prev)) {
6630 return prev;
6635 if (recent_used_cpu != prev && recent_used_cpu != target && cpus_share_cache(recent_used_cpu, target) &&
6638 p->recent_used_cpu = prev;
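
In select_idle_sibling() (lines 6596-6638), prev is the CPU the task last ran on, and the matches are the wake-up fast paths built around it: prev is returned directly when it shares the last-level cache with target, is idle (or running only SCHED_IDLE tasks) and fits the task on asymmetric-capacity systems; a second path allows stacking on prev when it is the current CPU and the waker is a per-CPU kthread (line 6627); and the recent_used_cpu path records prev as the task's recent CPU before returning the recently used one (line 6638). A condensed sketch of the first fast path, keeping the condition grouping of the quoted lines; cpu_isolated() is a vendor helper, and the task_util setup is an assumption modeled on mainline:

    static int select_idle_sibling(struct task_struct *p, int prev, int target)
    {
            unsigned long task_util = 0;

            /* On asymmetric-capacity systems the task's clamped utilization is
             * needed for asym_fits_capacity() (mainline-style setup, assumed). */
            if (static_branch_unlikely(&sched_asym_cpucapacity))
                    task_util = uclamp_task_util(p);

            /* ... check of target itself elided ... */

            /*
             * If the previous CPU is cache-hot with the target, idle enough and
             * big enough for the task, keep the task where it last ran.
             */
            if (prev != target && cpus_share_cache(prev, target) &&
                ((available_idle_cpu(prev) || sched_idle_cpu(prev)) &&
                 !cpu_isolated(target) &&
                 asym_fits_capacity(task_util, prev)))
                    return prev;

            /* ... per-CPU-kthread, recent_used_cpu and LLC scan paths elided ... */
            return target;
    }
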
7253 static int balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
7459 struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
7472 if (!prev || prev->sched_class != &fair_sched_class) {
7527 if (prev != p) {
7528 struct sched_entity *pse = &prev->se;
7551 if (prev) {
7552 put_prev_task(rq, prev);
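
balance_fair() (line 7253) takes prev only to satisfy the common sched_class balance callback signature; pick_next_task_fair() (lines 7459-7552) is where prev actually matters. When prev is absent or not a fair-class task, the simple path puts it back with put_prev_task(rq, prev) before picking from the root cfs_rq; when both prev and the new pick are fair tasks, the group-scheduling fast path only touches the levels of the two entity hierarchies that actually differ. A sketch of that hand-off, following the mainline structure (p is the freshly picked task, se its entity):

    /* Fragment of pick_next_task_fair(): hand off from prev to the new pick. */
    if (prev != p) {
            struct sched_entity *pse = &prev->se;

            /*
             * Walk both hierarchies upward until they meet in a common
             * cfs_rq; only the differing levels need put/set work.
             */
            while (!(cfs_rq = is_same_group(se, pse))) {
                    int se_depth = se->depth;
                    int pse_depth = pse->depth;

                    if (se_depth <= pse_depth) {
                            put_prev_entity(cfs_rq_of(pse), pse);
                            pse = parent_entity(pse);
                    }
                    if (se_depth >= pse_depth) {
                            set_next_entity(cfs_rq_of(se), se);
                            se = parent_entity(se);
                    }
            }

            put_prev_entity(cfs_rq, pse);
            set_next_entity(cfs_rq, se);
    }
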
7620 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
7622 struct sched_entity *se = &prev->se;
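
put_prev_task_fair() (lines 7620-7622) is the class hook invoked when the previous task is switched out; it walks prev's entity chain bottom-up and hands each level to put_prev_entity(). A sketch following the mainline structure:

    static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
    {
            struct sched_entity *se = &prev->se;
            struct cfs_rq *cfs_rq;

            /* Account and re-queue prev at every level of its group hierarchy. */
            for_each_sched_entity(se) {
                    cfs_rq = cfs_rq_of(se);
                    put_prev_entity(cfs_rq, se);
            }
    }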