Lines matching defs:prev — occurrences of "prev" in kernel/sched/fair.c (the number on each hit is its line number in that file)
475 * to the prev element but it will point to rq->leaf_cfs_rq_list
479 rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev;
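The two hits above (lines 475 and 479) are from list_del_leaf_cfs_rq(): when the leaf being deleted is the one rq->tmp_alone_branch currently points at, the cursor is backed up to the previous element before the RCU delete. A sketch of the surrounding context, following mainline kernel/sched/fair.c (exact wording varies by version):

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
        if (cfs_rq->on_list) {
                struct rq *rq = rq_of(cfs_rq);

                /*
                 * If tmp_alone_branch points at the leaf we are about
                 * to delete, move it to the prev element; the enqueue
                 * in flight will reset it to rq->leaf_cfs_rq_list once
                 * the branch is fully connected.
                 */
                if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list)
                        rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev;

                list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
                cfs_rq->on_list = 0;
        }
}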
4074 struct list_head *prev;
4077 prev = cfs_rq->leaf_cfs_rq_list.prev;
4081 prev = rq->tmp_alone_branch;
4084 prev_cfs_rq = container_of(prev, struct cfs_rq, leaf_cfs_rq_list);
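Hits 4074-4084 come from child_cfs_rq_on_list(), which decides whether a cfs_rq still has a child on the leaf list: the element linked just before us (or rq->tmp_alone_branch while we are off the list) belongs to a child exactly when that element's task group has our tg as its parent. A sketch after mainline; newer kernels also bail out early when prev is the list head itself:

static inline bool child_cfs_rq_on_list(struct cfs_rq *cfs_rq)
{
        struct cfs_rq *prev_cfs_rq;
        struct list_head *prev;

        if (cfs_rq->on_list)
                prev = cfs_rq->leaf_cfs_rq_list.prev;
        else
                prev = rq_of(cfs_rq)->tmp_alone_branch;

        prev_cfs_rq = container_of(prev, struct cfs_rq, leaf_cfs_rq_list);

        /* A child group's parent tg is our own tg. */
        return (prev_cfs_rq->tg->parent == cfs_rq->tg);
}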
4139 struct cfs_rq *prev, struct cfs_rq *next)
4154 if (!(se->avg.last_update_time && prev))
4157 p_last_update_time = cfs_rq_last_update_time(prev);
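Hits 4139-4157 are from set_task_rq_fair(), called when a task's cfs_rq changes (group move or CPU migration): the entity's blocked PELT sums are decayed up to prev's last update time, then stamped with next's, so the load average stays consistent across the move. Sketch after mainline (version-dependent):

void set_task_rq_fair(struct sched_entity *se,
                      struct cfs_rq *prev, struct cfs_rq *next)
{
        u64 p_last_update_time;
        u64 n_last_update_time;

        if (!sched_feat(ATTACH_AGE_LOAD))
                return;

        /* Nothing to age for a never-attached entity or without a prev. */
        if (!(se->avg.last_update_time && prev))
                return;

        p_last_update_time = cfs_rq_last_update_time(prev);
        n_last_update_time = cfs_rq_last_update_time(next);

        /* Decay against prev's clock, then adopt next's clock. */
        __update_load_avg_blocked_se(p_last_update_time, se);
        se->avg.last_update_time = n_last_update_time;
}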
5509 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
5515 if (prev->on_rq)
5521 if (prev->on_rq) {
5522 update_stats_wait_start_fair(cfs_rq, prev);
5524 __enqueue_entity(cfs_rq, prev);
5526 update_load_avg(cfs_rq, prev, 0);
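Hits 5509-5526 are put_prev_entity(), which retires the currently running entity of a cfs_rq: if it is still runnable, its just-finished slice is accounted and it goes back into the rbtree; either way cfs_rq->curr is cleared. Sketch after mainline:

static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
        /*
         * If prev is still queued, deactivate_task() was not called,
         * so account its just-finished runtime here.
         */
        if (prev->on_rq)
                update_curr(cfs_rq);

        /* Throttle cfs_rqs that exceeded their runtime. */
        check_cfs_rq_runtime(cfs_rq);

        if (prev->on_rq) {
                update_stats_wait_start_fair(cfs_rq, prev);
                /* Put 'current' back into the tree. */
                __enqueue_entity(cfs_rq, prev);
                /* In the !on_rq case the update happened at dequeue. */
                update_load_avg(cfs_rq, prev, 0);
        }
        cfs_rq->curr = NULL;
}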
7610 static int select_idle_sibling(struct task_struct *p, int prev, int target)
7640 if (prev != target && cpus_share_cache(prev, target) &&
7641 ((available_idle_cpu(prev) || sched_idle_cpu(prev)) &&
7642 !cpu_isolated(target) && asym_fits_cpu(task_util, util_min, util_max, prev)))
7643 return prev;
7655 prev == smp_processor_id() &&
7657 asym_fits_cpu(task_util, util_min, util_max, prev)) {
7658 return prev;
7663 p->recent_used_cpu = prev;
7664 if (recent_used_cpu != prev &&
7700 if (!has_idle_core && cpus_share_cache(prev, target)) {
7701 i = select_idle_smt(p, sd, prev);
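In select_idle_sibling() (hits 7610-7701), prev is the task's previous CPU and drives several fast paths: take prev directly when it shares the target's LLC and is idle (and, on asymmetric-capacity systems, fits the task); allow a per-CPU kthread waker to stack the wakee on prev when prev is the waker's own CPU; remember prev in p->recent_used_cpu as a future candidate; and, when SMT is active but no core is fully idle, scan prev's SMT siblings via select_idle_smt(). The cpu_isolated() test at line 7642 is not in mainline fair.c and looks like a vendor-tree addition. A hypothetical helper (name is mine; the predicates are the ones used in fair.c) condensing the first check, hits 7640-7643:

/* Hypothetical condensation of the prev fast path in select_idle_sibling(). */
static bool prev_is_good_sibling(int prev, int target,
                                 unsigned long task_util,
                                 unsigned long util_min,
                                 unsigned long util_max)
{
        return prev != target && cpus_share_cache(prev, target) &&
               (available_idle_cpu(prev) || sched_idle_cpu(prev)) &&
               asym_fits_cpu(task_util, util_min, util_max, prev);
}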
8373 balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
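balance_fair() (hit 8373) is the fair class's ->balance() callback, invoked from the pick path to make sure a runnable fair task will be available: if the rq already has tasks there is nothing to do, otherwise it attempts newly-idle balancing. Sketch after mainline; the helper was named newidle_balance() before being renamed sched_balance_newidle() in later kernels:

static int
balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
        if (rq->nr_running)
                return 1;

        /* Pull work from other CPUs before going idle. */
        return newidle_balance(rq, rf) != 0;
}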
8597 pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
8609 if (!prev || prev->sched_class != &fair_sched_class)
8662 if (prev != p) {
8663 struct sched_entity *pse = &prev->se;
8686 if (prev)
8687 put_prev_task(rq, prev);
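Hits 8597-8687 are from pick_next_task_fair(). On the FAIR_GROUP_SCHED fast path, when the picked task differs from prev, both entity hierarchies are walked up to their common ancestor, retiring the outgoing chain with put_prev_entity() and installing the incoming one with set_next_entity(); the slow path instead does put_prev_task(rq, prev) up front, guarding against prev being NULL (which can happen on some call paths, e.g. core-scheduling picks). The matched-depth walk, after mainline:

        if (prev != p) {
                struct sched_entity *pse = &prev->se;

                /* Walk se and pse up until they share a cfs_rq. */
                while (!(cfs_rq = is_same_group(se, pse))) {
                        int se_depth = se->depth;
                        int pse_depth = pse->depth;

                        if (se_depth <= pse_depth) {
                                put_prev_entity(cfs_rq_of(pse), pse);
                                pse = parent_entity(pse);
                        }
                        if (se_depth >= pse_depth) {
                                set_next_entity(cfs_rq_of(se), se);
                                se = parent_entity(se);
                        }
                }

                put_prev_entity(cfs_rq, pse);
                set_next_entity(cfs_rq, se);
        }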
8749 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
8751 struct sched_entity *se = &prev->se;
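Finally, put_prev_task_fair() (hits 8749-8751) is the fair class's hook behind put_prev_task(): it walks prev's entity hierarchy bottom-up and retires each level with put_prev_entity(). Sketch after mainline; note the signature gained a next argument in later kernels:

static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
{
        struct sched_entity *se = &prev->se;
        struct cfs_rq *cfs_rq;

        /* With group scheduling, retire every level of the hierarchy. */
        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
                put_prev_entity(cfs_rq, se);
        }
}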