Lines matching defs:curr
(each entry below is the source line number, followed by the matched line of code)

740 struct sched_entity *curr = cfs_rq->curr;
744 if (curr && curr->on_rq) {
745 unsigned long weight = scale_load_down(curr->load.weight);
747 avg += entity_key(cfs_rq, curr) * weight;
807 struct sched_entity *curr = cfs_rq->curr;
811 if (curr && curr->on_rq) {
812 unsigned long weight = scale_load_down(curr->load.weight);
814 avg += entity_key(cfs_rq, curr) * weight;
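The two blocks above (740-747 and 807-814) fold the running entity into a load-weighted average of vruntime keys; curr is handled separately because it is not kept in the queue's tree while it runs. A minimal userspace sketch of that pattern, with stand-in struct and helper names rather than the kernel's definitions:

#include <stdio.h>

struct entity {
	unsigned long weight;   /* load weight, e.g. 1024 for nice 0 */
	long long vruntime;
	int on_rq;
};

/* key relative to a reference point (the kernel uses cfs_rq->min_vruntime) */
static long long entity_key(long long ref, const struct entity *se)
{
	return se->vruntime - ref;
}

/*
 * 'avg' and 'load' have already accumulated the queued entities; the
 * running entity is folded in on top when it is still on the runqueue.
 */
static long long avg_key(long long avg, unsigned long load,
			 long long ref, const struct entity *curr)
{
	if (curr && curr->on_rq) {
		avg += entity_key(ref, curr) * (long long)curr->weight;
		load += curr->weight;
	}
	return load ? avg / (long long)load : 0;
}

int main(void)
{
	struct entity curr = { .weight = 1024, .vruntime = 140, .on_rq = 1 };
	/* two queued entities with keys 20 and 40, weight 1024 each */
	printf("%lld\n", avg_key((20 + 40) * 1024LL, 2 * 1024, 100, &curr));
	return 0;
}

Weighting the keys makes heavier entities pull the average toward their own position, which is what the scale_load_down(curr->load.weight) term above contributes.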
838 struct sched_entity *curr = cfs_rq->curr;
842 if (curr) {
843 if (curr->on_rq)
844 vruntime = curr->vruntime;
846 curr = NULL;
850 if (!curr)
945 struct sched_entity *curr = cfs_rq->curr;
949 if (curr && (!curr->on_rq || !entity_eligible(cfs_rq, curr)))
950 curr = NULL;
951 best = curr;
957 if (sched_feat(RUN_TO_PARITY) && curr && curr->vlag == curr->deadline)
958 return curr;
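Lines 945-958 show how the running entity takes part in the next pick: it is dropped from consideration if it is no longer queued or no longer eligible, and with the RUN_TO_PARITY feature it is kept until its slice is consumed (the vlag == deadline test). A simplified sketch, with a boolean standing in for that test:

#include <stdio.h>

struct pick_entity {
	int on_rq;
	int eligible;		/* stands in for entity_eligible(cfs_rq, curr) */
	int slice_protected;	/* curr has not yet consumed its slice */
};

static struct pick_entity *pick(struct pick_entity *curr,
				struct pick_entity *tree_best,
				int run_to_parity)
{
	/* curr only competes while it is queued and still eligible */
	if (curr && (!curr->on_rq || !curr->eligible))
		curr = NULL;

	/* RUN_TO_PARITY: stick with curr until its slice is used up */
	if (run_to_parity && curr && curr->slice_protected)
		return curr;

	return tree_best ? tree_best : curr;
}

int main(void)
{
	struct pick_entity curr  = { .on_rq = 1, .eligible = 1, .slice_protected = 1 };
	struct pick_entity other = { .on_rq = 1, .eligible = 1 };

	printf("keep curr: %d\n", pick(&curr, &other, 1) == &curr);
	return 0;
}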
1221 struct sched_entity *curr = cfs_rq->curr;
1225 if (unlikely(!curr))
1228 delta_exec = now - curr->exec_start;
1232 curr->exec_start = now;
1237 stats = __schedstats_from_se(curr);
1242 curr->sum_exec_runtime += delta_exec;
1245 curr->vruntime += calc_delta_fair(delta_exec, curr);
1246 update_deadline(cfs_rq, curr);
1249 if (entity_is_task(curr)) {
1250 struct task_struct *curtask = task_of(curr);
1252 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
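Lines 1221-1252 are the runtime accounting for the current entity: the time since exec_start is charged to sum_exec_runtime and, after weighting, to vruntime. A self-contained model of that bookkeeping; calc_delta() below is only a plain approximation of calc_delta_fair(), and update_deadline(), schedstats and the tracepoint are omitted:

#include <stdint.h>
#include <stdio.h>

#define NICE_0_WEIGHT 1024ULL

struct entity {
	uint64_t exec_start;        /* timestamp of the last accounting */
	uint64_t sum_exec_runtime;  /* total real time consumed */
	uint64_t vruntime;          /* weighted virtual runtime */
	uint64_t weight;            /* load weight; 1024 == nice 0 */
};

static uint64_t calc_delta(uint64_t delta_exec, uint64_t weight)
{
	/* heavier entities accrue vruntime more slowly */
	return delta_exec * NICE_0_WEIGHT / weight;
}

static void update_curr_model(struct entity *curr, uint64_t now)
{
	uint64_t delta_exec;

	if (!curr)
		return;

	delta_exec = now - curr->exec_start;
	curr->exec_start = now;

	curr->sum_exec_runtime += delta_exec;
	curr->vruntime += calc_delta(delta_exec, curr->weight);
}

int main(void)
{
	struct entity e = { .exec_start = 1000, .weight = 2048 };

	update_curr_model(&e, 1500);	/* 500 units of real time elapsed */
	printf("sum=%llu vruntime=%llu\n",
	       (unsigned long long)e.sum_exec_runtime,
	       (unsigned long long)e.vruntime);
	return 0;
}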
1262 update_curr(cfs_rq_of(&rq->curr->se));
1338 if (se != cfs_rq->curr)
1356 if (se != cfs_rq->curr)
2256 cur = rcu_dereference(dst_rq->curr);
3045 tsk = READ_ONCE(cpu_rq(cpu)->curr);
3511 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
3513 struct callback_head *work = &curr->numa_work;
3519 if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work)
3528 now = curr->se.sum_exec_runtime;
3529 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
3531 if (now > curr->node_stamp + period) {
3532 if (!curr->node_stamp)
3533 curr->numa_scan_period = task_scan_start(curr);
3534 curr->node_stamp += period;
3536 if (!time_before(jiffies, curr->mm->numa_next_scan))
3537 task_work_add(curr, work, TWA_RESUME);
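task_tick_numa() (3511-3537) arms the NUMA scan work once the task has accumulated numa_scan_period worth of runtime since the last stamp. A compact model of that trigger, with a flag standing in for task_work_add(..., TWA_RESUME); the initial task_scan_start() assignment and the jiffies check against mm->numa_next_scan are left out:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_MSEC 1000000ULL

struct task_model {
	uint64_t sum_exec_runtime;    /* ns of CPU time consumed so far */
	uint64_t node_stamp;          /* runtime at which the last scan was armed */
	uint64_t numa_scan_period_ms;
	int      work_pending;
};

static void task_tick_numa_model(struct task_model *curr)
{
	uint64_t now = curr->sum_exec_runtime;
	uint64_t period = curr->numa_scan_period_ms * NSEC_PER_MSEC;

	if (now > curr->node_stamp + period) {
		curr->node_stamp += period;
		curr->work_pending = 1;	/* stands in for task_work_add() */
	}
}

int main(void)
{
	struct task_model t = { .sum_exec_runtime = 2000ULL * NSEC_PER_MSEC,
				.numa_scan_period_ms = 1000 };

	task_tick_numa_model(&t);
	printf("pending=%d stamp=%llu\n", t.work_pending,
	       (unsigned long long)t.node_stamp);
	return 0;
}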
3576 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
3808 bool curr = cfs_rq->curr == se;
3812 if (curr)
3843 if (!curr)
4453 is_idle = is_idle_task(rcu_dereference(rq->curr));
5213 struct sched_entity *curr = cfs_rq->curr;
5271 if (curr && curr->on_rq)
5272 load += scale_load_down(curr->load.weight);
5304 bool curr = cfs_rq->curr == se;
5310 if (curr)
5337 if (!curr)
5348 if (!curr)
5418 if (se != cfs_rq->curr)
5464 cfs_rq->curr = se;
5485 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
5495 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
5528 cfs_rq->curr = NULL;
5532 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
5542 update_load_avg(cfs_rq, curr, UPDATE_TG);
5543 update_cfs_group(curr);
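Lines 5464 and 5528 are the two ends of the cfs_rq->curr handoff: the picked entity is published as ->curr, and put_prev clears it again. In the kernel the running entity is also taken out of the rbtree for the duration, which is why curr is special-cased in the averaging and picking code above; that tree removal is an assumption drawn from general CFS behaviour rather than from these lines. A toy model with the rbtree reduced to a counter:

#include <stddef.h>

struct run_entity {
	int on_rq;
};

struct run_queue {
	struct run_entity *curr;	/* running entity, kept out of the tree */
	int nr_in_tree;
};

static void set_next(struct run_queue *rq, struct run_entity *se)
{
	if (se->on_rq)
		rq->nr_in_tree--;	/* the pick leaves the tree */
	rq->curr = se;
}

static void put_prev(struct run_queue *rq, struct run_entity *prev)
{
	if (prev && prev->on_rq)
		rq->nr_in_tree++;	/* still runnable: back into the tree */
	rq->curr = NULL;
}

int main(void)
{
	struct run_entity e = { .on_rq = 1 };
	struct run_queue rq = { .curr = NULL, .nr_in_tree = 1 };

	set_next(&rq, &e);	/* e runs; the tree holds only the others */
	put_prev(&rq, &e);	/* e stops running; requeued */
	return rq.curr != NULL;	/* 0: no current entity after put_prev */
}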
5696 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
5962 if (rq->curr == rq->idle && rq->cfs.nr_running)
6297 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
6683 struct task_struct *curr = rq->curr;
6685 if (!hrtick_enabled_fair(rq) || curr->sched_class != &fair_sched_class)
6688 hrtick_start_fair(rq, curr);
6854 if (rq->curr == rq->idle)
8383 static long wakeup_latency_gran(struct sched_entity *curr, struct sched_entity *se)
8394 if ((se->latency_weight > 0) || (curr->latency_weight > 0))
8395 latency_weight -= curr->latency_weight;
8420 * Since its curr running now, convert the gran from real-time
8423 * By using 'se' instead of 'curr' we penalize light tasks, so
8424 * they get preempted easier. That is, if 'se' < 'curr' then
8426 * lighter, if otoh 'se' > 'curr' then the resulting gran will
8436 * Should 'se' preempt 'curr'.
8450 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
8452 s64 gran, vdiff = curr->vruntime - se->vruntime;
8456 vdiff += wakeup_latency_gran(curr, se);
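wakeup_preempt_entity() (8450-8456) decides whether a waking entity should preempt the running one: the vruntime difference is first shifted by wakeup_latency_gran() (8383-8395), so latency-weighted tasks preempt more or less easily, and is then compared against the wakeup granularity. A sketch of that decision; the starting value of the latency offset, its scaling, and the granularity value are assumptions for the example, since the listing only shows fragments:

#include <stdio.h>

struct wake_entity {
	long long vruntime;
	int latency_weight;	/* > 0 for latency-sensitive tasks */
};

static long long latency_offset(const struct wake_entity *curr,
				const struct wake_entity *se)
{
	long long w = se->latency_weight;	/* assumed starting point */

	if (se->latency_weight > 0 || curr->latency_weight > 0)
		w -= curr->latency_weight;

	return w;	/* the kernel scales this; kept raw for the sketch */
}

/* returns 1 if 'se' should preempt 'curr', -1 if clearly not, 0 otherwise */
static int should_preempt(const struct wake_entity *curr,
			  const struct wake_entity *se, long long gran)
{
	long long vdiff = curr->vruntime - se->vruntime;

	vdiff += latency_offset(curr, se);

	if (vdiff <= 0)
		return -1;
	if (vdiff > gran)
		return 1;
	return 0;
}

int main(void)
{
	struct wake_entity curr = { .vruntime = 1000, .latency_weight = 0 };
	struct wake_entity se   = { .vruntime = 990,  .latency_weight = 50 };

	printf("%d\n", should_preempt(&curr, &se, 30));
	return 0;
}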
8485 struct task_struct *curr = rq->curr;
8486 struct sched_entity *se = &curr->se, *pse = &p->se;
8487 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
8512 * Note: this also catches the edge-case of curr being in a throttled
8514 * enqueue of curr) will have resulted in resched being set. This
8518 if (test_tsk_need_resched(curr))
8522 if (unlikely(task_has_idle_policy(curr)) &&
8575 struct sched_entity *curr = cfs_rq->curr;
8578 if (curr) {
8579 if (curr->on_rq)
8582 curr = NULL;
8588 se = pick_next_entity(cfs_rq, curr);
8621 struct sched_entity *curr = cfs_rq->curr;
8625 * have to consider cfs_rq->curr. If it is still a runnable
8629 if (curr) {
8630 if (curr->on_rq)
8633 curr = NULL;
8651 se = pick_next_entity(cfs_rq, curr);
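The two pick loops (8575-8588 and 8621-8651) descend the group hierarchy: at each level curr only competes while it is still queued, and the chosen entity either is a task or leads into a child runqueue. A flattened, illustrative sketch of that descent, with pick_next_entity() reduced to preferring the best queued entity:

#include <stddef.h>
#include <stdio.h>

struct pick_rq;

struct pick_se {
	const char *name;
	struct pick_rq *my_q;	/* non-NULL for group entities */
};

struct pick_rq {
	struct pick_se *curr;	/* may be NULL or no longer runnable */
	int curr_on_rq;
	struct pick_se *best;	/* best queued entity at this level */
};

static struct pick_se *pick_next(struct pick_rq *cfs_rq)
{
	struct pick_se *se = NULL;

	while (cfs_rq) {
		struct pick_se *curr = cfs_rq->curr;

		if (curr && !cfs_rq->curr_on_rq)
			curr = NULL;	/* curr went to sleep: ignore it */

		/* stand-in for pick_next_entity(cfs_rq, curr) */
		se = cfs_rq->best ? cfs_rq->best : curr;
		if (!se)
			return NULL;

		cfs_rq = se->my_q;	/* descend if 'se' is a group */
	}
	return se;
}

int main(void)
{
	struct pick_se task  = { .name = "task",  .my_q = NULL };
	struct pick_rq child = { .best = &task };
	struct pick_se group = { .name = "group", .my_q = &child };
	struct pick_rq root  = { .best = &group };

	printf("picked %s\n", pick_next(&root)->name);
	return 0;
}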
8765 struct task_struct *curr = rq->curr;
8766 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
8767 struct sched_entity *se = &curr->se;
9537 curr_class = rq->curr->sched_class;
10450 if (rq->curr != rq->idle && rq->curr != p)
11742 * if the curr task on busiest CPU can't be
11745 if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
12886 if (rq->curr->__state != TASK_RUNNING ||
12887 rq->curr->nr_cpus_allowed == 1)
12949 static inline void task_tick_core(struct rq *rq, struct task_struct *curr)
12969 __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE))
13062 static inline void task_tick_core(struct rq *rq, struct task_struct *curr) {}
13070 * and everything must be accessed through the @rq and @curr passed in
13073 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
13076 struct sched_entity *se = &curr->se;
13084 task_tick_numa(rq, curr);
13086 update_misfit_status(curr, rq);
13087 update_overutilized_status(task_rq(curr));
13089 task_tick_core(rq, curr);
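task_tick_fair() (13073-13089) drives the tick-time work for the running task: each level of its entity hierarchy gets an entity_tick(), then the per-task hooks (NUMA scanning, misfit and overutilized updates, core scheduling) run on the task itself. A skeletal model, with the hierarchy walk flattened to an array and the hooks reduced to comments:

#include <stdio.h>

struct tick_entity {
	unsigned long long vruntime;
};

static void entity_tick(struct tick_entity *curr)
{
	/* stands in for the per-level accounting done on each tick */
	curr->vruntime += 1;
}

static void task_tick_fair_model(struct tick_entity **se_hierarchy, int depth)
{
	for (int i = 0; i < depth; i++)
		entity_tick(se_hierarchy[i]);

	/* then: task_tick_numa(), update_misfit_status(), task_tick_core() */
}

int main(void)
{
	struct tick_entity task_se = { 0 }, group_se = { 0 };
	struct tick_entity *hier[] = { &task_se, &group_se };

	task_tick_fair_model(hier, 2);
	printf("%llu %llu\n", task_se.vruntime, group_se.vruntime);
	return 0;
}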
13099 struct sched_entity *se = &p->se, *curr;
13108 curr = cfs_rq->curr;
13109 if (curr)
13245 * This routine is mostly called to set cfs_rq->curr field when a task