Lines matching defs:curr in kernel/sched/fair.c
577 struct sched_entity *curr = cfs_rq->curr;
582 if (curr) {
583 if (curr->on_rq) {
584 vruntime = curr->vruntime;
586 curr = NULL;
594 if (!curr) {
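
The matches at 577-594 are from update_min_vruntime(): cfs_rq->min_vruntime is a monotonically increasing floor over the vruntimes of the running entity (while it is still on_rq) and the leftmost queued entity. A minimal user-space sketch of that rule follows; the struct names, the leftmost pointer, and the _model suffix are illustrative stand-ins rather than the kernel types (the kernel reads the leftmost entity out of an rbtree).

#include <stdint.h>
#include <stddef.h>

struct entity { uint64_t vruntime; int on_rq; };

struct runqueue {
	struct entity *curr;      /* currently running entity, may be NULL */
	struct entity *leftmost;  /* smallest-vruntime queued entity, or NULL */
	uint64_t min_vruntime;    /* monotonic floor for placing entities */
};

/* pick the later of two vruntimes; signed diff so the clock may wrap */
static uint64_t max_vruntime(uint64_t a, uint64_t b)
{
	return ((int64_t)(b - a) > 0) ? b : a;
}

static void update_min_vruntime_model(struct runqueue *rq)
{
	struct entity *curr = rq->curr;
	uint64_t vruntime = rq->min_vruntime;

	/* curr only counts while it is still runnable (cf. lines 582-586) */
	if (curr) {
		if (curr->on_rq)
			vruntime = curr->vruntime;
		else
			curr = NULL;
	}

	if (rq->leftmost) {
		uint64_t left = rq->leftmost->vruntime;

		/* cf. line 594: with no runnable curr, the leftmost decides */
		if (!curr || (int64_t)(left - vruntime) < 0)
			vruntime = left;
	}

	/* the floor never moves backwards */
	rq->min_vruntime = max_vruntime(rq->min_vruntime, vruntime);
}
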
896 struct sched_entity *curr = cfs_rq->curr;
900 if (unlikely(!curr)) {
904 delta_exec = now - curr->exec_start;
909 curr->exec_start = now;
911 schedstat_set(curr->statistics.exec_max, max(delta_exec, curr->statistics.exec_max));
913 curr->sum_exec_runtime += delta_exec;
916 curr->vruntime += calc_delta_fair(delta_exec, curr);
919 if (entity_is_task(curr)) {
920 struct task_struct *curtask = task_of(curr);
922 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
932 update_curr(cfs_rq_of(&rq->curr->se));
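
Lines 896-932 are update_curr()/update_curr_fair(): the running entity is charged the wall-clock time since exec_start, and that delta is scaled by its load weight into vruntime via calc_delta_fair(). A rough sketch of the accounting, assuming the mainline convention that a nice-0 task has weight 1024 (NICE_0_LOAD) and replacing the kernel's fixed-point inverse-weight tables with plain division; the _model and _approx names are illustrative only.

#include <stdint.h>

#define NICE_0_LOAD 1024ULL	/* weight of a nice-0 task */

/*
 * Approximation of calc_delta_fair(): heavier (lower-nice) tasks accrue
 * vruntime more slowly, lighter tasks more quickly.
 */
static uint64_t calc_delta_fair_approx(uint64_t delta_exec, uint64_t weight)
{
	if (weight == NICE_0_LOAD)
		return delta_exec;	/* common case: 1:1 */
	return delta_exec * NICE_0_LOAD / weight;
}

struct se_model {
	uint64_t exec_start;		/* when this run period began (ns) */
	uint64_t sum_exec_runtime;	/* total real time consumed (ns) */
	uint64_t vruntime;		/* weighted virtual runtime (ns) */
	uint64_t load_weight;		/* nice-dependent weight */
};

/* Bookkeeping corresponding to lines 904-916. */
static void update_curr_model(struct se_model *curr, uint64_t now)
{
	uint64_t delta_exec = now - curr->exec_start;

	curr->exec_start = now;
	curr->sum_exec_runtime += delta_exec;
	curr->vruntime += calc_delta_fair_approx(delta_exec, curr->load_weight);
}
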
1065 if (se != cfs_rq->curr) {
1084 if (se != cfs_rq->curr) {
1815 cur = rcu_dereference(dst_rq->curr);
2605 tsk = READ_ONCE(cpu_rq(cpu)->curr);
3018 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
3020 struct callback_head *work = &curr->numa_work;
3026 if ((curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work) {
3036 now = curr->se.sum_exec_runtime;
3037 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
3039 if (now > curr->node_stamp + period) {
3040 if (!curr->node_stamp) {
3041 curr->numa_scan_period = task_scan_start(curr);
3043 curr->node_stamp += period;
3045 if (!time_before(jiffies, curr->mm->numa_next_scan)) {
3046 task_work_add(curr, work, TWA_RESUME);
3089 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
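
Lines 3018-3089 are task_tick_numa() (and its stub when NUMA balancing is compiled out): the deferred NUMA scan work is only queued once the task has consumed numa_scan_period worth of CPU time beyond its last node_stamp. A stripped-down sketch of that rate limiting; it omits the initial node_stamp/task_scan_start() setup and the jiffies-based numa_next_scan throttle, and the names are illustrative.

#include <stdint.h>
#include <stdbool.h>

#define NSEC_PER_MSEC 1000000ULL

struct numa_state {
	uint64_t node_stamp;      /* sum_exec_runtime (ns) at the last trigger */
	uint64_t scan_period_ms;  /* current numa_scan_period */
};

/*
 * Returns true when the NUMA scan work should be queued; the real code
 * then calls task_work_add() (cf. line 3046).
 */
static bool numa_tick_should_scan(struct numa_state *ns, uint64_t sum_exec_runtime)
{
	uint64_t period = ns->scan_period_ms * NSEC_PER_MSEC;

	if (sum_exec_runtime <= ns->node_stamp + period)
		return false;

	/* advance the stamp by a whole period to keep the scan cadence */
	ns->node_stamp += period;
	return true;
}
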
3209 if (cfs_rq->curr == se) {
4419 bool curr = cfs_rq->curr == se;
4425 if (renorm && curr) {
4437 if (renorm && !curr) {
4461 if (!curr) {
4555 if (se != cfs_rq->curr) {
4590 static void check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
4596 ideal_runtime = sched_slice(cfs_rq, curr);
4597 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
4604 clear_buddies(cfs_rq, curr);
4618 delta = curr->vruntime - se->vruntime;
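
Lines 4590-4618 are check_preempt_tick(): the running entity is rescheduled either when it has used up its ideal slice for this period, or when it has run at least the minimum granularity and the leftmost waiter trails it in vruntime by more than a slice. A condensed sketch of those tests; ideal_runtime stands in for what sched_slice() would return, first for the leftmost queued entity, and the 0.75 ms granularity is an assumed default.

#include <stdint.h>
#include <stdbool.h>

static const uint64_t min_granularity_ns = 750000ULL;	/* assumed ~0.75 ms */

struct ent { uint64_t vruntime, sum_exec_runtime, prev_sum_exec_runtime; };

/* Returns true when the tick should force a reschedule (cf. 4590-4618). */
static bool check_preempt_tick_model(const struct ent *curr,
				     uint64_t ideal_runtime,
				     const struct ent *first)
{
	uint64_t delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	int64_t delta;

	if (delta_exec > ideal_runtime)		/* slice used up: preempt */
		return true;

	if (delta_exec < min_granularity_ns)	/* ran too briefly: keep running */
		return false;

	if (!first)				/* nothing is waiting */
		return false;

	/* preempt if the leftmost waiter lags curr by more than one slice */
	delta = (int64_t)(curr->vruntime - first->vruntime);
	return delta > 0 && (uint64_t)delta > ideal_runtime;
}
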
4644 cfs_rq->curr = se;
4659 static int wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
4668 static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
4674 * If curr is set we have to see if it's left of the leftmost entity
4677 if (!left || (curr && entity_before(curr, left))) {
4678 left = curr;
4690 if (se == curr) {
4694 if (!second || (curr && entity_before(curr, second))) {
4695 second = curr;
4745 cfs_rq->curr = NULL;
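
Lines 4644-4745 cover set_next_entity()/pick_next_entity()/put_prev_entity(). Because the running entity is kept out of the rbtree (set_next_entity sets cfs_rq->curr, put_prev_entity clears it), the picker has to compare the tree's leftmost entity against curr explicitly. A reduced sketch of that comparison, leaving out the skip/next/last buddy handling at 4690-4695; the names are illustrative.

#include <stdint.h>
#include <stddef.h>

struct ent { uint64_t vruntime; };

/* entity_before(): signed comparison so vruntime may wrap */
static int entity_before(const struct ent *a, const struct ent *b)
{
	return (int64_t)(a->vruntime - b->vruntime) < 0;
}

/*
 * The next entity is whichever of (leftmost, curr) has the smaller
 * vruntime (cf. 4674-4678).  Either argument may be NULL.
 */
static const struct ent *pick_next_entity_model(const struct ent *leftmost,
						const struct ent *curr)
{
	const struct ent *left = leftmost;

	if (!left || (curr && entity_before(curr, left)))
		left = curr;

	return left;	/* NULL only when both arguments are NULL */
}
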
4748 static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
4758 update_load_avg(cfs_rq, curr, UPDATE_TG);
4759 update_cfs_group(curr);
4779 check_preempt_tick(cfs_rq, curr);
4909 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) {
5145 if (rq->curr == rq->idle && rq->cfs.nr_running) {
5393 if (!cfs_rq->runtime_enabled || cfs_rq->curr) {
5702 if (rq->curr == p) {
5718 struct task_struct *curr = rq->curr;
5720 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class) {
5724 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency) {
5725 hrtick_start_fair(rq, curr);
5881 if (rq->curr == rq->idle)
7263 static long wakeup_latency_gran(struct sched_entity *curr, struct sched_entity *se)
7267 if ((se->latency_weight > 0) || (curr->latency_weight > 0))
7268 latency_weight -= curr->latency_weight;
7285 * Since it's curr running now, convert the gran from real-time
7288 * By using 'se' instead of 'curr' we penalize light tasks, so
7289 * they get preempted easier. That is, if 'se' < 'curr' then
7291 * lighter, if otoh 'se' > 'curr' then the resulting gran will
7301 * Should 'se' preempt 'curr'.
7314 static int wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
7316 s64 gran, vdiff = curr->vruntime - se->vruntime;
7319 vdiff += wakeup_latency_gran(curr, se);
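
Lines 7263-7319 are wakeup_latency_gran()/wakeup_gran()/wakeup_preempt_entity(); the latency_weight adjustment at 7267-7268 appears to come from the out-of-tree latency-nice patches rather than mainline. The core decision is a tri-state: a waking entity preempts curr only if its vruntime lead exceeds the wakeup granularity. A simplified sketch with the granularity (and any latency adjustment) already folded into a gran parameter; the _model name is illustrative.

#include <stdint.h>

struct ent { uint64_t vruntime; };

/*
 * Should 'se' preempt 'curr' at wakeup?  Mirrors the tri-state contract
 * of wakeup_preempt_entity() (cf. 7314-7319):
 *   -1: se is behind curr, do not preempt
 *    0: se is ahead, but not by more than the granularity
 *    1: se is ahead by more than the granularity, preempt
 * gran is the wakeup granularity already converted to virtual time.
 */
static int wakeup_preempt_entity_model(const struct ent *curr,
				       const struct ent *se,
				       uint64_t gran)
{
	int64_t vdiff = (int64_t)(curr->vruntime - se->vruntime);

	if (vdiff <= 0)
		return -1;
	if ((uint64_t)vdiff > gran)
		return 1;
	return 0;
}
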
7371 struct task_struct *curr = rq->curr;
7372 struct sched_entity *se = &curr->se, *pse = &p->se;
7373 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
7400 * Note: this also catches the edge-case of curr being in a throttled
7402 * enqueue of curr) will have resulted in resched being set. This
7406 if (test_tsk_need_resched(curr)) {
7411 if (unlikely(task_has_idle_policy(curr)) && likely(!task_has_idle_policy(p))) {
7450 if (unlikely(!se->on_rq || curr == rq->idle)) {
7485 struct sched_entity *curr = cfs_rq->curr;
7489 * have to consider cfs_rq->curr. If it is still a runnable
7493 if (curr) {
7494 if (curr->on_rq) {
7497 curr = NULL;
7517 se = pick_next_entity(cfs_rq, curr);
7638 struct task_struct *curr = rq->curr;
7639 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
7640 struct sched_entity *se = &curr->se;
7651 if (curr->policy != SCHED_BATCH) {
8412 curr_class = rq->curr->sched_class;
9216 if (rq->curr != rq->idle && rq->curr != p) {
10331 * if the curr task on busiest CPU can't be
10334 if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
11488 if (rq->curr->state != TASK_RUNNING || rq->curr->nr_cpus_allowed == 1) {
11541 * and everything must be accessed through the @rq and @curr passed in
11544 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
11547 struct sched_entity *se = &curr->se;
11555 task_tick_numa(rq, curr);
11558 update_misfit_status(curr, rq);
11559 update_overutilized_status(task_rq(curr));
11570 struct sched_entity *se = &p->se, *curr;
11578 curr = cfs_rq->curr;
11579 if (curr) {
11581 se->vruntime = curr->vruntime;
11585 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
11590 swap(curr->vruntime, se->vruntime);
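
Lines 11570-11590 are task_fork_fair(): the new child inherits the parent's vruntime, place_entity() then pushes the child forward by its initial placement penalty, and if sysctl_sched_child_runs_first is set and the parent is still ahead, the two vruntimes are swapped so the child is picked first. A tiny sketch of that swap rule; initial_penalty stands in for the vslice place_entity() would add (without it the swap condition could never trigger), and the function name is illustrative.

#include <stdint.h>
#include <stdbool.h>

struct ent { uint64_t vruntime; };

static bool entity_before(const struct ent *a, const struct ent *b)
{
	return (int64_t)(a->vruntime - b->vruntime) < 0;
}

/* Child placement at fork, cf. 11578-11590. */
static void place_forked_child(struct ent *parent, struct ent *child,
			       uint64_t initial_penalty, bool child_runs_first)
{
	/* inherit the parent's position, then apply the new-entity penalty */
	child->vruntime = parent->vruntime + initial_penalty;

	/* optionally hand the earlier key to the child so it runs first */
	if (child_runs_first && entity_before(parent, child)) {
		uint64_t tmp = parent->vruntime;

		parent->vruntime = child->vruntime;
		child->vruntime = tmp;
	}
}
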
11617 if (rq->curr == p) {
11762 if (rq->curr == p) {
11772 * This routine is mostly called to set cfs_rq->curr field when a task