Lines Matching refs:idle

545 	return tg->idle > 0;
550 	return cfs_rq->idle > 0;
2161 /* Find alternative idle CPU. */
2174 /* Failed to find an alternative idle CPU */
2383 /* Evaluate an idle CPU for a task numa move. */
2387 /* Nothing cached so current CPU went idle since the search. */
2392 * If the CPU is no longer truly idle and the previous best CPU
2406 * If a move to idle is allowed because there is capacity or load
2453 /* Use idle CPU if there is no imbalance */
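The fragments around lines 2383-2453 describe how a cached idle candidate is re-validated before a NUMA move is committed: the CPU that looked idle during the earlier search may have picked up work since, and an idle target is only taken when the move leaves no imbalance. A minimal user-space sketch of that re-check, where idle_cpu() and would_cause_imbalance() are assumed helpers standing in for the kernel's rq and NUMA-stats logic:

#include <stdbool.h>

/* Assumed helpers; the kernel derives these from rq state and NUMA group stats. */
bool idle_cpu(int cpu);
bool would_cause_imbalance(int src_nid, int dst_nid);

/*
 * Re-validate a cached idle candidate: drop it if it went busy since the
 * search, and only prefer an idle CPU over the previous best when the move
 * does not create an imbalance between the nodes.
 */
int pick_idle_target(int cached_cpu, int best_cpu, int src_nid, int dst_nid)
{
	if (cached_cpu >= 0 && !idle_cpu(cached_cpu))
		cached_cpu = -1;		/* no longer truly idle */

	if (cached_cpu < 0)
		return best_cpu;		/* keep the previously chosen CPU */

	if (would_cause_imbalance(src_nid, dst_nid))
		return best_cpu;		/* idle, but the move would hurt balance */

	return cached_cpu;			/* use the idle CPU: no imbalance */
}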
2696 * completely idle or all activity is in areas that are not of interest
3903 	 * conditions. Specifically, the case where the group was idle and we start the
3913 * That is, the sum collapses because all other CPUs are idle; the UP scenario.
4020 * It will not get called when we go idle, because the idle
4458 * time. Hence, limiting to the case where the source CPU is idle and
4940 	 * we cannot guarantee there is idle time on this CPU.
5660 cfs_b->idle = 0;
5961 /* Determine whether we need to wake up potentially idle CPU: */
5962 if (rq->curr == rq->idle && rq->cfs.nr_running)
6116 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
6130 /* Refill extra burst quota even if cfs_b->idle */
6134 * idle depends on !throttled (for the case of a large deficit), and if
6137 if (cfs_b->idle && !throttled)
6141 /* mark as potentially idle for the upcoming period */
6142 cfs_b->idle = 1;
6165 cfs_b->idle = 0;
6364 int idle = 0;
6373 idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);
6406 if (idle)
6410 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
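Lines 5660-6410 outline the cfs_b->idle handshake in the bandwidth code: runtime consumption clears the flag, the period timer marks the pool as potentially idle for the upcoming period, and if a period expires with the flag still set and nothing throttled, the timer is allowed to go idle (HRTIMER_NORESTART) until scheduling resumes. A simplified model of that handshake, with invented names (struct pool, account_runtime, period_tick) rather than the kernel's cfs_bandwidth machinery:

#include <stdbool.h>

/* Reduced stand-in for struct cfs_bandwidth. */
struct pool {
	bool idle;		/* no consumption seen in the previous period */
	bool throttled;		/* some runqueue is still waiting for quota */
	bool timer_active;	/* period timer is armed */
};

/* Any runtime consumption marks the pool busy for the current period. */
void account_runtime(struct pool *p)
{
	p->idle = false;
}

/*
 * Period expiry: if the previous period was idle and nothing is throttled,
 * let the timer go idle instead of re-arming; otherwise mark the pool as
 * potentially idle for the upcoming period. Returns true when the timer
 * should not be restarted (HRTIMER_NORESTART in the kernel).
 */
bool period_tick(struct pool *p)
{
	if (p->idle && !p->throttled) {
		p->timer_active = false;
		return true;
	}
	p->idle = true;
	return false;			/* HRTIMER_RESTART */
}

The point of the handshake is that a completely idle bandwidth pool stops generating periodic timer wakeups until activity resumes; the sketch only models the flag, not quota refill or throttling.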
6749 * When waking from idle, we don't need to check to preempt at wakeup
6750 * the idle thread and don't set next buddy as a candidate for being
6752 * In case of simultaneous wakeup from idle, the latency sensitive tasks
6854 if (rq->curr == rq->idle)
6955 int needs_update; /* Newly idle CPUs need their next_balance collated */
7079 * cache-affine and is (or will be) idle.
7089 * If this_cpu is idle, it implies the wakeup is from interrupt
7094 * If the prev_cpu is idle and cache affine then avoid a migration.
7147 * idle.
7209 struct cpuidle_state *idle = idle_get_state(rq);
7210 if (idle && idle->exit_latency < min_exit_latency) {
7212 * We give priority to a CPU whose idle state
7214 * of any idle timestamp.
7216 min_exit_latency = idle->exit_latency;
7219 } else if ((!idle || idle->exit_latency == min_exit_latency) &&
7222 * If equal or no active idle state, then
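Lines 7209-7222 give the tie-breaking applied when several candidate CPUs are idle: priority goes to the CPU whose idle state has the smallest exit latency, and among CPUs with an equal (or no reported) idle state, the one that went idle most recently wins. A compact sketch of that selection loop; struct idle_info and the array layout are illustrative, not the kernel's types:

#include <limits.h>

/* Illustrative per-CPU snapshot; the kernel reads this from cpuidle and the rq. */
struct idle_info {
	int exit_latency;	/* cost of leaving the current idle state, -1 if unknown */
	long long idle_stamp;	/* when the CPU went idle */
};

/* Pick the idle CPU that is cheapest to wake, latest idle timestamp breaking ties. */
int pick_shallowest_idle(const struct idle_info *cpu, int nr_cpus)
{
	int best = -1, min_exit = INT_MAX;
	long long latest_idle = LLONG_MIN;

	for (int i = 0; i < nr_cpus; i++) {
		int lat = cpu[i].exit_latency;

		if (lat >= 0 && lat < min_exit) {
			/* Strictly shallower idle state wins regardless of timestamp. */
			min_exit = lat;
			latest_idle = cpu[i].idle_stamp;
			best = i;
		} else if ((lat < 0 || lat == min_exit) &&
			   cpu[i].idle_stamp > latest_idle) {
			/* Equal or no reported idle state: prefer the most recently idled CPU. */
			latest_idle = cpu[i].idle_stamp;
			best = i;
		}
	}
	return best;
}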
7328 * Scans the local SMT mask to see if the entire core is idle, and records this
7357 * Scan the entire LLC domain for idle cores; this dynamically switches off if
7358 * there are no idle cores left in the system; tracked through
7367 bool idle = true;
7372 idle = false;
7386 if (idle)
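Lines 7328-7386 cover the idle-core scan: each candidate core's SMT mask is walked, the core only counts as idle when every sibling is idle, and an LLC-wide has_idle_cores hint switches the scan off once no idle core remains. A small sketch of the per-core test, assuming a cpu_is_idle() helper and a flat array of sibling CPU ids:

#include <stdbool.h>

bool cpu_is_idle(int cpu);	/* assumed helper: is this CPU's runqueue empty? */

/*
 * A core is idle only if every SMT sibling is idle; return the first sibling
 * as the pick, or -1 when any sibling is busy (the caller then moves on and,
 * if nothing is found anywhere, clears the LLC-wide has_idle_cores hint).
 */
int core_if_fully_idle(const int *siblings, int nr_siblings)
{
	bool idle = true;

	for (int i = 0; i < nr_siblings; i++) {
		if (!cpu_is_idle(siblings[i])) {
			idle = false;	/* one busy sibling disqualifies the core */
			break;
		}
	}
	return idle ? siblings[0] : -1;
}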
7394 * Scan the local SMT mask for idle CPUs.
7442 * Scan the LLC domain for idle CPUs; this is dynamically regulated by
7444 * average idle time for this rq (as found in rq->avg_idle).
7467 * If we're busy, the assumption that the last idle period
7469 * predicted idle time.
7495 /* overloaded LLC is unlikely to have idle cpu/core */
7526 * idle time.
7537 * Scan the asym_capacity domain for idle CPUs; pick the first idle one on which
7538 * the task fits. If no CPU is big enough, but there are idle ones, try to
7608 * Try and locate an idle core/thread in the LLC cache domain.
7638 * If the previous CPU is cache affine and idle, don't be stupid:
7661 /* Check a recently used CPU as a potential idle candidate: */
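Lines 7608-7661 list the cheap checks tried before any scan when placing a waking task: use the target if it is already idle, fall back to the previous CPU when it shares the cache and is idle, and give a recently used CPU the same treatment. A hedged sketch of that ordering; shares_llc(), cpu_is_idle() and the recent_cpu argument are illustrative names, not the kernel's:

#include <stdbool.h>

bool cpu_is_idle(int cpu);		/* assumed: the CPU has nothing to run */
bool shares_llc(int cpu, int target);	/* assumed: same last-level-cache domain */

/*
 * Fast paths tried before scanning the LLC: the target itself, then the
 * previous CPU, then a recently used CPU, whenever the candidate is
 * cache-affine and idle. Returns -1 if a full idle-core/idle-CPU scan
 * is still needed.
 */
int wake_fast_path(int target, int prev, int recent_cpu)
{
	if (cpu_is_idle(target))
		return target;

	if (prev != target && shares_llc(prev, target) && cpu_is_idle(prev))
		return prev;		/* cache affine and idle: don't migrate */

	if (recent_cpu != prev && recent_cpu != target &&
	    shares_llc(recent_cpu, target) && cpu_is_idle(recent_cpu))
		return recent_cpu;	/* recently used CPU as an idle candidate */

	return -1;			/* fall back to the LLC scan */
}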
7858 * WALT does not decay idle tasks in the same manner
8041 * small tasks on a CPU in order to let other CPUs go in deeper idle states,
8042 * but that could also hurt our chances to go cluster idle, and we have no
8256 * certain conditions an idle sibling CPU if the domain has SD_WAKE_AFFINE set.
8351 * updated in a while, a substantial idle time will be missed,
8521 /* Idle tasks are by definition preempted by non-idle tasks. */
8527 * Batch and idle tasks do not preempt non-idle tasks (their preemption
8540 * Preempt an idle group in favor of a non-idle group (and don't preempt
8606 goto idle;
8645 goto idle;
8715 idle:
8733 * rq is about to be idle, check if we need to update the
8896 * In order to avoid CPUs going idle while there's still work to do, new idle
8897 * balancing is more aggressive and has the newly idle CPU iterate up the domain
8900 * This adds some complexity to both (5) and (8) but it reduces the total idle
8953 * a task on SMT with busy sibling to another CPU on idle core.
9000 enum cpu_idle_type idle;
9096 /* Leaving a core idle is often worse than degrading locality. */
9097 if (env->idle == CPU_IDLE)
9161 if (env->idle == CPU_NEWLY_IDLE ||
9209 schedstat_inc(env->sd->lb_hot_gained[env->idle]);
9259 * lb_gained[env->idle] is updated (other is detach_tasks)
9263 schedstat_inc(env->sd->lb_gained[env->idle]);
9310 if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1)
9394 if (env->idle == CPU_NEWLY_IDLE)
9424 schedstat_add(env->sd->lb_gained[env->idle], detached);
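Lines 9000-9424 show how the pull side of load balancing adapts to the kind of idleness recorded in env->idle (the lb_* schedstats above are indexed by the same value): an idle or newly idle destination will not strip the source CPU below a single running task, and a newly idle destination bails out early so the new-idle pass stays cheap. A simplified model of those two stopping rules; the enum mirrors the CPU_IDLE/CPU_NOT_IDLE/CPU_NEWLY_IDLE distinction, but the struct and helper names are invented:

#include <stdbool.h>

/* Same three idleness kinds the stats in the fragments above are indexed by. */
enum idle_kind { DST_NOT_IDLE, DST_IDLE, DST_NEWLY_IDLE };

struct balance_env {
	enum idle_kind idle;	/* how idle the pulling (destination) CPU is */
	int src_nr_running;	/* runnable tasks left on the source CPU */
	int detached;		/* tasks pulled so far in this pass */
};

/* Decide whether to keep detaching tasks from the busiest runqueue. */
bool keep_pulling(const struct balance_env *env)
{
	/* An idle destination must not strip the source below one running task. */
	if (env->idle != DST_NOT_IDLE && env->src_nr_running <= 1)
		return false;

	/* Newly idle balancing is kept cheap: stop once a task has been pulled. */
	if (env->idle == DST_NEWLY_IDLE && env->detached >= 1)
		return false;

	return true;
}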
9583 * There can be a lot of idle CPU cgroups. Don't let fully
9999 * balancing load between cores, it is not sufficient that @cpu is idle. Only
10000 * use CPU priority if the whole core is idle.
10023 * can do asym_packing balance only if all its SMT siblings are idle. Also, it
10037 /* Ensure that the whole local core is idle, if applicable. */
10067 if (env->idle == CPU_NOT_IDLE)
10091 if (env->idle == CPU_NOT_IDLE || !busiest->sum_nr_running)
10193 } else if ((env->idle != CPU_NOT_IDLE) &&
10214 /* Check if dst CPU is idle and preferred to this group */
10216 env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
10350 * and make the core idle.
10361 * Select not overloaded group with lowest number of idle cpus
10364 * that the group has less spare capacity but finally more idle
10440 * idle_cpu_without - would a given CPU be idle without p ?
10444 * Return: 1 if the CPU would be idle. 0 otherwise.
10450 if (rq->curr != rq->idle && rq->curr != p)
10567 /* Select group with most idle CPUs */
10750 * Select group with highest number of idle CPUs. We could also
10753 	 * idle CPUs which means more opportunity to run tasks.
10777 if (!sched_feat(SIS_UTIL) || env->idle == CPU_NEWLY_IDLE)
10857 if (env->idle != CPU_NEWLY_IDLE ||
10993 * local CPU is (newly) idle. There is at least one
10997 if (env->idle != CPU_NOT_IDLE && env->imbalance == 0) {
11016 * idle cpus.
11107 * nr_idle : dst_cpu is not busy and the number of idle CPUs is quite
11212 if (env->idle == CPU_NOT_IDLE) {
11216 * busy, let another idle CPU try to pull task.
11232 * group wrt idle CPUs, it is balanced. The imbalance
11420 * whole core is idle.
11426 return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
11460 * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task.
11465 if ((env->idle != CPU_NOT_IDLE) &&
11504 * In the newly idle case, we will allow all the CPUs
11505 * to do the newly idle load balance.
11510 if (env->idle == CPU_NEWLY_IDLE) {
11517 /* Try to find first idle CPU */
11523 * Don't balance to idle SMT in busy core right away when
11524 * balancing cores, but remember the first idle SMT CPU for
11525 * later consideration. Find CPU on an idle core first.
11531 * If the core is not idle, and first SMT sibling which is
11532 	 * idle has been found, then it's not needed to check other
11542 * Are we the first idle core in a non-SMT domain or higher,
11543 * or the first idle CPU in a SMT domain?
11548 /* Are we the first idle CPU with busy siblings? */
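Lines 11504-11548 outline the gate deciding which CPU in a sched group actually runs the balance: a newly idle CPU is always allowed to try, otherwise the first idle CPU is chosen, preferring one on a fully idle core over an idle SMT sibling of a busy core, and every other CPU backs off. A hedged sketch of that selection; cpu_is_idle() and core_is_idle() are assumed helpers, and the no-idle-CPU fallback (the kernel defers to a designated group balance CPU) is only noted in a comment:

#include <stdbool.h>

bool cpu_is_idle(int cpu);	/* assumed: this CPU's runqueue is empty */
bool core_is_idle(int cpu);	/* assumed: all SMT siblings of this CPU are idle */

/*
 * Only one CPU in the group should balance: a newly idle CPU may always try;
 * otherwise the first idle CPU on an idle core is preferred, falling back to
 * the first idle SMT sibling of a busy core.
 */
bool should_balance_here(int this_cpu, bool newly_idle,
			 const int *group_cpus, int nr)
{
	int first_idle_smt = -1;

	if (newly_idle)
		return true;

	for (int i = 0; i < nr; i++) {
		int cpu = group_cpus[i];

		if (!cpu_is_idle(cpu))
			continue;

		if (!core_is_idle(cpu)) {
			/* Remember an idle thread of a busy core, keep looking. */
			if (first_idle_smt < 0)
				first_idle_smt = cpu;
			continue;
		}
		/* First CPU found on a fully idle core does the balancing. */
		return cpu == this_cpu;
	}

	/*
	 * No idle core: the first idle SMT CPU, if any, gets the job. (With no
	 * idle CPU at all, the kernel falls back to a designated group balance
	 * CPU; that case is omitted here.)
	 */
	return first_idle_smt >= 0 && first_idle_smt == this_cpu;
}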
11561 struct sched_domain *sd, enum cpu_idle_type idle,
11575 .idle = idle,
11584 schedstat_inc(sd->lb_count[idle]);
11594 schedstat_inc(sd->lb_nobusyg[idle]);
11600 schedstat_inc(sd->lb_nobusyq[idle]);
11606 schedstat_add(sd->lb_imbalance[idle], env.imbalance);
11667 * nohz-idle), we now have balance_cpu in a position to move
11725 schedstat_inc(sd->lb_failed[idle]);
11732 if (idle != CPU_NEWLY_IDLE)
11804 schedstat_inc(sd->lb_balanced[idle]);
11817 if (env.idle == CPU_NEWLY_IDLE)
11858 /* used by idle balance, so cpu_busy = 0 */
11868 * running tasks off the busiest CPU onto idle CPUs. It requires at
11920 .idle = CPU_IDLE,
11950 .idle = CPU_IDLE,
12045 static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
12049 int busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
12087 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
12090 * env->dst_cpu, so we can't know our idle
12093 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
12094 busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
12134 * idle load balancing details
12135 	 * - When one of the busy CPUs notices that there may be idle rebalancing
12136 	 *   needed, it will kick the idle load balancer, which then does idle
12137 	 *   load balancing for all the idle CPUs.
12166 * idle CPU in the HK_TYPE_MISC housekeeping set (if there is one).
12194 * is idle. And the softirq performing nohz idle load balance
12201 * Current decision point for kicking the idle load balancer in the presence
12202 * of idle CPUs in the system.
12217 	 * We may have recently been in ticked or tickless idle mode. At the first
12218 * busy tick after returning from idle, we will update the busy stats.
12223 * None are in tickless mode and hence no need for NOHZ idle load
12267 * currently idle; in which case, kick the ILB to move tasks
12271 * preferred CPU must be idle.
12310 * other CPUs are idle). We can't really know from here how busy
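Lines 12201-12310 sketch when a busy CPU kicks the idle load balancer: only when some CPUs are tickless idle, and only when this CPU sees work that could plausibly be spread, such as extra runnable tasks or a task that does not fit this CPU. The rough model below captures just those two triggers; the real decision also weighs asym packing, capacity and preferred-CPU rules hinted at above, and all names here are invented:

#include <stdbool.h>

/* Illustrative per-CPU view used to decide whether to kick the idle balancer. */
struct kick_ctx {
	int nr_running;		/* runnable tasks on this (busy) CPU */
	bool has_misfit_task;	/* a task too big for this CPU's capacity */
	bool idle_cpus_present;	/* some CPUs in the system are tickless idle */
};

/* Rough model: kick the ILB when idle CPUs exist and this CPU has spreadable work. */
bool should_kick_ilb(const struct kick_ctx *c)
{
	if (!c->idle_cpus_present)
		return false;		/* nobody is tickless idle: nothing to kick */

	if (c->nr_running > 1)
		return true;		/* more than one task: an idle CPU could help */

	return c->has_misfit_task;	/* a misfit task also justifies a kick */
}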
12377 * This routine will record that the CPU is going idle with tick stopped.
12378 * This info will be used in performing idle load balancing in the future.
12390 /* Spare idle load balancing on CPUs that don't want to be disturbed: */
12431 	 * Each time a cpu enters idle, we assume that it has blocked load and
12432 * enable the periodic update of the load of idle cpus
12456 * Internal function that runs load balance for all idle cpus. The load balance
12475 * We assume there will be no idle load after this update and clear
12476 	 * the has_blocked flag. If a cpu enters idle in the meantime, it will
12477 * set the has_blocked flag and trigger another update of idle load.
12478 	 * Because a cpu that becomes idle is added to idle_cpus_mask before
12480 * check the load of an idle cpu.
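Lines 12377-12481 describe the blocked-load bookkeeping around idle entry: a CPU that stops its tick is added to idle_cpus_mask and raises a has_blocked flag, and the updater clears that flag before walking the idle CPUs so that a CPU entering idle mid-walk re-arms another update instead of being missed. A small model of that ordering, using C11 atomics in place of the kernel's primitives:

#include <stdatomic.h>
#include <stdbool.h>

/* Shared nohz state: set by CPUs entering idle, consumed by the updater. */
static atomic_bool has_blocked_load;

/* Called when a CPU enters tickless idle: it may still carry blocked load. */
void cpu_enter_idle(void)
{
	/* (the CPU is also added to idle_cpus_mask at this point) */
	atomic_store(&has_blocked_load, true);
}

/* Updater: clear the flag first, then refresh every idle CPU's blocked load. */
void update_blocked_load_of_idle_cpus(void (*update_one)(int cpu),
				      const int *idle_cpus, int nr)
{
	/*
	 * Clear before walking: a CPU that enters idle during the walk sets
	 * the flag again, so its load is picked up by a later update rather
	 * than being lost.
	 */
	atomic_store(&has_blocked_load, false);

	for (int i = 0; i < nr; i++)
		update_one(idle_cpus[i]);
}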
12503 * chance for other idle cpu to pull load.
12567 * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
12570 static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
12579 if (idle != CPU_IDLE)
12589 * idle state.
12620 	/* Don't need to update blocked load of idle CPUs */
12627 * before entering idle state.
12635 static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
12645 * idle. Attempts to pull tasks from other CPUs.
12667 * Return 0; the task will be enqueued when switching to idle.
12674 * measure the duration of idle_balance() as idle time.
12752 * have been enqueued in the meantime. Since we're not going idle,
12779 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
12784 enum cpu_idle_type idle = this_rq->idle_balance ?
12797 * balancing on behalf of the other idle CPUs whose ticks are
12799 * give the idle CPUs a chance to load balance. Else we may
12803 if (nohz_idle_balance(this_rq, idle))
12808 rebalance_domains(this_rq, idle);
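Lines 12779-12808 give the shape of the rebalance softirq: work out whether this CPU is idle, first try to balance on behalf of the other tickless-idle CPUs (the nohz path), and only fall through to balancing this CPU's own domains when that pass did not already cover it. A hedged outline of that control flow, with invented function names standing in for nohz_idle_balance() and rebalance_domains():

#include <stdbool.h>

/* Assumed stand-ins for the nohz pass and the per-domain rebalance. */
bool balance_for_tickless_idle_cpus(bool this_cpu_idle);
void balance_own_domains(bool this_cpu_idle);

/* Rebalance softirq: nohz work for the other idle CPUs first, then our own domains. */
void run_rebalance(bool this_cpu_is_idle)
{
	/*
	 * If we balanced on behalf of the other tickless-idle CPUs, our own
	 * runqueue was covered by that pass and nothing more is needed here.
	 */
	if (balance_for_tickless_idle_cpus(this_cpu_is_idle))
		return;

	balance_own_domains(this_cpu_is_idle);
}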
12956 * if the sibling is forced idle, then trigger schedule to
12957 * give forced idle task a chance.
12960 * whole slice. But during force idle, we have siblings acting
12962 * tasks on this CPU and the forced idle CPU. Ideally, we should
12963 * go through the forced idle rq, but that would be a perf hit.
12964 * We can assume that the forced idle CPU has at least
13484 int sched_group_set_idle(struct task_group *tg, long idle)
13491 if (idle < 0 || idle > 1)
13496 if (tg->idle == idle) {
13501 tg->idle = idle;
13513 grp_cfs_rq->idle = idle;
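Lines 13484-13513 show the cgroup-side knob behind tg->idle: only 0 or 1 is accepted, writing the current value is a no-op, and a change is propagated from the task group down to its per-CPU cfs_rq instances (which is what the checks at lines 545/550 above read back). A minimal model of that validation and propagation, with the structures reduced to just the idle field:

#include <errno.h>

/* Reduced task group: one idle flag plus its per-CPU runqueue views. */
struct cfs_rq_view { int idle; };
struct task_group_view {
	int idle;
	struct cfs_rq_view *per_cpu;	/* one entry per possible CPU */
	int nr_cpus;
};

/* Set the group's idle policy: boolean only, then propagate to every CPU. */
int group_set_idle(struct task_group_view *tg, long idle)
{
	if (idle < 0 || idle > 1)
		return -EINVAL;		/* only 0 or 1 is accepted */

	if (tg->idle == idle)
		return 0;		/* unchanged: nothing to do */

	tg->idle = (int)idle;

	for (int cpu = 0; cpu < tg->nr_cpus; cpu++)
		tg->per_cpu[cpu].idle = (int)idle;	/* per-CPU cfs_rq follows the group */

	return 0;
}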
13580 * idle runqueue: