Lines Matching refs:idle
1718 /* Find alternative idle CPU. */
1732 /* Failed to find an alternative idle CPU */
1939 /* Evaluate an idle CPU for a task numa move. */
1943 /* Nothing cached so current CPU went idle since the search. */
1949 * If the CPU is no longer truly idle and the previous best CPU
1962 * If a move to idle is allowed because there is capacity or load
2006 /* Use idle CPU if there is no imbalance */
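The comments at 1718-2006 come from the NUMA-balancing move evaluation (task_numa_assign()/task_numa_compare()/task_numa_find_cpu()): an idle CPU found during the scan may have picked up work by the time the move is decided, so idleness is rechecked. A hedged sketch of that recheck; pick_idle_target() and its parameters are illustrative names, not fair.c identifiers, although idle_cpu() is the real test used there:

/*
 * Illustrative sketch, not fair.c code: recheck idleness at decision
 * time because the snapshot taken during the search may be stale.
 */
static int pick_idle_target(int cached_idle_cpu, int prev_best_cpu)
{
        /* The cached CPU may have picked up work since the search ran. */
        if (idle_cpu(cached_idle_cpu))
                return cached_idle_cpu;

        /* No longer truly idle: keep the previous best CPU if it still is. */
        if (idle_cpu(prev_best_cpu))
                return prev_best_cpu;

        /* Neither is idle any more; fall back to a load-based comparison. */
        return -1;
}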
2262 * completely idle or all activity is in areas that are not of interest
3276 * conditions. Specifically, the case where the group was idle and we start the
3286 * That is, the sum collapses because all other CPUs are idle; the UP scenario.
3401 * It will not get called when we go idle, because the idle
4182 * we cannot guarantee there is idle time on this CPU.
4871 cfs_b->idle = 0;
5144 /* Determine whether we need to wake up a potentially idle CPU: */
5145 if (rq->curr == rq->idle && rq->cfs.nr_running) {
5198 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
5214 * idle depends on !throttled (for the case of a large deficit), and if
5217 if (cfs_b->idle && !throttled) {
5224 /* mark as potentially idle for the upcoming period */
5225 cfs_b->idle = 1;
5250 cfs_b->idle = 0;
5464 int idle = 0;
5474 idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);
5504 if (idle) {
5509 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
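Lines 4871-5509 trace how cfs_b->idle lets the bandwidth period timer switch itself off. A condensed, hedged sketch of that flow, simplified from do_sched_cfs_period_timer() (quota refill and unthrottling elided); the hrtimer handler at 5504-5509 then maps the return value onto HRTIMER_NORESTART/HRTIMER_RESTART:

/* Simplified sketch: returns 1 ("idle") when the timer may be deactivated. */
static int period_timer_sketch(struct cfs_bandwidth *cfs_b, int throttled)
{
        /*
         * Quota went unused for a whole period and nothing is throttled:
         * nothing to do, let the hrtimer stop until bandwidth is used again.
         */
        if (cfs_b->idle && !throttled)
                return 1;

        /* ... refill runtime and unthrottle waiting cfs_rqs (elided) ... */

        if (!throttled) {
                /* Mark as potentially idle for the upcoming period. */
                cfs_b->idle = 1;
                return 0;
        }

        /* Throttled entities remain: keep the timer firing. */
        cfs_b->idle = 0;
        return 0;
}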
5881 if (rq->curr == rq->idle)
6116 * cache-affine and is (or will be) idle.
6125 * If this_cpu is idle, it implies the wakeup is from interrupt
6130 * If the prev_cpu is idle and cache affine then avoid a migration.
6182 * idle.
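Lines 6116-6182 belong to the wake-affine heuristic. A hedged sketch of wake_affine_idle() as those comments describe it (lightly trimmed; returning nr_cpumask_bits means "no preference from this heuristic"):

static int wake_affine_idle(int this_cpu, int prev_cpu, int sync)
{
        /*
         * this_cpu being idle implies an interrupt wakeup; only allow the
         * move if the two CPUs share cache, and if prev_cpu is also idle,
         * prefer it and avoid a migration altogether.
         */
        if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu))
                return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu;

        /* A sync wakeup from a CPU running a single task favours this_cpu. */
        if (sync && cpu_rq(this_cpu)->nr_running == 1)
                return this_cpu;

        return nr_cpumask_bits;
}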
6245 struct cpuidle_state *idle = idle_get_state(rq);
6246 if (idle && idle->exit_latency < min_exit_latency) {
6248 * We give priority to a CPU whose idle state
6250 * of any idle timestamp.
6252 min_exit_latency = idle->exit_latency;
6255 } else if ((!idle || idle->exit_latency == min_exit_latency) && rq->idle_stamp > latest_idle_timestamp) {
6257 * If equal or no active idle state, then
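Lines 6245-6257 are fragments of one comparison loop in find_idlest_group_cpu(). Pulled together and slightly simplified (initialisation and the non-idle fallback elided), the selection reads roughly as:

for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) {
        if (available_idle_cpu(i)) {
                struct rq *rq = cpu_rq(i);
                struct cpuidle_state *idle = idle_get_state(rq);

                if (idle && idle->exit_latency < min_exit_latency) {
                        /* Shallower idle state: cheapest to wake up. */
                        min_exit_latency = idle->exit_latency;
                        latest_idle_timestamp = rq->idle_stamp;
                        shallowest_idle_cpu = i;
                } else if ((!idle || idle->exit_latency == min_exit_latency) &&
                           rq->idle_stamp > latest_idle_timestamp) {
                        /* Equal or unknown latency: prefer the warmer cache. */
                        latest_idle_timestamp = rq->idle_stamp;
                        shallowest_idle_cpu = i;
                }
        }
}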
6360 * Scans the local SMT mask to see if the entire core is idle, and records this
6393 * Scan the entire LLC domain for idle cores; this dynamically switches off if
6394 * there are no idle cores left in the system; tracked through
6417 bool idle = true;
6422 idle = false;
6428 if (idle) {
6434 * Failed to find an idle core; stop looking for one.
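Lines 6360-6434 cover the idle-core bookkeeping and the idle-core scan. The fragments at 6417-6428 fit together roughly as below (a simplified sketch of the inner loop of select_idle_core(); here 'cpus' is the remaining scan set and 'target' the preferred starting CPU):

for_each_cpu_wrap(core, cpus, target) {
        bool idle = true;

        for_each_cpu(cpu, cpu_smt_mask(core)) {
                /* Drop siblings from the scan set so they aren't re-tested. */
                cpumask_clear_cpu(cpu, cpus);
                if (!available_idle_cpu(cpu))
                        idle = false;   /* one busy sibling spoils the core */
        }

        if (idle)
                return core;            /* the whole core is idle: take it */
}

When the scan finds nothing, the LLC-wide has-idle-cores hint mentioned at 6393-6394 is cleared (line 6434) so later wakeups skip straight to the cheaper per-CPU scan.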
6442 * Scan the local SMT mask for idle CPUs.
6483 * Scan the LLC domain for idle CPUs; this is dynamically regulated by
6485 * average idle time for this rq (as found in rq->avg_idle).
6545 * Scan the asym_capacity domain for idle CPUs; pick the first idle one on which
6546 * the task fits. If no CPU is big enough, but there are idle ones, try to
6594 * Try and locate an idle core/thread in the LLC cache domain.
6617 * If the previous CPU is cache affine and idle, don't be stupid:
6633 /* Check a recently used CPU as a potential idle candidate: */
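Lines 6594-6633 are the cheap early exits select_idle_sibling() tries before scanning the LLC. Approximately, with some secondary checks trimmed:

/* If the previous CPU is cache affine and idle, just reuse it. */
if (prev != target && cpus_share_cache(prev, target) &&
    available_idle_cpu(prev))
        return prev;

/* Check a recently used CPU as a potential idle candidate. */
recent_used_cpu = p->recent_used_cpu;
if (recent_used_cpu != prev &&
    recent_used_cpu != target &&
    cpus_share_cache(recent_used_cpu, target) &&
    available_idle_cpu(recent_used_cpu) &&
    cpumask_test_cpu(recent_used_cpu, p->cpus_ptr)) {
        /* Remember prev as the candidate for the next wakeup instead. */
        p->recent_used_cpu = prev;
        return recent_used_cpu;
}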
6754 * WALT does not decay idle tasks in the same manner
6969 * small tasks on a CPU in order to let other CPUs go into deeper idle states,
6970 * but that could also hurt our chances to go cluster idle, and we have no
7110 * certain conditions an idle sibling CPU if the domain has SD_WAKE_AFFINE set.
7410 /* Idle tasks are by definition preempted by non-idle tasks. */
7416 * Batch and idle tasks do not preempt non-idle tasks (their preemption
7447 * Also, during early boot the idle thread is in the fair class,
7450 if (unlikely(!se->on_rq || curr == rq->idle)) {
7468 goto idle;
7510 goto idle;
7582 idle:
7604 * rq is about to be idle, check if we need to update the
7773 * In order to avoid CPUs going idle while there's still work to do, new idle
7774 * balancing is more aggressive and has the newly idle CPU iterate up the domain
7777 * This adds some complexity to both (5) and (8) but it reduces the total idle
7867 enum cpu_idle_type idle;
7965 /* Leaving a core idle is often worse than degrading locality. */
7966 if (env->idle == CPU_IDLE) {
8029 if (env->idle == CPU_NEWLY_IDLE || (env->flags & LBF_DST_PINNED)) {
8073 schedstat_inc(env->sd->lb_hot_gained[env->idle]);
8125 * lb_gained[env->idle] is updated (other is detach_tasks)
8129 schedstat_inc(env->sd->lb_gained[env->idle]);
8171 if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1) {
8256 if (env->idle == CPU_NEWLY_IDLE) {
8288 schedstat_add(env->sd->lb_gained[env->idle], detached);
8478 * There can be a lot of idle CPU cgroups. Don't let fully
9007 /* Check if dst CPU is idle and preferred to this group */
9008 if (env->sd->flags & SD_ASYM_PACKING && env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
9122 * Select not overloaded group with lowest number of idle cpus
9125 * that the group has less spare capacity but finally more idle
9206 * idle_cpu_without - would a given CPU be idle without p?
9210 * Return: 1 if the CPU would be idle. 0 otherwise.
9216 if (rq->curr != rq->idle && rq->curr != p) {
9329 /* Select group with most idle CPUs */
9492 * Select group with highest number of idle CPUs. We could also
9495 * idle CPUs which means more opportunity to run task.
9521 if (env->idle == CPU_NEWLY_IDLE && READ_ONCE(nohz.has_blocked)) {
9535 if (env->idle != CPU_NEWLY_IDLE || time_after_eq(jiffies, sg->sgc->next_update)) {
9595 * tasks that remain local when the source domain is almost idle.
9667 * local CPU is (newly) idle. There is at least one
9671 if (env->idle != CPU_NOT_IDLE && env->imbalance == 0) {
9691 * idle cpus.
9760 * nr_idle : dst_cpu is not busy and the number of idle CPUs is quite
9871 if (env->idle == CPU_NOT_IDLE) {
9875 * busy, let another idle CPU try to pull a task.
9884 * group wrt idle CPUs, it is balanced. The imbalance
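Lines 9871-9884 belong to the final "are we actually imbalanced?" checks in find_busiest_group(). A hedged sketch of the two checks the fragments describe (the surrounding conditions and labels are elided):

if (env->idle == CPU_NOT_IDLE) {
        /*
         * The local CPU is busy and the busiest group is not overloaded:
         * don't steal work here, let an actually idle CPU try to pull
         * a task instead.
         */
        goto out_balanced;
}

if (busiest->group_weight > 1 &&
    local->idle_cpus <= (busiest->idle_cpus + 1)) {
        /*
         * The groups are already even with respect to idle CPUs (a
         * difference of one is not significant); migrating would only
         * move the imbalance onto another group.
         */
        goto out_balanced;
}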
10055 return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
10068 * The dst_cpu is idle and the src_cpu has only 1 CFS task.
10073 if ((env->idle != CPU_NOT_IDLE) && (env->src_rq->cfs.h_nr_running == 1)) {
10125 * In the newly idle case, we will allow all the CPUs
10126 * to do the newly idle load balance.
10128 if (env->idle == CPU_NEWLY_IDLE) {
10132 /* Try to find first idle CPU */
10139 /* Are we the first idle CPU? */
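Lines 10125-10139 come from should_we_balance(): for a newly idle balance any CPU may proceed, otherwise only one CPU per group (the first idle one, or the designated balance CPU if none are idle) actually runs the balance so the work is not duplicated. A lightly trimmed sketch:

static int should_we_balance(struct lb_env *env)
{
        struct sched_group *sg = env->sd->groups;
        int cpu, balance_cpu = -1;

        /* A newly idle CPU is always allowed to try to pull work. */
        if (env->idle == CPU_NEWLY_IDLE)
                return 1;

        /* Try to find the first idle CPU of the group. */
        for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus) {
                if (!idle_cpu(cpu))
                        continue;
                balance_cpu = cpu;
                break;
        }

        /* No idle CPU: fall back to the group's designated balance CPU. */
        if (balance_cpu == -1)
                balance_cpu = group_balance_cpu(sg);

        /* Only that one CPU performs the balance at this domain level. */
        return balance_cpu == env->dst_cpu;
}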
10151 static int load_balance(int this_cpu, struct rq *this_rq, struct sched_domain *sd, enum cpu_idle_type idle,
10166 .idle = idle,
10175 schedstat_inc(sd->lb_count[idle]);
10185 schedstat_inc(sd->lb_nobusyg[idle]);
10191 schedstat_inc(sd->lb_nobusyq[idle]);
10197 schedstat_add(sd->lb_imbalance[idle], env.imbalance);
10255 * nohz-idle), we now have balance_cpu in a position to move
10313 schedstat_inc(sd->lb_failed[idle]);
10320 if (idle != CPU_NEWLY_IDLE) {
10401 schedstat_inc(sd->lb_balanced[idle]);
10414 if (env.idle == CPU_NEWLY_IDLE) {
10456 /* used by idle balance, so cpu_busy = 0 */
10467 * running tasks off the busiest CPU onto idle CPUs. It requires at
10522 .idle = CPU_IDLE,
10552 .idle = CPU_IDLE,
10632 static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
10636 int busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
10681 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
10684 * env->dst_cpu, so we can't know our idle
10687 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
10688 busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
10721 * If this CPU has been elected to perform the nohz idle
10722 * balance. Other idle CPUs have already rebalanced with
10724 * updated accordingly. This CPU is now running the idle load
10728 if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance)) {
10742 * idle load balancing details
10743 * - When one of the busy CPUs notices that there may be an idle rebalancing
10744 * needed, they will kick the idle load balancer, which then does idle
10745 * load balancing for all the idle CPUs.
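Lines 10742-10745 summarise the kick mechanism. A hedged sketch of what the kick looks like, close to kick_ilb() of this era (newer kernels wake the target with a different IPI primitive, so treat smp_send_reschedule() as illustrative of the mechanism rather than the exact call):

static void kick_ilb(unsigned int flags)
{
        int ilb_cpu = find_new_ilb();   /* first idle housekeeping CPU */

        if (ilb_cpu >= nr_cpu_ids)
                return;                 /* nobody is idle, nothing to kick */

        /* Record why the ILB is kicked; bail if a kick is already pending. */
        flags = atomic_fetch_or(flags, nohz_flags(ilb_cpu));
        if (flags & NOHZ_KICK_MASK)
                return;

        /*
         * The scheduler IPI brings the target out of the idle loop; it then
         * notices the pending NOHZ kick and runs the idle load balance from
         * the SCHED_SOFTIRQ on behalf of all tick-stopped CPUs.
         */
        smp_send_reschedule(ilb_cpu);
}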
10770 * idle CPU in the HK_FLAG_MISC housekeeping set (if there is one).
10800 * is idle. And the softirq performing nohz idle load balance
10807 * Current decision point for kicking the idle load balancer in the presence
10808 * of idle CPUs in the system.
10824 * We may have recently been in ticked or tickless idle mode. At the first
10825 * busy tick after returning from idle, we will update the busy stats.
10830 * None are in tickless mode and hence no need for NOHZ idle load
10877 * currently idle; in which case, kick the ILB to move tasks
10917 * other CPUs are idle). We can't really know from here how busy
10983 * This routine will record that the CPU is going idle with tick stopped.
10984 * This info will be used in performing idle load balancing in the future.
10994 * A CPU can be paused while it is idle with its tick
10998 * the nohz idle balance, which should be avoided.
11000 * When the paused CPU exits idle and enters again,
11007 /* Spare idle load balancing on CPUs that don't want to be disturbed: */
11050 * Each time a cpu enters idle, we assume that it has blocked load and
11051 * enable the periodic update of the load of idle cpus
11057 * Internal function that runs load balance for all idle cpus. The load balance
11061 * through all idle CPUs.
11063 static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags, enum cpu_idle_type idle)
11079 * We assume there will be no idle load after this update and clear
11080 * the has_blocked flag. If a cpu enters idle in the meantime, it will
11081 * set the has_blocked flag and trigger another update of idle load.
11082 * Because a cpu that becomes idle is added to idle_cpus_mask before
11084 * check the load of an idle cpu.
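The comment at 11079-11084 is about ordering. A hedged, illustrative pairing of the two sides, condensed from nohz_balance_enter_idle() and _nohz_idle_balance() (the barriers shown are the ones used there). Whichever way the two race, an idle CPU's blocked load is picked up either by the current pass over idle_cpus_mask or by a later pass triggered by the has_blocked flag it set:

/* Idle-entry side (cf. nohz_balance_enter_idle()). */
cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
smp_mb__after_atomic();         /* publish the mask bit before the flag */
WRITE_ONCE(nohz.has_blocked, 1);

/* Balance side (cf. _nohz_idle_balance()). */
WRITE_ONCE(nohz.has_blocked, 0);
smp_mb();                       /* clear the flag before reading the mask */
for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
        /* ... update the blocked load of each idle CPU ... */
}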
11151 /* Newly idle CPU doesn't need an update */
11152 if (idle != CPU_NEWLY_IDLE) {
11163 /* The full idle balance loop has been done */
11176 * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
11179 static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
11189 if (idle != CPU_IDLE) {
11193 _nohz_idle_balance(this_rq, flags, idle);
11215 /* Don't need to update blocked load of idle CPUs */
11222 * This CPU is going to be idle and blocked load of idle CPUs
11224 * candidate for ilb instead of waking up another idle CPU.
11238 static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
11250 * idle. Attempts to pull tasks from other CPUs.
11272 * measure the duration of idle_balance() as idle time.
11352 * have been enqueued in the meantime. Since we're not going idle,
11380 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
11385 enum cpu_idle_type idle = this_rq->idle_balance ? CPU_IDLE : CPU_NOT_IDLE;
11398 * balancing on behalf of the other idle CPUs whose ticks are
11400 * give the idle CPUs a chance to load balance. Else we may
11404 if (nohz_idle_balance(this_rq, idle)) {
11410 rebalance_domains(this_rq, idle);
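Lines 11380-11410 are the SCHED_SOFTIRQ handler itself. For context, a hedged sketch that stays close to run_rebalance_domains() of this era (annotations such as __latent_entropy trimmed):

static void run_rebalance_domains(struct softirq_action *h)
{
        struct rq *this_rq = this_rq();
        enum cpu_idle_type idle = this_rq->idle_balance ?
                                        CPU_IDLE : CPU_NOT_IDLE;

        /*
         * If this CPU was kicked, balance on behalf of all tick-stopped
         * idle CPUs first; that pass covers this CPU too, so we're done.
         */
        if (nohz_idle_balance(this_rq, idle))
                return;

        /* Otherwise, the regular per-domain rebalance for this CPU. */
        update_blocked_averages(this_rq->cpu);
        rebalance_domains(this_rq, idle);
}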
12054 * idle runqueue: