Lines matching refs:local

159  * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
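The line above is from the CFS bandwidth code. As a rough illustration of the pattern (the names and the 5ms slice size below are assumptions, not the kernel's exact implementation), runtime moves from the group-wide pool into a per-runqueue pool in fixed slices:

    #include <stdint.h>

    #define SLICE_NS 5000000ULL /* assumed 5ms slice */

    /* Move up to one slice of runtime from the global (per-task-group)
     * pool into a local (per-cfs_rq) pool; returns the amount moved. */
    static uint64_t assign_local_runtime(uint64_t *global_pool, uint64_t *local_pool)
    {
        uint64_t amount = SLICE_NS;

        if (*global_pool < amount)
            amount = *global_pool; /* global pool nearly exhausted */

        *global_pool -= amount;
        *local_pool  += amount;
        return amount;
    }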
2237 * increments. The more local the fault statistics are, the higher the scan
2238 * period will be for the next scan window. If local/(local+remote) ratio is
2240 * the scan period will decrease. Aim for 70% local accesses.
2247 * our memory is already on our local node, or if the majority of
2258 unsigned long local = p->numa_faults_locality[1];
2264 * migration then it implies we are migrating too quickly or the local
2267 if (local + shared == 0 || p->numa_faults_locality[2]) {
2282 lr_ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
2287 * Most memory accesses are local. There is no need to
2288 * do fast NUMA scanning, since memory is already local.
2309 * yet they are not on the local NUMA node. Speed up
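Taken together, the fragments above describe how the NUMA scan period adapts to the local/remote fault ratio. A minimal standalone model (assuming NUMA_PERIOD_SLOTS = 10 and NUMA_PERIOD_THRESHOLD = 7, consistent with the 70% target stated above; the real code also weighs private vs. shared faults and clamps the period):

    #define NUMA_PERIOD_SLOTS     10
    #define NUMA_PERIOD_THRESHOLD  7 /* 7/10 == the 70% local target */

    static unsigned int adapt_scan_period(unsigned int period,
                                          unsigned long local, unsigned long remote)
    {
        unsigned long lr_ratio;

        if (local + remote == 0)
            return period; /* no fault samples: leave the period alone */

        lr_ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);

        if (lr_ratio >= NUMA_PERIOD_THRESHOLD)
            period += period / NUMA_PERIOD_SLOTS; /* mostly local: scan slower */
        else
            period -= period / NUMA_PERIOD_SLOTS; /* too remote: scan faster */

        return period;
    }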
2735 int local = !!(flags & TNF_FAULT_LOCAL);
2777 * actively using should be counted as local. This allows the
2781 if (!priv && !local && ng && ng->active_nodes > 1 && numa_is_active_node(cpu_node, ng) &&
2783 local = 1;
2804 p->numa_faults_locality[local] += pages;
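The accounting pattern in these lines can be condensed as follows (a sketch; the struct and parameter names are illustrative stand-ins for the task_struct fields and TNF flags used above):

    struct numa_stats_sketch {
        /* [0] remote faults, [1] local faults, [2] failed migrations */
        unsigned long numa_faults_locality[3];
    };

    static void account_numa_fault(struct numa_stats_sketch *p,
                                   int fault_was_local,
                                   int on_active_group_node,
                                   unsigned long pages)
    {
        int local = !!fault_was_local;

        /* As in the code above: a shared fault landing on a node the
         * task's NUMA group actively uses is counted as local too. */
        if (!local && on_active_group_node)
            local = 1;

        p->numa_faults_locality[local] += pages;
    }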
3173 * Remove and clamp on negative, from a local variable.
3176 * and is thus optimized for local variable updates.
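A plausible shape for the macro this comment documents (a sketch; the kernel's version differs in detail). Because the subtraction happens on a plain local variable, the compiler is free to keep it in a register, unlike the explicit load-store variant used for fields that are observed locklessly:

    /* Subtract _val from *(_ptr), clamping at zero instead of wrapping. */
    #define lsub_positive(_ptr, _val) do {                  \
        typeof(*(_ptr)) __val = (_val);                     \
        *(_ptr) -= (*(_ptr) > __val) ? __val : *(_ptr);     \
    } while (0)

    /* usage: unsigned int n = 3; lsub_positive(&n, 5); => n == 0, not wrapped */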
6360 * Scans the local SMT mask to see if the entire core is idle, and records this
6442 * Scan the local SMT mask for idle CPUs.
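In outline, both scans walk the SMT sibling mask of a core. A simplified stand-in (a plain array instead of a cpumask, and a caller-supplied idleness predicate):

    /* A core counts as idle only if every SMT sibling is idle. */
    static int whole_core_idle(const int *siblings, int nr_siblings,
                               int (*cpu_is_idle)(int cpu))
    {
        int i;

        for (i = 0; i < nr_siblings; i++) {
            if (!cpu_is_idle(siblings[i]))
                return 0; /* one busy sibling spoils the core */
        }
        return 1;
    }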
7725 * - local vs global optima in the discrete case. ]
7797 * The big problem is S_k, it's a global sum needed to compute a local (W_i)
7829 * SD_ASYM_PACKING only: One local CPU with higher capacity is available,
8609 struct sched_group *local; /* Local group in this sd */
8616 struct sg_lb_stats local_stat; /* Statistics of the local group */
8630 .local = NULL,
9055 (!group_smaller_max_cpu_capacity(sg, sds->local) || sds->local_stat.group_type != group_has_spare)) {
9144 (group_smaller_min_cpu_capacity(sds->local, sg))) {
9252 unsigned int local;
9257 local = task_running_on_cpu(i, p);
9258 sgs->sum_h_nr_running += rq->cfs.h_nr_running - local;
9260 nr_running = rq->nr_running - local;
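The subtraction of `local` above sizes each runqueue as if the waking task p were absent. Reduced to its core (illustrative types):

    struct rq_sketch {
        unsigned int nr_running;       /* all runnable tasks */
        unsigned int cfs_h_nr_running; /* runnable CFS tasks, incl. hierarchy */
    };

    static void sum_without_task(const struct rq_sketch *rq, int p_runs_here,
                                 unsigned int *sum_h_nr_running,
                                 unsigned int *sum_nr_running)
    {
        unsigned int local = p_runs_here ? 1 : 0;

        /* Discount p itself so the group is judged as if p had left. */
        *sum_h_nr_running += rq->cfs_h_nr_running - local;
        *sum_nr_running   += rq->nr_running - local;
    }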
9353 struct sched_group *idlest = NULL, *local = NULL, *group = sd->groups;
9383 local = group;
9401 /* The local group has been skipped because of CPU affinity */
9402 if (!local) {
9407 * If the local group is idler than the selected idlest group
9415 * If the local group is busier than the selected idlest group
9427 * the local domain to be very lightly loaded relative to the
9431 * and consider staying local.
9439 * If the local group is less loaded than the selected
9458 if (local->sgc->max_capacity >= idlest->sgc->max_capacity) {
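The comparisons above form a small decision ladder. Condensed into a sketch (group types are an ordered busyness scale, lower meaning more idle; the real function has several more cases, e.g. the NUMA and spare-capacity paths):

    enum pick { STAY_LOCAL, GO_IDLEST };

    static enum pick local_vs_idlest(int local_type, int idlest_type,
                                     unsigned long local_max_cap,
                                     unsigned long idlest_max_cap)
    {
        if (local_type < idlest_type) /* local group is idler */
            return STAY_LOCAL;
        if (local_type > idlest_type) /* local group is busier */
            return GO_IDLEST;

        /* Same type: tie-break on max CPU capacity, as in the
         * misfit-task case above. */
        return (local_max_cap >= idlest_max_cap) ? STAY_LOCAL : GO_IDLEST;
    }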
9516 struct sg_lb_stats *local = &sds->local_stat;
9532 sds->local = sg;
9533 sgs = local;
9595 * tasks that remain local when the source domain is almost idle.
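A sketch of that allowance (the threshold of two tasks is an assumption matching "a simple pair"; the kernel function takes more inputs):

    /* Ignore a small imbalance so a communicating pair stays on one node. */
    static long adjust_numa_imbalance_sketch(long imbalance, int src_nr_running)
    {
        if (src_nr_running <= 2)
            return 0;
        return imbalance;
    }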
9613 struct sg_lb_stats *local, *busiest;
9615 local = &sds->local_stat;
9648 * Try to use spare capacity of local group without overloading it or
9651 if (local->group_type == group_has_spare) {
9662 env->imbalance = max(local->group_capacity, local->group_util) - local->group_util;
9667 * local CPU is (newly) idle. There is at least one
9686 lsub_positive(&nr_diff, local->sum_nr_running);
9694 env->imbalance = max_t(long, 0, (local->idle_cpus - busiest->idle_cpus) >> 1);
9709 if (local->group_type < group_overloaded) {
9715 local->avg_load = (local->group_load * SCHED_CAPACITY_SCALE) / local->group_capacity;
9719 * If the local group is more loaded than the selected
9722 if (local->avg_load >= busiest->avg_load) {
9738 (sds->avg_load - local->avg_load) * local->group_capacity) /
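Two of the formulas above, isolated (SCHED_CAPACITY_SCALE is 1024 in the kernel; the helper names here are illustrative):

    #define SCHED_CAPACITY_SCALE 1024

    /* Spare-capacity branch: pull enough utilization to fill the local
     * group, i.e. max(capacity, util) - util, as at line 9662. */
    static unsigned long imbalance_from_spare(unsigned long group_capacity,
                                              unsigned long group_util)
    {
        unsigned long target = group_capacity > group_util ?
                                        group_capacity : group_util;
        return target - group_util;
    }

    /* Load of a group normalized by its compute capacity, as at line 9715. */
    static unsigned long group_avg_load(unsigned long group_load,
                                        unsigned long group_capacity)
    {
        return (group_load * SCHED_CAPACITY_SCALE) / group_capacity;
    }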
9745 * Decision matrix according to the local and busiest group type:
9747 * busiest \ local has_spare fully_busy misfit asym imbalanced overloaded
9777 struct sg_lb_stats *local, *busiest;
9796 local = &sds.local_stat;
9824 * If the local group is busier than the selected busiest group
9827 if (local->group_type > busiest->group_type) {
9835 if (local->group_type == group_overloaded) {
9837 * If the local group is more loaded than the selected
9840 if (local->avg_load >= busiest->avg_load) {
9851 if (local->avg_load >= sds.avg_load) {
9859 if (100 * busiest->avg_load <= env->sd->imbalance_pct * local->avg_load) {
9865 if (sds.prefer_sibling && local->group_type == group_has_spare &&
9866 busiest->sum_nr_running > local->sum_nr_running + 1) {
9874 * result the local one too) but this CPU is already
9880 if (busiest->group_weight > 1 && local->idle_cpus <= (busiest->idle_cpus + 1)) {
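The conservatism check at line 9859 in isolation: with a typical imbalance_pct of 117, busiest must carry at least 17% more average load than local before balancing is attempted (a worked sketch):

    static int busiest_worth_balancing(unsigned long busiest_avg_load,
                                       unsigned long local_avg_load,
                                       unsigned int imbalance_pct)
    {
        /* e.g. local = 1000, busiest = 1150, pct = 117:
         * 100 * 1150 = 115000 <= 117 * 1000 = 117000 -> stay balanced. */
        return 100 * busiest_avg_load > imbalance_pct * local_avg_load;
    }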
10996 * from the local CPU, so it can't be called during
11401 * load balance only within the local sched_domain hierarchy
11540 * goes along full dynticks. Therefore no local assumption can be made