Lines matching defs:flags in kernel/sched/fair.c

1055 static inline void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1069 if (flags & ENQUEUE_WAKEUP) {
1074 static inline void update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1088 if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
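
The four lines above test single bits of the enqueue/dequeue flags argument to decide whether wakeup and sleep statistics apply. A minimal standalone sketch of that bit-test pattern; the flag names follow the kernel, but the values here are illustrative stand-ins, not the kernel's definitions:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's enqueue/dequeue flag bits. */
#define ENQUEUE_WAKEUP 0x01
#define DEQUEUE_SLEEP  0x01

static void update_stats_enqueue_sketch(int flags)
{
        /* Only a task that is waking up has sleep statistics to fold in. */
        if (flags & ENQUEUE_WAKEUP)
                printf("enqueue: account wakeup latency\n");
}

static void update_stats_dequeue_sketch(int flags, bool is_task)
{
        /* Only a task going to sleep records a sleep-start timestamp. */
        if ((flags & DEQUEUE_SLEEP) && is_task)
                printf("dequeue: record sleep start\n");
}

int main(void)
{
        update_stats_enqueue_sketch(ENQUEUE_WAKEUP);
        update_stats_dequeue_sketch(DEQUEUE_SLEEP, true);
        return 0;
}
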
1816 if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur))) {
2570 static void task_numa_group(struct task_struct *p, int cpupid, int flags, int *priv)
2641 if (flags & TNF_SHARED) {
2696 unsigned long flags;
2704 spin_lock_irqsave(&grp->lock, flags);
2711 spin_unlock_irqrestore(&grp->lock, flags);
2730 void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
2733 bool migrated = flags & TNF_MIGRATED;
2735 int local = !!(flags & TNF_FAULT_LOCAL);
2769 if (!priv && !(flags & TNF_NO_GROUP)) {
2770 task_numa_group(p, last_cpupid, flags, &priv);
2798 if (flags & TNF_MIGRATE_FAIL) {
2847 if (p->flags & PF_EXITING) {
3026 if ((curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work) {
3060 if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING)) {
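
The NUMA-fault lines decode several independent TNF_* bits out of the one flags word (migrated, local, shared, no-group). A standalone sketch of that decoding; the bit values are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's TNF_* fault flags. */
#define TNF_MIGRATED    0x01
#define TNF_NO_GROUP    0x02
#define TNF_SHARED      0x04
#define TNF_FAULT_LOCAL 0x08

static void task_numa_fault_sketch(int pages, int flags)
{
        bool migrated = flags & TNF_MIGRATED;        /* page moved to the faulting node */
        int  local    = !!(flags & TNF_FAULT_LOCAL); /* normalize to 0/1 for accounting */

        if (flags & TNF_SHARED)
                printf("shared fault: candidate for numa grouping\n");
        if (!(flags & TNF_NO_GROUP))
                printf("grouping allowed for this fault\n");

        printf("pages=%d migrated=%d local=%d\n", pages, migrated, local);
}

int main(void)
{
        task_numa_fault_sketch(1, TNF_MIGRATED | TNF_FAULT_LOCAL);
        return 0;
}
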
3391 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
3410 cpufreq_update_util(rq, flags);
3926 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
3935 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) {
3942 if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
3955 if (flags & UPDATE_TG) {
4002 unsigned long flags;
4012 raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags);
4017 raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags);
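
Several of the matches are not bitmask flags at all but the local unsigned long used by the *_irqsave/*_irqrestore pair to carry saved interrupt state across a critical section. A userspace mock of that save/lock/unlock/restore shape, using stand-in helpers rather than the kernel primitives:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Stand-ins: the kernel variants additionally disable local interrupts
 * and stash the previous interrupt state in 'flags'.
 */
static void lock_irqsave_mock(unsigned long *flags)
{
        *flags = 1;                /* pretend interrupts were enabled */
        pthread_mutex_lock(&lock);
}

static void unlock_irqrestore_mock(unsigned long flags)
{
        pthread_mutex_unlock(&lock);
        (void)flags;               /* the kernel would restore IRQ state here */
}

int main(void)
{
        unsigned long flags;

        lock_irqsave_mock(&flags);
        printf("critical section\n");
        unlock_irqrestore_mock(flags);
        return 0;
}
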
4416 static void enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
4418 bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED);
4454 if (flags & ENQUEUE_WAKEUP) {
4459 update_stats_enqueue(cfs_rq, se, flags);
4533 static void dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
4551 update_stats_dequeue(cfs_rq, se, flags);
4567 if (!(flags & DEQUEUE_SLEEP)) {
4582 if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE) {
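
The last test above is a two-bit comparison: the branch is taken unless DEQUEUE_SAVE is set without DEQUEUE_MOVE, i.e. unless the entity is only being parked. A small sketch of that mask-and-compare pattern, with illustrative bit values:

#include <stdio.h>

/* Illustrative values, not the kernel's definitions. */
#define DEQUEUE_SLEEP 0x01
#define DEQUEUE_SAVE  0x02
#define DEQUEUE_MOVE  0x04

static void dequeue_entity_sketch(int flags)
{
        /*
         * True unless SAVE is set and MOVE is clear: a plain dequeue and a
         * real move both take this branch; a save-only dequeue skips it.
         */
        if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
                printf("update placement state\n");
}

int main(void)
{
        dequeue_entity_sketch(0);                           /* taken */
        dequeue_entity_sketch(DEQUEUE_SAVE);                /* skipped */
        dequeue_entity_sketch(DEQUEUE_SAVE | DEQUEUE_MOVE); /* taken */
        return 0;
}
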
5201 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags)
5236 raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
5239 raw_spin_lock_irqsave(&cfs_b->lock, flags);
5354 unsigned long flags;
5357 raw_spin_lock_irqsave(&cfs_b->lock, flags);
5361 raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
5369 raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
5377 raw_spin_lock_irqsave(&cfs_b->lock, flags);
5378 raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
5462 unsigned long flags;
5467 raw_spin_lock_irqsave(&cfs_b->lock, flags);
5474 idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);
5507 raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
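
In the bandwidth lines the saved interrupt state is also passed by value into do_sched_cfs_period_timer(), which drops and retakes the lock around slower work using that same flags value. A userspace mock of the hand-off, again with stand-in lock helpers instead of the kernel API:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cfs_b_lock = PTHREAD_MUTEX_INITIALIZER;

static void lock_irqsave_mock(unsigned long *flags)
{
        *flags = 1;                       /* stand-in for the saved IRQ state */
        pthread_mutex_lock(&cfs_b_lock);
}

static void unlock_irqrestore_mock(unsigned long flags)
{
        (void)flags;
        pthread_mutex_unlock(&cfs_b_lock);
}

/* Called with the lock held; temporarily drops it for the slow path. */
static int do_period_timer_sketch(unsigned long flags)
{
        unlock_irqrestore_mock(flags);
        printf("distribute runtime without the lock held\n");
        lock_irqsave_mock(&flags);
        return 0;
}

int main(void)
{
        unsigned long flags;

        lock_irqsave_mock(&flags);
        do_period_timer_sketch(flags);
        unlock_irqrestore_mock(flags);
        return 0;
}
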
5792 static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
5797 int task_new = !(flags & ENQUEUE_WAKEUP);
5821 enqueue_entity(cfs_rq, se, flags);
5832 flags = ENQUEUE_WAKEUP;
5912 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
5916 int task_sleep = flags & DEQUEUE_SLEEP;
5924 dequeue_entity(cfs_rq, se, flags);
5948 flags |= DEQUEUE_SLEEP;
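
In the task-level paths the flags word is rewritten while walking up the entity hierarchy: ancestors of a newly enqueued task are treated as wakeups, and an ancestor left empty by a dequeue has DEQUEUE_SLEEP OR-ed in. A loop-shaped sketch of that propagation, using illustrative values and a flat loop instead of the se hierarchy:

#include <stdio.h>

#define ENQUEUE_WAKEUP 0x01  /* illustrative values */
#define DEQUEUE_SLEEP  0x01

static void enqueue_levels_sketch(unsigned int flags, int levels)
{
        for (int i = 0; i < levels; i++) {
                printf("level %d enqueue flags=%#x\n", i, flags);
                /* Ancestors beyond the first level are enqueued as wakeups. */
                flags = ENQUEUE_WAKEUP;
        }
}

static void dequeue_levels_sketch(unsigned int flags, int levels)
{
        for (int i = 0; i < levels; i++) {
                printf("level %d dequeue flags=%#x\n", i, flags);
                /* An emptied level makes its ancestors look like sleepers. */
                flags |= DEQUEUE_SLEEP;
        }
}

int main(void)
{
        enqueue_levels_sketch(0, 3);
        dequeue_levels_sketch(0, 3);
        return 0;
}
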
6297 if (!(sd->flags & sd_flag)) {
6324 if (tmp->flags & sd_flag) {
7122 int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING);
7152 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) && cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
7157 sd = NULL; /* Prefer wake_affine over balance flags */
7161 if (tmp->flags & sd_flag) {
7872 unsigned int flags;
7901 if (env->sd->flags & SD_SHARE_CPUCAPACITY) {
7941 if (!p->numa_faults || !(env->sd->flags & SD_NUMA)) {
8019 env->flags |= LBF_SOME_PINNED;
8029 if (env->idle == CPU_NEWLY_IDLE || (env->flags & LBF_DST_PINNED)) {
8037 env->flags |= LBF_DST_PINNED;
8047 env->flags &= ~LBF_ALL_PINNED;
8050 if (env->flags & LBF_IGNORE_PREFERRED_CLUSTER_TASKS && !preferred_cluster(cpu_rq(env->dst_cpu)->cluster, p)) {
8161 env->flags |= LBF_IGNORE_PREFERRED_CLUSTER_TASKS;
8186 env->flags |= LBF_NEED_BREAK;
8275 if (env->flags & LBF_IGNORE_PREFERRED_CLUSTER_TASKS && !detached) {
8277 env->flags &= ~LBF_IGNORE_PREFERRED_CLUSTER_TASKS;
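
The load-balance matches manipulate env->flags as per-attempt state: LBF_SOME_PINNED / LBF_DST_PINNED record tasks that could not move, LBF_ALL_PINNED is cleared as soon as anything is movable, and LBF_NEED_BREAK asks the caller to restart. A sketch of that set/clear/test pattern on a small stand-in env struct, with illustrative bit values:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's LBF_* bits. */
#define LBF_ALL_PINNED  0x01
#define LBF_SOME_PINNED 0x02
#define LBF_DST_PINNED  0x04
#define LBF_NEED_BREAK  0x08

struct lb_env_sketch {
        unsigned int flags;
};

static bool can_migrate_sketch(struct lb_env_sketch *env, bool allowed_on_dst)
{
        if (!allowed_on_dst) {
                /* Remember that at least one task was pinned away from dst. */
                env->flags |= LBF_SOME_PINNED;
                return false;
        }
        /* One migratable task is enough to drop the all-pinned assumption. */
        env->flags &= ~LBF_ALL_PINNED;
        return true;
}

int main(void)
{
        struct lb_env_sketch env = { .flags = LBF_ALL_PINNED };

        can_migrate_sketch(&env, false);
        can_migrate_sketch(&env, true);
        printf("flags=%#x all_pinned=%d some_pinned=%d\n", env.flags,
               !!(env.flags & LBF_ALL_PINNED), !!(env.flags & LBF_SOME_PINNED));
        return 0;
}
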
8710 if (child->flags & SD_OVERLAP) {
8954 if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false)) {
8955 env->flags |= LBF_NOHZ_AGAIN;
8992 if (env->sd->flags & SD_ASYM_CPUCAPACITY && sgs->group_misfit_task_load < rq->misfit_task_load) {
9008 if (env->sd->flags & SD_ASYM_PACKING && env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
9143 if ((env->sd->flags & SD_ASYM_CPUCAPACITY) && (sgs->group_type <= group_fully_busy) &&
9272 if (sd->flags & SD_ASYM_CPUCAPACITY && !task_fits_capacity(p, group->sgc->max_capacity)) {
9434 if ((sd->flags & SD_NUMA) && ((idlest_sgs.avg_load + imbalance) >= local_sgs.avg_load)) {
9464 if (sd->flags & SD_NUMA) {
9522 env->flags |= LBF_NOHZ_STATS;
9560 sds->prefer_sibling = child && child->flags & SD_PREFER_SIBLING;
9563 if ((env->flags & LBF_NOHZ_AGAIN) && cpumask_subset(nohz.idle_cpus_mask, sched_domain_span(env->sd))) {
9568 if (env->sd->flags & SD_NUMA) {
9652 if ((busiest->group_type > group_fully_busy) && !(env->sd->flags & SD_SHARE_PKG_RESOURCES)) {
9698 if (env->sd->flags & SD_NUMA) {
9965 if (env->sd->flags & SD_ASYM_CPUCAPACITY && capacity_of(env->dst_cpu) < capacity && nr_running == 1) {
10055 return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
10210 env.flags |= LBF_ALL_PINNED;
10238 local_irq_restore(rf.flags);
10240 if (env.flags & LBF_NEED_BREAK) {
10241 env.flags &= ~LBF_NEED_BREAK;
10264 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
10270 env.flags &= ~LBF_DST_PINNED;
10287 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
10293 if (unlikely(env.flags & LBF_ALL_PINNED)) {
10325 unsigned long flags;
10327 raw_spin_lock_irqsave(&busiest->lock, flags);
10335 raw_spin_unlock_irqrestore(&busiest->lock, flags);
10336 env.flags |= LBF_ALL_PINNED;
10350 raw_spin_unlock_irqrestore(&busiest->lock, flags);
10387 if (sd_parent && !(env.flags & LBF_ALL_PINNED)) {
10419 if ((env.flags & LBF_ALL_PINNED && sd->balance_interval < MAX_PINNED_INTERVAL) ||
10523 .flags = 0,
10559 .flags = LBF_DST_PINNED,
10673 need_serialize = sd->flags & SD_SERIALIZE;
10772 static void kick_ilb(unsigned int flags)
10780 if (flags & NOHZ_BALANCE_KICK) {
10793 flags = atomic_fetch_or(flags, nohz_flags(ilb_cpu));
10794 if (flags & NOHZ_KICK_MASK) {
10816 unsigned int flags = 0;
10846 flags = NOHZ_STATS_KICK;
10854 flags = NOHZ_KICK_MASK;
10868 flags = NOHZ_KICK_MASK;
10883 flags = NOHZ_KICK_MASK;
10896 flags = NOHZ_KICK_MASK;
10923 flags = NOHZ_KICK_MASK;
10930 if (flags) {
10931 kick_ilb(flags);
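
kick_ilb() publishes the requested work by atomically OR-ing the kick bits into the target CPU's nohz flags and skips the IPI if a kick was already pending. A standalone sketch of that fetch-or idiom using C11 atomics; the mask values are illustrative, not the kernel's:

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's NOHZ_* kick bits. */
#define NOHZ_STATS_KICK   0x01
#define NOHZ_BALANCE_KICK 0x02
#define NOHZ_KICK_MASK    (NOHZ_STATS_KICK | NOHZ_BALANCE_KICK)

static atomic_uint ilb_cpu_flags;  /* stands in for nohz_flags(ilb_cpu) */

static void kick_ilb_sketch(unsigned int flags)
{
        /* Publish the request; fetch_or returns the previous value. */
        unsigned int prev = atomic_fetch_or(&ilb_cpu_flags, flags);

        /* A kick is already pending: the idle CPU will see our bits too. */
        if (prev & NOHZ_KICK_MASK)
                return;

        printf("send IPI to the idle load balancer\n");
}

int main(void)
{
        kick_ilb_sketch(NOHZ_STATS_KICK);   /* first kick: IPI sent */
        kick_ilb_sketch(NOHZ_BALANCE_KICK); /* already pending: no IPI */
        return 0;
}
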
11059 * tasks movement depending on flags.
11063 static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags, enum cpu_idle_type idle)
11076 SCHED_WARN_ON((flags & NOHZ_KICK_MASK) == NOHZ_BALANCE_KICK);
11131 if (flags & NOHZ_BALANCE_KICK) {
11157 if (flags & NOHZ_BALANCE_KICK) {
11181 unsigned int flags = this_rq->nohz_idle_balance;
11183 if (!flags) {
11193 _nohz_idle_balance(this_rq, flags, idle);
11318 if (sd->flags & SD_BALANCE_NEWIDLE) {
11450 unsigned long flags;
11458 raw_spin_lock_irqsave(&rq->lock, flags);
11466 raw_spin_unlock_irqrestore(&rq->lock, flags);
11632 * the dequeue_entity(.flags=0) will already have normalized the
11929 unsigned long flags;
11949 raw_spin_lock_irqsave(&rq->lock, flags);
11951 raw_spin_unlock_irqrestore(&rq->lock, flags);