Lines Matching defs:lock

1133     spinlock_t lock; /* nr_tasks, tasks */
1157 p == current || (lockdep_is_held(&task_rq(p)->lock) && !READ_ONCE(p->on_cpu)));
2475 group_lock = &ng->lock;
2589 spin_lock_init(&grp->lock);
2659 double_lock_irq(&my_grp->lock, &grp->lock);
2671 spin_unlock(&my_grp->lock);
2672 spin_unlock_irq(&grp->lock);
2704 spin_lock_irqsave(&grp->lock, flags);
2711 spin_unlock_irqrestore(&grp->lock, flags);
3450 * including the state of rq->lock, should be made.
3784 raw_spin_lock(&cfs_rq->removed.lock);
3789 raw_spin_unlock(&cfs_rq->removed.lock);
4012 raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags);
4017 raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags);
4837 * around rq->lock.
4839 * requires cfs_b->lock
4858 lockdep_assert_held(&cfs_b->lock);
4886 raw_spin_lock(&cfs_b->lock);
4888 raw_spin_unlock(&cfs_b->lock);
4989 raw_spin_lock(&cfs_b->lock);
5004 raw_spin_unlock(&cfs_b->lock);
5049 * throttled-list. rq->lock protects completion.
5070 raw_spin_lock(&cfs_b->lock);
5073 raw_spin_unlock(&cfs_b->lock);
5169 raw_spin_lock(&cfs_b->lock);
5176 raw_spin_unlock(&cfs_b->lock);
5233 * This check is repeated as we release cfs_b->lock while we unthrottle.
5236 raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
5237 /* we can't nest cfs_b->lock while distributing bandwidth */
5239 raw_spin_lock_irqsave(&cfs_b->lock, flags);
5268 * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
5319 raw_spin_lock(&cfs_b->lock);
5323 /* we are under rq->lock, defer unthrottling using a timer */
5328 raw_spin_unlock(&cfs_b->lock);
5357 raw_spin_lock_irqsave(&cfs_b->lock, flags);
5361 raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
5369 raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
5377 raw_spin_lock_irqsave(&cfs_b->lock, flags);
5378 raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
5467 raw_spin_lock_irqsave(&cfs_b->lock, flags);
5507 raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
5514 raw_spin_lock_init(&cfs_b->lock);
5536 lockdep_assert_held(&cfs_b->lock);
5570 lockdep_assert_held(&rq->lock);
5578 raw_spin_lock(&cfs_b->lock);
5580 raw_spin_unlock(&cfs_b->lock);
5590 lockdep_assert_held(&rq->lock);
7190 * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
7223 * rq->lock and can modify state directly.
7225 lockdep_assert_held(&task_rq(p)->lock);
7445 * point, either of which can drop the rq lock.
7589 * Because newidle_balance() releases (and re-acquires) rq->lock, it is
7890 lockdep_assert_held(&env->src_rq->lock);
7996 lockdep_assert_held(&env->src_rq->lock);
8088 lockdep_assert_held(&env->src_rq->lock);
8113 lockdep_assert_held(&env->src_rq->lock);
8153 lockdep_assert_held(&env->src_rq->lock);
8298 lockdep_assert_held(&rq->lock);
10226 * unlock busiest->lock, and we are able to be sure
10327 raw_spin_lock_irqsave(&busiest->lock, flags);
10335 raw_spin_unlock_irqrestore(&busiest->lock, flags);
10350 raw_spin_unlock_irqrestore(&busiest->lock, flags);
11013 * Can be set safely without rq->lock held
11015 * rq->lock is held during the check and the clear
11220 raw_spin_unlock(&this_rq->lock);
11230 raw_spin_lock(&this_rq->lock);
11253 * < 0 - we released the lock and there are !fair tasks present
11304 raw_spin_unlock(&this_rq->lock);
11343 raw_spin_lock(&this_rq->lock);
11351 * While browsing the domains, we released the rq lock, a task could
11458 raw_spin_lock_irqsave(&rq->lock, flags);
11466 raw_spin_unlock_irqrestore(&rq->lock, flags);
11806 raw_spin_lock_init(&cfs_rq->removed.lock);
11949 raw_spin_lock_irqsave(&rq->lock, flags);
11951 raw_spin_unlock_irqrestore(&rq->lock, flags);
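
Two of the matches above encode the listing's one non-obvious locking rule. The comments at source lines 5233 and 5237 describe the period-timer path: cfs_b->runtime is re-checked on every pass because distributing bandwidth has to take each throttled rq's lock, so cfs_b->lock is dropped before the distribution step and re-acquired afterwards rather than nested. Below is a minimal userspace sketch of that drop-and-recheck shape, assuming only POSIX threads; the names (pool, item, distribute) are invented analogues for illustration, not the kernel's types.

#include <pthread.h>
#include <stdio.h>

struct pool {                        /* invented analogue of struct cfs_bandwidth */
	pthread_mutex_t lock;        /* plays the role of cfs_b->lock */
	int runtime;
};

struct item {                        /* invented analogue of a throttled rq */
	pthread_mutex_t lock;        /* plays the role of rq->lock */
	int need;
};

static void distribute(struct pool *p, struct item *items, int n)
{
	pthread_mutex_lock(&p->lock);
	/* Re-checked each pass: p->lock is released while distributing. */
	while (p->runtime > 0) {
		int given = 0;

		/* "we can't nest cfs_b->lock while distributing bandwidth":
		 * drop the pool lock before taking any per-item lock. */
		pthread_mutex_unlock(&p->lock);
		for (int i = 0; i < n; i++) {
			pthread_mutex_lock(&items[i].lock);
			if (items[i].need > 0) {
				items[i].need--;
				given++;
			}
			pthread_mutex_unlock(&items[i].lock);
		}
		pthread_mutex_lock(&p->lock);

		p->runtime -= given;
		if (!given)
			break;       /* nothing was handed out; stop */
	}
	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	struct pool pool = { PTHREAD_MUTEX_INITIALIZER, 10 };
	struct item items[2] = {
		{ PTHREAD_MUTEX_INITIALIZER, 1 },
		{ PTHREAD_MUTEX_INITIALIZER, 2 },
	};

	distribute(&pool, items, 2);     /* build with: cc -pthread */
	printf("runtime left: %d\n", pool.runtime);
	return 0;
}

The ordering mirrors what the listing shows: per-item (rq) locks are only ever taken while the pool (cfs_b) lock is not held, which is why the remaining-runtime check has to be repeated after each re-acquisition.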