Lines matching defs:lock
324 /* nests inside the rq lock: */
364 raw_spinlock_t lock;
427 raw_spinlock_t lock;
626 raw_spinlock_t lock ____cacheline_aligned;
721 /* Nests inside the rq lock: */
894 /* These atomics are updated outside of a lock */
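These excerpts read like the Linux scheduler's runqueue header (kernel/sched/sched.h); the sketches below are usage illustrations written against the declarations quoted here, not code from the tree. The fields at 324-721 each pair a raw_spinlock_t with a comment stating how it nests relative to the runqueue lock. A minimal sketch of what "nests inside the rq lock" means in practice, assuming a hypothetical inner lock that is only ever taken with rq->lock already held:

/* Illustrative only: "inner" stands for any lock documented as nesting
 * inside the rq lock, so the documented order is rq->lock -> inner,
 * never the reverse. */
static void example_nested_lock(struct rq *rq, raw_spinlock_t *inner)
{
        raw_spin_lock(&rq->lock);       /* outer lock: always taken first */
        raw_spin_lock(inner);           /* then the lock that nests inside it */
        /* ... update state guarded by the inner lock ... */
        raw_spin_unlock(inner);
        raw_spin_unlock(&rq->lock);
}
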
975 * Locking rule: those places that want to lock multiple runqueues
976 * (such as the load balancing or the thread migration code), lock
980 /* runqueue lock: */
981 raw_spinlock_t lock;
1029 * it on another CPU. Always updated under the runqueue lock:
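Lines 975-981 state the core rule for code that must hold more than one runqueue lock (load balancing, migration): the acquisitions have to follow one fixed global order, which is what double_rq_lock() at 2305 implements by locking the runqueues in ascending order. A standalone user-space sketch of the same rule, with pthread spinlocks standing in for rq->lock and addresses providing the order (lock_pair()/unlock_pair() are made-up names, not kernel API):

#include <pthread.h>

static void lock_pair(pthread_spinlock_t *a, pthread_spinlock_t *b)
{
        if (a == b) {                   /* same queue: one lock suffices */
                pthread_spin_lock(a);
                return;
        }
        if (a > b) {                    /* normalize to ascending address */
                pthread_spinlock_t *tmp = a;
                a = b;
                b = tmp;
        }
        pthread_spin_lock(a);           /* lower address first ... */
        pthread_spin_lock(b);           /* ... then the higher one */
}

static void unlock_pair(pthread_spinlock_t *a, pthread_spinlock_t *b)
{
        pthread_spin_unlock(a);         /* unlock order does not matter */
        if (a != b)
                pthread_spin_unlock(b);
}

Because every thread takes the pair in the same order, two CPUs contending for the same two queues can never deadlock.
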
1165 /* Must be inspected within a rcu lock section */
1237 * made to update_rq_clock() since the last time rq::lock was pinned.
1264 lockdep_assert_held(&rq->lock);
1272 lockdep_assert_held(&rq->lock);
1298 lockdep_assert_held(&rq->lock);
1308 lockdep_assert_held(&rq->lock);
1337 rf->cookie = lockdep_pin_lock(&rq->lock);
1353 lockdep_unpin_lock(&rq->lock, rf->cookie);
1358 lockdep_repin_lock(&rq->lock, rf->cookie);
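Lines 1337-1358 are the bodies of the rq_flags pin/unpin/repin helpers: pinning rq->lock hands out a lockdep cookie so that code which drops the lock while a caller still relies on a stable rq clock (see 1237) is caught. A sketch of the drop-and-retake pattern, assuming the enclosing rq_unpin_lock()/rq_repin_lock() wrappers around the lockdep calls shown at 1353 and 1358:

static void example_drop_rq_lock(struct rq *rq, struct rq_flags *rf)
{
        lockdep_assert_held(&rq->lock);

        rq_unpin_lock(rq, rf);          /* hand the lockdep_pin_lock() cookie back */
        raw_spin_unlock(&rq->lock);

        /* ... work that must not run under rq->lock ... */

        raw_spin_lock(&rq->lock);
        rq_repin_lock(rq, rf);          /* re-pin; the rq clock may now be stale */
}
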
1368 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) __acquires(rq->lock);
1370 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) __acquires(p->pi_lock) __acquires(rq->lock);
1372 static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) __releases(rq->lock)
1375 raw_spin_unlock(&rq->lock);
1378 static inline void task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) __releases(rq->lock)
1382 raw_spin_unlock(&rq->lock);
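task_rq_lock() (1370) takes p->pi_lock and then the lock of whichever runqueue the task currently sits on, which is what keeps the task from being migrated out from under the caller; __task_rq_lock() (1368) is the variant for callers that already hold p->pi_lock. A usage sketch (example_inspect_task() is a made-up name):

static void example_inspect_task(struct task_struct *p)
{
        struct rq_flags rf;
        struct rq *rq;

        rq = task_rq_lock(p, &rf);      /* acquires p->pi_lock, then rq->lock */
        /* p cannot change runqueues while both locks are held */
        task_rq_unlock(rq, p, &rf);
}
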
1386 static inline void rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) __acquires(rq->lock)
1388 raw_spin_lock_irqsave(&rq->lock, rf->flags);
1392 static inline void rq_lock_irq(struct rq *rq, struct rq_flags *rf) __acquires(rq->lock)
1394 raw_spin_lock_irq(&rq->lock);
1398 static inline void rq_lock(struct rq *rq, struct rq_flags *rf) __acquires(rq->lock)
1400 raw_spin_lock(&rq->lock);
1404 static inline void rq_relock(struct rq *rq, struct rq_flags *rf) __acquires(rq->lock)
1406 raw_spin_lock(&rq->lock);
1410 static inline void rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf) __releases(rq->lock)
1413 raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
1416 static inline void rq_unlock_irq(struct rq *rq, struct rq_flags *rf) __releases(rq->lock)
1419 raw_spin_unlock_irq(&rq->lock);
1422 static inline void rq_unlock(struct rq *rq, struct rq_flags *rf) __releases(rq->lock)
1425 raw_spin_unlock(&rq->lock);
1428 static inline struct rq *this_rq_lock_irq(struct rq_flags *rf) __acquires(rq->lock)
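Lines 1386-1428 are the rq_flags-based lock/unlock pairs: each _irqsave/_irq/plain variant has a matching unlock, and rq_relock() (1404) re-takes a lock that was dropped via the unpin pattern above. A minimal sketch of the common pairing (kernel context; update_rq_clock() is the real helper that requires rq->lock to be held, per the assertion at 1484):

static void example_touch_rq(struct rq *rq)
{
        struct rq_flags rf;

        rq_lock_irqsave(rq, &rf);       /* rq->lock taken, IRQ state saved in rf.flags */
        update_rq_clock(rq);            /* allowed: rq->lock is held and pinned */
        /* ... read or modify rq fields here ... */
        rq_unlock_irqrestore(rq, &rf);
}
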
1484 lockdep_assert_held(&rq->lock);
1653 * holding both task_struct::pi_lock and rq::lock.
1916 * The switched_from() call is allowed to drop rq->lock, therefore we
1918 * rq->lock. They are however serialized by p->pi_lock.
2212 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) __releases(this_rq->lock)
2213 __acquires(busiest->lock) __acquires(this_rq->lock)
2215 raw_spin_unlock(&this_rq->lock);
2226 * grant the double lock to lower CPUs over higher ids under contention,
2229 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) __releases(this_rq->lock)
2230 __acquires(busiest->lock) __acquires(this_rq->lock)
2234 if (unlikely(!raw_spin_trylock(&busiest->lock))) {
2236 raw_spin_unlock(&this_rq->lock);
2237 raw_spin_lock(&busiest->lock);
2238 raw_spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
2241 raw_spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
2250 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
2255 /* printk() doesn't work well under rq->lock */
2256 raw_spin_unlock(&this_rq->lock);
2263 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) __releases(busiest->lock)
2265 raw_spin_unlock(&busiest->lock);
2266 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
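Lines 2212-2266 are the two _double_lock_balance() flavours plus double_unlock_balance(): both flavours end up holding this_rq->lock and busiest->lock, but may have to drop this_rq->lock on the way (the trylock-then-reorder at 2234-2238), so the return value is non-zero when that happened and callers must revalidate anything they computed beforehand. A usage sketch:

static void example_pull_from(struct rq *this_rq, struct rq *busiest)
{
        lockdep_assert_held(&this_rq->lock);

        if (double_lock_balance(this_rq, busiest)) {
                /* this_rq->lock was dropped and retaken: re-check any
                 * cached state, tasks may have moved in the meantime */
        }

        /* both runqueue locks are held here */

        double_unlock_balance(this_rq, busiest);
}
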
2300 * double_rq_lock - safely lock two runqueues
2305 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) __acquires(rq1->lock) __acquires(rq2->lock)
2309 raw_spin_lock(&rq1->lock);
2310 __acquire(rq2->lock); /* Fake it out ;) */
2313 raw_spin_lock(&rq1->lock);
2314 raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
2316 raw_spin_lock(&rq2->lock);
2317 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
2328 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) __releases(rq1->lock) __releases(rq2->lock)
2330 raw_spin_unlock(&rq1->lock);
2332 raw_spin_unlock(&rq2->lock);
2334 __release(rq2->lock);
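Lines 2300-2334 are the SMP double_rq_lock()/double_rq_unlock() pair: equal pointers take a single lock (with a fake __acquire() to keep sparse happy), otherwise the runqueues are locked in ascending order and the second acquisition uses raw_spin_lock_nested() so lockdep accepts two locks of the same class. A usage sketch; the runqueue locks are taken with interrupts off, so the example disables them around the call:

static void example_both_rqs(struct rq *rq1, struct rq *rq2)
{
        unsigned long flags;

        local_irq_save(flags);
        double_rq_lock(rq1, rq2);       /* handles rq1 == rq2 and ordering */

        /* fields of both runqueues may be accessed here */

        double_rq_unlock(rq1, rq2);
        local_irq_restore(flags);
}

The !SMP variants quoted at 2345-2368 collapse to a single lock, since rq1 and rq2 are necessarily the same runqueue on a uniprocessor build.
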
2345 * double_rq_lock - safely lock two runqueues
2350 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) __acquires(rq1->lock) __acquires(rq2->lock)
2354 raw_spin_lock(&rq1->lock);
2355 __acquire(rq2->lock); /* Fake it out ;) */
2364 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) __releases(rq1->lock) __releases(rq2->lock)
2367 raw_spin_unlock(&rq1->lock);
2368 __release(rq2->lock);