Lines Matching refs:sem
105 # define DEBUG_RWSEMS_WARN_ON(c, sem) do { \
108 #c, atomic_long_read(&(sem)->count), \
109 (unsigned long) sem->magic, \
110 atomic_long_read(&(sem)->owner), (long)current, \
111 list_empty(&(sem)->wait_list) ? "" : "not ")) \
115 # define DEBUG_RWSEMS_WARN_ON(c, sem)
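The matches above appear to come from the kernel's rwsem implementation (kernel/locking/rwsem.c); this first group is the CONFIG_DEBUG_RWSEMS consistency-check macro. As a hedged illustration of the pattern it is built on, here is a minimal userspace sketch of a fire-once warning wrapped in the usual do { ... } while (0) idiom; the DEMO_* name and the message are invented for the example and are not kernel APIs.

#include <stdio.h>

/* Illustrative stand-in for a WARN_ONCE-style check: reports only the first
 * violation, and expands to a single statement so it nests safely in if/else. */
#define DEMO_WARN_ON_ONCE(cond, msg) do {                              \
        static int demo_warned;                                        \
        if ((cond) && !demo_warned) {                                  \
                demo_warned = 1;                                       \
                fprintf(stderr, "warning: %s (%s)\n", (msg), #cond);   \
        }                                                              \
} while (0)

int main(void)
{
        for (int i = 0; i < 3; i++)
                DEMO_WARN_ON_ONCE(i > 0, "count went negative");  /* fires once */
        return 0;
}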
174 static inline void rwsem_set_owner(struct rw_semaphore *sem)
176 atomic_long_set(&sem->owner, (long)current);
179 static inline void rwsem_clear_owner(struct rw_semaphore *sem)
181 atomic_long_set(&sem->owner, 0);
187 static inline bool rwsem_test_oflags(struct rw_semaphore *sem, long flags)
189 return atomic_long_read(&sem->owner) & flags;
202 static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
206 (atomic_long_read(&sem->owner) & RWSEM_RD_NONSPINNABLE);
208 atomic_long_set(&sem->owner, val);
211 static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
213 __rwsem_set_reader_owned(sem, current);
219 static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
225 long count = atomic_long_read(&sem->count);
230 return rwsem_test_oflags(sem, RWSEM_READER_OWNED);
240 static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
242 unsigned long val = atomic_long_read(&sem->owner);
245 if (atomic_long_try_cmpxchg(&sem->owner, &val,
251 static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
260 static inline void rwsem_set_nonspinnable(struct rw_semaphore *sem)
262 unsigned long owner = atomic_long_read(&sem->owner);
269 } while (!atomic_long_try_cmpxchg(&sem->owner, &owner,
273 static inline bool rwsem_read_trylock(struct rw_semaphore *sem)
275 long cnt = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);
277 rwsem_set_nonspinnable(sem);
284 static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
287 (atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
295 rwsem_owner_flags(struct rw_semaphore *sem, unsigned long *pflags)
297 unsigned long owner = atomic_long_read(&sem->owner);
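rwsem_set_owner(), rwsem_test_oflags(), rwsem_owner() and rwsem_owner_flags() above all treat sem->owner as a task_struct pointer with a few flag bits packed into its low-order bits (task pointers are aligned, so those bits are free). A hedged userspace model of that packing, with invented demo_* names and flag values rather than the kernel's constants:

#include <stdatomic.h>
#include <stdio.h>

#define DEMO_READER_OWNED  0x1UL
#define DEMO_NONSPINNABLE  0x2UL
#define DEMO_FLAGS_MASK    0x7UL   /* low bits are free: owners are 8-byte aligned */

struct demo_task { long payload; } __attribute__((aligned(8)));

static _Atomic unsigned long owner_word;

/* like __rwsem_set_reader_owned(): store owner pointer plus flag bits in one word */
static void demo_set_reader_owned(struct demo_task *t)
{
        atomic_store_explicit(&owner_word,
                              (unsigned long)t | DEMO_READER_OWNED,
                              memory_order_relaxed);
}

/* like rwsem_owner_flags(): split the word back into pointer and flags */
static struct demo_task *demo_owner_flags(unsigned long *flags)
{
        unsigned long v = atomic_load_explicit(&owner_word, memory_order_relaxed);

        *flags = v & DEMO_FLAGS_MASK;
        return (struct demo_task *)(v & ~DEMO_FLAGS_MASK);
}

int main(void)
{
        struct demo_task t;
        unsigned long flags;
        struct demo_task *owner;

        demo_set_reader_owned(&t);
        owner = demo_owner_flags(&flags);
        printf("owner=%p reader_owned=%lu\n", (void *)owner, flags & DEMO_READER_OWNED);
        return 0;
}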
323 void __init_rwsem(struct rw_semaphore *sem, const char *name,
330 debug_check_no_locks_freed((void *)sem, sizeof(*sem));
331 lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
334 sem->magic = sem;
336 atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
337 raw_spin_lock_init(&sem->wait_lock);
338 INIT_LIST_HEAD(&sem->wait_list);
339 atomic_long_set(&sem->owner, 0L);
341 osq_lock_init(&sem->osq);
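__init_rwsem() above is what the init_rwsem() wrapper ends up calling for dynamically created semaphores (DECLARE_RWSEM() covers the static case). A hedged sketch of initialising an rwsem embedded in a hypothetical object, assuming a kernel build context; struct demo_obj and demo_obj_alloc() are invented for the example:

#include <linux/rwsem.h>
#include <linux/slab.h>

/* hypothetical object protected by its own rwsem */
struct demo_obj {
        struct rw_semaphore lock;
        int data;
};

static struct demo_obj *demo_obj_alloc(void)
{
        struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (obj)
                init_rwsem(&obj->lock);     /* sets count, owner, wait_list, osq */
        return obj;
}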
358 #define rwsem_first_waiter(sem) \
359 list_first_entry(&sem->wait_list, struct rwsem_waiter, list)
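rwsem_first_waiter() is list_first_entry() over the intrusive wait_list, i.e. container_of arithmetic from the embedded list node back to the enclosing rwsem_waiter. A self-contained userspace sketch of that technique; the demo_* names are invented for the example:

#include <stddef.h>
#include <stdio.h>

struct demo_list_head { struct demo_list_head *next, *prev; };

/* recover the containing structure from a pointer to its embedded member */
#define demo_container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_waiter {
        int pid;                        /* payload */
        struct demo_list_head list;     /* embedded node, like rwsem_waiter.list */
};

#define demo_first_waiter(head) \
        demo_container_of((head)->next, struct demo_waiter, list)

int main(void)
{
        struct demo_list_head head;
        struct demo_waiter w = { .pid = 42 };

        /* hand-link a single waiter onto the list */
        head.next = head.prev = &w.list;
        w.list.next = w.list.prev = &head;

        printf("first waiter pid = %d\n", demo_first_waiter(&head)->pid);
        return 0;
}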
400 static void rwsem_mark_wake(struct rw_semaphore *sem,
408 lockdep_assert_held(&sem->wait_lock);
414 waiter = rwsem_first_waiter(sem);
435 if (unlikely(atomic_long_read(&sem->count) < 0))
447 oldcount = atomic_long_fetch_add(adjustment, &sem->count);
460 atomic_long_add(-adjustment, &sem->count);
474 __rwsem_set_reader_owned(sem, owner);
501 list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
517 if (list_empty(&sem->wait_list)) {
526 if (woken && (atomic_long_read(&sem->count) & RWSEM_FLAG_HANDOFF))
530 atomic_long_add(adjustment, &sem->count);
555 * This function must be called with the sem->wait_lock held to prevent
557 * sem->count accordingly.
562 static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
567 lockdep_assert_held(&sem->wait_lock);
569 count = atomic_long_read(&sem->count);
587 if (list_is_singular(&sem->wait_list))
590 } while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));
599 rwsem_set_owner(sem);
609 static inline bool rwsem_try_read_lock_unqueued(struct rw_semaphore *sem)
611 long count = atomic_long_read(&sem->count);
616 count = atomic_long_fetch_add_acquire(RWSEM_READER_BIAS, &sem->count);
618 rwsem_set_reader_owned(sem);
624 atomic_long_add(-RWSEM_READER_BIAS, &sem->count);
631 static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
633 long count = atomic_long_read(&sem->count);
636 if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
638 rwsem_set_owner(sem);
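rwsem_try_read_lock_unqueued() and rwsem_try_write_lock_unqueued() both operate on the single atomic count: a reader optimistically adds a reader bias and backs it out again if a writer bit turns out to be set, while a writer cmpxchges the count from "no readers, no writer" to the write-locked value. A hedged userspace model of that scheme; the bit layout and DEMO_* constants are illustrative, not the kernel's exact values:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative layout: low bit = writer locked, readers counted from bit 8. */
#define DEMO_WRITER_LOCKED  0x01L
#define DEMO_LOCK_MASK      0xffL          /* any of the low control bits */
#define DEMO_READER_BIAS    (1L << 8)

static _Atomic long count;

static bool demo_read_trylock(void)
{
        /* optimistic: add our reader bias, then check nothing blocks us */
        long c = atomic_fetch_add_explicit(&count, DEMO_READER_BIAS,
                                           memory_order_acquire);
        if (!(c & DEMO_LOCK_MASK))
                return true;
        /* a writer (or other control bit) was set: undo and fail */
        atomic_fetch_sub_explicit(&count, DEMO_READER_BIAS, memory_order_relaxed);
        return false;
}

static bool demo_write_trylock(void)
{
        long expected = 0;      /* no readers, no writer */

        return atomic_compare_exchange_strong_explicit(&count, &expected,
                        DEMO_WRITER_LOCKED,
                        memory_order_acquire, memory_order_relaxed);
}

int main(void)
{
        printf("read  trylock: %d\n", demo_read_trylock());   /* 1: lock was free */
        printf("write trylock: %d\n", demo_write_trylock());  /* 0: a reader holds it */
        return 0;
}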
655 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
669 owner = rwsem_owner_flags(sem, &flags);
715 rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable)
721 owner = rwsem_owner_flags(sem, &flags);
734 new = rwsem_owner_flags(sem, &new_flags);
742 * checking sem->owner still matches owner, if that fails,
772 static inline u64 rwsem_rspin_threshold(struct rw_semaphore *sem)
774 long count = atomic_long_read(&sem->count);
785 static bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
796 /* sem->wait_lock should not be held when doing optimistic spinning */
797 if (!osq_lock(&sem->osq))
809 owner_state = rwsem_spin_on_owner(sem, nonspinnable);
816 taken = wlock ? rwsem_try_write_lock_unqueued(sem)
817 : rwsem_try_read_lock_unqueued(sem);
834 if (rwsem_test_oflags(sem, nonspinnable))
836 rspin_threshold = rwsem_rspin_threshold(sem);
848 rwsem_set_nonspinnable(sem);
867 * lock, sem->owner is cleared but the lock has not
902 osq_unlock(&sem->osq);
919 static inline void clear_wr_nonspinnable(struct rw_semaphore *sem)
921 if (rwsem_test_oflags(sem, RWSEM_WR_NONSPINNABLE))
922 atomic_long_andnot(RWSEM_WR_NONSPINNABLE, &sem->owner);
944 static inline bool rwsem_reader_phase_trylock(struct rw_semaphore *sem,
947 unsigned long owner = atomic_long_read(&sem->owner);
953 rwsem_try_read_lock_unqueued(sem)) {
961 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
967 static inline bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
972 static inline void clear_wr_nonspinnable(struct rw_semaphore *sem) { }
974 static inline bool rwsem_reader_phase_trylock(struct rw_semaphore *sem,
981 rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable)
992 rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
1003 waiter.last_rowner = atomic_long_read(&sem->owner);
1007 if (!rwsem_can_spin_on_owner(sem, RWSEM_RD_NONSPINNABLE))
1013 atomic_long_add(-RWSEM_READER_BIAS, &sem->count);
1015 if (rwsem_optimistic_spin(sem, false)) {
1021 if ((atomic_long_read(&sem->count) & RWSEM_FLAG_WAITERS)) {
1022 raw_spin_lock_irq(&sem->wait_lock);
1023 if (!list_empty(&sem->wait_list))
1024 rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED,
1026 raw_spin_unlock_irq(&sem->wait_lock);
1029 return sem;
1030 } else if (rwsem_reader_phase_trylock(sem, waiter.last_rowner)) {
1032 return sem;
1040 raw_spin_lock_irq(&sem->wait_lock);
1041 if (list_empty(&sem->wait_list)) {
1048 if (adjustment && !(atomic_long_read(&sem->count) &
1052 raw_spin_unlock_irq(&sem->wait_lock);
1053 rwsem_set_reader_owned(sem);
1055 return sem;
1059 list_add_tail(&waiter.list, &sem->wait_list);
1063 count = atomic_long_add_return(adjustment, &sem->count);
1065 count = atomic_long_read(&sem->count);
1074 clear_wr_nonspinnable(sem);
1079 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
1081 raw_spin_unlock_irq(&sem->wait_lock);
1092 raw_spin_lock_irq(&sem->wait_lock);
1095 raw_spin_unlock_irq(&sem->wait_lock);
1096 /* Ordered by sem->wait_lock against rwsem_mark_wake(). */
1105 return sem;
1109 if (list_empty(&sem->wait_list)) {
1111 &sem->count);
1113 raw_spin_unlock_irq(&sem->wait_lock);
1123 static inline void rwsem_disable_reader_optspin(struct rw_semaphore *sem,
1127 atomic_long_or(RWSEM_RD_NONSPINNABLE, &sem->owner);
1136 rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
1142 struct rw_semaphore *ret = sem;
1146 if (rwsem_can_spin_on_owner(sem, RWSEM_WR_NONSPINNABLE) &&
1147 rwsem_optimistic_spin(sem, true)) {
1149 return sem;
1157 disable_rspin = atomic_long_read(&sem->owner) & RWSEM_NONSPINNABLE;
1161 * and block until we can acquire the sem.
1167 raw_spin_lock_irq(&sem->wait_lock);
1170 wstate = list_empty(&sem->wait_list) ? WRITER_FIRST : WRITER_NOT_FIRST;
1172 list_add_tail(&waiter.list, &sem->wait_list);
1176 count = atomic_long_read(&sem->count);
1189 rwsem_mark_wake(sem, (count & RWSEM_READER_MASK)
1198 raw_spin_unlock_irq(&sem->wait_lock);
1201 raw_spin_lock_irq(&sem->wait_lock);
1204 atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count);
1211 if (rwsem_try_write_lock(sem, wstate)) {
1216 raw_spin_unlock_irq(&sem->wait_lock);
1227 rwsem_spin_on_owner(sem, RWSEM_NONSPINNABLE) == OWNER_NULL)
1246 (rwsem_first_waiter(sem) == &waiter))
1249 count = atomic_long_read(&sem->count);
1265 raw_spin_lock_irq(&sem->wait_lock);
1269 rwsem_disable_reader_optspin(sem, disable_rspin);
1270 raw_spin_unlock_irq(&sem->wait_lock);
1277 raw_spin_lock_irq(&sem->wait_lock);
1281 atomic_long_add(-RWSEM_FLAG_HANDOFF, &sem->count);
1283 if (list_empty(&sem->wait_list))
1284 atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count);
1286 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
1287 raw_spin_unlock_irq(&sem->wait_lock);
1298 static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem, long count)
1303 raw_spin_lock_irqsave(&sem->wait_lock, flags);
1305 if (!list_empty(&sem->wait_list))
1306 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
1308 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
1311 return sem;
1319 static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
1324 raw_spin_lock_irqsave(&sem->wait_lock, flags);
1326 if (!list_empty(&sem->wait_list))
1327 rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);
1329 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
1332 return sem;
1338 static inline void __down_read(struct rw_semaphore *sem)
1340 if (!rwsem_read_trylock(sem)) {
1341 rwsem_down_read_slowpath(sem, TASK_UNINTERRUPTIBLE);
1342 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1344 rwsem_set_reader_owned(sem);
1348 static inline int __down_read_interruptible(struct rw_semaphore *sem)
1350 if (!rwsem_read_trylock(sem)) {
1351 if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_INTERRUPTIBLE)))
1353 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1355 rwsem_set_reader_owned(sem);
1360 static inline int __down_read_killable(struct rw_semaphore *sem)
1362 if (!rwsem_read_trylock(sem)) {
1363 if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_KILLABLE)))
1365 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1367 rwsem_set_reader_owned(sem);
1372 static inline int __down_read_trylock(struct rw_semaphore *sem)
1376 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1383 if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
1385 rwsem_set_reader_owned(sem);
1395 static inline void __down_write(struct rw_semaphore *sem)
1399 if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
1401 rwsem_down_write_slowpath(sem, TASK_UNINTERRUPTIBLE);
1403 rwsem_set_owner(sem);
1406 static inline int __down_write_killable(struct rw_semaphore *sem)
1410 if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
1412 if (IS_ERR(rwsem_down_write_slowpath(sem, TASK_KILLABLE)))
1415 rwsem_set_owner(sem);
1420 static inline int __down_write_trylock(struct rw_semaphore *sem)
1424 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1427 if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
1429 rwsem_set_owner(sem);
1438 static inline void __up_read(struct rw_semaphore *sem)
1442 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1443 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1445 rwsem_clear_reader_owned(sem);
1446 tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
1447 DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
1450 clear_wr_nonspinnable(sem);
1451 rwsem_wake(sem, tmp);
1458 static inline void __up_write(struct rw_semaphore *sem)
1462 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1464 * sem->owner may differ from current if the ownership is transferred
1467 DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) &&
1468 !rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem);
1470 rwsem_clear_owner(sem);
1471 tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
1473 rwsem_wake(sem, tmp);
1479 static inline void __downgrade_write(struct rw_semaphore *sem)
1490 DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem);
1492 -RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
1493 rwsem_set_reader_owned(sem);
1495 rwsem_downgrade_wake(sem);
1501 void __sched down_read(struct rw_semaphore *sem)
1504 rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1506 LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
1510 int __sched down_read_interruptible(struct rw_semaphore *sem)
1513 rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1515 if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_interruptible)) {
1516 rwsem_release(&sem->dep_map, _RET_IP_);
1524 int __sched down_read_killable(struct rw_semaphore *sem)
1527 rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1529 if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
1530 rwsem_release(&sem->dep_map, _RET_IP_);
1541 int down_read_trylock(struct rw_semaphore *sem)
1543 int ret = __down_read_trylock(sem);
1546 rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
1554 void __sched down_write(struct rw_semaphore *sem)
1557 rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
1558 LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1565 int __sched down_write_killable(struct rw_semaphore *sem)
1568 rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
1570 if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
1572 rwsem_release(&sem->dep_map, _RET_IP_);
1583 int down_write_trylock(struct rw_semaphore *sem)
1585 int ret = __down_write_trylock(sem);
1588 rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);
1597 void up_read(struct rw_semaphore *sem)
1599 rwsem_release(&sem->dep_map, _RET_IP_);
1600 __up_read(sem);
1607 void up_write(struct rw_semaphore *sem)
1609 rwsem_release(&sem->dep_map, _RET_IP_);
1610 __up_write(sem);
1617 void downgrade_write(struct rw_semaphore *sem)
1619 lock_downgrade(&sem->dep_map, _RET_IP_);
1620 __downgrade_write(sem);
1626 void down_read_nested(struct rw_semaphore *sem, int subclass)
1629 rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
1630 LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
1634 int down_read_killable_nested(struct rw_semaphore *sem, int subclass)
1637 rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
1639 if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
1640 rwsem_release(&sem->dep_map, _RET_IP_);
1648 void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
1651 rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
1652 LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1656 void down_read_non_owner(struct rw_semaphore *sem)
1659 __down_read(sem);
1660 __rwsem_set_reader_owned(sem, NULL);
1664 void down_write_nested(struct rw_semaphore *sem, int subclass)
1667 rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
1668 LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1672 int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
1675 rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
1677 if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
1679 rwsem_release(&sem->dep_map, _RET_IP_);
1687 void up_read_non_owner(struct rw_semaphore *sem)
1689 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1690 __up_read(sem);
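The exported entry points listed above (down_read(), down_write(), downgrade_write(), up_read(), up_write() and their trylock/killable/nested variants) are the public rwsem API. A hedged, minimal kernel-module sketch of typical usage, assuming a kernel build environment; the demo_* names are invented for the example:

#include <linux/module.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(demo_rwsem);       /* statically initialised rwsem */
static int demo_value;

static int __init demo_init(void)
{
        /* writer side: exclusive access while updating */
        down_write(&demo_rwsem);
        demo_value = 1;
        /* keep reading, but stop excluding other readers */
        downgrade_write(&demo_rwsem);
        pr_info("demo_value = %d\n", demo_value);
        up_read(&demo_rwsem);

        /* reader side: many tasks may hold this concurrently */
        down_read(&demo_rwsem);
        pr_info("demo_value = %d\n", demo_value);
        up_read(&demo_rwsem);

        return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");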