Lines Matching refs:sem (include/linux/percpu-rwsem.h)

static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
{
	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	preempt_disable();
	/*
	 * We are in an RCU-sched read-side critical section, so the writer
	 * cannot both change sem->state from readers_fast and start checking
	 * counters while we are here. So if we see !sem->state, we know that
	 * the writer won't be checking until we're past the preempt_enable().
	 */
	if (likely(rcu_sync_is_idle(&sem->rss)))
		this_cpu_inc(*sem->read_count);
	else
		__percpu_down_read(sem, false); /* Unconditional memory barrier */
	preempt_enable();
}
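A minimal reader-side usage sketch of the fast path above; my_sem and my_data are hypothetical names, while DEFINE_STATIC_PERCPU_RWSEM(), percpu_down_read() and percpu_up_read() are the real API:

static DEFINE_STATIC_PERCPU_RWSEM(my_sem);
static int my_data;			/* hypothetical shared state */

static int my_read(void)
{
	int val;

	percpu_down_read(&my_sem);	/* usually just a per-CPU increment */
	val = READ_ONCE(my_data);
	percpu_up_read(&my_sem);
	return val;
}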
static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
	bool ret = true;

	preempt_disable();
	/* Same fast/slow split as in percpu_down_read(). */
	if (likely(rcu_sync_is_idle(&sem->rss)))
		this_cpu_inc(*sem->read_count);
	else
		ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
	preempt_enable();

	if (ret)
		rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);

	return ret;
}
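percpu_down_read_trylock() never sleeps, so it suits contexts that must not block; a short sketch reusing the hypothetical my_sem:

	if (percpu_down_read_trylock(&my_sem)) {
		/* read-side critical section */
		percpu_up_read(&my_sem);
	} else {
		/* a writer holds or is taking the lock; back off */
	}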
static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);

	preempt_disable();
	if (likely(rcu_sync_is_idle(&sem->rss))) {
		this_cpu_dec(*sem->read_count);
	} else {
		/*
		 * Slowpath: order our critical section before the decrement,
		 * then wake the one writer that may be waiting on the count.
		 */
		smp_mb(); /* B matches C */
		this_cpu_dec(*sem->read_count);
		rcuwait_wake_up(&sem->writer);
	}
	preempt_enable();
}
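The slowpath wakeup above pairs with the writer side, which waits for the per-CPU read counts to drain. A sketch of the matching writer, again with hypothetical names (percpu_down_write() and percpu_up_write() are the real API):

static void my_write(int new_val)
{
	percpu_down_write(&my_sem);	/* forces readers onto the slow path and waits for them */
	my_data = new_val;
	percpu_up_write(&my_sem);	/* eventually lets readers back onto the fast path */
}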
static inline bool percpu_is_write_locked(struct percpu_rw_semaphore *sem)
{
	return atomic_read(&sem->block);
}
#define percpu_init_rwsem(sem)					\
({								\
	static struct lock_class_key rwsem_key;			\
	__percpu_init_rwsem(sem, #sem, &rwsem_key);		\
})
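Unlike the static DEFINE_STATIC_PERCPU_RWSEM() initializer, dynamic initialization can fail because it allocates the per-CPU read_count. A minimal sketch with a hypothetical my_obj container; percpu_free_rwsem() is the matching teardown:

struct my_obj {
	struct percpu_rw_semaphore rwsem;
};

static int my_obj_init(struct my_obj *obj)
{
	return percpu_init_rwsem(&obj->rwsem);	/* 0 or -ENOMEM */
}

static void my_obj_destroy(struct my_obj *obj)
{
	percpu_free_rwsem(&obj->rwsem);
}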
#define percpu_rwsem_is_held(sem)	lockdep_is_held(sem)
#define percpu_rwsem_assert_held(sem)	lockdep_assert_held(sem)
static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
					unsigned long ip)
{
	lock_release(&sem->dep_map, ip);
}
static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem,
					bool read, unsigned long ip)
{
	lock_acquire(&sem->dep_map, 0, 1, read, 1, NULL, ip);
}
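These two helpers move only lockdep's notion of ownership, not the lock itself; they are meant for handing a held semaphore from one task to another (the filesystem freeze/thaw code does this). A sketch assuming the two-argument release signature above and the hypothetical my_sem:

	/* Task A: keep the rwsem write-held but drop lockdep ownership. */
	percpu_rwsem_release(&my_sem, _THIS_IP_);

	/* Task B, later: tell lockdep it now owns the write-held rwsem. */
	percpu_rwsem_acquire(&my_sem, false, _THIS_IP_);
	percpu_up_write(&my_sem);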