Lines matching refs:sem (struct ld_semaphore, the tty line-discipline semaphore, apparently drivers/tty/tty_ldsem.c; each match keeps its in-file line number)
57 void __init_ldsem(struct ld_semaphore *sem, const char *name,
64 debug_check_no_locks_freed((void *)sem, sizeof(*sem));
65 lockdep_init_map(&sem->dep_map, name, key, 0);
67 atomic_long_set(&sem->count, LDSEM_UNLOCKED);
68 sem->wait_readers = 0;
69 raw_spin_lock_init(&sem->wait_lock);
70 INIT_LIST_HEAD(&sem->read_wait);
71 INIT_LIST_HEAD(&sem->write_wait);
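Lines 57-71 are the body of __init_ldsem(): the count starts at LDSEM_UNLOCKED, the wait_lock spinlock is initialized, and the two wait lists start empty; lines 64-65 are lockdep instrumentation with no userspace equivalent. Below is a minimal userspace model of that layout, a sketch only: the field names come from the matches, while the types (a pthread mutex, a hand-rolled list head) stand in for the kernel's atomic_long_t, raw_spinlock_t and list_head.

#include <pthread.h>
#include <stdatomic.h>

struct waiter_list { struct waiter_list *next, *prev; };

struct ld_semaphore_model {
	atomic_long count;              /* biased reader/writer count */
	pthread_mutex_t wait_lock;      /* stand-in for the raw spinlock */
	int wait_readers;               /* sleeping readers, for wake math */
	struct waiter_list read_wait;   /* queued readers */
	struct waiter_list write_wait;  /* queued writers */
};

/* Mirror of lines 67-71: unlocked count, zero waiters, empty lists. */
static void init_ldsem_model(struct ld_semaphore_model *sem)
{
	atomic_init(&sem->count, 0L);   /* LDSEM_UNLOCKED */
	pthread_mutex_init(&sem->wait_lock, NULL);
	sem->wait_readers = 0;
	sem->read_wait.next = sem->read_wait.prev = &sem->read_wait;
	sem->write_wait.next = sem->write_wait.prev = &sem->write_wait;
}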
74 static void __ldsem_wake_readers(struct ld_semaphore *sem)
85 adjust = sem->wait_readers * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS);
86 count = atomic_long_add_return(adjust, &sem->count);
90 if (atomic_long_try_cmpxchg(&sem->count, &count, count - adjust))
94 list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
100 INIT_LIST_HEAD(&sem->read_wait);
101 sem->wait_readers = 0;
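Lines 85-90 are the core of __ldsem_wake_readers(): a single atomic add converts every sleeping reader's WAIT_BIAS into an ACTIVE_BIAS, granting them all the lock at once, and line 90 backs the grant out again if a writer slipped in between. A worked example of that adjustment; the bias values are assumed rwsem-style constants (low 32 bits count holders, high word counts waiters, 64-bit long) and are not shown in the matches.

#include <stdio.h>

#define LDSEM_ACTIVE_MASK 0xffffffffL   /* assumed */
#define LDSEM_ACTIVE_BIAS 1L            /* assumed */
#define LDSEM_WAIT_BIAS   (-LDSEM_ACTIVE_MASK - 1)

int main(void)
{
	long wait_readers = 3;                         /* sem->wait_readers */
	long count = wait_readers * LDSEM_WAIT_BIAS;   /* 3 sleepers, no holder */

	/* Arithmetic right shift on a negative long is assumed here. */
	printf("before: active=%ld queued=%ld\n",
	       count & LDSEM_ACTIVE_MASK, -(count >> 32));

	/* Line 85: one add turns every sleeper into a holder. */
	long adjust = wait_readers * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS);
	count += adjust;

	printf("after:  active=%ld queued=%ld\n",
	       count & LDSEM_ACTIVE_MASK, -(count >> 32));
	return 0;
}

With three queued readers this prints active=0 queued=3, then active=3 queued=0: the whole queue became holders in one atomic step.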
104 static inline int writer_trylock(struct ld_semaphore *sem)
110 long count = atomic_long_add_return(LDSEM_ACTIVE_BIAS, &sem->count);
114 if (atomic_long_try_cmpxchg(&sem->count, &count, count - LDSEM_ACTIVE_BIAS))
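writer_trylock() (lines 104-114) optimistically adds ACTIVE_BIAS, keeps the lock if the active part of the count went 0 -> 1, and otherwise removes the bias again with try_cmpxchg, retesting whenever the cmpxchg loses a race. The same pattern in portable C11; note the 0 -> 1 success test is recalled from the surrounding code and does not appear in the matches.

#include <stdatomic.h>
#include <stdbool.h>

#define ACTIVE_MASK 0xffffffffL   /* assumed */
#define ACTIVE_BIAS 1L            /* assumed */

static bool writer_trylock_model(atomic_long *count_p)
{
	/* Line 110: optimistic add; fetch_add returns the old value. */
	long count = atomic_fetch_add(count_p, ACTIVE_BIAS) + ACTIVE_BIAS;

	for (;;) {
		/* Success only if the active part transitioned 0 -> 1. */
		if ((count & ACTIVE_MASK) == ACTIVE_BIAS)
			return true;
		/* Line 114: back the bias out. A failed compare-exchange
		 * reloads 'count', so the success test runs again. */
		if (atomic_compare_exchange_strong(count_p, &count,
						   count - ACTIVE_BIAS))
			return false;
	}
}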
119 static void __ldsem_wake_writer(struct ld_semaphore *sem)
123 waiter = list_entry(sem->write_wait.next, struct ldsem_waiter, list);
135 static void __ldsem_wake(struct ld_semaphore *sem)
137 if (!list_empty(&sem->write_wait))
138 __ldsem_wake_writer(sem);
139 else if (!list_empty(&sem->read_wait))
140 __ldsem_wake_readers(sem);
143 static void ldsem_wake(struct ld_semaphore *sem)
147 raw_spin_lock_irqsave(&sem->wait_lock, flags);
148 __ldsem_wake(sem);
149 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
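__ldsem_wake() (lines 137-140) dispatches with strict writer priority: a queued writer is woken ahead of any number of queued readers, and readers are woken only when no writer waits. ldsem_wake() (lines 143-149) wraps that dispatch in an irq-safe acquisition of wait_lock. A sketch of the shape, with a pthread mutex standing in for the raw spinlock and booleans standing in for the list_empty() checks:

#include <pthread.h>
#include <stdbool.h>

struct wake_model {
	pthread_mutex_t wait_lock;   /* stand-in for raw_spin_lock_irqsave */
	bool writer_queued;          /* !list_empty(&sem->write_wait) */
	bool readers_queued;         /* !list_empty(&sem->read_wait) */
};

static void wake_one_writer(struct wake_model *m)  { m->writer_queued = false; }
static void wake_all_readers(struct wake_model *m) { m->readers_queued = false; }

/* Writer-priority dispatch of lines 137-140, under the wait lock. */
static void ldsem_wake_model(struct wake_model *m)
{
	pthread_mutex_lock(&m->wait_lock);
	if (m->writer_queued)
		wake_one_writer(m);     /* one writer beats all readers */
	else if (m->readers_queued)
		wake_all_readers(m);    /* otherwise grant every reader */
	pthread_mutex_unlock(&m->wait_lock);
}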
156 down_read_failed(struct ld_semaphore *sem, long count, long timeout)
162 raw_spin_lock_irq(&sem->wait_lock);
170 if (atomic_long_try_cmpxchg(&sem->count, &count, count + adjust)) {
175 raw_spin_unlock_irq(&sem->wait_lock);
176 return sem;
180 list_add_tail(&waiter.list, &sem->read_wait);
181 sem->wait_readers++;
188 __ldsem_wake(sem);
190 raw_spin_unlock_irq(&sem->wait_lock);
211 raw_spin_lock_irq(&sem->wait_lock);
213 atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
214 sem->wait_readers--;
216 raw_spin_unlock_irq(&sem->wait_lock);
220 raw_spin_unlock_irq(&sem->wait_lock);
223 return sem;
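down_read_failed() (lines 156-223) is the reader slow path: under wait_lock it swaps the optimistic READ_BIAS for a WAIT_BIAS (line 170), enqueues itself on read_wait (lines 180-181), sleeps, and on timeout removes its WAIT_BIAS and dequeues again (lines 211-216). A C11 model of the entry loop; the count > 0 early-out (the lock was granted while racing, so the READ_BIAS stands) is recalled from the surrounding code.

#include <stdatomic.h>
#include <stdbool.h>

#define ACTIVE_BIAS 1L                    /* assumed */
#define WAIT_BIAS   (-0xffffffffL - 1)    /* assumed */

/* true: now accounted as a sleeper, caller must enqueue on read_wait;
 * false: the lock became readable while racing, the READ_BIAS already won. */
static bool become_waiting_reader(atomic_long *count_p, long count)
{
	long adjust = -ACTIVE_BIAS + WAIT_BIAS;   /* holder bias -> waiter bias */

	for (;;) {
		/* Line 170: trade READ_BIAS for WAIT_BIAS in one cmpxchg. */
		if (atomic_compare_exchange_strong(count_p, &count,
						   count + adjust))
			return true;
		if (count > 0)
			return false;
	}
}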
230 down_write_failed(struct ld_semaphore *sem, long count, long timeout)
237 raw_spin_lock_irq(&sem->wait_lock);
245 if (atomic_long_try_cmpxchg(&sem->count, &count, count + adjust))
248 raw_spin_unlock_irq(&sem->wait_lock);
249 return sem;
253 list_add_tail(&waiter.list, &sem->write_wait);
261 raw_spin_unlock_irq(&sem->wait_lock);
263 raw_spin_lock_irq(&sem->wait_lock);
265 locked = writer_trylock(sem);
271 atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
280 if (!locked && list_empty(&sem->write_wait))
281 __ldsem_wake_readers(sem);
283 raw_spin_unlock_irq(&sem->wait_lock);
290 return sem;
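down_write_failed() (lines 230-290) has the same shape for writers: requeue as a waiter (lines 245-253), retry writer_trylock() under wait_lock after every wakeup (lines 263-265), drop the WAIT_BIAS on timeout (line 271), and, when giving up with no other writer queued, hand the lock over to the readers (lines 280-281). A single-threaded walkthrough of the count across a failed write attempt; the bias values and line 245's adjust of -ACTIVE_BIAS (WRITE_BIAS is assumed to be WAIT_BIAS + ACTIVE_BIAS, so dropping the active part leaves the writer accounted as a sleeper) are recalled, not shown in the matches.

#include <stdio.h>

#define ACTIVE_MASK 0xffffffffL            /* assumed */
#define ACTIVE_BIAS 1L                     /* assumed */
#define WAIT_BIAS   (-ACTIVE_MASK - 1)     /* assumed */
#define WRITE_BIAS  (WAIT_BIAS + ACTIVE_BIAS)

static void show(const char *when, long count)
{
	printf("%-22s count=%#018lx active=%ld contended=%s\n", when,
	       (unsigned long)count, count & ACTIVE_MASK,
	       count < 0 ? "yes" : "no");
}

int main(void)
{
	long count = 2 * ACTIVE_BIAS;   /* two readers hold the lock */
	show("readers active", count);

	count += WRITE_BIAS;            /* optimistic write attempt fails */
	show("fast path fails", count);

	count += -ACTIVE_BIAS;          /* line 245: become a sleeper */
	show("writer queued", count);

	count += -WAIT_BIAS;            /* line 271: timed out, back out */
	show("writer gave up", count);
	return 0;
}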
295 static int __ldsem_down_read_nested(struct ld_semaphore *sem,
300 rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
302 count = atomic_long_add_return(LDSEM_READ_BIAS, &sem->count);
304 lock_contended(&sem->dep_map, _RET_IP_);
305 if (!down_read_failed(sem, count, timeout)) {
306 rwsem_release(&sem->dep_map, _RET_IP_);
310 lock_acquired(&sem->dep_map, _RET_IP_);
314 static int __ldsem_down_write_nested(struct ld_semaphore *sem,
319 rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
321 count = atomic_long_add_return(LDSEM_WRITE_BIAS, &sem->count);
323 lock_contended(&sem->dep_map, _RET_IP_);
324 if (!down_write_failed(sem, count, timeout)) {
325 rwsem_release(&sem->dep_map, _RET_IP_);
329 lock_acquired(&sem->dep_map, _RET_IP_);
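Both nested helpers bracket one atomic add of the lock bias with lockdep annotations: rwsem_acquire()/rwsem_acquire_read() before the attempt, lock_contended() on entering the slow path, rwsem_release() if it fails, lock_acquired() on success. The fast-path success tests live in if-conditions the search does not show; as recalled (treat as an assumption):

#include <stdbool.h>

#define ACTIVE_MASK 0xffffffffL   /* assumed */
#define ACTIVE_BIAS 1L            /* assumed */

/* Read (line 302): any active or waiting writer keeps the count at or
 * below zero, so a positive result after adding READ_BIAS means success. */
static bool read_fast_path_ok(long count)
{
	return count > 0;
}

/* Write (line 321): after adding WRITE_BIAS the caller must be the sole
 * active holder for the fast path to succeed. */
static bool write_fast_path_ok(long count)
{
	return (count & ACTIVE_MASK) == ACTIVE_BIAS;
}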
337 int __sched ldsem_down_read(struct ld_semaphore *sem, long timeout)
340 return __ldsem_down_read_nested(sem, 0, timeout);
346 int ldsem_down_read_trylock(struct ld_semaphore *sem)
348 long count = atomic_long_read(&sem->count);
351 if (atomic_long_try_cmpxchg(&sem->count, &count, count + LDSEM_READ_BIAS)) {
352 rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
353 lock_acquired(&sem->dep_map, _RET_IP_);
363 int __sched ldsem_down_write(struct ld_semaphore *sem, long timeout)
366 return __ldsem_down_write_nested(sem, 0, timeout);
372 int ldsem_down_write_trylock(struct ld_semaphore *sem)
374 long count = atomic_long_read(&sem->count);
377 if (atomic_long_try_cmpxchg(&sem->count, &count, count + LDSEM_WRITE_BIAS)) {
378 rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);
379 lock_acquired(&sem->dep_map, _RET_IP_);
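The two trylock variants (lines 346-353 and 372-379) skip the optimistic add: they read the count and cmpxchg the bias in only if the lock already looks free, so a contended trylock never disturbs the count. A C11 model of the read side; the count >= 0 retry guard is recalled, since only the plain read and the cmpxchg appear in the matches.

#include <stdatomic.h>
#include <stdbool.h>

#define READ_BIAS 1L   /* assumed */

static bool down_read_trylock_model(atomic_long *count_p)
{
	long count = atomic_load(count_p);   /* line 348 */

	/* Retry while the lock still looks readable; a failed
	 * compare-exchange reloads 'count', like try_cmpxchg does. */
	while (count >= 0) {
		if (atomic_compare_exchange_weak(count_p, &count,
						 count + READ_BIAS))
			return true;         /* line 351: got the read lock */
	}
	return false;
}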
389 void ldsem_up_read(struct ld_semaphore *sem)
393 rwsem_release(&sem->dep_map, _RET_IP_);
395 count = atomic_long_add_return(-LDSEM_READ_BIAS, &sem->count);
397 ldsem_wake(sem);
403 void ldsem_up_write(struct ld_semaphore *sem)
407 rwsem_release(&sem->dep_map, _RET_IP_);
409 count = atomic_long_add_return(-LDSEM_WRITE_BIAS, &sem->count);
411 ldsem_wake(sem);
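ldsem_up_read() and ldsem_up_write() (lines 389-411) subtract their bias and call ldsem_wake() when the new count shows queued waiters. The exact wake conditions test the local count rather than sem-> members, so the search omits them; the versions below are recalled and should be treated as an assumption.

#include <stdatomic.h>

#define ACTIVE_MASK 0xffffffffL                /* assumed */
#define READ_BIAS   1L                         /* assumed */
#define WRITE_BIAS  ((-ACTIVE_MASK - 1) + 1L)  /* assumed */

static void wake_stub(void) { /* stand-in for ldsem_wake() */ }

static void up_read_model(atomic_long *count_p)
{
	/* Line 395: drop our reader bias. */
	long count = atomic_fetch_sub(count_p, READ_BIAS) - READ_BIAS;

	/* Wake only as the last active holder, with waiters queued. */
	if (count < 0 && (count & ACTIVE_MASK) == 0)
		wake_stub();
}

static void up_write_model(atomic_long *count_p)
{
	/* Line 409: drop the writer bias. */
	long count = atomic_fetch_sub(count_p, WRITE_BIAS) - WRITE_BIAS;

	/* A releasing writer wakes whenever anyone is still queued. */
	if (count < 0)
		wake_stub();
}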
417 int ldsem_down_read_nested(struct ld_semaphore *sem, int subclass, long timeout)
420 return __ldsem_down_read_nested(sem, subclass, timeout);
423 int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
427 return __ldsem_down_write_nested(sem, subclass, timeout);