Lines Matching defs:lock
50 * Take the heavyweight lock.
52 * \param lock lock pointer.
54 * \return one if the lock was acquired, or zero otherwise.
56 * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
63 volatile unsigned int *lock = &lock_data->hw_lock->lock;
67 old = *lock;
75 prev = cmpxchg(lock, old, new);
82 DRM_ERROR("%d holds heavyweight lock\n",
90 /* Have lock */
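The fragments above are the heart of drm_lock_take(): compute a new lock word from the observed old one and retry until the cmpxchg lands. A minimal userspace sketch of the same loop, using C11 atomics in place of the kernel's cmpxchg() and omitting the spinlock and waiter accounting (the bit masks match the uapi drm.h definitions):

    #include <stdatomic.h>

    #define _DRM_LOCK_HELD 0x80000000U
    #define _DRM_LOCK_CONT 0x40000000U
    #define _DRM_LOCKING_CONTEXT(l) ((l) & ~(_DRM_LOCK_HELD | _DRM_LOCK_CONT))

    /* Retry until the swap succeeds against the old value we computed
     * from: if the lock is held, only flag contention; if it is free,
     * claim it for this context. */
    static int lock_take(atomic_uint *lock, unsigned int context)
    {
            unsigned int old, new;

            do {
                    old = atomic_load(lock);
                    if (old & _DRM_LOCK_HELD)
                            new = old | _DRM_LOCK_CONT; /* held: mark contended */
                    else
                            new = context | _DRM_LOCK_HELD; /* free: claim it */
            } while (!atomic_compare_exchange_weak(lock, &old, new));

            /* We own the lock only if it was free when we swapped. */
            return !(old & _DRM_LOCK_HELD);
    }

Note the one mechanical difference: the kernel's cmpxchg() returns the previous value, so the loop tests prev != old, while the C11 compare-exchange returns a success flag and writes the observed value back into old.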
97 * This takes the lock forcibly and hands it to the given context. It should ONLY be used
98 * inside *_unlock to give the lock to the kernel before calling *_dma_schedule.
101 * \param lock lock pointer.
105 * Resets the lock file pointer.
106 * Marks the lock as held by the given context, via the \p cmpxchg instruction.
112 volatile unsigned int *lock = &lock_data->hw_lock->lock;
116 old = *lock;
118 prev = cmpxchg(lock, old, new);
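drm_lock_transfer() uses the same retry loop but never inspects the old value; it simply overwrites the owner. Continuing the sketch above (the kernel version also resets the lock's file pointer and always returns 1):

    /* Forced hand-off: unlike lock_take(), the HELD bit in the old
     * value is ignored -- whatever is there gets replaced. */
    static void lock_transfer(atomic_uint *lock, unsigned int context)
    {
            unsigned int old = atomic_load(lock);

            while (!atomic_compare_exchange_weak(lock, &old,
                                                 context | _DRM_LOCK_HELD))
                    ; /* old is refreshed on failure; retry the same new value */
    }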
127 volatile unsigned int *lock = &lock_data->hw_lock->lock;
139 old = *lock;
141 prev = cmpxchg(lock, old, new);
145 DRM_ERROR("%d freed heavyweight lock held by %d\n",
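The free path strips the HELD and CONT bits and then checks whether a *different* context was actually holding the lock, which is what trips the DRM_ERROR above. A sketch under the same assumptions as the earlier ones:

    #include <stdio.h>

    /* Release: keep only the context bits, then warn if the lock was
     * held by someone other than the caller. */
    static int lock_free(atomic_uint *lock, unsigned int context)
    {
            unsigned int old, new;

            do {
                    old = atomic_load(lock);
                    new = _DRM_LOCKING_CONTEXT(old);
            } while (!atomic_compare_exchange_weak(lock, &old, new));

            if ((old & _DRM_LOCK_HELD) && _DRM_LOCKING_CONTEXT(old) != context) {
                    fprintf(stderr, "%u freed lock held by %u\n",
                            context, _DRM_LOCKING_CONTEXT(old));
                    return 1;
            }
            return 0; /* the kernel wakes the lock_queue here */
    }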
162 * Add the current task to the lock wait queue, and attempt to take the lock.
168 struct drm_lock *lock = data;
177 if (lock->context == DRM_KERNEL_CONTEXT) {
179 task_pid_nr(current), lock->context);
183 DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
184 lock->context, task_pid_nr(current),
185 master->lock.hw_lock ? master->lock.hw_lock->lock : -1,
186 lock->flags);
188 add_wait_queue(&master->lock.lock_queue, &entry);
189 spin_lock_bh(&master->lock.spinlock);
190 master->lock.user_waiters++;
191 spin_unlock_bh(&master->lock.spinlock);
195 if (!master->lock.hw_lock) {
201 if (drm_lock_take(&master->lock, lock->context)) {
202 master->lock.file_priv = file_priv;
203 master->lock.lock_time = jiffies;
204 break; /* Got lock */
216 spin_lock_bh(&master->lock.spinlock);
217 master->lock.user_waiters--;
218 spin_unlock_bh(&master->lock.spinlock);
220 remove_wait_queue(&master->lock.lock_queue, &entry);
222 DRM_DEBUG("%d %s\n", lock->context,
223 ret ? "interrupted" : "has lock");
230 dev->sigdata.context = lock->context;
231 dev->sigdata.lock = master->lock.hw_lock;
234 if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
238 lock->context);
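Around these fragments the ioctl implements a classic wait-queue acquire loop: register as a waiter, try the take, sleep, retry. A rough pthread analogue, where wait_mtx/wait_cv are hypothetical stand-ins for lock_queue and the signal and device-teardown checks are omitted:

    #include <pthread.h>

    static pthread_mutex_t wait_mtx = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  wait_cv  = PTHREAD_COND_INITIALIZER;

    /* Acquire loop: keep retrying lock_take() and block between
     * attempts, the way the ioctl sleeps via schedule(). */
    static void lock_acquire(atomic_uint *lock, unsigned int context)
    {
            pthread_mutex_lock(&wait_mtx);
            while (!lock_take(lock, context))
                    pthread_cond_wait(&wait_cv, &wait_mtx); /* "schedule()" */
            pthread_mutex_unlock(&wait_mtx);
    }

A releaser would take wait_mtx and pthread_cond_broadcast(&wait_cv) where the kernel calls wake_up_interruptible(); the real loop additionally bails out with -EINTR on signals and sends SIGTERM if hw_lock has vanished.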
255 * Transfer and free the lock.
259 struct drm_lock *lock = data;
265 if (lock->context == DRM_KERNEL_CONTEXT) {
267 task_pid_nr(current), lock->context);
271 if (drm_legacy_lock_free(&master->lock, lock->context)) {
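For completeness, this is roughly how a legacy client would exercise the two ioctls. The device path and context value are placeholders (real code obtains a context from the context ioctls, and context 0, DRM_KERNEL_CONTEXT, is rejected as seen above), and on kernels built without legacy DRM support both calls simply fail:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <drm/drm.h> /* struct drm_lock, DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK */

    int main(void)
    {
            int fd = open("/dev/dri/card0", O_RDWR);
            struct drm_lock req = { .context = 1, .flags = 0 };

            if (fd < 0)
                    return 1;
            if (ioctl(fd, DRM_IOCTL_LOCK, &req) == 0) {
                    /* ... exclusive hardware access here ... */
                    ioctl(fd, DRM_IOCTL_UNLOCK, &req);
            }
            close(fd);
            return 0;
    }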
279 * This function returns immediately and takes the hw lock with the kernel context if it is free.
283 * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held
284 * by a blocked process. (In the latter case an explicit wait for the hardware lock would cause a deadlock, which is why the "idlelock" was invented.)
312 volatile unsigned int *lock = &lock_data->hw_lock->lock;
318 old = *lock;
319 prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
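The release path shown above swaps the whole lock word straight to DRM_KERNEL_CONTEXT (0), dropping HELD and CONT in a single store. A sketch under the same assumptions as the earlier ones (the kernel_waiters and idle_has_lock bookkeeping is left out):

    #define DRM_KERNEL_CONTEXT 0 /* context 0 is reserved for the kernel */

    /* Idlelock release: install the kernel context wholesale, clearing
     * HELD and CONT in one shot; the kernel then wakes the lock_queue. */
    static void idlelock_release(atomic_uint *lock)
    {
            unsigned int old = atomic_load(lock);

            while (!atomic_compare_exchange_weak(lock, &old,
                                                 DRM_KERNEL_CONTEXT))
                    ;
    }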
334 return (file_priv->lock_count && master->lock.hw_lock &&
335 _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
336 master->lock.file_priv == file_priv);
343 /* if the master has gone away we can't do anything with the lock */
348 DRM_DEBUG("File %p released, freeing lock for context %d\n",
349 filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
350 drm_legacy_lock_free(&file_priv->master->lock,
351 _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
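The release-on-close path hinges on the three-part ownership test above: the file must have taken the lock at least once, the hardware lock must still exist and be held, and this file must be the recorded holder. A condensed sketch, with a hypothetical lock_state struct standing in for drm_master's lock data:

    /* Hypothetical stand-in for the master's lock bookkeeping. */
    struct lock_state {
            atomic_uint *hw_lock; /* NULL once the master is gone */
            void        *holder;  /* file recorded when the lock was taken */
    };

    /* Mirror of the i-have-hw-lock test used at file-close time. */
    static int i_have_hw_lock(struct lock_state *ls, void *file, int lock_count)
    {
            return lock_count && ls->hw_lock &&
                   (atomic_load(ls->hw_lock) & _DRM_LOCK_HELD) &&
                   ls->holder == file;
    }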
362 * Since the master is disappearing, so is the possibility to lock.
365 if (master->lock.hw_lock) {
366 if (dev->sigdata.lock == master->lock.hw_lock)
367 dev->sigdata.lock = NULL;
368 master->lock.hw_lock = NULL;
369 master->lock.file_priv = NULL;
370 wake_up_interruptible_all(&master->lock.lock_queue);
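Teardown inverts the acquire path: invalidate the shared pointers first, then wake every sleeper so blocked waiters observe the missing hw_lock and error out instead of waiting forever. Reusing the hypothetical pieces from the earlier sketches:

    /* Master cleanup: drop all references to the hardware lock, then
     * wake everyone (analogue of wake_up_interruptible_all()). */
    static void lock_master_cleanup(struct lock_state *ls)
    {
            pthread_mutex_lock(&wait_mtx);
            ls->hw_lock = NULL;
            ls->holder = NULL;
            pthread_cond_broadcast(&wait_cv);
            pthread_mutex_unlock(&wait_mtx);
    }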