Lines matching refs: vma_lock
1275 * the vma_lock for each device, and only then get each
1294 mutex_unlock(&tmp->vma_lock);
1311 mutex_unlock(&tmp->vma_lock);
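
The hits at 1275-1311 are the multi-device reset path: the comment fragment at 1275 says the vma_lock for each device is taken first, and only then each memory_lock, and the unlocks at 1294/1311 are the back-out path. Below is a minimal sketch of that two-phase acquisition; vma_lock, memory_lock, and the helper's 1-on-zap/0-on-contention contract (line 1457) come from the listing, while the struct name, the reset_node list field, and the surrounding control flow are illustrative assumptions:

/*
 * Sketch of the two-phase ordering implied around line 1275: zap and
 * hold vma_lock on every device in the set, and only then take each
 * memory_lock write lock. The reset_node field is hypothetical;
 * vma_lock/memory_lock match the listing.
 */
static int lock_mem_for_reset(struct list_head *devs)
{
	struct vfio_pci_device *cur, *tmp;

	/* Phase 1: zap and hold the vma_lock for each device ... */
	list_for_each_entry(cur, devs, reset_node) {
		/* returns 1 on zap + vma_lock acquired, 0 on contention */
		if (!vfio_pci_zap_and_vma_lock(cur, true))
			goto err_undo;
	}

	/* Phase 2: ... and only then get each memory_lock. */
	list_for_each_entry(cur, devs, reset_node)
		down_write(&cur->memory_lock);

	return 0;

err_undo:
	/* Back out the vma_locks taken so far (cf. lines 1294, 1311). */
	list_for_each_entry(tmp, devs, reset_node) {
		if (tmp == cur)
			break;
		mutex_unlock(&tmp->vma_lock);
	}
	return -EBUSY;
}
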
1457 /* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */
1464 * vma_lock is nested under mmap_lock for vm_ops callback paths.
1469 * When zapping vmas we need to maintain the mmap_lock => vma_lock
1470 * ordering, which requires using vma_lock to walk vma_list to
1471 * acquire an mm, then dropping vma_lock to get the mmap_lock and
1472 * reacquiring vma_lock. This logic is derived from similar
1479 * vma_lock, thus memory_lock is nested under vma_lock.
1481 * This enables the vm_ops.fault callback to acquire vma_lock,
1489 if (!mutex_trylock(&vdev->vma_lock))
1492 mutex_lock(&vdev->vma_lock);
1508 mutex_unlock(&vdev->vma_lock);
1519 if (!mutex_trylock(&vdev->vma_lock)) {
1525 mutex_lock(&vdev->vma_lock);
1540 mutex_unlock(&vdev->vma_lock);
1550 mutex_unlock(&vdev->vma_lock);
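
Lines 1457-1550 are the zap helper whose contract the comment at 1457 states. The fragments at 1469-1472 describe the actual dance: vma_lock is used to walk vma_list and pick up an mm, then dropped so mmap_lock can be taken first, then reacquired to zap; once vma_list is empty, memory_lock can be held for write without ever needing mmap_lock (line 1479). A condensed sketch of the non-@try path follows, assuming the upstream driver's struct and helper names from memory and eliding the retry loop and stale-entry pruning the real helper performs:

/*
 * Condensed sketch of the ordering described at lines 1469-1472.
 * vma_lock nests under mmap_lock (line 1464), so we may not take
 * mmap_lock while holding vma_lock: peek at an mm under vma_lock,
 * drop it, take mmap_lock, then retake vma_lock to zap.
 */
static void zap_one_mm(struct vfio_pci_device *vdev)
{
	struct vfio_pci_mmap_vma *mmap_vma, *tmp;
	struct mm_struct *mm;

	mutex_lock(&vdev->vma_lock);
	if (list_empty(&vdev->vma_list)) {
		mutex_unlock(&vdev->vma_lock);
		return;
	}
	mmap_vma = list_first_entry(&vdev->vma_list,
				    struct vfio_pci_mmap_vma, vma_next);
	mm = mmap_vma->vma->vm_mm;
	if (!mmget_not_zero(mm)) {		/* owner is exiting */
		mutex_unlock(&vdev->vma_lock);
		return;
	}
	mutex_unlock(&vdev->vma_lock);		/* drop before mmap_lock */

	mmap_read_lock(mm);			/* mmap_lock first ... */
	mutex_lock(&vdev->vma_lock);		/* ... then vma_lock */
	list_for_each_entry_safe(mmap_vma, tmp, &vdev->vma_list, vma_next) {
		struct vm_area_struct *vma = mmap_vma->vma;

		if (vma->vm_mm != mm)
			continue;
		list_del(&mmap_vma->vma_next);
		kfree(mmap_vma);
		zap_vma_ptes(vma, vma->vm_start,
			     vma->vm_end - vma->vm_start);
	}
	mutex_unlock(&vdev->vma_lock);
	mmap_read_unlock(mm);
	mmput(mm);
}
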
1572 /* Caller holds vma_lock */
1602 mutex_lock(&vdev->vma_lock);
1610 mutex_unlock(&vdev->vma_lock);
1620 mutex_lock(&vdev->vma_lock);
1654 mutex_unlock(&vdev->vma_lock);
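
The hits at 1572-1654 are the vm_ops side that the comment at 1481 promises stays deadlock-free: the fault handler runs with mmap_lock already held, takes vma_lock, then memory_lock for read. A sketch of that nesting; the helpers __vfio_pci_memory_enabled() and __vfio_pci_add_vma() follow the upstream driver but are reproduced from memory, so treat their exact signatures as assumptions:

/*
 * Sketch of the vm_ops.fault nesting from lines 1464/1479/1481:
 * mmap_lock (held by the fault path) -> vma_lock -> memory_lock
 * (read side). Unlocks happen in reverse order.
 */
static vm_fault_t sketch_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct vfio_pci_device *vdev = vma->vm_private_data;
	vm_fault_t ret = VM_FAULT_NOPAGE;

	mutex_lock(&vdev->vma_lock);		/* nested under mmap_lock */
	down_read(&vdev->memory_lock);		/* nested under vma_lock */

	if (!__vfio_pci_memory_enabled(vdev)) {
		ret = VM_FAULT_SIGBUS;		/* BAR access while disabled */
		goto out;
	}

	if (__vfio_pci_add_vma(vdev, vma)) {	/* track for later zapping */
		ret = VM_FAULT_OOM;
		goto out;
	}

	if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot))
		ret = VM_FAULT_SIGBUS;
out:
	up_read(&vdev->memory_lock);
	mutex_unlock(&vdev->vma_lock);
	return ret;
}
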
2046 mutex_init(&vdev->vma_lock);
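
Line 2046 is the probe-time setup: the mutex is initialized once per device, alongside the state it guards. A sketch of the related fields, with the grouping assumed from the lock's role rather than copied from the real struct layout:

/* Per-device pieces guarded by vma_lock; the field names follow the
 * listing (vma_lock, vma_list, memory_lock) but the surrounding
 * struct is illustrative. */
struct vma_lock_state {
	struct mutex		vma_lock;	/* protects vma_list */
	struct list_head	vma_list;	/* vmas mapping the device */
	struct rw_semaphore	memory_lock;	/* memory-enable state */
};

static void vma_lock_state_init(struct vma_lock_state *s)
{
	mutex_init(&s->vma_lock);
	INIT_LIST_HEAD(&s->vma_list);
	init_rwsem(&s->memory_lock);
}
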