Lines Matching refs:vma_lock
263 * hugetlb vma_lock helper routines
268 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
270 down_read(&vma_lock->rw_sema);
281 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
283 up_read(&vma_lock->rw_sema);
294 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
296 down_write(&vma_lock->rw_sema);
307 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
309 up_write(&vma_lock->rw_sema);
321 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
323 return down_write_trylock(&vma_lock->rw_sema);
336 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
338 lockdep_assert_held(&vma_lock->rw_sema);
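The matches from 263 through 338 are the bodies of the six vma_lock helper routines: read lock (268/270), read unlock (281/283), write lock (294/296), write unlock (307/309), write trylock (321/323), and a lockdep assertion (336/338). Below is a minimal sketch of the shape they share, assuming the structure layout implied by the fields accessed in this listing, and assuming a sharable-mapping guard, since the lock is only allocated for sharable mappings (see 401-430 and the comment at 4857); the guard name __vma_shareable_lock() is an assumption, not taken from the matches.

/* Layout implied by the field accesses in this listing. */
struct hugetlb_vma_lock {
	struct kref refs;		/* lifetime of the structure itself */
	struct rw_semaphore rw_sema;	/* the per-vma lock proper */
	struct vm_area_struct *vma;	/* backpointer, cleared at teardown */
};

/* Read-side helper (268/270); the guard is an assumption. */
void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		down_read(&vma_lock->rw_sema);
	}
}

The other five helpers follow the same shape, substituting up_read(), down_write(), up_write(), down_write_trylock(), and lockdep_assert_held() respectively; note the trylock variant has to report success for mappings that carry no lock at all, so callers can proceed as if they held it.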
348 struct hugetlb_vma_lock *vma_lock = container_of(kref,
351 kfree(vma_lock);
354 static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
356 struct vm_area_struct *vma = vma_lock->vma;
359 * vma_lock structure may or may not be released as a result of put,
361 * Semaphore synchronizes access to vma_lock->vma field.
363 vma_lock->vma = NULL;
365 up_write(&vma_lock->rw_sema);
366 kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
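Lines 348 through 366 are the teardown pair: the kref release callback (348-351) and __hugetlb_vma_unlock_write_put() (354-366), which detaches a lock from its vma while holding the semaphore in write mode. The sketch below spells out the ordering; the vma->vm_private_data = NULL store is an assumption, since such a line contains no "vma_lock" token and so could not appear among these matches.

/* kref release callback (348-351): runs when the last reference drops. */
static void hugetlb_vma_lock_release(struct kref *kref)
{
	struct hugetlb_vma_lock *vma_lock = container_of(kref,
			struct hugetlb_vma_lock, refs);

	kfree(vma_lock);
}

static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
{
	struct vm_area_struct *vma = vma_lock->vma;

	/*
	 * The vma_lock structure may or may not be released as a result
	 * of the put, but it certainly stops being attached to the vma,
	 * so both pointers are severed while the semaphore is still held:
	 * the semaphore is what synchronizes access to vma_lock->vma.
	 */
	vma_lock->vma = NULL;
	vma->vm_private_data = NULL;	/* assumed; not visible in the matches */
	up_write(&vma_lock->rw_sema);
	kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
}

The order is the point: both pointers are cleared before up_write() so any later acquirer of the semaphore observes the detachment, and kref_put() comes last so the function never touches the semaphore after the structure may have been freed.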
372 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
374 __hugetlb_vma_unlock_write_put(vma_lock);
392 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
394 down_write(&vma_lock->rw_sema);
395 __hugetlb_vma_unlock_write_put(vma_lock);
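Lines 372-374 and 392-395 are two thin wrappers around that helper, split by lock state: the first is for callers that already hold the semaphore in write mode, the second takes down_write() itself before handing off. The wrapper names below are assumptions consistent with the call pattern, and guards for vmas without a lock are elided.

/* Caller already holds vma_lock in write mode (372-374); name assumed. */
static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
{
	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

	__hugetlb_vma_unlock_write_put(vma_lock);
}

/* Caller holds no lock (392-395): take write mode, then detach; name assumed. */
static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
{
	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

	down_write(&vma_lock->rw_sema);
	__hugetlb_vma_unlock_write_put(vma_lock);
}

Taking write mode before detaching guarantees no reader or writer is still inside the lock when its backpointers are severed.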
401 struct hugetlb_vma_lock *vma_lock;
411 vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
412 if (!vma_lock) {
427 kref_init(&vma_lock->refs);
428 init_rwsem(&vma_lock->rw_sema);
429 vma_lock->vma = vma;
430 vma->vm_private_data = vma_lock;
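Lines 401 through 430 are the constructor. The match at 412 shows the kmalloc() failure branch opening but not its body, so the recovery policy below is an assumption: a vma without the lock simply cannot participate in pmd sharing (consistent with the comment at 5470), a degradation rather than an error. The function name and the two early-return guards are likewise assumed.

static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
{
	struct hugetlb_vma_lock *vma_lock;

	/* Assumed guard: the lock only exists for sharable mappings. */
	if (!vma || !(vma->vm_flags & VM_MAYSHARE))
		return;

	/* Assumed guard: do not clobber a lock that is already attached. */
	if (vma->vm_private_data)
		return;

	vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
	if (!vma_lock) {
		/*
		 * Assumed policy: continue without a lock; the vma just
		 * becomes ineligible for pmd sharing (see 5470).
		 */
		return;
	}

	kref_init(&vma_lock->refs);	/* refcount starts at 1 */
	init_rwsem(&vma_lock->rw_sema);
	vma_lock->vma = vma;
	vma->vm_private_data = vma_lock;
}

kref_init() starting the count at 1 pairs with the single kref_put() in the teardown path above: the vma's vm_private_data pointer is the one reference being counted.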
1215 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
1217 if (vma_lock && vma_lock->vma != vma)
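The match at 1215-1217 is the duplication path: when a vma is copied (vm_area_dup() memcpys the whole structure), the child's vm_private_data still points at the parent's lock, and that lock's backpointer still names the parent. The inequality test detects exactly this case; the consequence sketched below, clearing the copied pointer so the child neither shares nor double-frees the parent's lock, is an assumption since the store itself is not among the matches.

/* Sketch of the dup-time check (1215-1217); the NULL store is assumed. */
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

if (vma_lock && vma_lock->vma != vma)
	vma->vm_private_data = NULL;	/* stale copy from the parent vma */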
4857 * vma_lock structure for shareable mappings is vma specific.
4859 * new structure. Before clearing, make sure vma_lock is not
4863 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
4865 if (vma_lock) {
4866 if (vma_lock->vma != vma) {
4870 pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__);
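Lines 4863-4870 apply the same inequality test on the vma-open path, but here the stale pointer is cleared and a fresh lock is allocated, while a lock whose backpointer already names this vma is unexpected and draws the pr_warn() at 4870. A sketch of the branch structure the fragments imply; the VM_MAYSHARE guard is assumed from the comment at 4857.

if (vma->vm_flags & VM_MAYSHARE) {	/* assumed guard */
	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

	if (vma_lock) {
		if (vma_lock->vma != vma) {
			/* Copied via vm_area_dup(): drop it, make our own. */
			vma->vm_private_data = NULL;
			hugetlb_vma_lock_alloc(vma);
		} else
			pr_warn("HugeTLB: vma_lock already exists in %s.\n",
				__func__);
	} else
		hugetlb_vma_lock_alloc(vma);
}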
5470 * When the vma_lock is freed, this makes the vma ineligible
5663 * Drop hugetlb_fault_mutex and vma_lock before
5664 * unmapping. Unmapping needs to hold vma_lock
5665 * in write mode. Dropping vma_lock in read mode
5825 * vma_lock and hugetlb_fault_mutex must be dropped before handling
6929 * Also, vma_lock (vm_private_data) is required for sharing.
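The remaining matches (5470, 5663-5665, 5825, 6929) are comments encoding the locking discipline around the lock: losing the vma_lock makes a vma ineligible for pmd sharing (5470, 6929), the fault path holds hugetlb_fault_mutex together with the vma_lock in read mode, and both must be dropped before unmapping (which needs the vma_lock in write mode) or before handing a fault off (5825). A condensed sketch of that fault-path ordering follows; the enclosing frame, the condition, and the unmap/fault calls are placeholders, not reconstructed code.

/* mapping, idx, vma: supplied by the enclosing fault handler (placeholders). */
u32 hash = hugetlb_fault_mutex_hash(mapping, idx);

mutex_lock(&hugetlb_fault_mutex_table[hash]);	/* first: fault mutex */
hugetlb_vma_lock_read(vma);			/* then: vma_lock, read mode */

if (must_unmap) {				/* placeholder condition */
	/*
	 * Per 5663-5665: unmapping needs to hold the vma_lock in write
	 * mode, so the fault path drops both locks before unmapping;
	 * the comment's justification continues past these matches.
	 */
	hugetlb_vma_unlock_read(vma);
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
	unmap_the_page(vma, address);		/* placeholder */
} else {
	handle_the_fault(vma, address);		/* placeholder */
	hugetlb_vma_unlock_read(vma);
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
}

Both branches release the vma_lock before the fault mutex, mirroring the acquisition order in reverse.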