Lines Matching defs:anon_vma

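All matches below appear to come from mm/mmap.c in the Linux kernel; the leading number on each line is the source line of the match.
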
405 struct anon_vma *anon_vma = vma->anon_vma;
408 if (anon_vma) {
409 anon_vma_lock_read(anon_vma);
412 anon_vma_unlock_read(anon_vma);
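
The matches at 405-412 follow the standard read-side pattern for walking a vma's anon_vma_chain; in mainline mm/mmap.c this cluster belongs to the debug walker validate_mm(). A reconstruction from a recent tree (abridged; anon_vma_interval_tree_verify() is only built under CONFIG_DEBUG_VM_RB, and details vary by kernel version):

    struct anon_vma *anon_vma = vma->anon_vma;
    struct anon_vma_chain *avc;

    if (anon_vma) {
        anon_vma_lock_read(anon_vma);
        /* Verify each interval-tree node this vma hangs off. */
        list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
            anon_vma_interval_tree_verify(avc);
        anon_vma_unlock_read(anon_vma);
    }
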
503 * vma has some anon_vma assigned, and is already inserted on that
504 * anon_vma's interval trees.
507 * vma must be removed from the anon_vma's interval trees using
514 * the root anon_vma's mutex.
522 anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
531 anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
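
Lines 503-531 are the comment block and bodies of the pre/post-update helpers: before a vma's vm_start, vm_end or vm_pgoff change, the vma is pulled out of every anon_vma interval tree it sits in, then reinserted afterwards, all under the exclusive mmap_lock and the root anon_vma lock. Reconstructed from mm/mmap.c (minor details vary by version):

    static inline void
    anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
    {
        struct anon_vma_chain *avc;

        /* Caller holds mmap_lock for writing and the root anon_vma lock. */
        list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
            anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
    }

    static inline void
    anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
    {
        struct anon_vma_chain *avc;

        list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
            anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
    }
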
754 struct anon_vma *anon_vma = NULL;
801 * If next doesn't have anon_vma, import from vma after
804 if (remove_next == 2 && !next->anon_vma)
830 * make sure the expanding vma has anon_vma set if the
833 if (exporter && exporter->anon_vma && !importer->anon_vma) {
836 importer->anon_vma = exporter->anon_vma;
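
Lines 754-836 are the exporter/importer step in __vma_adjust(): when a boundary shift moves pages from a shrinking vma (the exporter) into an expanding one (the importer), the importer must inherit the exporter's anon_vma so the imported anon pages stay reachable via rmap. A reconstruction of that step:

    /*
     * Easily overlooked: when mprotect shifts the boundary, make sure
     * the expanding vma has an anon_vma set if the shrinking vma had
     * one, to cover any anon pages imported.
     */
    if (exporter && exporter->anon_vma && !importer->anon_vma) {
        int error;

        importer->anon_vma = exporter->anon_vma;
        /* Link the importer into the exporter's anon_vma hierarchy. */
        error = anon_vma_clone(importer, exporter);
        if (error)
            return error;
    }
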
865 anon_vma = vma->anon_vma;
866 if (!anon_vma && adjust_next)
867 anon_vma = next->anon_vma;
868 if (anon_vma) {
869 VM_WARN_ON(adjust_next && next->anon_vma &&
870 anon_vma != next->anon_vma);
871 anon_vma_lock_write(anon_vma);
943 if (anon_vma) {
947 anon_vma_unlock_write(anon_vma);
963 if (next->anon_vma)
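
Lines 865-963 show how __vma_adjust() brackets the actual vm_start/vm_end/vm_pgoff updates: a single write lock on the (shared) anon_vma covers taking both vmas out of the interval trees and reinserting them afterwards. A sketch of that shape (heavily abridged; the update itself and the mapping->i_mmap handling are elided):

    anon_vma = vma->anon_vma;
    if (!anon_vma && adjust_next)
        anon_vma = next->anon_vma;
    if (anon_vma) {
        /* vma and next must share the anon_vma if both are adjusted. */
        VM_WARN_ON(adjust_next && next->anon_vma &&
                   anon_vma != next->anon_vma);
        anon_vma_lock_write(anon_vma);
        anon_vma_interval_tree_pre_update_vma(vma);
        if (adjust_next)
            anon_vma_interval_tree_pre_update_vma(next);
    }

    /* ... update vm_start, vm_end, vm_pgoff of vma (and next) ... */

    if (anon_vma) {
        anon_vma_interval_tree_post_update_vma(vma);
        if (adjust_next)
            anon_vma_interval_tree_post_update_vma(next);
        anon_vma_unlock_write(anon_vma);
    }

The match at 963 is the removal path: when next is deleted outright, its anon_vma linkage is folded into vma via anon_vma_merge(vma, next).
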
1062 static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
1063 struct anon_vma *anon_vma2,
1068 * parents. This can improve scalability by reducing anon_vma lock contention.
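
A reconstruction of is_mergeable_anon_vma() itself (lines 1062-1068): two anon_vmas are compatible when they are equal, or when one side has none and the vma's anon_vma_chain is singular, i.e. not cloned from a parent:

    static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
                                            struct anon_vma *anon_vma2,
                                            struct vm_area_struct *vma)
    {
        /*
         * The list_is_singular() test avoids merging a VMA cloned from
         * its parents, which reduces anon_vma lock contention.
         */
        if ((!anon_vma1 || !anon_vma2) && (!vma ||
            list_is_singular(&vma->anon_vma_chain)))
            return 1;
        return anon_vma1 == anon_vma2;
    }
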
1077 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
1081 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
1089 struct anon_vma *anon_vma, struct file *file,
1095 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
1103 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
1107 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
1111 struct anon_vma *anon_vma, struct file *file,
1117 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
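
Lines 1077-1117 are the twin predicates used by vma_merge(): can_vma_merge_before() checks a candidate that would follow the new range, can_vma_merge_after() one that would precede it; they differ only in the pgoff arithmetic. Reconstructed (later kernels pass extra arguments such as a vm_userfaultfd_ctx, elided here):

    static int
    can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
                         struct anon_vma *anon_vma, struct file *file,
                         pgoff_t vm_pgoff)
    {
        if (is_mergeable_vma(vma, vm_flags, file /* , ... */) &&
            is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
            if (vma->vm_pgoff == vm_pgoff)
                return 1;
        }
        return 0;
    }

    static int
    can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
                        struct anon_vma *anon_vma, struct file *file,
                        pgoff_t vm_pgoff)
    {
        if (is_mergeable_vma(vma, vm_flags, file /* , ... */) &&
            is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
            pgoff_t vm_pglen = vma_pages(vma);

            if (vma->vm_pgoff + vm_pglen == vm_pgoff)
                return 1;
        }
        return 0;
    }
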
1172 struct anon_vma *anon_vma, struct file *file,
1204 anon_vma, file, pgoff,
1212 anon_vma, file,
1215 is_mergeable_anon_vma(prev->anon_vma,
1216 next->anon_vma, NULL)) {
1236 anon_vma, file, pgoff+pglen,
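
Lines 1172-1236 are vma_merge() deciding between its merge cases. The anon_vma argument feeds the shape below: merging with prev needs can_vma_merge_after(); collapsing prev, the new range and next additionally requires the two neighbours' anon_vmas to be mergeable with each other. A heavily abridged sketch:

    /* Can the new range merge with its predecessor? */
    if (prev && prev->vm_end == addr &&
        mpol_equal(vma_policy(prev), policy) &&
        can_vma_merge_after(prev, vm_flags, anon_vma, file, pgoff /* , ... */)) {
        /* Can the successor be folded in as well? */
        if (next && end == next->vm_start &&
            mpol_equal(policy, vma_policy(next)) &&
            can_vma_merge_before(next, vm_flags, anon_vma, file,
                                 pgoff + pglen /* , ... */) &&
            is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) {
            /* merge prev, the new range and next into one vma */
        } else {
            /* merge the new range into prev only */
        }
    }
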
1262 * at sharing an anon_vma.
1267 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
1271 * really matter for the anon_vma sharing case.
1283 * Do some basic sanity checking to see if we can re-use the anon_vma
1286 * to share the anon_vma.
1289 * the anon_vma of 'old' is concurrently in the process of being set up
1296 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
1297 * is to return an anon_vma that is "complex" due to having gone through
1304 static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
1307 struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
1309 if (anon_vma && list_is_singular(&old->anon_vma_chain))
1310 return anon_vma;
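
Lines 1304-1310 are the body of reusable_anon_vma(); the long comment before it (1262-1297) explains why only a "simple" anon_vma is reused. Reconstructed:

    static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old,
                                              struct vm_area_struct *a,
                                              struct vm_area_struct *b)
    {
        if (anon_vma_compatible(a, b)) {
            struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);

            /*
             * Reuse only a "simple" anon_vma: one that old set up for
             * itself and still uses alone (a singular chain), so we do
             * not inherit a complex hierarchy produced by fork.
             */
            if (anon_vma && list_is_singular(&old->anon_vma_chain))
                return anon_vma;
        }
        return NULL;
    }

Per the comment above it, old's anon_vma may concurrently be set up by a page fault, hence the READ_ONCE().
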
1317 * neighbouring vmas for a suitable anon_vma, before it goes off
1318 * to allocate a new anon_vma. It checks because a repetitive
1323 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
1325 struct anon_vma *anon_vma = NULL;
1329 anon_vma = reusable_anon_vma(vma->vm_next, vma, vma->vm_next);
1330 if (anon_vma)
1331 return anon_vma;
1336 anon_vma = reusable_anon_vma(vma->vm_prev, vma->vm_prev, vma);
1339 * We might reach here with anon_vma == NULL if we can't find
1340 * any reusable anon_vma.
1344 * or lead to too many vmas hanging off the same anon_vma.
1348 return anon_vma;
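
And the caller, find_mergeable_anon_vma() (lines 1323-1348), which anon_vma_prepare() uses to try a neighbour's anon_vma before allocating a fresh one. Reconstructed from a tree that still links vmas with vm_next/vm_prev, as the matched lines show (newer kernels walk the maple tree instead):

    struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
    {
        struct anon_vma *anon_vma = NULL;

        /* Try the vma after us first. */
        if (vma->vm_next) {
            anon_vma = reusable_anon_vma(vma->vm_next, vma, vma->vm_next);
            if (anon_vma)
                return anon_vma;
        }

        /* Then the vma before us. */
        if (vma->vm_prev)
            anon_vma = reusable_anon_vma(vma->vm_prev, vma->vm_prev, vma);

        /*
         * We might reach here with anon_vma == NULL if no reusable
         * anon_vma was found; the caller then allocates a new one.
         */
        return anon_vma;
    }
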
1578 * Set pgoff according to addr for anon_vma.
2479 /* Check that both stack segments have the same anon_vma? */
2482 /* We must make sure the anon_vma is allocated. */
2489 * anon_vma lock to serialize against concurrent expand_stacks.
2491 anon_vma_lock_write(vma->anon_vma);
2532 anon_vma_unlock_write(vma->anon_vma);
2555 /* Check that both stack segments have the same anon_vma? */
2562 /* We must make sure the anon_vma is allocated. */
2569 * anon_vma lock to serialize against concurrent expand_stacks.
2571 anon_vma_lock_write(vma->anon_vma);
2610 anon_vma_unlock_write(vma->anon_vma);
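
The duplicated matches at 2479-2532 and 2555-2610 are expand_upwards() and expand_downwards(), which share the same anon_vma discipline around growing a stack vma. The common pattern, reconstructed (growth-limit checks and accounting elided):

    /* We must make sure the anon_vma is allocated. */
    if (unlikely(anon_vma_prepare(vma)))
        return -ENOMEM;

    /*
     * vma->vm_start/vm_end cannot change under us because the caller
     * is required to hold the mmap_lock in read mode. We need the
     * anon_vma lock to serialize against concurrent expand_stacks.
     */
    anon_vma_lock_write(vma->anon_vma);

    /* ... validate the new size, then move vm_start or vm_end ... */

    anon_vma_unlock_write(vma->anon_vma);
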
3325 * until its first write fault, when page's anon_vma and index
3336 BUG_ON(vma->anon_vma);
3363 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
3371 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
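
Lines 3325-3336 belong to insert_vm_struct(): a purely anonymous vma that has never write-faulted (so no anon_vma exists yet) gets a vm_pgoff derived from its start address, which keeps the pgoff-based merge checks working and /proc/pid/maps consistent. Lines 3363-3371 are the mirror-image check in copy_vma(), which passes vma->anon_vma straight into vma_merge(). A sketch of the insert_vm_struct() part:

    if (vma_is_anonymous(vma)) {
        /* No write fault yet, so no anon_vma can exist. */
        BUG_ON(vma->anon_vma);
        vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
    }
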
3611 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
3613 if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
3618 down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
3621 * anon_vma->root->rwsem. If some other vma in this mm shares
3622 * the same anon_vma we won't take it again.
3626 * anon_vma->root->rwsem.
3629 &anon_vma->root->rb_root.rb_root.rb_node))
3669 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
3671 * vma in this mm is backed by the same anon_vma or address_space.
3678 * - all anon_vma->rwsem
3717 if (vma->anon_vma)
3719 vm_lock_anon_vma(mm, avc->anon_vma);
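
Lines 3611-3719 are mm_take_all_locks() and its helper vm_lock_anon_vma(). The trick: the LSB of the anon_vma root's rb_root pointer doubles as an "already locked by us" flag, so an anon_vma shared by several vmas in the mm is write-locked exactly once. Reconstructed:

    static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
    {
        if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
            /*
             * The LSB of head.next can't change from under us
             * because we hold the mm_all_locks_mutex.
             */
            down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
            /*
             * We can safely modify head.next after taking the
             * anon_vma->root->rwsem. If some other vma in this mm shares
             * the same anon_vma we won't take it again.
             *
             * No need for atomic instructions here: head.next can't
             * change from under us thanks to the anon_vma->root->rwsem.
             */
            if (__test_and_set_bit(0, (unsigned long *)
                                   &anon_vma->root->rb_root.rb_root.rb_node))
                BUG();
        }
    }

The caller at 3717-3719 walks each vma's anon_vma_chain and applies this helper to every avc->anon_vma.
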
3729 static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
3731 if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
3737 * the vma so the users using the anon_vma->rb_root will
3742 * anon_vma->root->rwsem.
3745 &anon_vma->root->rb_root.rb_root.rb_node))
3747 anon_vma_unlock_write(anon_vma);
3778 if (vma->anon_vma)
3780 vm_unlock_anon_vma(avc->anon_vma);
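
Finally, lines 3729-3780 are the unlock side, vm_unlock_anon_vma() and its caller mm_drop_all_locks(): the bitflag must be cleared before the rwsem is released, so other users of anon_vma->rb_root never observe it. Reconstructed:

    static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
    {
        if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
            /*
             * The LSB of head.next can't change to 0 from under us
             * because we hold the mm_all_locks_mutex.
             *
             * We must clear the bitflag before unlocking the anon_vma,
             * so that users of anon_vma->rb_root never see it set.
             *
             * No need for atomic instructions here: head.next can't
             * change from under us until we release the
             * anon_vma->root->rwsem.
             */
            if (!__test_and_clear_bit(0, (unsigned long *)
                                      &anon_vma->root->rb_root.rb_root.rb_node))
                BUG();
            anon_vma_unlock_write(anon_vma);
        }
    }
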