Lines Matching defs:anon_vma
297 struct anon_vma *anon_vma = vma->anon_vma;
321 if (anon_vma) {
322 anon_vma_lock_read(anon_vma);
325 anon_vma_unlock_read(anon_vma);
342 * vma has some anon_vma assigned, and is already inserted on that
343 * anon_vma's interval trees.
346 * vma must be removed from the anon_vma's interval trees using
353 * the root anon_vma's mutex.
361 anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
370 anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
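The comments at 342-353 describe the protocol behind the two calls at 361 and 370: before a VMA's start, end or pgoff are changed, each of its anon_vma_chain entries is removed from its anon_vma's interval tree and reinserted once the update is done, all under the root anon_vma's lock. A condensed sketch of that unlink/relink pair, assuming kernel headers <linux/mm.h> and <linux/rmap.h>; the helper names below are illustrative:

    #include <linux/mm.h>
    #include <linux/rmap.h>

    /* Drop every interval-tree entry of @vma before its range is edited.
     * Caller must hold the root anon_vma lock for writing. */
    static void pre_update_sketch(struct vm_area_struct *vma)
    {
            struct anon_vma_chain *avc;

            list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
                    anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
    }

    /* Reinsert the entries once vm_start/vm_end/vm_pgoff are consistent again. */
    static void post_update_sketch(struct vm_area_struct *vma)
    {
            struct anon_vma_chain *avc;

            list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
                    anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
    }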
440 vp->anon_vma = vma->anon_vma;
444 if (!vp->anon_vma && next)
445 vp->anon_vma = next->anon_vma;
491 if (vp->anon_vma) {
492 anon_vma_lock_write(vp->anon_vma);
542 if (vp->anon_vma) {
546 anon_vma_unlock_write(vp->anon_vma);
565 if (vp->remove->anon_vma)
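Lines 440-565 are the prepare/complete bracket around VMA tree surgery: the anon_vma to protect is taken from the VMA being modified, falling back to the next VMA when the first has none (440-445), and it is write-locked before the change and released afterwards (491-546). A reduced sketch of that bracket; the vp_sketch holder and its fields are illustrative, not the real prepare-state layout:

    /* Reduced sketch of the lock bracket used while a vma is modified. */
    struct vp_sketch {
            struct vm_area_struct *vma;
            struct vm_area_struct *next;
            struct anon_vma *anon_vma;
    };

    static void vp_lock_sketch(struct vp_sketch *vp)
    {
            vp->anon_vma = vp->vma->anon_vma;
            /* When expanding over next's range, next's anon_vma is the one
             * rmap walkers can race with, so lock that one instead. */
            if (!vp->anon_vma && vp->next)
                    vp->anon_vma = vp->next->anon_vma;
            if (vp->anon_vma)
                    anon_vma_lock_write(vp->anon_vma);
    }

    static void vp_unlock_sketch(struct vp_sketch *vp)
    {
            if (vp->anon_vma)
                    anon_vma_unlock_write(vp->anon_vma);
    }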
589 * dup_anon_vma() - Helper function to duplicate anon_vma
601 * expanding vma has anon_vma set if the shrinking vma had, to cover any
604 if (src->anon_vma && !dst->anon_vma) {
608 dst->anon_vma = src->anon_vma;
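dup_anon_vma() (589-608) is what keeps already-faulted anonymous pages reachable when one VMA grows over a shrinking neighbour: the expanding VMA inherits the shrinking VMA's anon_vma and clones its chain. A hedged reconstruction of the core, with error handling reduced to the anon_vma_clone() return value:

    /* Sketch: propagate src's anon_vma to dst if dst has none yet, and
     * clone the chain so rmap can still reach pages now covered by dst. */
    static int dup_anon_vma_sketch(struct vm_area_struct *dst,
                                   struct vm_area_struct *src)
    {
            if (src->anon_vma && !dst->anon_vma) {
                    dst->anon_vma = src->anon_vma;
                    return anon_vma_clone(dst, src);
            }
            return 0;
    }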
752 static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1,
753 struct anon_vma *anon_vma2, struct vm_area_struct *vma)
757 * parents. This can improve scalability caused by anon_vma lock.
766 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
770 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
780 struct anon_vma *anon_vma, struct file *file,
785 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
793 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
797 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
803 struct anon_vma *anon_vma, struct file *file,
808 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
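The predicate spelled out by the comments at 752-808 is simple: two mappings are anon_vma-compatible if either side has no anon_vma yet, or both point at the same one. The list_is_singular() note at 757 additionally refuses to merge against a VMA whose chain was cloned from a parent, since that would pile more VMAs onto one anon_vma lock. A sketch of that test:

    /* Sketch of the anon_vma compatibility rule behind vma merging. */
    static bool mergeable_anon_vma_sketch(struct anon_vma *a1,
                                          struct anon_vma *a2,
                                          struct vm_area_struct *vma)
    {
            /* NULL on either side is compatible, unless the existing vma
             * carries a forked (multi-entry) anon_vma_chain. */
            if ((!a1 || !a2) &&
                (!vma || list_is_singular(&vma->anon_vma_chain)))
                    return true;
            /* Otherwise both sides must share the same anon_vma. */
            return a1 == a2;
    }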
872 struct anon_vma *anon_vma, struct file *file,
913 && can_vma_merge_after(prev, vm_flags, anon_vma, file,
922 can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen,
943 is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) {
952 if (!next->anon_vma)
993 /* Error in anon_vma clone. */
1011 VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
1012 vp.anon_vma != adjust->anon_vma);
1049 * at sharing an anon_vma.
1054 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
1058 * really matter for the anon_vma sharing case.
1070 * Do some basic sanity checking to see if we can re-use the anon_vma
1073 * to share the anon_vma.
1076 * the anon_vma of 'old' is concurrently in the process of being set up
1083 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
1084 * is to return an anon_vma that is "complex" due to having gone through
1091 static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
1094 struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
1096 if (anon_vma && list_is_singular(&old->anon_vma_chain))
1097 return anon_vma;
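The long comment block at 1049-1097 justifies the reuse test: a neighbour's anon_vma may be handed to a new VMA only when it is already published (READ_ONCE guards against seeing it mid-setup) and the neighbour's anon_vma_chain holds a single entry, i.e. the anon_vma is not a "complex" one inherited across fork. A sketch of that check; the old/a/b compatibility arguments visible at 1091 are omitted here for brevity:

    /* Sketch: return old's anon_vma only if it is set and old's chain is
     * a single self-entry; anything more complex is not worth sharing. */
    static struct anon_vma *reusable_sketch(struct vm_area_struct *old)
    {
            struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);

            if (anon_vma && list_is_singular(&old->anon_vma_chain))
                    return anon_vma;
            return NULL;
    }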
1104 * neighbouring vmas for a suitable anon_vma, before it goes off
1105 * to allocate a new anon_vma. It checks because a repetitive
1110 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
1113 struct anon_vma *anon_vma = NULL;
1119 anon_vma = reusable_anon_vma(next, vma, next);
1120 if (anon_vma)
1121 return anon_vma;
1129 anon_vma = reusable_anon_vma(prev, prev, vma);
1132 * We might reach here with anon_vma == NULL if we can't find
1133 * any reusable anon_vma.
1137 * or lead to too many vmas hanging off the same anon_vma.
1141 return anon_vma;
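find_mergeable_anon_vma() (1110-1141) is the anonymous-fault path that asks the neighbours before allocating a fresh anon_vma: try the following VMA first, then the preceding one, and fall back to NULL so the caller allocates a new one. A condensed sketch built on the reuse check above; the neighbour lookup itself (a VMA iterator in the real code) is passed in here for simplicity:

    /* Sketch: prefer a reusable neighbour anon_vma; NULL means the caller
     * should allocate a fresh one rather than over-share an existing one. */
    static struct anon_vma *find_mergeable_sketch(struct vm_area_struct *prev,
                                                  struct vm_area_struct *next)
    {
            struct anon_vma *anon_vma = NULL;

            if (next)
                    anon_vma = reusable_sketch(next);
            if (!anon_vma && prev)
                    anon_vma = reusable_sketch(prev);

            return anon_vma;
    }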
1357 * Set pgoff according to addr for anon_vma.
2000 /* Check that both stack segments have the same anon_vma? */
2010 /* We must make sure the anon_vma is allocated. */
2021 * anon_vma lock to serialize against concurrent expand_stacks.
2023 anon_vma_lock_write(vma->anon_vma);
2060 anon_vma_unlock_write(vma->anon_vma);
2088 /* Check that both stack segments have the same anon_vma? */
2103 /* We must make sure the anon_vma is allocated. */
2114 * anon_vma lock to serialize against concurrent expand_stacks.
2116 anon_vma_lock_write(vma->anon_vma);
2154 anon_vma_unlock_write(vma->anon_vma);
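Both stack-growth paths at 2000-2154 follow the recipe the comments give: make sure the VMA has an anon_vma before anything else (2010, 2103), then hold its write lock while vm_start or vm_end moves, so a concurrent expand_stack on a sharing VMA cannot interleave (2021, 2114). A reduced sketch of the upward-growth case; the guard-gap, rlimit and accounting checks of the real paths are left out:

    /* Sketch of the shared expansion recipe:
     * 1) ensure an anon_vma exists, 2) write-lock it, 3) grow, 4) unlock. */
    static int expand_sketch(struct vm_area_struct *vma, unsigned long new_end)
    {
            /* May sleep and may fail; must happen before taking the lock. */
            if (unlikely(anon_vma_prepare(vma)))
                    return -ENOMEM;

            anon_vma_lock_write(vma->anon_vma);
            vma->vm_end = new_end;  /* real code also updates stats and trees */
            anon_vma_unlock_write(vma->anon_vma);

            return 0;
    }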
2751 (vma ? can_vma_merge_after(prev, vm_flags, vma->anon_vma, file,
3310 * until its first write fault, when page's anon_vma and index
3321 BUG_ON(vma->anon_vma);
3352 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
3362 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
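The copy_vma-style fragment at 3310-3362 leans on the fact the comment states: a purely anonymous VMA that has never taken a write fault has no anon_vma (and no meaningful index) yet, so it can be relocated by giving it a new offset and attempting a merge with the arguments shown at 3362. The test itself is just:

    /* Sketch of the check at 3352: anonymous and never write-faulted,
     * so no anon_vma/index state pins the vma to its old location. */
    static bool never_faulted_anon_sketch(struct vm_area_struct *vma)
    {
            return vma_is_anonymous(vma) && !vma->anon_vma;
    }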
3624 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
3626 if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
3631 down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
3634 * anon_vma->root->rwsem. If some other vma in this mm shares
3635 * the same anon_vma we won't take it again.
3639 * anon_vma->root->rwsem.
3642 &anon_vma->root->rb_root.rb_root.rb_node))
3682 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
3684 * vma in this mm is backed by the same anon_vma or address_space.
3692 * - all anon_vma->rwseml
3747 if (vma->anon_vma)
3749 vm_lock_anon_vma(mm, avc->anon_vma);
3759 static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
3761 if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
3767 * the vma so the users using the anon_vma->rb_root will
3772 * anon_vma->root->rwsem.
3775 &anon_vma->root->rb_root.rb_root.rb_node))
3777 anon_vma_unlock_write(anon_vma);
3809 if (vma->anon_vma)
3811 vm_unlock_anon_vma(avc->anon_vma);
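The final group (3624-3811) is the mm_take_all_locks()/mm_drop_all_locks() machinery. Each anon_vma root reachable from the mm is write-locked exactly once, nested under mmap_lock, and the low bit of root->rb_root.rb_root.rb_node serves as an "already locked in this pass" marker so a root shared by several VMAs is neither taken twice nor missed on the unlock side. A sketch of that pair, assuming the caller holds mmap_lock for writing and the all-locks mutex as the real code does:

    /* Sketch of per-root once-only locking: the LSB of the interval-tree
     * root pointer marks roots already locked during this pass. */
    static void lock_root_once_sketch(struct mm_struct *mm,
                                      struct anon_vma *anon_vma)
    {
            unsigned long *marker =
                    (unsigned long *)&anon_vma->root->rb_root.rb_root.rb_node;

            if (!test_bit(0, marker)) {
                    /* Nest under mmap_lock, which the caller already holds. */
                    down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
                    /* Safe non-atomically: nobody else can flip this bit
                     * while we hold the root's rwsem. */
                    if (__test_and_set_bit(0, marker))
                            BUG();
            }
    }

    static void unlock_root_once_sketch(struct anon_vma *anon_vma)
    {
            unsigned long *marker =
                    (unsigned long *)&anon_vma->root->rb_root.rb_root.rb_node;

            if (test_bit(0, marker)) {
                    if (!__test_and_clear_bit(0, marker))
                            BUG();
                    anon_vma_unlock_write(anon_vma);
            }
    }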