Lines Matching defs:anon_vma

29  *           anon_vma->rwsem
44 * anon_vma->rwsem, mapping->i_mutex (memory_failure, collect_procs_anon)
86 static inline struct anon_vma *anon_vma_alloc(void)
88 struct anon_vma *anon_vma;
90 anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
91 if (anon_vma) {
92 atomic_set(&anon_vma->refcount, 1);
93 anon_vma->num_children = 0;
94 anon_vma->num_active_vmas = 0;
95 anon_vma->parent = anon_vma;
97 * Initialise the anon_vma root to point to itself. If called
98 * from fork, the root will be reset to the parent's anon_vma.
100 anon_vma->root = anon_vma;
103 return anon_vma;
106 static inline void anon_vma_free(struct anon_vma *anon_vma)
108 VM_BUG_ON(atomic_read(&anon_vma->refcount));
112 * we can safely hold the lock without the anon_vma getting
128 if (rwsem_is_locked(&anon_vma->root->rwsem)) {
129 anon_vma_lock_write(anon_vma);
130 anon_vma_unlock_write(anon_vma);
133 kmem_cache_free(anon_vma_cachep, anon_vma);
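
The lock/unlock pair above (lines 128-130) synchronizes against a racing page_lock_anon_vma_read(), which can still hold the root rwsem after the refcount has dropped; cycling the lock flushes that reader out before the slab object is recycled. For context, a minimal sketch of the refcount pairing that ends in anon_vma_free(), using the get_anon_vma()/put_anon_vma() helpers from include/linux/rmap.h:

    get_anon_vma(anon_vma);   /* pin: extra ref keeps anon_vma_free() away */
    /* ... sleepable work; the anon_vma may outlive its last mapping ... */
    put_anon_vma(anon_vma);   /* unpin: on zero, __put_anon_vma() frees it */
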
148 struct anon_vma *anon_vma)
151 avc->anon_vma = anon_vma;
153 anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
157 * __anon_vma_prepare - attach an anon_vma to a memory region
161 * an 'anon_vma' attached to it, so that we can associate the
162 * anonymous pages mapped into it with that anon_vma.
167 * can re-use the anon_vma from (very common when the only
172 * optimistically looked up an anon_vma in page_lock_anon_vma_read()
175 * anon_vma isn't actually destroyed).
177 * As a result, we need to do proper anon_vma locking even
180 * an anon_vma.
187 struct anon_vma *anon_vma, *allocated;
196 anon_vma = find_mergeable_anon_vma(vma);
198 if (!anon_vma) {
199 anon_vma = anon_vma_alloc();
200 if (unlikely(!anon_vma))
202 anon_vma->num_children++; /* self-parent link for new root */
203 allocated = anon_vma;
206 anon_vma_lock_write(anon_vma);
209 if (likely(!vma->anon_vma)) {
210 vma->anon_vma = anon_vma;
211 anon_vma_chain_link(vma, avc, anon_vma);
212 anon_vma->num_active_vmas++;
217 anon_vma_unlock_write(anon_vma);
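
__anon_vma_prepare() is only the slow path; callers reach it through the anon_vma_prepare() inline in include/linux/rmap.h, which skips everything above when the VMA is already set up. Roughly:

    static inline int anon_vma_prepare(struct vm_area_struct *vma)
    {
            if (likely(vma->anon_vma))
                    return 0;               /* fast path: already attached */

            return __anon_vma_prepare(vma);
    }
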
233 * This is a useful helper function for locking the anon_vma root as
234 * we traverse the vma->anon_vma_chain, looping over anon_vmas that
237 * Such anon_vmas should have the same root, so you'd expect to see
240 static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
242 struct anon_vma *new_root = anon_vma->root;
252 static inline void unlock_anon_vma_root(struct anon_vma *root)
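
anon_vma_clone() and unlink_anon_vmas() below both use this pair to batch the write lock across a whole chain walk, only cycling the rwsem when the root actually changes. The loop shape, sketched:

    struct anon_vma *root = NULL;
    struct anon_vma_chain *avc;

    list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) {
            root = lock_anon_vma_root(root, avc->anon_vma);
            /* ... mutate avc->anon_vma->rb_root under root->rwsem ... */
    }
    unlock_anon_vma_root(root);     /* handles root == NULL (empty chain) */
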
264 * one, anon_vma_fork(), may try to reuse an existing anon_vma to prevent
265 * endless growth of anon_vma. Since dst->anon_vma is set to NULL before the call,
266 * we can identify this case by checking (!dst->anon_vma && src->anon_vma).
268 * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find
269 * and reuse existing anon_vma which has no vmas and only one child anon_vma.
270 * This prevents degradation of the anon_vma hierarchy to an endless linear chain
271 * in case of a constantly forking task. On the other hand, an anon_vma with more
279 struct anon_vma *root = NULL;
282 struct anon_vma *anon_vma;
292 anon_vma = pavc->anon_vma;
293 root = lock_anon_vma_root(root, anon_vma);
294 anon_vma_chain_link(dst, avc, anon_vma);
297 * Reuse existing anon_vma if it has no vma and only one
298 * anon_vma child.
300 * Root anon_vma is never reused:
303 if (!dst->anon_vma && src->anon_vma &&
304 anon_vma->num_children < 2 &&
305 anon_vma->num_active_vmas == 0)
306 dst->anon_vma = anon_vma;
308 if (dst->anon_vma)
309 dst->anon_vma->num_active_vmas++;
315 * dst->anon_vma is dropped here otherwise its num_active_vmas can be incorrectly
318 * about dst->anon_vma if anon_vma_clone() failed.
320 dst->anon_vma = NULL;
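
The reuse test at lines 303-305 can be read as a predicate: only a leaf anon_vma that no longer backs any live VMA may be adopted by the child. Spelled out (anon_vma_reusable() is a hypothetical name, not a kernel helper):

    /*
     * Hypothetical helper restating the reuse rule. The root never
     * qualifies: it carries its self-parent reference (line 202) plus
     * at least one child here, so its num_children is >= 2.
     */
    static bool anon_vma_reusable(const struct anon_vma *av)
    {
            return av->num_children < 2 && av->num_active_vmas == 0;
    }
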
326 * Attach vma to its own anon_vma, as well as to the anon_vmas that
333 struct anon_vma *anon_vma;
336 /* Don't bother if the parent process has no anon_vma here. */
337 if (!pvma->anon_vma)
340 /* Drop inherited anon_vma, we'll reuse existing or allocate new. */
341 vma->anon_vma = NULL;
351 /* An existing anon_vma has been reused, all done then. */
352 if (vma->anon_vma)
355 /* Then add our own anon_vma. */
356 anon_vma = anon_vma_alloc();
357 if (!anon_vma)
359 anon_vma->num_active_vmas++;
365 * The root anon_vma's spinlock is the lock actually used when we
366 * lock any of the anon_vmas in this anon_vma tree.
368 anon_vma->root = pvma->anon_vma->root;
369 anon_vma->parent = pvma->anon_vma;
371 * With refcounts, an anon_vma can stay around longer than the
372 * process it belongs to. The root anon_vma needs to be pinned until
373 * this anon_vma is freed, because the lock lives in the root.
375 get_anon_vma(anon_vma->root);
376 /* Mark this anon_vma as the one where our new (COWed) pages go. */
377 vma->anon_vma = anon_vma;
378 anon_vma_lock_write(anon_vma);
379 anon_vma_chain_link(vma, avc, anon_vma);
380 anon_vma->parent->num_children++;
381 anon_vma_unlock_write(anon_vma);
386 put_anon_vma(anon_vma);
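
Taken together, a successful anon_vma_fork(vma, pvma) leaves the child with a private anon_vma that shares the parent's root lock. As illustrative (not in-tree) assertions:

    VM_WARN_ON(vma->anon_vma == pvma->anon_vma);             /* child owns its own */
    VM_WARN_ON(vma->anon_vma->parent != pvma->anon_vma);     /* hierarchy link */
    VM_WARN_ON(vma->anon_vma->root != pvma->anon_vma->root); /* one rwsem per tree */
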
395 struct anon_vma *root = NULL;
398 * Unlink each anon_vma chained to the VMA. This list is ordered
399 * from newest to oldest, ensuring the root anon_vma gets freed last.
402 struct anon_vma *anon_vma = avc->anon_vma;
404 root = lock_anon_vma_root(root, anon_vma);
405 anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);
411 if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
412 anon_vma->parent->num_children--;
419 if (vma->anon_vma) {
420 vma->anon_vma->num_active_vmas--;
423 * vma would still be needed after unlink, and anon_vma will be prepared
426 vma->anon_vma = NULL;
433 * needing to write-acquire the anon_vma->root->rwsem.
436 struct anon_vma *anon_vma = avc->anon_vma;
438 VM_WARN_ON(anon_vma->num_children);
439 VM_WARN_ON(anon_vma->num_active_vmas);
440 put_anon_vma(anon_vma);
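
Note the split in unlink_anon_vmas(): the interval-tree removals ran in a first loop under the batched root lock, so this second loop can drop refcounts with no rwsem held; the anon_vmas are no longer reachable through any tree. The surrounding loop, reconstructed as a sketch from the fragments above:

    struct anon_vma_chain *avc, *next;

    list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
            struct anon_vma *anon_vma = avc->anon_vma;

            VM_WARN_ON(anon_vma->num_children);
            VM_WARN_ON(anon_vma->num_active_vmas);
            put_anon_vma(anon_vma);         /* may free via __put_anon_vma() */

            list_del(&avc->same_vma);
            anon_vma_chain_free(avc);
    }
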
449 struct anon_vma *anon_vma = data;
451 init_rwsem(&anon_vma->rwsem);
452 atomic_set(&anon_vma->refcount, 0);
453 anon_vma->rb_root = RB_ROOT_CACHED;
458 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
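
The kmem_cache_create() call at line 458 is truncated by the match; the flags it passes are what license the RCU games described below. In recent kernels it reads approximately:

    anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
                    0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
                    anon_vma_ctor);
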
466 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
469 * the best this function can do is return a locked anon_vma that might
472 * The page might have been remapped to a different anon_vma or the anon_vma
475 * In case it was remapped to a different anon_vma, the new anon_vma will be a
476 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
477 * ensure that any anon_vma obtained from the page will still be valid for as
480 * All users of this function must be very careful when walking the anon_vma
484 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
485 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
486 * if there is a mapcount, we can dereference the anon_vma after observing
489 struct anon_vma *page_get_anon_vma(struct page *page)
491 struct anon_vma *anon_vma = NULL;
501 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
502 if (!atomic_inc_not_zero(&anon_vma->refcount)) {
503 anon_vma = NULL;
508 * If this page is still mapped, then its anon_vma cannot have been
510 * anon_vma structure being freed and reused (for another anon_vma:
516 put_anon_vma(anon_vma);
522 return anon_vma;
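
The returned anon_vma is pinned but not locked: sleeping is fine, yet walking the tree still needs the read lock. A typical caller shape, sketched:

    struct anon_vma *av = page_get_anon_vma(page);

    if (!av)
            return;                 /* page is not (or no longer) anon mapped */
    anon_vma_lock_read(av);         /* the refcount pin lets us block here */
    /* ... walk av->rb_root ... */
    anon_vma_unlock_read(av);
    put_anon_vma(av);
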
526 * Similar to page_get_anon_vma() except it locks the anon_vma.
532 struct anon_vma *page_lock_anon_vma_read(struct page *page)
534 struct anon_vma *anon_vma = NULL;
535 struct anon_vma *root_anon_vma;
545 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
546 root_anon_vma = READ_ONCE(anon_vma->root);
549 * If the page is still mapped, then this anon_vma is still
550 * its anon_vma, and holding the rwsem ensures that it will
555 anon_vma = NULL;
561 if (!atomic_inc_not_zero(&anon_vma->refcount)) {
562 anon_vma = NULL;
568 put_anon_vma(anon_vma);
572 /* we pinned the anon_vma, it's safe to sleep */
574 anon_vma_lock_read(anon_vma);
576 if (atomic_dec_and_test(&anon_vma->refcount)) {
582 anon_vma_unlock_read(anon_vma);
583 __put_anon_vma(anon_vma);
584 anon_vma = NULL;
587 return anon_vma;
591 return anon_vma;
594 void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
596 anon_vma_unlock_read(anon_vma);
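
Matching usage, sketched: page_lock_anon_vma_read() hands the anon_vma back already read-locked (via a trylock fast path when the root rwsem is uncontended), so the pairing is simpler than with page_get_anon_vma():

    struct anon_vma *av = page_lock_anon_vma_read(page);

    if (!av)
            return;                 /* unmapped, or the anon_vma went away */
    /* ... read-side rmap work under av->root->rwsem ... */
    page_unlock_anon_vma_read(av);
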
715 struct anon_vma *page__anon_vma = page_anon_vma(page);
718 * check, and needs it to match anon_vma when KSM is active.
720 if (!vma->anon_vma || !page__anon_vma ||
721 vma->anon_vma->root != page__anon_vma->root)
755 * without holding anon_vma lock for write. So when looking for a
1018 * page_move_anon_rmap - move a page to our anon_vma
1019 * @page: the page to move to our anon_vma
1023 * that page can be moved into the anon_vma that belongs to just that
1029 struct anon_vma *anon_vma = vma->anon_vma;
1034 VM_BUG_ON_VMA(!anon_vma, vma);
1036 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1038 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
1042 WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
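
page->mapping of an anonymous page is thus a tagged pointer: the anon_vma address with PAGE_MAPPING_ANON set in the low bits. The read side (cf. the open-coded decodes at lines 501 and 545) inverts it, roughly:

    unsigned long mapping = (unsigned long)READ_ONCE(page->mapping);
    struct anon_vma *anon_vma = NULL;

    /* KSM and movable pages set additional PAGE_MAPPING_FLAGS bits
     * and must not be decoded as a plain anon_vma pointer.
     */
    if ((mapping & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_ANON)
            anon_vma = (struct anon_vma *)(mapping - PAGE_MAPPING_ANON);
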
1055 struct anon_vma *anon_vma = vma->anon_vma;
1057 BUG_ON(!anon_vma);
1064 * we must use the _oldest_ possible anon_vma for the
1068 anon_vma = anon_vma->root;
1070 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1071 page->mapping = (struct address_space *) anon_vma;
1096 VM_BUG_ON_PAGE(page_anon_vma(page)->root != vma->anon_vma->root, page);
1109 * the anon_vma case: to serialize mapping,index checking after setting,
1801 * The VMA is moved under the anon_vma lock but not the
1850 void __put_anon_vma(struct anon_vma *anon_vma)
1852 struct anon_vma *root = anon_vma->root;
1854 anon_vma_free(anon_vma);
1855 if (root != anon_vma && atomic_dec_and_test(&root->refcount))
1859 static struct anon_vma *rmap_walk_anon_lock(struct page *page,
1862 struct anon_vma *anon_vma;
1871 * take a reference count to prevent the anon_vma disappearing
1873 anon_vma = page_anon_vma(page);
1874 if (!anon_vma)
1877 anon_vma_lock_read(anon_vma);
1878 return anon_vma;
1888 * contained in the anon_vma struct it points to.
1898 struct anon_vma *anon_vma;
1903 anon_vma = page_anon_vma(page);
1904 /* anon_vma disappear under us? */
1905 VM_BUG_ON_PAGE(!anon_vma, page);
1907 anon_vma = rmap_walk_anon_lock(page, rwc);
1909 if (!anon_vma)
1914 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
1932 anon_vma_unlock_read(anon_vma);
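
The loop body elided between lines 1914 and 1932 follows the standard rmap_walk shape: compute the page's index range, then visit every VMA whose interval covers it, driven by the rmap_walk_control callbacks. Sketched:

    pgoff_t pgoff_start = page_to_pgoff(page);
    pgoff_t pgoff_end = pgoff_start + thp_nr_pages(page) - 1;

    anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
                                   pgoff_start, pgoff_end) {
            struct vm_area_struct *vma = avc->vma;
            unsigned long address = vma_address(page, vma);

            cond_resched();
            if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
                    continue;
            if (!rwc->rmap_one(page, vma, address, rwc->arg))
                    break;
            if (rwc->done && rwc->done(page))
                    break;
    }
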
2021 struct anon_vma *anon_vma = vma->anon_vma;
2025 BUG_ON(!anon_vma);