Lines Matching defs:anon_vma
30 * anon_vma->rwsem
45 * anon_vma->rwsem, mapping->i_mmap_rwsem (memory_failure, collect_procs_anon)
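All of the anon_vma lock/unlock helpers referenced in this ordering act on the rwsem of the tree's root anon_vma rather than the local one, which is why a single anon_vma->rwsem entry covers the whole tree. A sketch paraphrased from include/linux/rmap.h (exact bodies may vary by kernel version):

static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
	down_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
{
	down_read(&anon_vma->root->rwsem);
}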
91 static inline struct anon_vma *anon_vma_alloc(void)
93 struct anon_vma *anon_vma;
95 anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
96 if (anon_vma) {
97 atomic_set(&anon_vma->refcount, 1);
98 anon_vma->num_children = 0;
99 anon_vma->num_active_vmas = 0;
100 anon_vma->parent = anon_vma;
102 * Initialise the anon_vma root to point to itself. If called
103 * from fork, the root will be reset to the parent's anon_vma.
105 anon_vma->root = anon_vma;
108 return anon_vma;
111 static inline void anon_vma_free(struct anon_vma *anon_vma)
113 VM_BUG_ON(atomic_read(&anon_vma->refcount));
117 * we can safely hold the lock without the anon_vma getting
133 if (rwsem_is_locked(&anon_vma->root->rwsem)) {
134 anon_vma_lock_write(anon_vma);
135 anon_vma_unlock_write(anon_vma);
138 kmem_cache_free(anon_vma_cachep, anon_vma);
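The rwsem_is_locked() check above synchronises with a reader that grabbed the root rwsem via down_read_trylock() just before the refcount dropped to zero. A condensed sketch of that reader side (from folio_lock_anon_vma_read(), which appears in full further down):

	root_anon_vma = READ_ONCE(anon_vma->root);
	if (down_read_trylock(&root_anon_vma->rwsem)) {
		/*
		 * anon_vma_free() will take and release this rwsem
		 * before handing the object back to the slab, so the
		 * memory stays valid while we hold the read lock.
		 */
		if (!folio_mapped(folio)) {
			up_read(&root_anon_vma->rwsem);
			anon_vma = NULL;
		}
	}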
153 struct anon_vma *anon_vma)
156 avc->anon_vma = anon_vma;
158 anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
162 * __anon_vma_prepare - attach an anon_vma to a memory region
166 * an 'anon_vma' attached to it, so that we can associate the
167 * anonymous pages mapped into it with that anon_vma.
172 * can re-use the anon_vma from (very common when the only
177 * optimistically looked up an anon_vma in folio_lock_anon_vma_read()
180 * anon_vma isn't actually destroyed).
182 * As a result, we need to do proper anon_vma locking even
185 * an anon_vma.
192 struct anon_vma *anon_vma, *allocated;
201 anon_vma = find_mergeable_anon_vma(vma);
203 if (!anon_vma) {
204 anon_vma = anon_vma_alloc();
205 if (unlikely(!anon_vma))
207 anon_vma->num_children++; /* self-parent link for new root */
208 allocated = anon_vma;
211 anon_vma_lock_write(anon_vma);
214 if (likely(!vma->anon_vma)) {
215 vma->anon_vma = anon_vma;
216 anon_vma_chain_link(vma, avc, anon_vma);
217 anon_vma->num_active_vmas++;
222 anon_vma_unlock_write(anon_vma);
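Callers reach __anon_vma_prepare() through the anon_vma_prepare() fast-path wrapper in include/linux/rmap.h, typically on the first anonymous fault. A hedged sketch of the pattern (modelled on do_anonymous_page(), error handling trimmed):

static inline int anon_vma_prepare(struct vm_area_struct *vma)
{
	if (likely(vma->anon_vma))
		return 0;

	return __anon_vma_prepare(vma);
}

	/* in the fault path: */
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;
	/* vma->anon_vma is now valid for rmap-linking new pages */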
238 * This is a useful helper function for locking the anon_vma root as
239 * we traverse the vma->anon_vma_chain, looping over anon_vmas that
242 * Such anon_vmas should have the same root, so you'd expect to see
245 static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
247 struct anon_vma *new_root = anon_vma->root;
257 static inline void unlock_anon_vma_root(struct anon_vma *root)
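Together these helpers bracket a chain traversal: because every anon_vma on a VMA's chain shares one root, the root rwsem is taken once up front and only retaken if an unexpected root shows up. The idiom, sketched here, has the same shape as anon_vma_clone() and unlink_anon_vmas() in this file:

	struct anon_vma *root = NULL;
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) {
		root = lock_anon_vma_root(root, avc->anon_vma);
		/* ... operate on avc->anon_vma->rb_root ... */
	}
	unlock_anon_vma_root(root);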
269 * while the last one, anon_vma_fork(), may try to reuse an existing anon_vma to
270 * prevent endless growth of the anon_vma hierarchy. Since dst->anon_vma is set to
271 * NULL before the call, we can identify this case by checking (!dst->anon_vma &&
272 * src->anon_vma).
274 * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find
275 * and reuse existing anon_vma which has no vmas and only one child anon_vma.
276 * This prevents the anon_vma hierarchy from degrading into an endless linear
277 * chain under a constantly forking task. On the other hand, an anon_vma with more
285 struct anon_vma *root = NULL;
288 struct anon_vma *anon_vma;
298 anon_vma = pavc->anon_vma;
299 root = lock_anon_vma_root(root, anon_vma);
300 anon_vma_chain_link(dst, avc, anon_vma);
303 * Reuse existing anon_vma if it has no vma and only one
304 * anon_vma child.
306 * Root anon_vma is never reused:
309 if (!dst->anon_vma && src->anon_vma &&
310 anon_vma->num_children < 2 &&
311 anon_vma->num_active_vmas == 0)
312 dst->anon_vma = anon_vma;
314 if (dst->anon_vma)
315 dst->anon_vma->num_active_vmas++;
321 * dst->anon_vma is dropped here, otherwise its num_active_vmas can
324 * about dst->anon_vma if anon_vma_clone() failed.
326 dst->anon_vma = NULL;
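The reuse condition above can be read as a predicate. The helper below is hypothetical, written only to restate the test; it does not exist in the kernel source:

/* hypothetical: an anon_vma may be handed to the child when no VMA
 * uses it for new pages and it has at most one child of its own */
static bool anon_vma_is_reusable(struct anon_vma *anon_vma)
{
	return anon_vma->num_active_vmas == 0 &&
	       anon_vma->num_children < 2;
}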
332 * Attach vma to its own anon_vma, as well as to the anon_vmas that
339 struct anon_vma *anon_vma;
342 /* Don't bother if the parent process has no anon_vma here. */
343 if (!pvma->anon_vma)
346 /* Drop inherited anon_vma, we'll reuse existing or allocate new. */
347 vma->anon_vma = NULL;
357 /* An existing anon_vma has been reused, all done then. */
358 if (vma->anon_vma)
361 /* Then add our own anon_vma. */
362 anon_vma = anon_vma_alloc();
363 if (!anon_vma)
365 anon_vma->num_active_vmas++;
371 * The root anon_vma's rwsem is the lock actually used when we
372 * lock any of the anon_vmas in this anon_vma tree.
374 anon_vma->root = pvma->anon_vma->root;
375 anon_vma->parent = pvma->anon_vma;
377 * With refcounts, an anon_vma can stay around longer than the
378 * process it belongs to. The root anon_vma needs to be pinned until
379 * this anon_vma is freed, because the lock lives in the root.
381 get_anon_vma(anon_vma->root);
382 /* Mark this anon_vma as the one where our new (COWed) pages go. */
383 vma->anon_vma = anon_vma;
384 anon_vma_lock_write(anon_vma);
385 anon_vma_chain_link(vma, avc, anon_vma);
386 anon_vma->parent->num_children++;
387 anon_vma_unlock_write(anon_vma);
392 put_anon_vma(anon_vma);
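anon_vma_fork() is driven from the fork path; a condensed sketch of the call site (roughly as in kernel/fork.c's dup_mmap(), surrounding details omitted):

	tmp = vm_area_dup(mpnt);	/* child's copy of the parent VMA */
	if (!tmp)
		goto fail_nomem;
	if (anon_vma_fork(tmp, mpnt))	/* mpnt is the parent VMA */
		goto fail_nomem_anon_vma_fork;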
401 struct anon_vma *root = NULL;
404 * Unlink each anon_vma chained to the VMA. This list is ordered
405 * from newest to oldest, ensuring the root anon_vma gets freed last.
408 struct anon_vma *anon_vma = avc->anon_vma;
410 root = lock_anon_vma_root(root, anon_vma);
411 anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);
417 if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
418 anon_vma->parent->num_children--;
425 if (vma->anon_vma) {
426 vma->anon_vma->num_active_vmas--;
429 * vma would still be needed after unlink, and anon_vma will be prepared
432 vma->anon_vma = NULL;
439 * needing to write-acquire the anon_vma->root->rwsem.
442 struct anon_vma *anon_vma = avc->anon_vma;
444 VM_WARN_ON(anon_vma->num_children);
445 VM_WARN_ON(anon_vma->num_active_vmas);
446 put_anon_vma(anon_vma);
455 struct anon_vma *anon_vma = data;
457 init_rwsem(&anon_vma->rwsem);
458 atomic_set(&anon_vma->refcount, 0);
459 anon_vma->rb_root = RB_ROOT_CACHED;
464 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
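The matched line is cut off at the grep window; in current kernels the call continues roughly as below. SLAB_TYPESAFE_BY_RCU is the flag that makes the RCU-protected lookups further down legal, since a freed anon_vma can only be recycled as another anon_vma:

	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
			anon_vma_ctor);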
472 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
475 * the best this function can do is return a refcount-increased anon_vma
478 * The page might have been remapped to a different anon_vma or the anon_vma
481 * In case it was remapped to a different anon_vma, the new anon_vma will be a
482 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
483 * ensure that any anon_vma obtained from the page will still be valid for as
486 * All users of this function must be very careful when walking the anon_vma
490 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
491 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
492 * if there is a mapcount, we can dereference the anon_vma after observing
495 struct anon_vma *folio_get_anon_vma(struct folio *folio)
497 struct anon_vma *anon_vma = NULL;
507 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
508 if (!atomic_inc_not_zero(&anon_vma->refcount)) {
509 anon_vma = NULL;
514 * If this folio is still mapped, then its anon_vma cannot have been
516 * anon_vma structure being freed and reused (for another anon_vma:
522 put_anon_vma(anon_vma);
528 return anon_vma;
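A hedged usage sketch: the returned anon_vma is pinned but not locked, so a caller that needs the lock takes it itself and drops the pin afterwards (folio_lock_anon_vma_read() below implements a variant of this internally):

	struct anon_vma *anon_vma = folio_get_anon_vma(folio);

	if (anon_vma) {
		anon_vma_lock_read(anon_vma);	/* pinned, so safe to sleep here */
		/* ... inspect rmap state stabilised by the lock ... */
		anon_vma_unlock_read(anon_vma);
		put_anon_vma(anon_vma);
	}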
532 * Similar to folio_get_anon_vma() except it locks the anon_vma.
539 struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
542 struct anon_vma *anon_vma = NULL;
543 struct anon_vma *root_anon_vma;
553 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
554 root_anon_vma = READ_ONCE(anon_vma->root);
557 * If the folio is still mapped, then this anon_vma is still
558 * its anon_vma, and holding the mutex ensures that it will
563 anon_vma = NULL;
569 anon_vma = NULL;
575 if (!atomic_inc_not_zero(&anon_vma->refcount)) {
576 anon_vma = NULL;
582 put_anon_vma(anon_vma);
586 /* we pinned the anon_vma, it's safe to sleep */
588 anon_vma_lock_read(anon_vma);
590 if (atomic_dec_and_test(&anon_vma->refcount)) {
596 anon_vma_unlock_read(anon_vma);
597 __put_anon_vma(anon_vma);
598 anon_vma = NULL;
601 return anon_vma;
605 return anon_vma;
750 struct anon_vma *page__anon_vma = folio_anon_vma(folio);
753 * check, and needs it to match anon_vma when KSM is active.
755 if (!vma->anon_vma || !page__anon_vma ||
756 vma->anon_vma->root != page__anon_vma->root)
1108 * page_move_anon_rmap - move a page to our anon_vma
1109 * @page: the page to move to our anon_vma
1113 * that page can be moved into the anon_vma that belongs to just that
1119 void *anon_vma = vma->anon_vma;
1123 VM_BUG_ON_VMA(!anon_vma, vma);
1125 anon_vma += PAGE_MAPPING_ANON;
1127 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
1131 WRITE_ONCE(folio->mapping, anon_vma);
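The PAGE_MAPPING_ANON tag written into folio->mapping here is what folio_anon_vma() strips off again on the read side; roughly (paraphrasing the helper in mm/util.c, whose exact flag checks vary by version):

struct anon_vma *folio_anon_vma(struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return (void *)(mapping - PAGE_MAPPING_ANON);
}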
1146 struct anon_vma *anon_vma = vma->anon_vma;
1148 BUG_ON(!anon_vma);
1155 * we must use the _oldest_ possible anon_vma for the
1159 anon_vma = anon_vma->root;
1163 * Make sure the compiler doesn't split the stores of anon_vma and
1167 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1168 WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma);
1196 VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root,
1210 * the anon_vma case: to serialize mapping,index checking after setting,
2187 * The VMA is moved under the anon_vma lock but not the
2389 void __put_anon_vma(struct anon_vma *anon_vma)
2391 struct anon_vma *root = anon_vma->root;
2393 anon_vma_free(anon_vma);
2394 if (root != anon_vma && atomic_dec_and_test(&root->refcount))
2398 static struct anon_vma *rmap_walk_anon_lock(struct folio *folio,
2401 struct anon_vma *anon_vma;
2410 * take a reference count to prevent the anon_vma disappearing
2412 anon_vma = folio_anon_vma(folio);
2413 if (!anon_vma)
2416 if (anon_vma_trylock_read(anon_vma))
2420 anon_vma = NULL;
2425 anon_vma_lock_read(anon_vma);
2427 return anon_vma;
2438 * chains contained in the anon_vma struct it points to.
2443 struct anon_vma *anon_vma;
2448 anon_vma = folio_anon_vma(folio);
2449 /* did the anon_vma disappear under us? */
2450 VM_BUG_ON_FOLIO(!anon_vma, folio);
2452 anon_vma = rmap_walk_anon_lock(folio, rwc);
2454 if (!anon_vma)
2459 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
2477 anon_vma_unlock_read(anon_vma);
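Users of the walk supply their per-VMA callback through a struct rmap_walk_control; a representative caller, condensed from try_to_unmap() in this same file:

	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)flags,
		.done = folio_not_mapped,
		.anon_lock = folio_lock_anon_vma_read,
	};

	rmap_walk(folio, &rwc);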
2575 struct anon_vma *anon_vma = vma->anon_vma;
2579 BUG_ON(!anon_vma);