Lines Matching refs:stable_node (mm/ksm.c)
85 * using the same struct stable_node structure.
145 * struct stable_node - node of the stable rbtree
148 * @hlist_dup: linked into the stable_node->hlist with a stable_node chain
156 struct stable_node {
193 * @head: pointer to stable_node heading this list in the stable tree
194 * @hlist: link into hlist of rmap_items hanging off that stable_node
210 struct stable_node *head;
258 /* The number of stable_node chains */
261 /* The number of stable_node dups linked to the stable_node chains */
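The lines above (85-261) sketch the stable tree's two-level layout: a regular stable_node sits directly in the rbtree, while a "chain" node heads an hlist of "dup" nodes, each dup tracking one KSM page plus the rmap_items mapping it; the counters at 258-261 tally chains and dups. A minimal userspace sketch of that shape, with singly linked lists standing in for the kernel's hlist and the rbtree linkage omitted:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical flattened model of mm/ksm.c's two-level stable tree:
     * a chain heads a list of dups; each dup counts its rmap_items. */
    struct stable_node {
        struct stable_node *next_dup;  /* sibling dup on the chain (hlist_dup) */
        struct stable_node *dups;      /* first dup, when this node is a chain */
        int rmap_hlist_len;            /* mappings sharing this dup's KSM page */
    };

    static struct stable_node *new_node(int len)
    {
        struct stable_node *n = calloc(1, sizeof(*n));
        n->rmap_hlist_len = len;
        return n;
    }

    int main(void)
    {
        struct stable_node *chain = new_node(0);
        struct stable_node *a = new_node(3), *b = new_node(1);

        /* mirrors stable_node_chain_add_dup(): push dups onto the chain */
        a->next_dup = chain->dups; chain->dups = a;
        b->next_dup = chain->dups; chain->dups = b;

        int total = 0;
        for (struct stable_node *d = chain->dups; d; d = d->next_dup)
            total += d->rmap_hlist_len;
        printf("chain tracks %d shared mappings across its dups\n", total);
        return 0;
    }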
313 stable_node_cache = KSM_KMEM_CACHE(stable_node, 0);
339 static __always_inline bool is_stable_node_chain(struct stable_node *chain)
344 static __always_inline bool is_stable_node_dup(struct stable_node *dup)
349 static inline void stable_node_chain_add_dup(struct stable_node *dup,
350 struct stable_node *chain)
359 static inline void __stable_node_dup_del(struct stable_node *dup)
366 static inline void stable_node_dup_del(struct stable_node *dup)
396 static inline struct stable_node *alloc_stable_node(void)
406 static inline void free_stable_node(struct stable_node *stable_node)
408 VM_BUG_ON(stable_node->rmap_hlist_len &&
409 !is_stable_node_chain(stable_node));
410 kmem_cache_free(stable_node_cache, stable_node);
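The predicates at 339 and 344 tell chain nodes and dup nodes apart without a dedicated flag field: the kernel encodes the role in existing fields via sentinel values (a chain stores an impossible rmap_hlist_len, a dup's head points at a sentinel address). A sketch of that idiom; the sentinel values below echo the ones mm/ksm.c uses but are otherwise illustrative:

    #include <stdio.h>
    #include <stdbool.h>

    #define STABLE_NODE_CHAIN (-1024)        /* impossible rmap_hlist_len */
    static int migrate_nodes_anchor;         /* stand-in for &migrate_nodes */
    #define STABLE_NODE_DUP_HEAD ((void *)(&migrate_nodes_anchor + 1))

    struct stable_node {
        void *head;          /* list the node is on, or the dup sentinel */
        int rmap_hlist_len;  /* real count, or STABLE_NODE_CHAIN */
    };

    static bool is_stable_node_chain(struct stable_node *n)
    {
        return n->rmap_hlist_len == STABLE_NODE_CHAIN;
    }

    static bool is_stable_node_dup(struct stable_node *n)
    {
        return n->head == STABLE_NODE_DUP_HEAD;
    }

    int main(void)
    {
        struct stable_node chain = { .rmap_hlist_len = STABLE_NODE_CHAIN };
        struct stable_node dup = { .head = STABLE_NODE_DUP_HEAD,
                                   .rmap_hlist_len = 2 };

        printf("chain? %d  dup? %d\n",
               is_stable_node_chain(&chain), is_stable_node_dup(&dup));
        return 0;
    }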
592 static struct stable_node *alloc_stable_node_chain(struct stable_node *dup,
595 struct stable_node *chain = alloc_stable_node();
625 static inline void free_stable_node_chain(struct stable_node *chain,
633 static void remove_node_from_stable_tree(struct stable_node *stable_node)
638 BUG_ON(stable_node->rmap_hlist_len < 0);
640 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
645 VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
646 stable_node->rmap_hlist_len--;
664 if (stable_node->head == &migrate_nodes)
665 list_del(&stable_node->list);
667 stable_node_dup_del(stable_node);
668 free_stable_node(stable_node);
685 * You would expect the stable_node to hold a reference to the ksm page.
696 static struct page *get_ksm_page(struct stable_node *stable_node,
703 expected_mapping = (void *)((unsigned long)stable_node |
706 kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */
765 if (READ_ONCE(stable_node->kpfn) != kpfn)
767 remove_node_from_stable_tree(stable_node);
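The comment at 685 states the key design point: a stable_node does not pin its KSM page. It records only the pfn (706), and get_ksm_page() re-validates by recomputing the tagged back-pointer it expects in page->mapping (703) and comparing; on mismatch the node is stale and is pruned (765-767). A userspace sketch of that validation, with a toy pfn-to-page array and an illustrative tag value:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_MAPPING_KSM 2UL    /* illustrative low tag bits */
    #define NPAGES 4

    struct page { uintptr_t mapping; };
    struct stable_node { unsigned long kpfn; };

    static struct page mem[NPAGES];  /* toy pfn -> struct page array */

    /* Sketch of get_ksm_page()'s core check: the stable tree pins nothing,
     * so look the page up by pfn and verify its mapping still carries the
     * tagged back-pointer to this stable_node; otherwise it is stale. */
    static struct page *get_ksm_page(struct stable_node *sn)
    {
        uintptr_t expected = (uintptr_t)sn | PAGE_MAPPING_KSM;
        struct page *page = &mem[sn->kpfn];

        return page->mapping == expected ? page : NULL;
    }

    int main(void)
    {
        struct stable_node sn = { .kpfn = 1 };

        mem[1].mapping = (uintptr_t)&sn | PAGE_MAPPING_KSM;
        printf("live:  %p\n", (void *)get_ksm_page(&sn));
        mem[1].mapping = 0;          /* page freed/reused behind our back */
        printf("stale: %p\n", (void *)get_ksm_page(&sn));
        return 0;
    }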
778 struct stable_node *stable_node;
781 stable_node = rmap_item->head;
782 page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
790 if (!hlist_empty(&stable_node->hlist))
794 VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
795 stable_node->rmap_hlist_len--;
863 static inline struct stable_node *page_stable_node(struct page *page)
869 struct stable_node *stable_node)
871 page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
878 static int remove_stable_node(struct stable_node *stable_node)
883 page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
907 remove_node_from_stable_tree(stable_node);
916 static int remove_stable_node_chain(struct stable_node *stable_node,
919 struct stable_node *dup;
922 if (!is_stable_node_chain(stable_node)) {
923 VM_BUG_ON(is_stable_node_dup(stable_node));
924 if (remove_stable_node(stable_node))
931 &stable_node->hlist, hlist_dup) {
936 BUG_ON(!hlist_empty(&stable_node->hlist));
937 free_stable_node_chain(stable_node, root);
943 struct stable_node *stable_node, *next;
949 stable_node = rb_entry(root_stable_tree[nid].rb_node,
950 struct stable_node, node);
951 if (remove_stable_node_chain(stable_node,
959 list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
960 if (remove_stable_node(stable_node))
1244 * PageAnon+anon_vma to PageKsm+NULL stable_node:
1245 * stable_tree_insert() will update stable_node.
1341 bool __is_page_sharing_candidate(struct stable_node *stable_node, int offset)
1343 VM_BUG_ON(stable_node->rmap_hlist_len < 0);
1347 * stable_node, as the underlying tree_page of the other
1350 return stable_node->rmap_hlist_len &&
1351 stable_node->rmap_hlist_len + offset < ksm_max_page_sharing;
1355 bool is_page_sharing_candidate(struct stable_node *stable_node)
1357 return __is_page_sharing_candidate(stable_node, 0);
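Lines 1341-1357 implement the dedup-factor limit: __is_page_sharing_candidate() at 1350-1351 accepts a dup only if it still has at least one mapping and taking `offset` more sharers would keep it under ksm_max_page_sharing. A sketch of that arithmetic (256 is the sysfs default in mainline):

    #include <stdio.h>
    #include <stdbool.h>

    static int ksm_max_page_sharing = 256;   /* default sysfs value */

    /* Sketch of __is_page_sharing_candidate(): a dup can take more
     * sharers only if it still has at least one mapping and would stay
     * under the limit after adding `offset` more. */
    static bool is_candidate(int rmap_hlist_len, int offset)
    {
        return rmap_hlist_len &&
               rmap_hlist_len + offset < ksm_max_page_sharing;
    }

    int main(void)
    {
        printf("%d\n", is_candidate(255, 0));  /* 1: room for one more */
        printf("%d\n", is_candidate(256, 0));  /* 0: already at the cap */
        printf("%d\n", is_candidate(0, 0));    /* 0: last mapping leaving */
        return 0;
    }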
1360 static struct page *stable_node_dup(struct stable_node **_stable_node_dup,
1361 struct stable_node **_stable_node,
1365 struct stable_node *dup, *found = NULL, *stable_node = *_stable_node;
1372 time_before(jiffies, stable_node->chain_prune_time +
1377 stable_node->chain_prune_time = jiffies;
1380 &stable_node->hlist, hlist_dup) {
1387 * stable_node->hlist if they point to freed pages
1389 * stable_node parameter itself will be freed from
1428 BUG_ON(stable_node->hlist.first->next);
1434 rb_replace_node(&stable_node->node, &found->node,
1436 free_stable_node(stable_node);
1440 * NOTE: the caller depends on the stable_node
1446 * Just for robustness, as stable_node is
1451 stable_node = NULL;
1452 } else if (stable_node->hlist.first != &found->hlist_dup &&
1455 * If the found stable_node dup can accept one
1471 &stable_node->hlist);
1479 static struct stable_node *stable_node_dup_any(struct stable_node *stable_node,
1482 if (!is_stable_node_chain(stable_node))
1483 return stable_node;
1484 if (hlist_empty(&stable_node->hlist)) {
1485 free_stable_node_chain(stable_node, root);
1488 return hlist_entry(stable_node->hlist.first,
1489 typeof(*stable_node), hlist_dup);
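stable_node_dup() (1360 onwards) walks every dup on a chain, skips stale ones whose page has vanished (1382-1389), and prefers the fullest dup that can still accept sharers, so merges pack into as few KSM pages as possible; stable_node_dup_any() (1479) is the cheap fallback that grabs any dup just to let the tree walk continue. A simplified version of the selection loop, without the page lookups and pruning:

    #include <stdio.h>
    #include <stddef.h>

    static int ksm_max_page_sharing = 256;

    struct dup { int rmap_hlist_len; struct dup *next; };

    /* Sketch of the selection in stable_node_dup(): prefer the fullest
     * dup on the chain that can still take sharers. */
    static struct dup *pick_dup(struct dup *head)
    {
        struct dup *found = NULL;

        for (struct dup *d = head; d; d = d->next) {
            if (!d->rmap_hlist_len)                   /* empty: skip */
                continue;
            if (d->rmap_hlist_len >= ksm_max_page_sharing)
                continue;                             /* full: skip */
            if (!found || d->rmap_hlist_len > found->rmap_hlist_len)
                found = d;
        }
        return found;
    }

    int main(void)
    {
        struct dup c = { 256, NULL }, b = { 7, &c }, a = { 3, &b };
        struct dup *best = pick_dup(&a);

        printf("picked dup with %d sharers\n",
               best ? best->rmap_hlist_len : -1);
        return 0;
    }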
1506 static struct page *__stable_node_chain(struct stable_node **_stable_node_dup,
1507 struct stable_node **_stable_node,
1511 struct stable_node *stable_node = *_stable_node;
1512 if (!is_stable_node_chain(stable_node)) {
1513 if (is_page_sharing_candidate(stable_node)) {
1514 *_stable_node_dup = stable_node;
1515 return get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK);
1518 * _stable_node_dup set to NULL means the stable_node
1528 static __always_inline struct page *chain_prune(struct stable_node **s_n_d,
1529 struct stable_node **s_n,
1535 static __always_inline struct page *chain(struct stable_node **s_n_d,
1536 struct stable_node *s_n,
1539 struct stable_node *old_stable_node = s_n;
1563 struct stable_node *stable_node, *stable_node_dup, *stable_node_any;
1564 struct stable_node *page_node;
1584 stable_node = rb_entry(*new, struct stable_node, node);
1586 tree_page = chain_prune(&stable_node_dup, &stable_node, root);
1588 * NOTE: stable_node may have been freed by
1591 * the rbtree instead as a regular stable_node (in
1592 * order to collapse the stable_node chain if a single
1593 * stable_node dup was found in it). In such case the
1594 * stable_node is overwritten by the callee to point
1596 * stable rbtree and stable_node will be equal to
1601 * Either all stable_node dups were full in
1602 * this stable_node chain, or this chain was
1605 stable_node_any = stable_node_dup_any(stable_node,
1612 * Take any of the stable_node dups page of
1613 * this stable_node chain to let the tree walk
1615 * stable_node dups in a stable_node chain
1626 * If we walked over a stale stable_node,
1632 * stable_node was stale.
1660 * If the stable_node is a chain and
1664 * stable_node dups because they're
1669 * the dups of this stable_node.
1675 * Lock and unlock the stable_node's page (which
1678 * It would be more elegant to return stable_node
1720 * If stable_node was a chain and chain_prune collapsed it,
1721 * stable_node has been updated to be the new regular
1722 * stable_node. A collapse of the chain is indistinguishable
1724 * rbtree. Otherwise stable_node is the chain and
1727 if (stable_node_dup == stable_node) {
1747 VM_BUG_ON(!is_stable_node_chain(stable_node));
1753 stable_node_chain_add_dup(page_node, stable_node);
1771 * If stable_node was a chain and chain_prune collapsed it,
1772 * stable_node has been updated to be the new regular
1773 * stable_node. A collapse of the chain is indistinguishable
1775 * rbtree. Otherwise stable_node is the chain and
1778 if (stable_node_dup == stable_node) {
1782 stable_node = alloc_stable_node_chain(stable_node_dup,
1784 if (!stable_node)
1788 * Add this stable_node dup that was
1789 * migrated to the stable_node chain
1793 VM_BUG_ON(!is_stable_node_chain(stable_node));
1798 stable_node_chain_add_dup(page_node, stable_node);
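All of stable_tree_search() (1563-1798) hangs off one idea: the stable rbtree is ordered by raw page contents, so lookup is a binary search driven by a byte comparison of the candidate page against each node's page (the kernel compares mapped pages; memcmp over small buffers below is the same logic at toy scale). Most of 1584-1678 is then coping with chains (chain_prune() at 1586 picks a dup) and stale nodes along the way.

    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 8   /* toy page size */

    struct node {
        unsigned char data[PAGE_SIZE];
        struct node *left, *right;
    };

    /* Sketch of the stable-tree walk: binary search ordered by memcmp
     * of page contents; a full match means a merge candidate. */
    static struct node *search(struct node *root, const unsigned char *page)
    {
        while (root) {
            int ret = memcmp(page, root->data, PAGE_SIZE);

            if (ret < 0)
                root = root->left;
            else if (ret > 0)
                root = root->right;
            else
                return root;   /* identical content */
        }
        return NULL;
    }

    int main(void)
    {
        struct node l = { "aaaaaaa" }, r = { "zzzzzzz" };
        struct node root = { "mmmmmmm", &l, &r };

        printf("%s\n", search(&root, (const unsigned char *)"zzzzzzz")
                       ? "found" : "missing");
        return 0;
    }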
1809 static struct stable_node *stable_tree_insert(struct page *kpage)
1816 struct stable_node *stable_node, *stable_node_dup, *stable_node_any;
1831 stable_node = rb_entry(*new, struct stable_node, node);
1833 tree_page = chain(&stable_node_dup, stable_node, root);
1836 * Either all stable_node dups were full in
1837 * this stable_node chain, or this chain was
1840 stable_node_any = stable_node_dup_any(stable_node,
1847 * Take any of the stable_node dups page of
1848 * this stable_node chain to let the tree walk
1850 * stable_node dups in a stable_node chain
1861 * If we walked over a stale stable_node,
1867 * stable_node was stale.
1899 if (!is_stable_node_chain(stable_node)) {
1900 struct stable_node *orig = stable_node;
1902 stable_node = alloc_stable_node_chain(orig, root);
1903 if (!stable_node) {
1908 stable_node_chain_add_dup(stable_node_dup, stable_node);
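Lines 1899-1908 show what stable_tree_insert() does on a content collision with a regular (non-chain) node: alloc_stable_node_chain() puts a new chain node into the old node's rbtree slot and demotes the old node to the chain's first dup, after which the colliding dup is added alongside it. A sketch of that promotion using the flattened model from earlier, with the rb_replace_node() step elided:

    #include <stdio.h>
    #include <stdlib.h>

    #define STABLE_NODE_CHAIN (-1024)

    struct stable_node {
        struct stable_node *next_dup;  /* sibling dup (hlist_dup) */
        struct stable_node *dups;      /* dup list, when this is a chain */
        int rmap_hlist_len;
    };

    /* Sketch of alloc_stable_node_chain(): a fresh chain node takes the
     * old node's place in the rbtree (elided) and the old node becomes
     * the chain's first dup. */
    static struct stable_node *promote_to_chain(struct stable_node *orig)
    {
        struct stable_node *chain = calloc(1, sizeof(*chain));

        if (!chain)
            return NULL;
        chain->rmap_hlist_len = STABLE_NODE_CHAIN;   /* mark the role */
        orig->next_dup = chain->dups;                /* orig as first dup */
        chain->dups = orig;
        return chain;
    }

    int main(void)
    {
        struct stable_node *orig = calloc(1, sizeof(*orig));

        orig->rmap_hlist_len = 256;                  /* full: forced the chain */
        struct stable_node *chain = promote_to_chain(orig);
        printf("chain role=%d, first dup holds %d sharers\n",
               chain->rmap_hlist_len, chain->dups->rmap_hlist_len);
        return 0;
    }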
2001 struct stable_node *stable_node,
2006 * rmap_item in the right stable_node
2012 * would be a sign of memory corruption in the stable_node.
2014 BUG_ON(stable_node->rmap_hlist_len < 0);
2016 stable_node->rmap_hlist_len++;
2019 WARN_ON_ONCE(stable_node->rmap_hlist_len >
2022 rmap_item->head = stable_node;
2024 hlist_add_head(&rmap_item->hlist, &stable_node->hlist);
2046 struct stable_node *stable_node;
2052 stable_node = page_stable_node(page);
2053 if (stable_node) {
2054 if (stable_node->head != &migrate_nodes &&
2055 get_kpfn_nid(READ_ONCE(stable_node->kpfn)) !=
2056 NUMA(stable_node->nid)) {
2057 stable_node_dup_del(stable_node);
2058 stable_node->head = &migrate_nodes;
2059 list_add(&stable_node->list, stable_node->head);
2061 if (stable_node->head != &migrate_nodes &&
2062 rmap_item->head == stable_node)
2068 if (!is_page_sharing_candidate(stable_node))
2074 if (kpage == page && rmap_item->head == stable_node) {
2165 stable_node = stable_tree_insert(kpage);
2166 if (stable_node) {
2167 stable_tree_append(tree_rmap_item, stable_node,
2169 stable_tree_append(rmap_item, stable_node,
2180 if (!stable_node) {
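Lines 2054-2059 handle pages migrated across NUMA nodes: when merge_across_nodes is off the stable trees are per-NUMA-node, so if the pfn now resolves to a different node than the tree the stable_node lives in, the node is detached and parked on the migrate_nodes list for later re-insertion into the right tree. A sketch of that check; pfn_to_nid() below is a made-up stand-in:

    #include <stdio.h>
    #include <stdbool.h>

    /* Toy stand-in for the pfn -> NUMA node lookup. */
    static int pfn_to_nid(unsigned long pfn)
    {
        return pfn >> 20;            /* high pfn bits pick the node */
    }

    struct stable_node { unsigned long kpfn; int nid; bool parked; };

    /* Sketch of the check around line 2054: a migrated page's pfn may no
     * longer match the per-node tree this stable_node was inserted into. */
    static void check_stable_node_nid(struct stable_node *sn)
    {
        if (!sn->parked && pfn_to_nid(sn->kpfn) != sn->nid)
            sn->parked = true;       /* kernel: move to &migrate_nodes */
    }

    int main(void)
    {
        struct stable_node sn = { .kpfn = 3UL << 20, .nid = 0 };

        check_stable_node_nid(&sn);
        printf("parked for re-insertion: %d\n", sn.parked);
        return 0;
    }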
2262 struct stable_node *stable_node, *next;
2265 list_for_each_entry_safe(stable_node, next,
2267 page = get_ksm_page(stable_node,
2611 struct stable_node *stable_node;
2623 stable_node = page_stable_node(page);
2624 if (!stable_node)
2627 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
2676 struct stable_node *stable_node;
2682 stable_node = page_stable_node(newpage);
2683 if (stable_node) {
2684 VM_BUG_ON_PAGE(stable_node->kpfn != page_to_pfn(oldpage), oldpage);
2685 stable_node->kpfn = page_to_pfn(newpage);
2688 * to make sure that the new stable_node->kpfn is visible
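Lines 2684-2688 are the migration fixup: the stable_node's kpfn is switched to the new page's pfn, and a write barrier (smp_wmb() in the kernel) makes the new kpfn visible before readers can observe the old page's mapping going stale, pairing with the READ_ONCE() at 706. A loose userspace analogy using C11 release/acquire in place of the kernel primitives:

    #include <stdio.h>
    #include <stdatomic.h>

    struct stable_node { atomic_ulong kpfn; };

    /* Publish the new kpfn with release semantics so a racing reader
     * sees either the old mapping or the new kpfn, never a stale mix. */
    static void migrate_kpfn(struct stable_node *sn, unsigned long new_pfn)
    {
        atomic_store_explicit(&sn->kpfn, new_pfn, memory_order_release);
    }

    static unsigned long read_kpfn(struct stable_node *sn)
    {
        /* pairs with the release store above */
        return atomic_load_explicit(&sn->kpfn, memory_order_acquire);
    }

    int main(void)
    {
        struct stable_node sn;

        atomic_init(&sn.kpfn, 42);
        migrate_kpfn(&sn, 99);
        printf("kpfn now %lu\n", read_kpfn(&sn));
        return 0;
    }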
2709 static bool stable_node_dup_remove_range(struct stable_node *stable_node,
2713 if (stable_node->kpfn >= start_pfn &&
2714 stable_node->kpfn < end_pfn) {
2719 remove_node_from_stable_tree(stable_node);
2725 static bool stable_node_chain_remove_range(struct stable_node *stable_node,
2730 struct stable_node *dup;
2733 if (!is_stable_node_chain(stable_node)) {
2734 VM_BUG_ON(is_stable_node_dup(stable_node));
2735 return stable_node_dup_remove_range(stable_node, start_pfn,
2740 &stable_node->hlist, hlist_dup) {
2744 if (hlist_empty(&stable_node->hlist)) {
2745 free_stable_node_chain(stable_node, root);
2754 struct stable_node *stable_node, *next;
2761 stable_node = rb_entry(node, struct stable_node, node);
2762 if (stable_node_chain_remove_range(stable_node,
2772 list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
2773 if (stable_node->kpfn >= start_pfn &&
2774 stable_node->kpfn < end_pfn)
2775 remove_node_from_stable_tree(stable_node);
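The final block (2709-2775) supports memory hot-remove: every stable_node, whether a dup on a chain or parked on migrate_nodes, whose kpfn falls inside the offlined pfn window must be removed, or it would dangle into unplugged memory; chains left empty are freed afterwards (2744-2745). The range test itself is a half-open interval check:

    #include <stdio.h>
    #include <stdbool.h>

    struct stable_node { unsigned long kpfn; };

    /* Sketch of stable_node_dup_remove_range()'s test: end_pfn exclusive. */
    static bool in_removed_range(struct stable_node *sn,
                                 unsigned long start_pfn,
                                 unsigned long end_pfn)
    {
        return sn->kpfn >= start_pfn && sn->kpfn < end_pfn;
    }

    int main(void)
    {
        struct stable_node a = { 100 }, b = { 200 };

        printf("%d %d\n", in_removed_range(&a, 100, 200),   /* 1: removed */
                          in_removed_range(&b, 100, 200));  /* 0: kept */
        return 0;
    }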