Lines Matching defs:stable_node (mm/ksm.c)

149 * @hlist_dup: linked into the stable_node->hlist with a stable_node chain
194 * @head: pointer to stable_node heading this list in the stable tree
195 * @hlist: link into hlist of rmap_items hanging off that stable_node
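
The kernel-doc fragments above belong to struct ksm_stable_node and struct ksm_rmap_item. For orientation, a trimmed sketch of the members this listing keeps referring to (unions and #ifdef details condensed; the exact layout varies by kernel version):

struct ksm_stable_node {
        union {
                struct rb_node node;    /* when node of the stable rbtree */
                struct {                /* when listed for migration */
                        struct list_head *head;
                        struct {
                                struct hlist_node hlist_dup;    /* dup linked into a chain's hlist */
                                struct list_head list;
                        };
                };
        };
        struct hlist_head hlist;        /* rmap_items, or dups if this node is a chain */
        union {
                unsigned long kpfn;                     /* pfn of the KSM page */
                unsigned long chain_prune_time;         /* chains only */
        };
        int rmap_hlist_len;     /* nr of sharers; STABLE_NODE_CHAIN (<0) marks a chain */
#ifdef CONFIG_NUMA
        int nid;
#endif
};

struct ksm_rmap_item {
        /* ... unstable-tree and anon_vma fields trimmed ... */
        struct ksm_stable_node *head;   /* stable_node this rmap_item hangs off */
        struct hlist_node hlist;        /* link into stable_node->hlist */
};
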
260 /* The number of stable_node chains */
263 /* The number of stable_node dups linked to the stable_node chains */
412 static inline void free_stable_node(struct ksm_stable_node *stable_node)
414 VM_BUG_ON(stable_node->rmap_hlist_len &&
415 !is_stable_node_chain(stable_node));
416 kmem_cache_free(stable_node_cache, stable_node);
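
The VM_BUG_ON() at 414-415 relies on the chain/dup predicates that thread through the rest of this listing. Roughly (a sketch; the marker values are as found in recent ksm.c and may change):

#define STABLE_NODE_CHAIN -1024         /* negative rmap_hlist_len marks a chain head */
#define STABLE_NODE_DUP_HEAD ((struct list_head *)&migrate_nodes.prev)

static __always_inline bool is_stable_node_chain(struct ksm_stable_node *chain)
{
        return chain->rmap_hlist_len == STABLE_NODE_CHAIN;
}

static __always_inline bool is_stable_node_dup(struct ksm_stable_node *dup)
{
        return dup->head == STABLE_NODE_DUP_HEAD;
}
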
668 static void remove_node_from_stable_tree(struct ksm_stable_node *stable_node)
673 BUG_ON(stable_node->rmap_hlist_len < 0);
675 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
678 trace_ksm_remove_rmap_item(stable_node->kpfn, rmap_item, rmap_item->mm);
685 VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
686 stable_node->rmap_hlist_len--;
702 trace_ksm_remove_ksm_page(stable_node->kpfn);
703 if (stable_node->head == &migrate_nodes)
704 list_del(&stable_node->list);
706 stable_node_dup_del(stable_node);
707 free_stable_node(stable_node);
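
Fragments 668-707 are from remove_node_from_stable_tree(). The per-rmap_item teardown between them (sharing counters, anon_vma release) goes roughly as below; the counter names and tracepoint placement are from memory and may differ slightly:

        hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
                if (rmap_item->hlist.next) {
                        ksm_pages_sharing--;
                        trace_ksm_remove_rmap_item(stable_node->kpfn,
                                                   rmap_item, rmap_item->mm);
                } else {
                        ksm_pages_shared--;
                }
                VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
                stable_node->rmap_hlist_len--;
                put_anon_vma(rmap_item->anon_vma);
                rmap_item->address &= PAGE_MASK;
                cond_resched();
        }
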
724 * You would expect the stable_node to hold a reference to the ksm page.
735 static struct page *get_ksm_page(struct ksm_stable_node *stable_node,
742 expected_mapping = (void *)((unsigned long)stable_node |
745 kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */
804 if (READ_ONCE(stable_node->kpfn) != kpfn)
806 remove_node_from_stable_tree(stable_node);
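
Fragments 735-806 are from get_ksm_page(). Pieced together, its staleness check works roughly like this (condensed; the pinning/locking variants and flags are omitted):

        expected_mapping = (void *)((unsigned long)stable_node |
                                        PAGE_MAPPING_KSM);
again:
        kpfn = READ_ONCE(stable_node->kpfn);    /* Address dependency. */
        page = pfn_to_page(kpfn);
        if (READ_ONCE(page->mapping) != expected_mapping)
                goto stale;
        /* ... try to pin and optionally lock the page ... */
        return page;

stale:
        /*
         * The page may just be under migration: re-read kpfn with a
         * read barrier and retry before concluding the node is stale.
         */
        smp_rmb();
        if (READ_ONCE(stable_node->kpfn) != kpfn)
                goto again;
        remove_node_from_stable_tree(stable_node);
        return NULL;
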
817 struct ksm_stable_node *stable_node;
820 stable_node = rmap_item->head;
821 page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
829 if (!hlist_empty(&stable_node->hlist))
836 VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
837 stable_node->rmap_hlist_len--;
915 struct ksm_stable_node *stable_node)
918 page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
925 static int remove_stable_node(struct ksm_stable_node *stable_node)
930 page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
954 remove_node_from_stable_tree(stable_node);
963 static int remove_stable_node_chain(struct ksm_stable_node *stable_node,
969 if (!is_stable_node_chain(stable_node)) {
970 VM_BUG_ON(is_stable_node_dup(stable_node));
971 if (remove_stable_node(stable_node))
978 &stable_node->hlist, hlist_dup) {
983 BUG_ON(!hlist_empty(&stable_node->hlist));
984 free_stable_node_chain(stable_node, root);
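
Fragments 963-984 cover most of remove_stable_node_chain(); filled in, it reads roughly as below (local variable names are guesses):

static int remove_stable_node_chain(struct ksm_stable_node *stable_node,
                                    struct rb_root *root)
{
        struct ksm_stable_node *dup;
        struct hlist_node *hlist_safe;

        if (!is_stable_node_chain(stable_node)) {
                VM_BUG_ON(is_stable_node_dup(stable_node));
                return remove_stable_node(stable_node);
        }

        /* a chain: try to remove every dup hanging off it */
        hlist_for_each_entry_safe(dup, hlist_safe,
                                  &stable_node->hlist, hlist_dup) {
                VM_BUG_ON(!is_stable_node_dup(dup));
                if (remove_stable_node(dup))
                        return true;
        }
        BUG_ON(!hlist_empty(&stable_node->hlist));
        free_stable_node_chain(stable_node, root);
        return false;
}
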
990 struct ksm_stable_node *stable_node, *next;
996 stable_node = rb_entry(root_stable_tree[nid].rb_node,
998 if (remove_stable_node_chain(stable_node,
1006 list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
1007 if (remove_stable_node(stable_node))
1326 * PageAnon+anon_vma to PageKsm+NULL stable_node:
1327 * stable_tree_insert() will update stable_node.
1415 bool __is_page_sharing_candidate(struct ksm_stable_node *stable_node, int offset)
1417 VM_BUG_ON(stable_node->rmap_hlist_len < 0);
1421 * stable_node, as the underlying tree_page of the other
1424 return stable_node->rmap_hlist_len &&
1425 stable_node->rmap_hlist_len + offset < ksm_max_page_sharing;
1429 bool is_page_sharing_candidate(struct ksm_stable_node *stable_node)
1431 return __is_page_sharing_candidate(stable_node, 0);
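
Fragments 1415-1431 reassemble into the sharing-limit check used by the chain code below; roughly:

static __always_inline
bool __is_page_sharing_candidate(struct ksm_stable_node *stable_node, int offset)
{
        VM_BUG_ON(stable_node->rmap_hlist_len < 0);
        /*
         * At least one mapping must still exist, otherwise there is no
         * point sharing with this stable_node: its underlying tree_page
         * is about to be freed.
         */
        return stable_node->rmap_hlist_len &&
                stable_node->rmap_hlist_len + offset < ksm_max_page_sharing;
}

static __always_inline
bool is_page_sharing_candidate(struct ksm_stable_node *stable_node)
{
        return __is_page_sharing_candidate(stable_node, 0);
}
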
1439 struct ksm_stable_node *dup, *found = NULL, *stable_node = *_stable_node;
1446 time_before(jiffies, stable_node->chain_prune_time +
1451 stable_node->chain_prune_time = jiffies;
1454 &stable_node->hlist, hlist_dup) {
1461 * stable_node->hlist if they point to freed pages
1463 * stable_node parameter itself will be freed from
1502 BUG_ON(stable_node->hlist.first->next);
1508 rb_replace_node(&stable_node->node, &found->node,
1510 free_stable_node(stable_node);
1514 * NOTE: the caller depends on the stable_node
1520 * Just for robustness, as stable_node is
1525 stable_node = NULL;
1526 } else if (stable_node->hlist.first != &found->hlist_dup &&
1529 * If the found stable_node dup can accept one
1545 &stable_node->hlist);
1553 static struct ksm_stable_node *stable_node_dup_any(struct ksm_stable_node *stable_node,
1556 if (!is_stable_node_chain(stable_node))
1557 return stable_node;
1558 if (hlist_empty(&stable_node->hlist)) {
1559 free_stable_node_chain(stable_node, root);
1562 return hlist_entry(stable_node->hlist.first,
1563 typeof(*stable_node), hlist_dup);
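
Fragments 1553-1563 give almost all of stable_node_dup_any(), which returns any dup of a chain (or the node itself if it is not a chain) just so a tree walk can continue; the whole helper is roughly:

static struct ksm_stable_node *stable_node_dup_any(struct ksm_stable_node *stable_node,
                                                   struct rb_root *root)
{
        if (!is_stable_node_chain(stable_node))
                return stable_node;
        if (hlist_empty(&stable_node->hlist)) {
                free_stable_node_chain(stable_node, root);
                return NULL;
        }
        return hlist_entry(stable_node->hlist.first,
                           typeof(*stable_node), hlist_dup);
}
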
1585 struct ksm_stable_node *stable_node = *_stable_node;
1586 if (!is_stable_node_chain(stable_node)) {
1587 if (is_page_sharing_candidate(stable_node)) {
1588 *_stable_node_dup = stable_node;
1589 return get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK);
1592 * _stable_node_dup set to NULL means the stable_node
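
Fragments 1585-1592 are from __stable_node_chain(), which chain() (1905) and chain_prune() (1660) wrap with prune=false/true. A sketch of the whole helper, assuming the usual signature:

static struct page *__stable_node_chain(struct ksm_stable_node **_stable_node_dup,
                                        struct ksm_stable_node **_stable_node,
                                        struct rb_root *root,
                                        bool prune)
{
        struct ksm_stable_node *stable_node = *_stable_node;

        if (!is_stable_node_chain(stable_node)) {
                if (is_page_sharing_candidate(stable_node)) {
                        *_stable_node_dup = stable_node;
                        return get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK);
                }
                /*
                 * _stable_node_dup set to NULL means the stable_node
                 * reached the ksm_max_page_sharing limit.
                 */
                *_stable_node_dup = NULL;
                return NULL;
        }
        /* a chain: pick (and optionally prune to) a dup with room left */
        return stable_node_dup(_stable_node_dup, _stable_node, root, prune);
}
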
1637 struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any;
1658 stable_node = rb_entry(*new, struct ksm_stable_node, node);
1660 tree_page = chain_prune(&stable_node_dup, &stable_node, root);
1662 * NOTE: stable_node may have been freed by
1665 * the rbtree instead as a regular stable_node (in
1666 * order to collapse the stable_node chain if a single
1667 * stable_node dup was found in it). In such case the
1668 * stable_node is overwritten by the callee to point
1670 * stable rbtree and stable_node will be equal to
1675 * Either all stable_node dups were full in
1676 * this stable_node chain, or this chain was
1679 stable_node_any = stable_node_dup_any(stable_node,
1686 * Take any of the stable_node dups page of
1687 * this stable_node chain to let the tree walk
1689 * stable_node dups in a stable_node chain
1700 * If we walked over a stale stable_node,
1706 * stable_node was stale.
1734 * If the stable_node is a chain and
1738 * stable_node dups because they're
1743 * the dups of this stable_node.
1749 * Lock and unlock the stable_node's page (which
1752 * It would be more elegant to return stable_node
1794 * If stable_node was a chain and chain_prune collapsed it,
1795 * stable_node has been updated to be the new regular
1796 * stable_node. A collapse of the chain is indistinguishable
1798 * rbtree. Otherwise stable_node is the chain and
1801 if (stable_node_dup == stable_node) {
1821 VM_BUG_ON(!is_stable_node_chain(stable_node));
1827 stable_node_chain_add_dup(page_node, stable_node);
1845 * If stable_node was a chain and chain_prune collapsed it,
1846 * stable_node has been updated to be the new regular
1847 * stable_node. A collapse of the chain is indistinguishable
1849 * rbtree. Otherwise stable_node is the chain and
1852 if (stable_node_dup == stable_node) {
1855 stable_node = alloc_stable_node_chain(stable_node_dup,
1857 if (!stable_node)
1861 * Add this stable_node dup that was
1862 * migrated to the stable_node chain
1870 stable_node_chain_add_dup(page_node, stable_node);
1888 struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any;
1903 stable_node = rb_entry(*new, struct ksm_stable_node, node);
1905 tree_page = chain(&stable_node_dup, stable_node, root);
1908 * Either all stable_node dups were full in
1909 * this stable_node chain, or this chain was
1912 stable_node_any = stable_node_dup_any(stable_node,
1919 * Take any of the stable_node dups page of
1920 * this stable_node chain to let the tree walk
1922 * stable_node dups in a stable_node chain
1933 * If we walked over a stale stable_node,
1939 * stable_node was stale.
1971 if (!is_stable_node_chain(stable_node)) {
1972 struct ksm_stable_node *orig = stable_node;
1974 stable_node = alloc_stable_node_chain(orig, root);
1975 if (!stable_node) {
1980 stable_node_chain_add_dup(stable_node_dup, stable_node);
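
stable_tree_insert() above, like stable_tree_search() at 1827/1870, leans on two chain helpers that do not show up in this listing because their parameters are named dup/chain rather than stable_node. Roughly, with the counters matching the comments at 260/263 and debug checks trimmed:

static __always_inline
void stable_node_chain_add_dup(struct ksm_stable_node *dup,
                               struct ksm_stable_node *chain)
{
        VM_BUG_ON(is_stable_node_dup(dup));
        dup->head = STABLE_NODE_DUP_HEAD;
        VM_BUG_ON(!is_stable_node_chain(chain));
        hlist_add_head(&dup->hlist_dup, &chain->hlist);
        ksm_stable_node_dups++;
}

static struct ksm_stable_node *alloc_stable_node_chain(struct ksm_stable_node *dup,
                                                       struct rb_root *root)
{
        struct ksm_stable_node *chain = alloc_stable_node();

        if (likely(chain)) {
                INIT_HLIST_HEAD(&chain->hlist);
                chain->chain_prune_time = jiffies;
                chain->rmap_hlist_len = STABLE_NODE_CHAIN;
                ksm_stable_node_chains++;
                /* the chain takes the dup's place in the stable rbtree... */
                rb_replace_node(&dup->node, &chain->node, root);
                /* ...and the old node becomes the chain's first dup */
                stable_node_chain_add_dup(dup, chain);
        }
        return chain;
}
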
2073 struct ksm_stable_node *stable_node,
2078 * rmap_item in the right stable_node
2084 * would be sign of memory corruption in the stable_node.
2086 BUG_ON(stable_node->rmap_hlist_len < 0);
2088 stable_node->rmap_hlist_len++;
2091 WARN_ON_ONCE(stable_node->rmap_hlist_len >
2094 rmap_item->head = stable_node;
2096 hlist_add_head(&rmap_item->hlist, &stable_node->hlist);
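
Fragments 2073-2096 are from stable_tree_append(); filled in, it reads roughly as below (the ksm_pages_shared/ksm_pages_sharing accounting at the end is from memory):

static void stable_tree_append(struct ksm_rmap_item *rmap_item,
                               struct ksm_stable_node *stable_node,
                               bool max_page_sharing_bypass)
{
        /* a negative count would be a sign of corruption in the stable_node */
        BUG_ON(stable_node->rmap_hlist_len < 0);

        stable_node->rmap_hlist_len++;
        if (!max_page_sharing_bypass)
                /* possibly non fatal but unexpected overflow, only warn */
                WARN_ON_ONCE(stable_node->rmap_hlist_len >
                             ksm_max_page_sharing);

        rmap_item->head = stable_node;
        rmap_item->address |= STABLE_FLAG;
        hlist_add_head(&rmap_item->hlist, &stable_node->hlist);

        if (rmap_item->hlist.next)
                ksm_pages_sharing++;
        else
                ksm_pages_shared++;
}
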
2120 struct ksm_stable_node *stable_node;
2126 stable_node = page_stable_node(page);
2127 if (stable_node) {
2128 if (stable_node->head != &migrate_nodes &&
2129 get_kpfn_nid(READ_ONCE(stable_node->kpfn)) !=
2130 NUMA(stable_node->nid)) {
2131 stable_node_dup_del(stable_node);
2132 stable_node->head = &migrate_nodes;
2133 list_add(&stable_node->list, stable_node->head);
2135 if (stable_node->head != &migrate_nodes &&
2136 rmap_item->head == stable_node)
2142 if (!is_page_sharing_candidate(stable_node))
2148 if (kpage == page && rmap_item->head == stable_node) {
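
Fragments 2120-2148 are the stable-tree half of cmp_and_merge_page(). Reassembled, the NUMA bookkeeping at 2126-2142 goes roughly like this (condensed sketch):

        stable_node = page_stable_node(page);
        if (stable_node) {
                /*
                 * If the stable_node's page now sits on a different NUMA
                 * node than the tree it was inserted in, park it on
                 * migrate_nodes so it can be reinserted in the right tree.
                 */
                if (stable_node->head != &migrate_nodes &&
                    get_kpfn_nid(READ_ONCE(stable_node->kpfn)) !=
                    NUMA(stable_node->nid)) {
                        stable_node_dup_del(stable_node);
                        stable_node->head = &migrate_nodes;
                        list_add(&stable_node->list, stable_node->head);
                }
                /* already a stable rmap_item of this node: nothing to do */
                if (stable_node->head != &migrate_nodes &&
                    rmap_item->head == stable_node)
                        return;
                /* a KSM fork may exceed the sharing limit without warnings */
                if (!is_page_sharing_candidate(stable_node))
                        max_page_sharing_bypass = true;
        }
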
2242 stable_node = stable_tree_insert(kpage);
2243 if (stable_node) {
2244 stable_tree_append(tree_rmap_item, stable_node,
2246 stable_tree_append(rmap_item, stable_node,
2257 if (!stable_node) {
2344 struct ksm_stable_node *stable_node, *next;
2347 list_for_each_entry_safe(stable_node, next,
2349 page = get_ksm_page(stable_node,
2840 struct ksm_stable_node *stable_node;
2852 stable_node = folio_stable_node(folio);
2853 if (!stable_node)
2856 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
2915 struct ksm_stable_node *stable_node;
2921 stable_node = folio_stable_node(folio);
2922 if (!stable_node)
2924 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
2956 struct ksm_stable_node *stable_node;
2962 stable_node = folio_stable_node(folio);
2963 if (stable_node) {
2964 VM_BUG_ON_FOLIO(stable_node->kpfn != folio_pfn(folio), folio);
2965 stable_node->kpfn = folio_pfn(newfolio);
2968 * to make sure that the new stable_node->kpfn is visible
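
Fragments 2956-2968 are from folio_migrate_ksm(), which re-points stable_node->kpfn when a KSM page is migrated; roughly (locking asserts omitted, and the final helper that clears the old folio's stable_node varies between versions):

void folio_migrate_ksm(struct folio *newfolio, struct folio *folio)
{
        struct ksm_stable_node *stable_node;

        stable_node = folio_stable_node(folio);
        if (stable_node) {
                VM_BUG_ON_FOLIO(stable_node->kpfn != folio_pfn(folio), folio);
                stable_node->kpfn = folio_pfn(newfolio);
                /*
                 * newfolio->mapping was set in advance; the smp_wmb() makes
                 * sure the new stable_node->kpfn is visible to get_ksm_page()
                 * before it can see that folio->mapping has gone stale.
                 */
                smp_wmb();
                set_page_stable_node(&folio->page, NULL);
        }
}
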
2989 static bool stable_node_dup_remove_range(struct ksm_stable_node *stable_node,
2993 if (stable_node->kpfn >= start_pfn &&
2994 stable_node->kpfn < end_pfn) {
2999 remove_node_from_stable_tree(stable_node);
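
Fragments 2989-2999 come from the memory-offline path; reassembled, the per-node range check is roughly:

static bool stable_node_dup_remove_range(struct ksm_stable_node *stable_node,
                                         unsigned long start_pfn,
                                         unsigned long end_pfn)
{
        if (stable_node->kpfn >= start_pfn &&
            stable_node->kpfn < end_pfn) {
                /*
                 * Don't get_ksm_page() here: the page is already gone,
                 * which is why kpfn is kept instead of a page pointer.
                 */
                remove_node_from_stable_tree(stable_node);
                return true;
        }
        return false;
}
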
3005 static bool stable_node_chain_remove_range(struct ksm_stable_node *stable_node,
3013 if (!is_stable_node_chain(stable_node)) {
3014 VM_BUG_ON(is_stable_node_dup(stable_node));
3015 return stable_node_dup_remove_range(stable_node, start_pfn,
3020 &stable_node->hlist, hlist_dup) {
3024 if (hlist_empty(&stable_node->hlist)) {
3025 free_stable_node_chain(stable_node, root);
3034 struct ksm_stable_node *stable_node, *next;
3041 stable_node = rb_entry(node, struct ksm_stable_node, node);
3042 if (stable_node_chain_remove_range(stable_node,
3052 list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
3053 if (stable_node->kpfn >= start_pfn &&
3054 stable_node->kpfn < end_pfn)
3055 remove_node_from_stable_tree(stable_node);