Lines Matching defs:rmap_item (mm/ksm.c)
154 * @rmap_hlist_len: number of rmap_item entries in hlist or STABLE_NODE_CHAIN
187 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
190 * @mm: the memory structure this rmap_item is pointing into
191 * @address: the virtual address this rmap_item tracks (+ flags in low bits)
193 * @node: rb node of this rmap_item in the unstable tree
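
The kerneldoc fragments above (lines 154-193) describe the fields of struct ksm_rmap_item, the per-page tracking object that the rest of this listing manipulates. Below is a minimal sketch of the structure, reconstructed only from the fields referenced elsewhere in this listing; the real definition in mm/ksm.c differs in detail (field order, a CONFIG_NUMA guard around nid, and unions overlaying stable-tree and unstable-tree state), so treat it as a reading aid rather than the actual layout.

#include <linux/rbtree.h>
#include <linux/list.h>
#include <linux/mm_types.h>
#include <linux/rmap.h>

struct ksm_stable_node;			/* one node per shared KSM page (opaque here) */

struct ksm_rmap_item {
	struct ksm_rmap_item *rmap_list;	/* next item in mm_slot's singly-linked list */
	struct anon_vma *anon_vma;		/* recorded at merge time, used by rmap walks */
	struct mm_struct *mm;			/* the memory structure this item points into */
	unsigned long address;			/* page-aligned VA, flags/seqnr in the low bits */
	unsigned int oldchecksum;		/* page checksum seen on the previous scan */
	int nid;				/* NUMA node whose unstable tree holds the item */
	union {
		struct rb_node node;		/* while linked into an unstable tree */
		struct {			/* while hanging off a stable tree node: */
			struct ksm_stable_node *head;
			struct hlist_node hlist;
		};
	};
};
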
385 struct ksm_rmap_item *rmap_item;
387 rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
389 if (rmap_item)
391 return rmap_item;
394 static inline void free_rmap_item(struct ksm_rmap_item *rmap_item)
397 rmap_item->mm->ksm_rmap_items--;
398 rmap_item->mm = NULL; /* debug safety */
399 kmem_cache_free(rmap_item_cache, rmap_item);
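
alloc_rmap_item() and free_rmap_item() (lines 385-399) round-trip these objects through a dedicated slab cache. The sketch below shows that pattern under two assumptions: the cache is created with a plain KMEM_CACHE() call (the file's real cache setup is not part of this listing), and the allocation uses bare GFP_KERNEL, whereas the real call on line 387 ORs in further GFP flags on a continuation line the matcher did not print. It reuses the structure sketch above.

#include <linux/init.h>
#include <linux/slab.h>

static struct kmem_cache *rmap_item_cache;

/* Assumed setup: one slab cache sized for struct ksm_rmap_item. */
static int __init demo_rmap_item_cache_init(void)
{
	rmap_item_cache = KMEM_CACHE(ksm_rmap_item, 0);
	return rmap_item_cache ? 0 : -ENOMEM;
}

static struct ksm_rmap_item *demo_alloc_rmap_item(void)
{
	/* zeroed allocation: a fresh item carries no list, tree or vma state */
	return kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
}

static void demo_free_rmap_item(struct ksm_rmap_item *rmap_item)
{
	rmap_item->mm->ksm_rmap_items--;	/* undoes the accounting done at line 2300 */
	rmap_item->mm = NULL;			/* debug safety, as on line 398 */
	kmem_cache_free(rmap_item_cache, rmap_item);
}
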
567 static void break_cow(struct ksm_rmap_item *rmap_item)
569 struct mm_struct *mm = rmap_item->mm;
570 unsigned long addr = rmap_item->address;
577 put_anon_vma(rmap_item->anon_vma);
586 static struct page *get_mergeable_page(struct ksm_rmap_item *rmap_item)
588 struct mm_struct *mm = rmap_item->mm;
589 unsigned long addr = rmap_item->address;
670 struct ksm_rmap_item *rmap_item;
675 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
676 if (rmap_item->hlist.next) {
678 trace_ksm_remove_rmap_item(stable_node->kpfn, rmap_item, rmap_item->mm);
683 rmap_item->mm->ksm_merging_pages--;
687 put_anon_vma(rmap_item->anon_vma);
688 rmap_item->address &= PAGE_MASK;
811 * Removing rmap_item from stable or unstable tree.
814 static void remove_rmap_item_from_tree(struct ksm_rmap_item *rmap_item)
816 if (rmap_item->address & STABLE_FLAG) {
820 stable_node = rmap_item->head;
825 hlist_del(&rmap_item->hlist);
834 rmap_item->mm->ksm_merging_pages--;
839 put_anon_vma(rmap_item->anon_vma);
840 rmap_item->head = NULL;
841 rmap_item->address &= PAGE_MASK;
843 } else if (rmap_item->address & UNSTABLE_FLAG) {
849 * if this rmap_item was inserted by this scan, rather
852 age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
855 rb_erase(&rmap_item->node,
856 root_unstable_tree + NUMA(rmap_item->nid));
858 rmap_item->address &= PAGE_MASK;
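
remove_rmap_item_from_tree() (lines 814-858) dispatches on flag bits kept in the low bits of ->address and strips them with PAGE_MASK once the item leaves a tree. Because the tracked address is page aligned, the bits below PAGE_SIZE are free for bookkeeping: a stable/unstable flag plus a small scan sequence number. The self-contained sketch below demonstrates that encoding with assumed constant values; mm/ksm.c defines its own STABLE_FLAG, UNSTABLE_FLAG and SEQNR_MASK, whose exact values are not part of this listing.

#include <assert.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT	12
#define DEMO_PAGE_MASK	(~((1UL << DEMO_PAGE_SHIFT) - 1))

/* Assumed values: anything below PAGE_SIZE works as long as the bits don't overlap. */
#define DEMO_SEQNR_MASK		0x0ffUL	/* low bits: scan sequence number ("age") */
#define DEMO_UNSTABLE_FLAG	0x100UL	/* item is linked into an unstable tree */
#define DEMO_STABLE_FLAG	0x200UL	/* item hangs off a stable tree node */

int main(void)
{
	unsigned long seqnr = 7;		/* stands in for ksm_scan.seqnr */
	unsigned long addr = 0x7f12345000UL;	/* page-aligned user virtual address */

	/* insertion into the unstable tree, cf. lines 2057-2058 */
	unsigned long tagged = addr | DEMO_UNSTABLE_FLAG | (seqnr & DEMO_SEQNR_MASK);

	assert((tagged & DEMO_PAGE_MASK) == addr);	/* the VA is recoverable via PAGE_MASK */
	assert(tagged & DEMO_UNSTABLE_FLAG);

	/* removal: how many scan passes old is this entry? cf. line 852 */
	unsigned char age = (unsigned char)(seqnr - tagged);
	printf("age = %u\n", (unsigned)age);		/* 0: inserted during the current scan */
	return 0;
}
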
867 struct ksm_rmap_item *rmap_item = *rmap_list;
868 *rmap_list = rmap_item->rmap_list;
869 remove_rmap_item_from_tree(rmap_item);
870 free_rmap_item(rmap_item);
877 * that - an rmap_item is assigned to the stable tree after inserting ksm
1354 static int try_to_merge_with_ksm_page(struct ksm_rmap_item *rmap_item,
1357 struct mm_struct *mm = rmap_item->mm;
1362 vma = find_mergeable_vma(mm, rmap_item->address);
1371 remove_rmap_item_from_tree(rmap_item);
1374 rmap_item->anon_vma = vma->anon_vma;
1379 rmap_item, mm, err);
1393 static struct page *try_to_merge_two_pages(struct ksm_rmap_item *rmap_item,
1400 err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
1409 break_cow(rmap_item);
1988 * else insert rmap_item into the unstable tree.
1992 * tree, we insert rmap_item as a new object into the unstable tree.
1994 * This function returns pointer to rmap_item found to be identical
2001 struct ksm_rmap_item *unstable_tree_search_insert(struct ksm_rmap_item *rmap_item,
2057 rmap_item->address |= UNSTABLE_FLAG;
2058 rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
2059 DO_NUMA(rmap_item->nid = nid);
2060 rb_link_node(&rmap_item->node, parent, new);
2061 rb_insert_color(&rmap_item->node, root);
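
Lines 2060-2061 are the tail of the standard kernel rbtree insertion idiom: walk down from the root while remembering the parent and the link slot, then rb_link_node() plus rb_insert_color(). The page-content comparison KSM uses during the walk is not part of this listing, so the sketch below substitutes a plain integer key; only the linking shape is taken from the lines above.

#include <linux/rbtree.h>

struct demo_item {
	struct rb_node node;
	unsigned long key;		/* stand-in for KSM's page-content comparison */
};

/* Insert @item into @root, keeping the tree ordered by ->key. */
static void demo_tree_insert(struct rb_root *root, struct demo_item *item)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;

	while (*new) {
		struct demo_item *cur = rb_entry(*new, struct demo_item, node);

		parent = *new;
		if (item->key < cur->key)
			new = &parent->rb_left;
		else
			new = &parent->rb_right;
	}
	/* the same two calls as lines 2060-2061 */
	rb_link_node(&item->node, parent, new);
	rb_insert_color(&item->node, root);
}

Per the comment fragments at lines 1988-1994, unstable_tree_search_insert() performs the same walk but, when it finds a page with identical content already in the tree, returns the colliding rmap_item instead of inserting.
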
2068 * stable_tree_append - add another rmap_item to the linked list of
2072 static void stable_tree_append(struct ksm_rmap_item *rmap_item,
2078 * rmap_item in the right stable_node
2094 rmap_item->head = stable_node;
2095 rmap_item->address |= STABLE_FLAG;
2096 hlist_add_head(&rmap_item->hlist, &stable_node->hlist);
2098 if (rmap_item->hlist.next)
2103 rmap_item->mm->ksm_merging_pages++;
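
stable_tree_append() (lines 2068-2103) puts the item at the head of the stable node's hlist and tags it with STABLE_FLAG. The rmap_item->hlist.next test on line 2098 (mirrored on line 676 during removal) is the cheap way to ask whether the list holds at least one other item, which is what separates "sharing" from "shared" accounting. A short sketch of that hlist idiom; the counter names here are placeholders, not KSM's real statistics variables.

#include <linux/list.h>

static unsigned long demo_pages_shared;		/* placeholder: lists that gained their first item */
static unsigned long demo_pages_sharing;	/* placeholder: items added beyond the first */

struct demo_hlist_node { struct hlist_head hlist; };
struct demo_hlist_item { struct hlist_node hlist; };

static void demo_append(struct demo_hlist_node *node, struct demo_hlist_item *item)
{
	hlist_add_head(&item->hlist, &node->hlist);

	/*
	 * After adding at the head, item->hlist.next is non-NULL exactly
	 * when the list already had an entry: this item shares a page
	 * that some other item mapped first.
	 */
	if (item->hlist.next)
		demo_pages_sharing++;
	else
		demo_pages_shared++;
}
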
2113 * @rmap_item: the reverse mapping into the virtual address of this page
2115 static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_item)
2117 struct mm_struct *mm = rmap_item->mm;
2136 rmap_item->head == stable_node)
2148 if (kpage == page && rmap_item->head == stable_node) {
2153 remove_rmap_item_from_tree(rmap_item);
2159 err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
2163 * add its rmap_item to the stable tree.
2166 stable_tree_append(rmap_item, page_stable_node(kpage),
2181 if (rmap_item->oldchecksum != checksum) {
2182 rmap_item->oldchecksum = checksum;
2194 vma = find_mergeable_vma(mm, rmap_item->address);
2197 ZERO_PAGE(rmap_item->address));
2199 page_to_pfn(ZERO_PAGE(rmap_item->address)),
2200 rmap_item, mm, err);
2217 unstable_tree_search_insert(rmap_item, page, &tree_page);
2221 kpage = try_to_merge_two_pages(rmap_item, page,
2246 stable_tree_append(rmap_item, stable_node,
2259 break_cow(rmap_item);
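
The fragments from cmp_and_merge_page() (lines 2115-2259) include the checksum gate on lines 2181-2182: if the page's checksum changed since the last scan, remember the new value and skip the expensive tree work this round, because only pages whose contents hold still are worth merging. The sketch below mirrors that intent in plain C; the checksum function is an illustrative placeholder, not whatever page checksum helper mm/ksm.c actually uses.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Placeholder checksum (FNV-1a), purely illustrative. */
static uint32_t demo_checksum(const uint8_t *page, size_t len)
{
	uint32_t h = 2166136261u;

	for (size_t i = 0; i < len; i++)
		h = (h ^ page[i]) * 16777619u;
	return h;
}

/*
 * A page whose checksum differs from the one recorded on the previous
 * scan is still being written to: update the record and report it as
 * not yet worth searching the trees for (cf. lines 2181-2182).
 */
static bool demo_page_held_still(uint32_t *oldchecksum, const uint8_t *page, size_t len)
{
	uint32_t checksum = demo_checksum(page, len);

	if (*oldchecksum != checksum) {
		*oldchecksum = checksum;
		return false;
	}
	return true;
}
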
2283 struct ksm_rmap_item *rmap_item;
2286 rmap_item = *rmap_list;
2287 if ((rmap_item->address & PAGE_MASK) == addr)
2288 return rmap_item;
2289 if (rmap_item->address > addr)
2291 *rmap_list = rmap_item->rmap_list;
2292 remove_rmap_item_from_tree(rmap_item);
2293 free_rmap_item(rmap_item);
2296 rmap_item = alloc_rmap_item();
2297 if (rmap_item) {
2299 rmap_item->mm = mm_slot->slot.mm;
2300 rmap_item->mm->ksm_rmap_items++;
2301 rmap_item->address = addr;
2302 rmap_item->rmap_list = *rmap_list;
2303 *rmap_list = rmap_item;
2305 return rmap_item;
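
get_next_rmap_item() (lines 2283-2305) maintains the per-mm rmap_list with the classic pointer-to-pointer idiom: *rmap_list is always the link being examined, stale items whose address fell below the one being scanned are unlinked and freed in place, and a new item is spliced in without special-casing the head of the list. A self-contained userspace sketch of that idiom, with an integer standing in for the page-aligned address and free() standing in for the tree removal plus free_rmap_item() pair on lines 2292-2293:

#include <stdlib.h>

struct demo_list_item {
	struct demo_list_item *next;	/* plays the role of ->rmap_list */
	unsigned long addr;
};

/*
 * Return the item for @addr from an address-sorted singly-linked list:
 * reuse it if it is the next surviving entry, drop lower-address leftovers
 * on the way, and allocate a fresh item otherwise.  @link always points at
 * the pointer to rewrite, so head and middle insertions share one code path.
 */
static struct demo_list_item *demo_get_next_item(struct demo_list_item **link,
						 unsigned long addr)
{
	struct demo_list_item *item;

	while (*link) {
		item = *link;
		if (item->addr == addr)
			return item;		/* still present: reuse it */
		if (item->addr > addr)
			break;			/* sorted list: insert before this entry */
		*link = item->next;		/* stale entry: unlink ... */
		free(item);			/* ... and release it */
	}

	item = calloc(1, sizeof(*item));
	if (item) {
		item->addr = addr;
		item->next = *link;		/* splice in ahead of the larger entry */
		*link = item;
	}
	return item;
}
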
2314 struct ksm_rmap_item *rmap_item;
2407 rmap_item = get_next_rmap_item(mm_slot,
2409 if (rmap_item) {
2411 &rmap_item->rmap_list;
2416 return rmap_item;
2487 struct ksm_rmap_item *rmap_item;
2493 rmap_item = scan_get_next_rmap_item(&page);
2494 if (!rmap_item)
2496 cmp_and_merge_page(page, rmap_item);
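
Lines 2487-2496 are the scanner's inner loop: fetch the next rmap_item and its page from scan_get_next_rmap_item(), then hand the pair to cmp_and_merge_page(). The sketch below shows the assumed shape of that driver as it would sit inside mm/ksm.c; the batch counter, the cond_resched() and the put_page() are reasonable guesses, not lines from this listing.

#include <linux/mm.h>
#include <linux/sched.h>

static void demo_do_scan(unsigned int scan_npages)
{
	struct ksm_rmap_item *rmap_item;
	struct page *page;

	while (scan_npages--) {
		cond_resched();			/* long scans must stay preemptible */
		rmap_item = scan_get_next_rmap_item(&page);
		if (!rmap_item)
			return;			/* the whole scan pass is finished */
		cmp_and_merge_page(page, rmap_item);
		put_page(page);			/* assumed: the getter returned a referenced page */
	}
}
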
2841 struct ksm_rmap_item *rmap_item;
2856 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
2857 struct anon_vma *anon_vma = rmap_item->anon_vma;
2877 addr = rmap_item->address & PAGE_MASK;
2883 * rmap_item; but later, if there is still work to do,
2887 if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
2916 struct ksm_rmap_item *rmap_item;
2924 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
2925 struct anon_vma *av = rmap_item->anon_vma;
2941 addr = rmap_item->address & PAGE_MASK;
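
The last two groups of fragments, rmap_walk_ksm() around line 2841 and the memory-failure walker around line 2916, both reverse the mapping: starting from a stable node they iterate its hlist of rmap_items, and from each item's anon_vma they find the VMAs that may map the page at address & PAGE_MASK. The sketch below keeps only that two-level iteration shape; the visitor callback and the stand-in stable-node type are simplifications, it ignores the forking subtleties hinted at on lines 2883-2887, and it reuses the structure sketch from the top of this listing.

#include <linux/limits.h>
#include <linux/mm.h>
#include <linux/rmap.h>

/* Minimal stand-in for the file's stable-node type: just the list head we walk. */
struct demo_stable_node {
	struct hlist_head hlist;
};

typedef bool (*demo_visit_t)(struct vm_area_struct *vma, unsigned long addr, void *arg);

static void demo_walk_stable_node(struct demo_stable_node *stable_node,
				  demo_visit_t visit, void *arg)
{
	struct ksm_rmap_item *rmap_item;

	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
		struct anon_vma *anon_vma = rmap_item->anon_vma;
		struct anon_vma_chain *vmac;
		unsigned long addr = rmap_item->address & PAGE_MASK;
		bool stop = false;

		anon_vma_lock_read(anon_vma);
		anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
					       0, ULONG_MAX) {
			struct vm_area_struct *vma = vmac->vma;

			/* skip VMAs that cannot contain this virtual address */
			if (addr < vma->vm_start || addr >= vma->vm_end)
				continue;
			if (!visit(vma, addr, arg)) {
				stop = true;
				break;
			}
		}
		anon_vma_unlock_read(anon_vma);
		if (stop)
			return;
	}
}
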