Lines Matching defs:memmap

317 * Decode mem_map from the coded memmap
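
The "coded" value at line 317 is worth unpacking. Below is a minimal userspace sketch of the idea, assuming the usual scheme: the stored word is the memmap pointer biased by the section's first pfn, with the always-zero low bits of that pointer reused as section flags. The names, flag width and struct page layout here are simplified stand-ins, not the kernel's exact definitions.

#include <stdio.h>
#include <stdlib.h>

struct page { unsigned long flags; unsigned long pad[7]; };  /* 64-byte stand-in */

#define PFN_SECTION_SHIFT   15                      /* 2^15 pages per section */
#define PAGES_PER_SECTION   (1UL << PFN_SECTION_SHIFT)
#define SECTION_MAP_MASK    (~7UL)                  /* low 3 bits carry flags */

static unsigned long encode_mem_map(struct page *memmap, unsigned long pnum)
{
        /* Bias by the section's first pfn so decoding is a plain addition. */
        return (unsigned long)(memmap - (pnum << PFN_SECTION_SHIFT));
}

static struct page *decode_mem_map(unsigned long coded, unsigned long pnum)
{
        coded &= SECTION_MAP_MASK;                  /* mask off the flag bits */
        return ((struct page *)coded) + (pnum << PFN_SECTION_SHIFT);
}

int main(void)
{
        unsigned long pnum = 3;                     /* arbitrary section number */
        struct page *memmap = calloc(PAGES_PER_SECTION, sizeof(struct page));
        unsigned long coded = encode_mem_map(memmap, pnum) | 0x1; /* e.g. a "present" flag */

        printf("round trip ok: %d\n", decode_mem_map(coded, pnum) == memmap);
        free(memmap);
        return 0;
}

With that bias in place, turning a pfn back into its struct page reduces to one addition on the stored word, which is why the decode helper used at line 830 below needs only the coded word and the section number.
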
481 * especially the case for VMEMMAP which maps memmap to PMDs
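
For scale, assuming the common x86_64 defaults (128 MiB sections, 4 KiB pages, a 64-byte struct page): a section spans 32768 pages, so its memmap is 32768 × 64 B = 2 MiB, exactly one PMD-sized mapping, which is what makes the PMD mapping mentioned at line 481 a natural fit.
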
665 static void free_map_bootmem(struct page *memmap)
667 unsigned long start = (unsigned long)memmap;
668 unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
735 static void free_map_bootmem(struct page *memmap)
739 struct page *page = virt_to_page(memmap);
755 * from page allocator. If removing section's memmap is placed
802 struct page *memmap = NULL;
830 memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
834 * The memmap of early sections is always fully populated. See
839 else if (memmap)
840 free_map_bootmem(memmap);
851 struct page *memmap;
873 * do not need to populate the memmap and can simply reuse what
879 memmap = populate_section_memmap(pfn, nr_pages, nid, altmap);
880 if (!memmap) {
885 return memmap;
911 struct page *memmap;
918 memmap = section_activate(nid, start_pfn, nr_pages, altmap);
919 if (IS_ERR(memmap))
920 return PTR_ERR(memmap);
926 page_init_poison(memmap, sizeof(struct page) * nr_pages);
932 /* Align memmap to section boundary in the subsection case */
934 memmap = pfn_to_page(section_nr_to_pfn(section_nr));
935 sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);
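
The call pair at lines 918-920 relies on the kernel's error-pointer convention: section_activate() hands back either a usable memmap pointer or, on its failure paths (not visible in this match list), an ERR_PTR-encoded errno, and the caller tests IS_ERR() and unwraps with PTR_ERR(). Here is a simplified userspace sketch of that convention, with a hypothetical activate() standing in for section_activate(); the macros mimic include/linux/err.h but are not the kernel's exact definitions.

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO   4095

static inline void *ERR_PTR(long error)
{
        /* Smuggle an errno into the last page of the address space. */
        return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
        return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical stand-in for section_activate(): fail when asked to. */
static void *activate(int fail)
{
        return fail ? ERR_PTR(-ENOMEM) : (void *)0x1000UL;
}

int main(void)
{
        void *p = activate(1);

        if (IS_ERR(p))                                   /* mirrors line 919 */
                printf("activate failed: %ld\n", PTR_ERR(p)); /* mirrors line 920 */
        return 0;
}

Folding the error into the pointer keeps the function to a single return value; the cost is that every caller must remember the IS_ERR() check before dereferencing.
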
941 static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
947 * num_poisoned_pages. But that would need more space per memmap, so
955 if (PageHWPoison(&memmap[i])) {
957 ClearPageHWPoison(&memmap[i]);
962 static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
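
Lines 941-957 show the teardown-side poison accounting: every page in the section's memmap that is still marked hardware-poisoned is subtracted from num_poisoned_pages before the memmap goes away, and line 962 is the no-op stub used when memory-failure handling is compiled out. A stripped-down sketch of that loop, with a plain flag bit and counter standing in for the kernel's page flag and atomic counter:

#include <stdio.h>

#define PG_hwpoison     (1UL << 0)          /* stand-in flag bit */

struct page { unsigned long flags; };

static long num_poisoned_pages;             /* stand-in for the atomic counter */

static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
        int i;

        for (i = 0; i < nr_pages; i++) {
                if (memmap[i].flags & PG_hwpoison) {
                        num_poisoned_pages--;   /* the kernel uses an atomic here */
                        memmap[i].flags &= ~PG_hwpoison;
                }
        }
}

int main(void)
{
        struct page pages[4] = { { PG_hwpoison }, { 0 }, { PG_hwpoison }, { 0 } };

        num_poisoned_pages = 2;
        clear_hwpoisoned_pages(pages, 4);
        printf("remaining poisoned: %ld\n", num_poisoned_pages);  /* prints 0 */
        return 0;
}
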