Lines Matching defs:memmap (each hit is the source line number followed by the matching line; the hits appear to come from the kernel's mm/sparse.c)
290 * Decode mem_map from the coded memmap
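
The "coded" value is mem_section::section_mem_map, which packs a struct page pointer together with state bits (SECTION_MARKED_PRESENT, SECTION_HAS_MEM_MAP, ...) in its low bits. A minimal sketch of the decode step, assuming the shape of the upstream helper: mask off the flag bits, then re-add the section's first pfn that the encode side subtracted, so plain memmap[pfn] indexing works afterwards.

    struct page * __meminit sparse_decode_mem_map(unsigned long coded_mem_map,
                                                  unsigned long pnum)
    {
            /* mask off the extra low bits of information (state flags) */
            coded_mem_map &= SECTION_MAP_MASK;
            /* undo the encode-side bias of -section_nr_to_pfn(pnum) */
            return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
    }
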
464 * especially the case for VMEMMAP which maps memmap to PMDs
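
The alignment comment at line 464 is backed by simple arithmetic: with the common x86-64 configuration (128 MiB sections, 4 KiB base pages, 64-byte struct page), one section's memmap is 32768 pages * 64 B = 2 MiB, exactly one PMD. A section-aligned memmap buffer therefore lets the vmemmap be mapped with a single 2 MiB huge entry per section instead of 512 individual PTEs; these figures are illustrative and configuration-dependent.
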
646 static void free_map_bootmem(struct page *memmap)
648 unsigned long start = (unsigned long)memmap;
649 unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
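
Lines 646-649 are the CONFIG_SPARSEMEM_VMEMMAP flavor of free_map_bootmem(): compute the virtual range covering one section's memmap and hand it to the vmemmap teardown path. A sketch, assuming the vmemmap_free() call used upstream:

    #ifdef CONFIG_SPARSEMEM_VMEMMAP
    static void free_map_bootmem(struct page *memmap)
    {
            unsigned long start = (unsigned long)memmap;
            unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

            /* unmap the range and free its backing pages */
            vmemmap_free(start, end, NULL);
    }
    #endif
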
717 static void free_map_bootmem(struct page *memmap)
721 struct page *page = virt_to_page(memmap);
737 * from page allocator. If removing section's memmap is placed
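
Lines 717-737 are the classic (non-vmemmap) flavor, where the memmap was carved out of bootmem: walk the physical pages backing it via virt_to_page() and release each with put_page_bootmem(). The truncated comment at line 737 explains the one exception: a backing page that sits inside the very section being removed must be skipped, or the page allocator could hand out memory that is about to vanish physically. A sketch of the core loop, assuming the upstream helpers (the bootmem-type "magic" bookkeeping is elided here and varies by kernel version):

    static void free_map_bootmem(struct page *memmap)
    {
            unsigned long maps_section_nr, removing_section_nr, i;
            unsigned long nr_pages;
            struct page *page = virt_to_page(memmap);

            nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
                            >> PAGE_SHIFT;

            for (i = 0; i < nr_pages; i++, page++) {
                    maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
                    removing_section_nr = page_private(page);

                    /* never free a page that backs its own section's memmap */
                    if (maps_section_nr != removing_section_nr)
                            put_page_bootmem(page);
            }
    }
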
784 struct page *memmap = NULL;
812 memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
816 * The memmap of early sections is always fully populated. See
821 else if (memmap)
822 free_map_bootmem(memmap);
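
Lines 784-822 are the free side in section_deactivate(): the memmap pointer is recovered from the coded section_mem_map, then released along whichever path matches its origin. A sketch of the tail, assuming the upstream section_is_early flag and the depopulate_section_memmap() helper:

    /*
     * The memmap of early sections is always fully populated, so an
     * early section's memmap is returned to bootmem as a whole, while
     * a hot-added one is depopulated (possibly per subsection).
     */
    if (!section_is_early)
            depopulate_section_memmap(pfn, nr_pages, altmap);
    else if (memmap)
            free_map_bootmem(memmap);
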
834 struct page *memmap;
856 * do not need to populate the memmap and can simply reuse what
862 memmap = populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);
863 if (!memmap) {
868 return memmap;
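
Lines 834-868 are section_activate(). The comment at line 856 covers the subsection case: a hot-add that lands inside an early (boot-initialized) section finds its memmap already fully populated and simply reuses it; only a genuinely new range allocates one. A sketch of that decision, assuming the upstream early_section() test and ERR_PTR-based unwind:

    /* partial add into a boot-time section: its memmap already exists */
    if (nr_pages < PAGES_PER_SECTION && early_section(ms))
            return pfn_to_page(pfn);

    memmap = populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);
    if (!memmap) {
            section_deactivate(pfn, nr_pages, altmap);
            return ERR_PTR(-ENOMEM);
    }
    return memmap;
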
876 * @altmap: alternate pfns to allocate the memmap backing store
896 struct page *memmap;
903 memmap = section_activate(nid, start_pfn, nr_pages, altmap, pgmap);
904 if (IS_ERR(memmap))
905 return PTR_ERR(memmap);
911 page_init_poison(memmap, sizeof(struct page) * nr_pages);
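
The poison call at line 911 fills the brand-new struct pages with an all-ones byte pattern so that any read of a field before proper initialization trips debug checks in an obvious way. A sketch of the helper, assuming the mm/debug.c implementation gated by CONFIG_DEBUG_VM and a runtime switch:

    void page_init_poison(struct page *page, size_t size)
    {
            /* 0xff bytes: flags/refcount reads before init look absurd */
            if (page_init_poisoning)
                    memset(page, PAGE_POISON_PATTERN, size);
    }
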
917 /* Align memmap to section boundary in the subsection case */
919 memmap = pfn_to_page(section_nr_to_pfn(section_nr));
920 sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);
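
Lines 917-920 close out sparse_add_section(): section_activate() returned a memmap for start_pfn, but the mem_section entry must point at the memmap of the section's first pfn to keep pfn_to_page() arithmetic uniform, so for an unaligned (subsection) hot-add the pointer is re-derived before being published. A sketch of the guard around the lines shown, assuming the upstream comparison:

    /* Align memmap to section boundary in the subsection case */
    if (section_nr_to_pfn(section_nr) != start_pfn)
            memmap = pfn_to_page(section_nr_to_pfn(section_nr));
    sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);

    return 0;
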