Lines Matching defs:section (mm/sparse.c)
64 struct mem_section *section = NULL;
69 section = kzalloc_node(array_size, GFP_KERNEL, nid);
71 section = memblock_alloc_node(array_size, SMP_CACHE_BYTES,
73 if (!section)
78 return section;
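
The hits at 64-78 above look like the body of sparse_index_alloc() in mm/sparse.c. A hedged reconstruction of the surrounding function (CONFIG_SPARSEMEM_EXTREME variant of a recent kernel; the panic message and exact spacing may differ by version) shows why both allocators appear: kzalloc_node() once the slab allocator is up, memblock during early boot.

static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available()) {
		/* Late (hotplug-time) allocation: slab is usable. */
		section = kzalloc_node(array_size, GFP_KERNEL, nid);
	} else {
		/* Early boot: fall back to memblock. */
		section = memblock_alloc_node(array_size, SMP_CACHE_BYTES,
					      nid);
		if (!section)
			panic("%s: Failed to allocate %lu bytes nid=%d\n",
			      __func__, array_size, nid);
	}

	return section;
}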
84 struct mem_section *section;
87 * An existing section is possible in the sub-section hotplug
89 * the existing section.
96 section = sparse_index_alloc(nid);
97 if (!section)
100 mem_section[root] = section;
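
Likewise, the hits at 84-100 correspond to sparse_index_init(). A sketch of that function (again assuming the CONFIG_SPARSEMEM_EXTREME variant) shows how the per-root mem_section array is installed only once, with sub-section hotplug re-using an existing root:

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;

	/*
	 * An existing section is possible in the sub-section hotplug
	 * case.  First hot-add instantiates, follow-on hot-add re-uses
	 * the existing section.
	 *
	 * The mem_hotplug_lock resolves the apparent race below.
	 */
	if (mem_section[root])
		return 0;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return 0;
}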
139 * mem_map, we use section_mem_map to store the section's NUMA
148 static inline int sparse_early_nid(struct mem_section *section)
150 return (section->section_mem_map >> SECTION_NID_SHIFT);
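
The sparse_early_nid() hit at 148-150 decodes a node id that was stashed in section_mem_map before that word holds a real mem_map pointer. Its encoder is the mirror image (sketch from the same file; SECTION_NID_SHIFT comes from include/linux/mmzone.h):

static inline unsigned long sparse_encode_early_nid(int nid)
{
	/* Stash the NUMA node in the (still unused) section_mem_map word. */
	return ((unsigned long)nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}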
272 unsigned long section = pfn_to_section_nr(pfn);
275 sparse_index_init(section, nid);
276 set_section_nid(section, nid);
278 ms = __nr_to_section(section);
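
The loop fragments at 272-278 appear to come from memory_present(), which registers every section in a pfn range against its node. A condensed sketch follows (recent kernels; the present-marking helper has been renamed over time, so treat the exact calls as approximate):

static void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map) {
			/* Record the node and mark the section present. */
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_IS_ONLINE;
			__section_mark_present(ms, section);
		}
	}
}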
357 * page being freed and making a section unremovable while
359 * a pgdat can prevent a section being removed. If section A
360 * contains a pgdat and section B contains the usemap, both
362 * from the same section as the pgdat where possible to avoid
406 pr_info("node %d must be removed before remove section %ld\n",
412 * Some platforms allow un-removable section because they will just
414 * Just notify un-removable section's number here.
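
The comment fragments at 357-362 and the messages at 406-414 belong to the usemap placement checks: the usage map is allocated from the same node (and ideally the same section) as the pgdat, because either one can pin the other during hot-remove. A condensed sketch of check_usemap_section_nr(), with the duplicate-message suppression omitted (message text and details vary by version):

static void __init check_usemap_section_nr(int nid,
				struct mem_section_usage *usage)
{
	unsigned long usemap_snr, pgdat_snr;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	usemap_snr = pfn_to_section_nr(__pa(usage) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		pr_info("node %d must be removed before remove section %ld\n",
			nid, usemap_snr);
		return;
	}

	/*
	 * Some platforms allow un-removable section because they will just
	 * gather other removable memory blocks and dispatch it to the
	 * destination during hotremove.  The problem is that the memory of
	 * pgdat is hotremovable later.  Just notify un-removable section's
	 * number here.
	 */
	pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
		usemap_snr, pgdat_snr, nid);
}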
480 * and we want it to be properly aligned to the section size - this is
574 * for each and record the physical to section mapping.
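
The fragment at 574 is from the comment above sparse_init(), which walks all present sections and hands contiguous per-node ranges to sparse_init_nid() so each node's mem_map can be allocated in one pass. A condensed sketch, assuming one recent layout of the function (first_present_section_nr() and for_each_present_section_nr() are helpers local to mm/sparse.c; some setup steps are omitted):

void __init sparse_init(void)
{
	unsigned long pnum_end, pnum_begin, map_count = 1;
	int nid_begin;

	memblocks_present();

	pnum_begin = first_present_section_nr();
	nid_begin = sparse_early_nid(__nr_to_section(pnum_begin));

	for_each_present_section_nr(pnum_begin + 1, pnum_end) {
		int nid = sparse_early_nid(__nr_to_section(pnum_end));

		if (nid == nid_begin) {
			map_count++;
			continue;
		}
		/* Init node with sections in range [pnum_begin, pnum_end) */
		sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
		nid_begin = nid;
		pnum_begin = pnum_end;
		map_count = 1;
	}
	/* cover the last node */
	sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
	vmemmap_populate_print_last();
}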
686 "section already deactivated (%#lx + %ld)\n",
753 * When this function is called, the removing section is
755 * from page allocator. If removing section's memmap is placed
756 * on the same section, it must not be freed.
785 * 1. deactivation of a partial hot-added section (only possible in
787 * a) section was present at memory init.
788 * b) section was hot-added post memory init.
789 * 2. deactivation of a complete hot-added section.
790 * 3. deactivation of a complete section from memory init.
813 * Mark the section invalid so that valid_section()
820 * When removing an early section, the usage map is kept (as the
822 * will be re-used when re-adding the section - which is then no
823 * longer an early section. If the usage map is PageReserved, it
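
The fragments at 785-823 all come from the tear-down path, section_deactivate(). A condensed sketch under the assumption of a recent kernel (newer trees free the usage map via kfree_rcu(); the shape of the decision is the same): once the last subsection is cleared, the section is invalidated, its usage map freed unless it came from bootmem, and the memmap depopulated or returned to bootmem.

static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	bool section_is_early = early_section(ms);
	struct page *memmap = NULL;
	bool empty;

	if (clear_subsection_map(pfn, nr_pages))
		return;

	empty = is_subsection_map_empty(ms);
	if (empty) {
		unsigned long section_nr = pfn_to_section_nr(pfn);

		/*
		 * Mark the section invalid so that valid_section()
		 * returns false, preventing code from dereferencing
		 * ms->usage.
		 */
		ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;

		/*
		 * An early section's usage map is kept (it shares a page
		 * with other sections' usage maps) and is re-used if the
		 * section is re-added; only free it when it was not
		 * allocated from bootmem (i.e. not PageReserved).
		 */
		if (!PageReserved(virt_to_page(ms->usage))) {
			kfree(ms->usage);
			ms->usage = NULL;
		}
		memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
	}

	/* Early sections have a fully populated, boot-allocated memmap. */
	if (!section_is_early)
		depopulate_section_memmap(pfn, nr_pages, altmap);
	else if (memmap)
		free_map_bootmem(memmap);

	if (empty)
		ms->section_mem_map = (unsigned long)NULL;
}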
872 * referenced. If we hot-add memory into such a section then we
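
The fragment at 872 is the counterpart in section_activate(): hot-adding a sub-section range into an early (boot-time) section reuses the memmap that already exists rather than populating a new one. A condensed sketch, with the usage-map and subsection-map bookkeeping elided (populate_section_memmap() only takes a pgmap argument in newer kernels):

static struct page * __meminit section_activate(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	struct page *memmap;

	/* ... usage-map allocation and subsection-map bookkeeping elided ... */

	/*
	 * The boot-time memmap of an early section already covers the
	 * whole section, so a partial hot-add can simply reuse it.
	 */
	if (nr_pages < PAGES_PER_SECTION && early_section(ms))
		return pfn_to_page(pfn);

	memmap = populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);
	if (!memmap) {
		section_deactivate(pfn, nr_pages, altmap);
		return ERR_PTR(-ENOMEM);
	}

	return memmap;
}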
889 * sparse_add_section - add a memory section, or populate an existing one
890 * @nid: The node to add section on
892 * @nr_pages: number of pfns to add in the section
897 * Note that only VMEMMAP supports sub-section aligned hotplug,
932 /* Align memmap to section boundary in the subsection case */
946 * A further optimization is to have per section refcounted
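
The kernel-doc fragments at 889-897 and the alignment comment at 932 belong to sparse_add_section() itself. A hedged sketch of one recent variant follows (older kernels take no pgmap parameter, and the present-marking helper has been renamed across versions); it ties together the pieces above: sparse_index_init() for the root, section_activate() for the memmap, then the section is marked present and initialized.

int __meminit sparse_add_section(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct mem_section *ms;
	struct page *memmap;
	int ret;

	ret = sparse_index_init(section_nr, nid);
	if (ret < 0)
		return ret;

	memmap = section_activate(nid, start_pfn, nr_pages, altmap, pgmap);
	if (IS_ERR(memmap))
		return PTR_ERR(memmap);

	/*
	 * Poison uninitialized struct pages in order to catch invalid flags
	 * combinations.
	 */
	page_init_poison(memmap, sizeof(struct page) * nr_pages);

	ms = __nr_to_section(section_nr);
	set_section_nid(section_nr, nid);
	__section_mark_present(ms, section_nr);

	/* Align memmap to section boundary in the subsection case */
	if (section_nr_to_pfn(section_nr) != start_pfn)
		memmap = pfn_to_page(section_nr_to_pfn(section_nr));
	sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);

	return 0;
}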