Lines matching refs: usage
235 subsection_mask_set(ms->usage->subsection_map, pfn, pfns);
329 struct mem_section_usage *usage, unsigned long flags)
334 ms->usage = usage;
352 struct mem_section_usage *usage;
369 usage = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid);
370 if (!usage && limit) {
374 return usage;
378 struct mem_section_usage *usage)
392 usemap_snr = pfn_to_section_nr(__pa(usage) >> PAGE_SHIFT);
428 struct mem_section_usage *usage)
527 struct mem_section_usage *usage;
531 usage = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nid),
533 if (!usage) {
553 check_usemap_section_nr(nid, usage);
554 sparse_init_one_section(__nr_to_section(pnum), pnum, map, usage,
556 usage = (void *) usage + mem_section_usage_size();
678 unsigned long *subsection_map = ms->usage
679 ? &ms->usage->subsection_map[0] : NULL;
696 return bitmap_empty(&ms->usage->subsection_map[0],
709 subsection_map = &ms->usage->subsection_map[0];
793 * usage map, but still need to free the vmemmap range.
815 * ms->usage array.
820 * When removing an early section, the usage map is kept (as the
821 * usage maps of other sections fall into the same page). It
823 * longer an early section. If the usage map is PageReserved, it
826 if (!PageReserved(virt_to_page(ms->usage))) {
827 kfree_rcu(ms->usage, rcu);
828 WRITE_ONCE(ms->usage, NULL);
850 struct mem_section_usage *usage = NULL;
854 if (!ms->usage) {
855 usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
856 if (!usage)
858 ms->usage = usage;
863 if (usage)
864 ms->usage = NULL;
865 kfree(usage);
935 sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);