Lines matching refs:pfn (references to the identifier "pfn")

211 static void subsection_mask_set(unsigned long *map, unsigned long pfn,
214 int idx = subsection_map_index(pfn);
215 int end = subsection_map_index(pfn + nr_pages - 1);
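
The helpers at lines 211-215 (these matches appear to come from the kernel's mm/sparse.c) reduce a pfn range to a run of bits in the per-section subsection bitmap: subsection_map_index() keeps only the pfn's offset within its section and divides by the subsection size. A minimal userspace model of that arithmetic, assuming the common x86_64 defaults (4 KiB pages, 128 MiB sections, 2 MiB subsections, so 64 subsections fit in one unsigned long); the loop stands in for the kernel's bitmap_set():

    #include <stdio.h>

    #define PAGES_PER_SECTION    (1UL << 15)   /* 128 MiB / 4 KiB */
    #define PAGE_SECTION_MASK    (~(PAGES_PER_SECTION - 1))
    #define PAGES_PER_SUBSECTION (1UL << 9)    /*   2 MiB / 4 KiB */

    /* Offset of pfn's subsection within its section (cf. subsection_map_index). */
    static int subsection_map_index(unsigned long pfn)
    {
        return (pfn & ~PAGE_SECTION_MASK) / PAGES_PER_SUBSECTION;
    }

    /* Set the bits covering [pfn, pfn + nr_pages), as lines 214-215 compute. */
    static void subsection_mask_set(unsigned long *map, unsigned long pfn,
                                    unsigned long nr_pages)
    {
        int idx = subsection_map_index(pfn);
        int end = subsection_map_index(pfn + nr_pages - 1);

        for (int i = idx; i <= end; i++)   /* kernel: bitmap_set(map, idx, end - idx + 1) */
            *map |= 1UL << i;
    }

    int main(void)
    {
        unsigned long map = 0;

        /* 4 MiB (1024 pages) starting one subsection into the section: bits 1-2 */
        subsection_mask_set(&map, PAGES_PER_SUBSECTION, 1024);
        printf("subsection_map = %#lx\n", map);   /* prints 0x6 */
        return 0;
    }
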
220 void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
222 int end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
223 unsigned long nr, start_sec = pfn_to_section_nr(pfn);
233 - (pfn & ~PAGE_SECTION_MASK));
235 subsection_mask_set(ms->usage->subsection_map, pfn, pfns);
238 pfns, subsection_map_index(pfn),
239 subsection_map_index(pfn + pfns - 1));
241 pfn += pfns;
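
subsection_map_init() (lines 220-241) walks the range one section at a time; the min() clamp whose second half appears at line 233 limits each step to whatever is left of the current section, so an unaligned range is split correctly at section boundaries. A self-contained sketch of just that split, with the same assumed constants as above:

    #include <stdio.h>

    #define PFN_SECTION_SHIFT 15
    #define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT)
    #define PAGE_SECTION_MASK (~(PAGES_PER_SECTION - 1))

    int main(void)
    {
        unsigned long pfn = 32000, nr_pages = 2000;

        while (nr_pages) {
            /* pages left in pfn's section, clamped to the request (line 233) */
            unsigned long pfns = PAGES_PER_SECTION - (pfn & ~PAGE_SECTION_MASK);
            if (pfns > nr_pages)
                pfns = nr_pages;

            printf("section %lu: mark %lu pages from pfn %lu\n",
                   pfn >> PFN_SECTION_SHIFT, pfns, pfn);
            pfn += pfns;        /* line 241: advance into the next section */
            nr_pages -= pfns;
        }
        /* prints: section 0: mark 768 pages from pfn 32000
         *         section 1: mark 1232 pages from pfn 32768 */
        return 0;
    }
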
246 void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
254 unsigned long pfn;
271 for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
272 unsigned long section = pfn_to_section_nr(pfn);
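
The loop at lines 271-272 (like the walkers at lines 614 and 633 below) advances a pfn cursor by PAGES_PER_SECTION and converts it with pfn_to_section_nr(); that conversion and its inverse section_nr_to_pfn() (used at line 539) are plain shifts by PFN_SECTION_SHIFT. A runnable model, again assuming 128 MiB sections with 4 KiB pages:

    #include <stdio.h>

    #define PFN_SECTION_SHIFT 15
    #define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT)

    static unsigned long pfn_to_section_nr(unsigned long pfn)
    {
        return pfn >> PFN_SECTION_SHIFT;    /* which section holds this pfn */
    }

    static unsigned long section_nr_to_pfn(unsigned long sec)
    {
        return sec << PFN_SECTION_SHIFT;    /* first pfn of this section */
    }

    int main(void)
    {
        unsigned long start = 1 * PAGES_PER_SECTION;
        unsigned long end   = 4 * PAGES_PER_SECTION;

        /* same shape as the loop at line 271 */
        for (unsigned long pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
            printf("pfn %lu -> section %lu (starts at pfn %lu)\n",
                   pfn, pfn_to_section_nr(pfn),
                   section_nr_to_pfn(pfn_to_section_nr(pfn)));
        return 0;
    }
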
302 * Subtle, we encode the real pfn into the mem_map such that
303 * the identity pfn - section_mem_map will return the actual
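
The comment at lines 302-303 describes the trick in sparse_encode_mem_map(): the per-section memmap pointer is stored pre-biased by the section's first pfn, so looking up a page is a single addition (coded + pfn) and recovering a pfn a single subtraction (page - coded). The kernel additionally keeps flag bits in the low bits of the stored value; this model ignores them. The out-of-bounds pointer arithmetic is formally undefined in C; the kernel relies on the flat address space, and so does this sketch:

    #include <stdio.h>

    #define PAGES_PER_SECTION (1UL << 15)

    struct page { char pad[64]; };          /* stand-in for struct page */

    int main(void)
    {
        static struct page memmap[16];      /* fake memmap: first pages of a section */
        unsigned long pnum = 3;             /* section number */
        unsigned long first_pfn = pnum * PAGES_PER_SECTION;

        /* encode: bias the pointer by the section's first pfn */
        struct page *coded = memmap - first_pfn;

        /* decode: "coded + pfn" is the page, "page - coded" is the pfn */
        unsigned long pfn = first_pfn + 5;
        struct page *page = coded + pfn;

        printf("index in memmap: %td\n", page - memmap);                 /* 5 */
        printf("recovered pfn:   %lu\n", (unsigned long)(page - coded)); /* 98309 */
        return 0;
    }
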
445 struct page __init *__populate_section_memmap(unsigned long pfn,
539 unsigned long pfn = section_nr_to_pfn(pnum);
544 map = __populate_section_memmap(pfn, PAGES_PER_SECTION,
609 /* Mark all memory sections within the pfn range as online */
612 unsigned long pfn;
614 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
615 unsigned long section_nr = pfn_to_section_nr(pfn);
628 /* Mark all memory sections within the pfn range as offline */
631 unsigned long pfn;
633 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
634 unsigned long section_nr = pfn_to_section_nr(pfn);
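
The two walkers at lines 609-634 are symmetric: each visits every section in [start_pfn, end_pfn) and sets or clears that section's online flag. In the kernel the flag lives in the low bits of mem_section.section_mem_map and invalid sections are skipped; this toy model uses a flat flag array instead:

    #include <stdio.h>

    #define PFN_SECTION_SHIFT 15
    #define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT)
    #define SECTION_IS_ONLINE 0x4UL          /* illustrative flag bit */

    static unsigned long section_flags[16];  /* toy stand-in for mem_section[] */

    /* One body for both walkers: set the flag (online) or clear it (offline). */
    static void mark_sections(unsigned long start_pfn, unsigned long end_pfn,
                              int online)
    {
        for (unsigned long pfn = start_pfn; pfn < end_pfn;
             pfn += PAGES_PER_SECTION) {
            unsigned long nr = pfn >> PFN_SECTION_SHIFT;  /* pfn_to_section_nr() */

            if (online)
                section_flags[nr] |= SECTION_IS_ONLINE;
            else
                section_flags[nr] &= ~SECTION_IS_ONLINE;
        }
    }

    int main(void)
    {
        mark_sections(2 * PAGES_PER_SECTION, 4 * PAGES_PER_SECTION, 1);
        printf("section 3 online: %d\n", !!(section_flags[3] & SECTION_IS_ONLINE));
        mark_sections(2 * PAGES_PER_SECTION, 4 * PAGES_PER_SECTION, 0);
        printf("section 3 online: %d\n", !!(section_flags[3] & SECTION_IS_ONLINE));
        return 0;
    }
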
651 static struct page * __meminit populate_section_memmap(unsigned long pfn,
654 return __populate_section_memmap(pfn, nr_pages, nid, altmap);
657 static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
660 unsigned long start = (unsigned long) pfn_to_page(pfn);
673 static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
677 struct mem_section *ms = __pfn_to_section(pfn);
681 subsection_mask_set(map, pfn, nr_pages);
687 pfn, nr_pages))
700 static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
702 struct mem_section *ms = __pfn_to_section(pfn);
707 subsection_mask_set(map, pfn, nr_pages);
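
clear_subsection_map() and fill_subsection_map() (lines 673-707) share a pattern: build a mask for the requested range with subsection_mask_set() (lines 681 and 707), then merge it with the section's stored bitmap. Fill fails if any requested subsection is already populated; clear fails if part of the range was never populated. A toy model of those semantics with single-word bitmaps:

    #include <stdio.h>
    #include <errno.h>

    /* Single-word mask for subsections idx..end (cf. subsection_mask_set()). */
    static unsigned long range_mask(int idx, int end)
    {
        unsigned long m = 0;
        for (int i = idx; i <= end; i++)
            m |= 1UL << i;
        return m;
    }

    /* Mirrors fill_subsection_map(): reject overlap, otherwise OR the bits in. */
    static int fill(unsigned long *map, unsigned long mask)
    {
        if (!mask)
            return -EINVAL;
        if (*map & mask)            /* bitmap_intersects(): already populated */
            return -EEXIST;
        *map |= mask;               /* bitmap_or() */
        return 0;
    }

    /* Mirrors clear_subsection_map(): whole range must be present, then clear. */
    static int clear(unsigned long *map, unsigned long mask)
    {
        if ((*map & mask) != mask)  /* part of the range was never activated */
            return -EINVAL;
        *map &= ~mask;
        return 0;
    }

    int main(void)
    {
        unsigned long map = 0;

        printf("fill 0-3:  %d\n", fill(&map, range_mask(0, 3)));   /* 0 */
        printf("fill 2-5:  %d\n", fill(&map, range_mask(2, 5)));   /* -EEXIST */
        printf("clear 0-3: %d\n", clear(&map, range_mask(0, 3)));  /* 0 */
        return 0;
    }
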
722 struct page * __meminit populate_section_memmap(unsigned long pfn,
729 static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
732 kvfree(pfn_to_page(pfn));
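
Two configurations provide this populate/depopulate pair. With CONFIG_SPARSEMEM_VMEMMAP (lines 651-660) population goes through the architecture's vmemmap and teardown works on the virtual range starting at pfn_to_page(pfn) (line 660). Without it (lines 722-732) the memmap is an ordinary allocation of PAGES_PER_SECTION struct page entries, freed with kvfree() at line 732. A userspace model of the simple case (struct page is commonly 64 bytes):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGES_PER_SECTION (1UL << 15)

    struct page { char pad[64]; };      /* stand-in; 64 bytes is typical */

    /* Model of the non-vmemmap populate: one flat array per section. */
    static struct page *populate_section_memmap(void)
    {
        return calloc(PAGES_PER_SECTION, sizeof(struct page));
    }

    int main(void)
    {
        struct page *memmap = populate_section_memmap();

        if (!memmap)
            return 1;
        printf("memmap for one section: %lu KiB\n",
               PAGES_PER_SECTION * sizeof(struct page) / 1024);  /* 2048 KiB */
        free(memmap);                   /* cf. kvfree(pfn_to_page(pfn)), line 732 */
        return 0;
    }
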
765 static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
775 static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
797 static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
800 struct mem_section *ms = __pfn_to_section(pfn);
805 if (clear_subsection_map(pfn, nr_pages))
810 unsigned long section_nr = pfn_to_section_nr(pfn);
838 depopulate_section_memmap(pfn, nr_pages, altmap);
846 static struct page * __meminit section_activate(int nid, unsigned long pfn,
849 struct mem_section *ms = __pfn_to_section(pfn);
861 rc = fill_subsection_map(pfn, nr_pages);
877 return pfn_to_page(pfn);
879 memmap = populate_section_memmap(pfn, nr_pages, nid, altmap);
881 section_deactivate(pfn, nr_pages, altmap);
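
section_deactivate() and section_activate() (lines 797-881) are ordered as a pair: activate claims subsections in the bitmap first (line 861), only then populates the memmap (line 879), and unwinds through section_deactivate() (line 881) if population fails; deactivate releases subsections (line 805) and frees the memmap only once the section is empty (line 838). A toy, self-contained model of that ordering; every name here is a stand-in, not kernel API:

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>

    static unsigned long subsection_map;    /* toy ms->usage->subsection_map */
    static void *section_memmap;            /* toy per-section memmap */

    static int fill(unsigned long mask)     /* cf. fill_subsection_map() */
    {
        if (!mask || (subsection_map & mask))
            return -EEXIST;
        subsection_map |= mask;
        return 0;
    }

    static void deactivate(unsigned long mask)  /* cf. section_deactivate() */
    {
        subsection_map &= ~mask;
        if (!subsection_map) {              /* last subsection gone: free memmap */
            free(section_memmap);
            section_memmap = NULL;
        }
    }

    static int activate(unsigned long mask, int fail_populate)
    {
        int rc = fill(mask);                /* 1. claim the subsections */
        if (rc)
            return rc;
        if (!section_memmap) {              /* 2. populate the memmap if needed */
            section_memmap = fail_populate ? NULL : malloc(4096);
            if (!section_memmap) {
                deactivate(mask);           /* 3. unwind the claim on failure */
                return -ENOMEM;
            }
        }
        return 0;
    }

    int main(void)
    {
        int rc = activate(0x0f, 1);         /* population fails: claim undone */
        printf("failed activate: %d, map=%#lx\n", rc, subsection_map);

        printf("activate 0x0f:   %d\n", activate(0x0f, 0));
        printf("overlap 0x3c:    %d\n", activate(0x3c, 0));   /* -EEXIST */

        deactivate(0x0f);
        printf("memmap freed:    %d\n", section_memmap == NULL);
        return 0;
    }
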
891 * @start_pfn: start pfn of the memory range
967 void sparse_remove_section(struct mem_section *ms, unsigned long pfn,
971 clear_hwpoisoned_pages(pfn_to_page(pfn) + map_offset,
973 section_deactivate(pfn, nr_pages, altmap);
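
sparse_remove_section() (lines 967-973) tears down in a fixed order: first clear_hwpoisoned_pages() walks the affected pages (offset by map_offset to skip altmap-backed ones, line 971) and rebalances the global poison counter, then section_deactivate() releases the section (line 973). A toy model of the accounting step; the flag and counter are stand-ins for the kernel's PageHWPoison() and num_poisoned_pages:

    #include <stdio.h>

    struct page { int hwpoison; };          /* stand-in for the page flag */
    static long num_poisoned_pages;         /* stand-in for the global counter */

    /* cf. clear_hwpoisoned_pages(): runs before section_deactivate() */
    static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
    {
        for (int i = 0; i < nr_pages; i++) {
            if (memmap[i].hwpoison) {
                memmap[i].hwpoison = 0;
                num_poisoned_pages--;       /* keep the counter balanced */
            }
        }
    }

    int main(void)
    {
        struct page memmap[8] = { { 0 } };

        memmap[3].hwpoison = 1;
        num_poisoned_pages = 1;

        clear_hwpoisoned_pages(memmap, 8);
        printf("poisoned pages left: %ld\n", num_poisoned_pages);  /* 0 */
        return 0;
    }
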