Lines matching refs:nr_pages in mm/memory_hotplug.c

60 unsigned long nr_pages = PFN_UP(memory_block_memmap_size());
69 return pageblock_align(nr_pages);
70 return nr_pages;
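
The first three hits (lines 60-70) are the memmap_on_memory sizing helper: the memmap's byte size is rounded up to whole pages with PFN_UP(), and the page count is then padded to a pageblock boundary when the mode requires it. A minimal userspace sketch of that arithmetic, assuming 4 KiB pages and a 512-page pageblock (both are config/arch dependent, and PAGEBLOCK_ALIGN here is a hand-rolled stand-in for the kernel's pageblock_align()):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
/* Mirrors include/linux/pfn.h: round a byte count up to whole pages. */
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
/* Assumed pageblock size: 512 pages (typical on x86-64). */
#define PAGEBLOCK_NR_PAGES	(1UL << 9)
/* Round a page count up to the next pageblock boundary. */
#define PAGEBLOCK_ALIGN(pfn) \
	(((pfn) + PAGEBLOCK_NR_PAGES - 1) & ~(PAGEBLOCK_NR_PAGES - 1))

int main(void)
{
	unsigned long memmap_bytes = 786432;	/* example memmap size */
	unsigned long nr_pages = PFN_UP(memmap_bytes);	/* 192 pages */

	printf("pages: %lu, pageblock-aligned: %lu\n",
	       nr_pages, PAGEBLOCK_ALIGN(nr_pages));	/* 192, 512 */
	return 0;
}
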
300 static int check_pfn_span(unsigned long pfn, unsigned long nr_pages)
317 if (!IS_ALIGNED(pfn | nr_pages, min_align))
370 int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
373 const unsigned long end_pfn = pfn + nr_pages;
381 VM_BUG_ON(!mhp_range_allowed(PFN_PHYS(pfn), nr_pages * PAGE_SIZE, false));
388 || vmem_altmap_offset(altmap) > nr_pages) {
395 if (check_pfn_span(pfn, nr_pages)) {
396 WARN(1, "Misaligned %s start: %#lx end: %#lx\n", __func__, pfn, pfn + nr_pages - 1);
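
check_pfn_span() (lines 300-317) validates a start PFN and a length with a single test: OR-ing pfn and nr_pages preserves any low bit set in either value, so one IS_ALIGNED() rejects a misaligned start or a misaligned length, and both __add_pages() and __remove_pages() WARN on it. A sketch of the trick, returning bool where the kernel returns -EINVAL, and with min_align pinned to an assumed 512-page subsection instead of being chosen per configuration:

#include <stdio.h>
#include <stdbool.h>

/* Mirrors include/linux/align.h. */
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)
/* Assumed constant; the kernel picks PAGES_PER_SUBSECTION or
 * PAGES_PER_SECTION depending on CONFIG_SPARSEMEM_VMEMMAP. */
#define MIN_ALIGN	(1UL << 9)

static bool check_pfn_span(unsigned long pfn, unsigned long nr_pages)
{
	/* A low bit set in either operand survives the OR, so one
	 * test rejects a bad start *or* a bad length. */
	return !IS_ALIGNED(pfn | nr_pages, MIN_ALIGN);
}

int main(void)
{
	printf("%d\n", check_pfn_span(0x200, 0x200));	/* 0: both aligned */
	printf("%d\n", check_pfn_span(0x201, 0x200));	/* 1: start misaligned */
	printf("%d\n", check_pfn_span(0x200, 0x1ff));	/* 1: length misaligned */
	return 0;
}
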
530 unsigned long nr_pages)
532 const unsigned long end_pfn = start_pfn + nr_pages;
557 shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
566 * @nr_pages: number of pages to remove (must be multiple of section size)
574 void __remove_pages(unsigned long pfn, unsigned long nr_pages,
577 const unsigned long end_pfn = pfn + nr_pages;
580 if (check_pfn_span(pfn, nr_pages)) {
581 WARN(1, "Misaligned %s start: %#lx end: %#lx\n", __func__, pfn, pfn + nr_pages - 1);
645 static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages)
647 const unsigned long end_pfn = start_pfn + nr_pages;
682 static void node_states_check_changes_online(unsigned long nr_pages,
706 unsigned long nr_pages)
713 zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;
717 unsigned long nr_pages)
724 pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
751 unsigned long nr_pages,
760 init_currently_empty_zone(zone, start_pfn, nr_pages);
761 resize_zone_range(zone, start_pfn, nr_pages);
762 resize_pgdat_range(pgdat, start_pfn, nr_pages);
773 if (!IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION))
774 section_taint_zone_device(start_pfn + nr_pages);
783 memmap_init_range(nr_pages, nid, zone_idx(zone), start_pfn, 0,
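
Lines 706-724 grow the zone and node spans when a PFN range moves in: the start may only move down, and the new end is max(start_pfn + nr_pages, old end). A sketch of the zone-side arithmetic on a pared-down struct zone (the pgdat side at line 724 has the same shape):

#include <stdio.h>

/* Pared-down struct zone: just the span fields. */
struct zone {
	unsigned long zone_start_pfn;
	unsigned long spanned_pages;
};

static unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

/* Mirrors the resize_zone_range() arithmetic in the listing:
 * extend the span to cover [start_pfn, start_pfn + nr_pages). */
static void resize_zone_range(struct zone *zone, unsigned long start_pfn,
			      unsigned long nr_pages)
{
	unsigned long old_end_pfn = zone_end_pfn(zone);

	if (zone->spanned_pages == 0 || start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = (start_pfn + nr_pages > old_end_pfn ?
			       start_pfn + nr_pages : old_end_pfn) -
			      zone->zone_start_pfn;
}

int main(void)
{
	struct zone z = { .zone_start_pfn = 0x1000, .spanned_pages = 0x800 };

	resize_zone_range(&z, 0x2000, 0x400);	/* hotplug above the old end */
	printf("start %#lx spanned %#lx\n",
	       z.zone_start_pfn, z.spanned_pages);	/* 0x1000, 0x1400 */
	return 0;
}
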
844 unsigned long nr_pages)
888 movable_pages += nr_pages;
898 unsigned long nr_pages)
906 if (zone_intersects(zone, start_pfn, nr_pages))
966 unsigned long nr_pages)
981 } else if (!group || group->d.unit_pages == nr_pages) {
982 max_pages = nr_pages;
1009 nr_pages = max_pages - online_pages;
1010 if (!auto_movable_can_online_movable(NUMA_NO_NODE, group, nr_pages))
1015 !auto_movable_can_online_movable(nid, group, nr_pages))
1021 return default_kernel_zone_for_pfn(nid, pfn, nr_pages);
1025 unsigned long nr_pages)
1028 nr_pages);
1030 bool in_kernel = zone_intersects(kernel_zone, start_pfn, nr_pages);
1031 bool in_movable = zone_intersects(movable_zone, start_pfn, nr_pages);
1050 unsigned long nr_pages)
1053 return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages);
1059 return auto_movable_zone_for_pfn(nid, group, start_pfn, nr_pages);
1061 return default_zone_for_pfn(nid, start_pfn, nr_pages);
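
The zone-selection hits (lines 906, 1030-1031) lean on zone_intersects(), a half-open interval-overlap test against the zone span; default_zone_for_pfn() uses it at lines 1030-1031 to decide whether the range already falls inside a kernel zone or ZONE_MOVABLE. A sketch against the same pared-down struct zone as above:

#include <stdio.h>
#include <stdbool.h>

struct zone {
	unsigned long zone_start_pfn;
	unsigned long spanned_pages;
};

static unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

/* Mirrors the kernel's zone_intersects(): does [start_pfn,
 * start_pfn + nr_pages) overlap the zone's spanned range? */
static bool zone_intersects(const struct zone *zone,
			    unsigned long start_pfn, unsigned long nr_pages)
{
	if (zone->spanned_pages == 0)
		return false;
	if (start_pfn >= zone_end_pfn(zone) ||
	    start_pfn + nr_pages <= zone->zone_start_pfn)
		return false;
	return true;
}

int main(void)
{
	struct zone normal = { .zone_start_pfn = 0x1000,
			       .spanned_pages = 0x7000 };

	printf("%d\n", zone_intersects(&normal, 0x0800, 0x1000)); /* 1: straddles start */
	printf("%d\n", zone_intersects(&normal, 0x8000, 0x1000)); /* 0: begins at end */
	return 0;
}
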
1069 long nr_pages)
1079 zone->present_early_pages += nr_pages;
1080 zone->present_pages += nr_pages;
1081 zone->zone_pgdat->node_present_pages += nr_pages;
1084 group->present_movable_pages += nr_pages;
1086 group->present_kernel_pages += nr_pages;
1089 int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
1092 unsigned long end_pfn = pfn + nr_pages;
1095 ret = kasan_add_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
1099 move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_UNMOVABLE);
1101 for (i = 0; i < nr_pages; i++)
1109 if (nr_pages >= PAGES_PER_SECTION)
1115 void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages)
1117 unsigned long end_pfn = pfn + nr_pages;
1124 if (nr_pages >= PAGES_PER_SECTION)
1131 remove_pfn_range_from_zone(page_zone(pfn_to_page(pfn)), pfn, nr_pages);
1132 kasan_remove_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
1138 int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
1154 if (WARN_ON_ONCE(!nr_pages || !pageblock_aligned(pfn) ||
1155 !IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION)))
1160 move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE);
1163 arg.nr_pages = nr_pages;
1164 node_states_check_changes_online(nr_pages, zone, &arg);
1176 zone->nr_isolate_pageblock += nr_pages / pageblock_nr_pages;
1189 online_pages_range(pfn, nr_pages);
1190 adjust_present_page_count(pfn_to_page(pfn), group, nr_pages);
1197 undo_isolate_page_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE);
1224 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
1226 remove_pfn_range_from_zone(zone, pfn, nr_pages);
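
online_pages() (lines 1138-1226) enforces an asymmetric alignment at lines 1154-1155: the range must start on a pageblock but end on a full memory section, which is what lets a memmap_on_memory range begin mid-section, past the vmemmap pages. A sketch of that precondition with example geometry (512-page pageblocks, 32768-page sections; both arch dependent):

#include <stdio.h>
#include <stdbool.h>

#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)
/* Assumed example values; config/arch dependent in the kernel. */
#define PAGEBLOCK_NR_PAGES	(1UL << 9)
#define PAGES_PER_SECTION	(1UL << 15)

/* Mirrors the precondition on online_pages() in the listing: the
 * range starts on a pageblock and ends on a full memory section. */
static bool online_range_ok(unsigned long pfn, unsigned long nr_pages)
{
	return nr_pages &&
	       IS_ALIGNED(pfn, PAGEBLOCK_NR_PAGES) &&
	       IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION);
}

int main(void)
{
	printf("%d\n", online_range_ok(0x8000, 0x8000));	/* 1: whole section */
	printf("%d\n", online_range_ok(0x8200, 0x7e00));	/* 1: starts mid-section */
	printf("%d\n", online_range_ok(0x8000, 0x4000));	/* 0: ends mid-section */
	return 0;
}
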
1816 static void node_states_check_changes_offline(unsigned long nr_pages,
1836 if (zone_idx(zone) <= ZONE_NORMAL && nr_pages >= present_pages)
1843 * If after having accounted all the pages, we see that the nr_pages
1850 if (nr_pages >= present_pages)
1864 unsigned long nr_pages, void *data)
1868 *nr_system_ram_pages += nr_pages;
1875 int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
1878 const unsigned long end_pfn = start_pfn + nr_pages;
1893 if (WARN_ON_ONCE(!nr_pages || !pageblock_aligned(start_pfn) ||
1894 !IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION)))
1905 walk_system_ram_range(start_pfn, nr_pages, &system_ram_pages,
1907 if (system_ram_pages != nr_pages) {
1943 arg.nr_pages = nr_pages;
1944 node_states_check_changes_offline(nr_pages, zone, &arg);
2001 pr_debug("Offlined Pages %ld\n", nr_pages);
2009 zone->nr_isolate_pageblock -= nr_pages / pageblock_nr_pages;
2016 adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages);
2017 adjust_present_page_count(pfn_to_page(start_pfn), group, -nr_pages);
2039 remove_pfn_range_from_zone(zone, start_pfn, nr_pages);
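
offline_pages() (lines 1875 onward) first proves the range is plain System RAM: the callback at lines 1864-1868 sums the pages of every RAM chunk that walk_system_ram_range() reports, and line 1907 rejects the range when the total falls short of nr_pages, meaning a hole or a non-RAM resource. A sketch with a hand-rolled walker over an assumed memory layout:

#include <stdio.h>

/* Hypothetical miniature of walk_system_ram_range(): one callback
 * invocation per System RAM chunk intersecting the target range. */
struct ram_chunk { unsigned long start_pfn, nr_pages; };

static int count_system_ram_pages_cb(unsigned long start_pfn,
				     unsigned long nr_pages, void *data)
{
	unsigned long *nr_system_ram_pages = data;

	*nr_system_ram_pages += nr_pages;
	return 0;
}

int main(void)
{
	/* Assumed layout: a hole at [0x9000, 0xa000). */
	struct ram_chunk chunks[] = {
		{ 0x8000, 0x1000 },
		{ 0xa000, 0x6000 },
	};
	unsigned long start_pfn = 0x8000, nr_pages = 0x8000;
	unsigned long system_ram_pages = 0;

	for (int i = 0; i < 2; i++)
		count_system_ram_pages_cb(chunks[i].start_pfn,
					  chunks[i].nr_pages,
					  &system_ram_pages);

	/* Mirrors the check in offline_pages(): any shortfall means
	 * the range cannot be offlined. */
	if (system_ram_pages != nr_pages)
		printf("range [%#lx+%#lx) has holes: only %#lx pages are RAM\n",
		       start_pfn, nr_pages, system_ram_pages);
	return 0;
}
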