Lines matching refs:pfn (identifier cross-reference hits; the numbered source lines below are from the kernel's memory hotplug code, mm/memory_hotplug.c)
30 #include <linux/pfn.h>
234 unsigned long i, pfn, end_pfn, nr_pages;
244 pfn = pgdat->node_start_pfn;
248 for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
250 * Some platforms can assign the same pfn to multiple nodes - on
251 * node0 as well as nodeN. To avoid registering a pfn against
252 * multiple nodes we check that this pfn does not already
255 if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
256 register_page_bootmem_info_section(pfn);
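
Lines 234-256 above are the node-level bootmem registration walk: it steps through the node's pfn span one memory section at a time and, because some platforms can report the same pfn on more than one node, only registers a pfn whose early node id matches the node being walked. A minimal userspace sketch of that stepping-and-filtering pattern follows; PAGES_PER_SECTION, the span and nid_of_pfn() are illustrative stand-ins, not the kernel's definitions.

/*
 * Sketch of the per-section walk in lines 234-256.  All values and the
 * node lookup are made up for illustration.
 */
#include <stdio.h>

#define PAGES_PER_SECTION (1UL << 15)   /* assumed: 128 MiB sections, 4 KiB pages */

static int nid_of_pfn(unsigned long pfn)
{
        /* Pretend the second section really belongs to node 1. */
        return (pfn / PAGES_PER_SECTION == 1) ? 1 : 0;
}

int main(void)
{
        int node = 0;
        unsigned long pfn = 0;                          /* node_start_pfn */
        unsigned long end_pfn = 4 * PAGES_PER_SECTION;  /* node end       */

        for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                /* Skip sections whose pfns were assigned to another node. */
                if (nid_of_pfn(pfn) != node)
                        continue;
                printf("register section starting at pfn %#lx for node %d\n",
                       pfn, node);
        }
        return 0;
}
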
261 static int check_pfn_span(unsigned long pfn, unsigned long nr_pages,
279 if (!IS_ALIGNED(pfn, min_align)
282 reason, pfn, pfn + nr_pages - 1);
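
check_pfn_span() (lines 261-282) rejects requests whose start pfn or page count is not a multiple of the minimum hotplug granularity (a sub-section, or a whole section, depending on the sparse-memory model). A hedged sketch of the same alignment test, assuming a 2 MiB sub-section:

/*
 * Sketch of the alignment check in check_pfn_span() (lines 261-282).
 * The 512-pfn (2 MiB) granularity is an assumption for illustration.
 */
#include <stdio.h>

#define PAGES_PER_SUBSECTION    (1UL << 9)
#define IS_ALIGNED(x, a)        (((x) & ((a) - 1)) == 0)

static int check_span(unsigned long pfn, unsigned long nr_pages)
{
        unsigned long min_align = PAGES_PER_SUBSECTION;

        /* Both the start pfn and the length must be multiples of min_align. */
        if (!IS_ALIGNED(pfn, min_align) || !IS_ALIGNED(nr_pages, min_align)) {
                fprintf(stderr, "misaligned range: %#lx-%#lx\n",
                        pfn, pfn + nr_pages - 1);
                return -1;
        }
        return 0;
}

int main(void)
{
        printf("aligned:    %d\n", check_span(0x200, 0x400));   /* accepted */
        printf("misaligned: %d\n", check_span(0x210, 0x400));   /* rejected */
        return 0;
}
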
288 static int check_hotplug_memory_addressable(unsigned long pfn,
291 const u64 max_addr = PFN_PHYS(pfn + nr_pages) - 1;
297 (u64)PFN_PHYS(pfn), max_addr, max_allowed);
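
check_hotplug_memory_addressable() (lines 288-297) computes the last physical address of the range, PFN_PHYS(pfn + nr_pages) - 1, and refuses memory that the configured physical address width cannot reach. A small compilable sketch of that check; PAGE_SHIFT and MAX_PHYSMEM_BITS are assumed values, the kernel's are configuration dependent:

/*
 * Sketch of the addressability check in lines 288-297.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT              12
#define MAX_PHYSMEM_BITS        46
#define PFN_PHYS(pfn)           ((uint64_t)(pfn) << PAGE_SHIFT)

static int check_addressable(uint64_t pfn, uint64_t nr_pages)
{
        const uint64_t max_addr = PFN_PHYS(pfn + nr_pages) - 1;

        /* Any bit above MAX_PHYSMEM_BITS means the range is unreachable. */
        if (max_addr >> MAX_PHYSMEM_BITS) {
                fprintf(stderr, "range %#llx-%#llx is not addressable\n",
                        (unsigned long long)PFN_PHYS(pfn),
                        (unsigned long long)max_addr);
                return -1;      /* the kernel returns an error code here */
        }
        return 0;
}

int main(void)
{
        uint64_t limit_pfn = 1ULL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);

        printf("below the limit: %d\n", check_addressable(limit_pfn - 2, 1));
        printf("above the limit: %d\n", check_addressable(limit_pfn, 1));
        return 0;
}
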
310 int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
313 const unsigned long end_pfn = pfn + nr_pages;
321 err = check_hotplug_memory_addressable(pfn, nr_pages);
329 if (altmap->base_pfn != pfn
337 err = check_pfn_span(pfn, nr_pages, "add");
341 for (; pfn < end_pfn; pfn += cur_nr_pages) {
343 cur_nr_pages = min(end_pfn - pfn,
344 SECTION_ALIGN_UP(pfn + 1) - pfn);
345 err = sparse_add_section(nid, pfn, cur_nr_pages, altmap);
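
__add_pages() (lines 310-345) validates the request and then adds memory one section at a time; the expression min(end_pfn - pfn, SECTION_ALIGN_UP(pfn + 1) - pfn) clamps each chunk so it never crosses a section boundary, even when the range starts or ends mid-section. A compilable sketch of just that chunking arithmetic, with an assumed 128 MiB section size and a made-up range:

/*
 * Sketch of the per-section chunking used by __add_pages() (lines 341-345)
 * and again by __remove_pages() further below.
 */
#include <stdio.h>

#define PAGES_PER_SECTION   (1UL << 15)
#define SECTION_ALIGN_UP(p) (((p) + PAGES_PER_SECTION - 1) & ~(PAGES_PER_SECTION - 1))
#define MIN(a, b)           ((a) < (b) ? (a) : (b))

int main(void)
{
        unsigned long pfn = 0x8200;             /* starts mid-section */
        unsigned long end_pfn = pfn + 0x18000;  /* spans ~3 sections  */
        unsigned long cur_nr_pages;

        /* Never let one chunk cross a section boundary. */
        for (; pfn < end_pfn; pfn += cur_nr_pages) {
                cur_nr_pages = MIN(end_pfn - pfn,
                                   SECTION_ALIGN_UP(pfn + 1) - pfn);
                printf("chunk: pfn %#lx, %lu pages\n", pfn, cur_nr_pages);
        }
        return 0;
}
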
354 /* find the smallest valid pfn in the range [start_pfn, end_pfn) */
375 /* find the biggest valid pfn in the range [start_pfn, end_pfn). */
380 unsigned long pfn;
382 /* pfn is the end pfn of a memory section. */
383 pfn = end_pfn - 1;
384 for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
385 if (unlikely(!pfn_to_online_page(pfn)))
388 if (unlikely(pfn_to_nid(pfn) != nid))
391 if (zone != page_zone(pfn_to_page(pfn)))
394 return pfn;
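
find_biggest_section_pfn() (lines 375-394) starts at the last pfn of the range and walks backwards in sub-section steps, returning the first pfn that is still online, on the right node and in the right zone; a return of 0 means nothing in the range qualifies. A simplified sketch of that backwards scan, with still_present() standing in for the kernel's online/node/zone checks:

/*
 * Sketch of the backwards scan in lines 380-394.  The granularity and
 * the still_present() predicate are fabricated.
 */
#include <stdio.h>

#define PAGES_PER_SUBSECTION (1UL << 9)

static int still_present(unsigned long pfn)
{
        return pfn < 0x4000;    /* pretend everything above was unplugged */
}

static unsigned long find_biggest_pfn(unsigned long start_pfn,
                                      unsigned long end_pfn)
{
        unsigned long pfn;

        /* pfn starts at the last pfn of the range and moves down. */
        for (pfn = end_pfn - 1; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
                if (still_present(pfn))
                        return pfn;
        }
        return 0;               /* nothing valid left in the range */
}

int main(void)
{
        printf("biggest remaining pfn: %#lx\n",
               find_biggest_pfn(0x200, 0x8000));
        return 0;
}
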
403 unsigned long pfn;
414 pfn = find_smallest_section_pfn(nid, zone, end_pfn,
416 if (pfn) {
417 zone->spanned_pages = zone_end_pfn(zone) - pfn;
418 zone->zone_start_pfn = pfn;
430 pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn,
432 if (pfn)
433 zone->spanned_pages = pfn - zone->zone_start_pfn + 1;
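
shrink_zone_span() (lines 403-433) uses those two helpers to trim the zone after a removal: when the hole is at the front of the zone, zone_start_pfn moves up to the smallest surviving pfn and spanned_pages is recomputed against the unchanged zone end; when the hole is at the back, spanned_pages is recomputed from the biggest surviving pfn. A toy model of that span bookkeeping; the zone layout, the removed range and the "survivor" pfns are made up rather than looked up as the kernel does:

/*
 * Toy model of the span shrinking in lines 403-433.
 */
#include <stdio.h>

struct toy_zone {
        unsigned long zone_start_pfn;
        unsigned long spanned_pages;
};

static unsigned long zone_end_pfn(const struct toy_zone *z)
{
        return z->zone_start_pfn + z->spanned_pages;
}

static void shrink_span(struct toy_zone *z,
                        unsigned long start_pfn, unsigned long end_pfn,
                        unsigned long smallest_left,  /* first survivor above end_pfn, 0 if none  */
                        unsigned long biggest_left)   /* last survivor below start_pfn, 0 if none */
{
        if (start_pfn == z->zone_start_pfn) {
                /* Hole at the front: compute the new span before moving the start. */
                if (smallest_left) {
                        z->spanned_pages = zone_end_pfn(z) - smallest_left;
                        z->zone_start_pfn = smallest_left;
                }
        } else if (end_pfn == zone_end_pfn(z)) {
                /* Hole at the back: keep everything up to the last survivor. */
                if (biggest_left)
                        z->spanned_pages = biggest_left - z->zone_start_pfn + 1;
        }
}

int main(void)
{
        struct toy_zone z = { .zone_start_pfn = 0x1000, .spanned_pages = 0x8000 };

        /* Remove the first 0x2000 pfns; pretend 0x3000 is the first survivor. */
        shrink_span(&z, 0x1000, 0x3000, 0x3000, 0);
        printf("start=%#lx spanned=%#lx\n", z.zone_start_pfn, z.spanned_pages);
        return 0;
}
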
477 unsigned long pfn, cur_nr_pages, flags;
480 for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) {
485 min(end_pfn - pfn, SECTION_ALIGN_UP(pfn + 1) - pfn);
486 page_init_poison(pfn_to_page(pfn),
510 static void __remove_section(unsigned long pfn, unsigned long nr_pages,
514 struct mem_section *ms = __pfn_to_section(pfn);
519 sparse_remove_section(ms, pfn, nr_pages, map_offset, altmap);
524 * @pfn: starting pageframe (must be aligned to start of a section)
533 void __remove_pages(unsigned long pfn, unsigned long nr_pages,
536 const unsigned long end_pfn = pfn + nr_pages;
542 if (check_pfn_span(pfn, nr_pages, "remove"))
545 for (; pfn < end_pfn; pfn += cur_nr_pages) {
548 cur_nr_pages = min(end_pfn - pfn,
549 SECTION_ALIGN_UP(pfn + 1) - pfn);
550 __remove_section(pfn, cur_nr_pages, map_offset, altmap);
614 unsigned long pfn;
622 for (pfn = start_pfn; pfn < end_pfn; pfn += MAX_ORDER_NR_PAGES)
623 (*online_page_callback)(pfn_to_page(pfn), MAX_ORDER - 1);
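
online_pages_range() (lines 614-623) hands freshly onlined memory to the page allocator in maximal buddy-order chunks: it strides through the range in MAX_ORDER_NR_PAGES steps and invokes the registered callback once per chunk. A sketch of that callback-per-chunk walk; the constants and the callback are illustrative stand-ins:

/*
 * Sketch of the chunked walk in lines 614-623.
 */
#include <stdio.h>

#define MAX_ORDER               11
#define MAX_ORDER_NR_PAGES      (1UL << (MAX_ORDER - 1))   /* 1024 pfns = 4 MiB */

typedef void (*online_cb_t)(unsigned long pfn, unsigned int order);

static void generic_online_chunk(unsigned long pfn, unsigned int order)
{
        printf("free %lu pages starting at pfn %#lx to the allocator\n",
               1UL << order, pfn);
}

static void online_range(unsigned long start_pfn, unsigned long nr_pages,
                         online_cb_t cb)
{
        unsigned long pfn, end_pfn = start_pfn + nr_pages;

        /* One callback invocation per maximal buddy-sized chunk. */
        for (pfn = start_pfn; pfn < end_pfn; pfn += MAX_ORDER_NR_PAGES)
                cb(pfn, MAX_ORDER - 1);
}

int main(void)
{
        online_range(0x10000, 4 * MAX_ORDER_NR_PAGES, generic_online_chunk);
        return 0;
}
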
684 * Associate the pfn range with the given zone, initializing the memmaps
715 * expects the zone spans the pfn range. All the pages in the range
725 * Returns a default kernel memory zone for the given pfn range.
726 * If no kernel zone covers this pfn range it will automatically go
781 int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
792 !IS_ALIGNED(pfn | nr_pages, PAGES_PER_SECTION)))
797 /* associate pfn range with the zone */
798 zone = zone_for_pfn_range(online_type, nid, pfn, nr_pages);
799 move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE);
801 arg.start_pfn = pfn;
828 online_pages_range(pfn, nr_pages);
841 undo_isolate_page_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE);
867 (unsigned long long) pfn << PAGE_SHIFT,
868 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
870 remove_pfn_range_from_zone(zone, pfn, nr_pages);
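
online_pages() (lines 781-870) only accepts whole memory sections; the test on line 792, IS_ALIGNED(pfn | nr_pages, PAGES_PER_SECTION), checks the start and the length with a single mask, since ORing the two values preserves any low bit set in either of them. A short demonstration of that idiom, with an assumed section size:

/*
 * Demonstration of the combined alignment test used on line 792.
 */
#include <stdio.h>

#define PAGES_PER_SECTION       (1UL << 15)
#define IS_ALIGNED(x, a)        (((x) & ((a) - 1)) == 0)

int main(void)
{
        struct { unsigned long pfn, nr_pages; } tests[] = {
                { 0x08000, 0x10000 },   /* both aligned      -> accepted */
                { 0x08200, 0x10000 },   /* misaligned start  -> rejected */
                { 0x08000, 0x0f000 },   /* misaligned length -> rejected */
        };

        for (unsigned int i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
                unsigned long pfn = tests[i].pfn, nr = tests[i].nr_pages;

                printf("pfn=%#lx nr=%#lx -> %s\n", pfn, nr,
                       IS_ALIGNED(pfn | nr, PAGES_PER_SECTION) ?
                       "section aligned" : "rejected");
        }
        return 0;
}
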
1196 unsigned long pfn, sec_end_pfn;
1200 for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
1201 pfn < end_pfn;
1202 pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
1204 if (!present_section_nr(pfn_to_section_nr(pfn)))
1206 for (; pfn < sec_end_pfn && pfn < end_pfn;
1207 pfn += MAX_ORDER_NR_PAGES) {
1211 !pfn_valid_within(pfn + i))
1213 if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
1216 if (zone && !zone_spans_pfn(zone, pfn + i))
1218 page = pfn_to_page(pfn + i);
1229 * Scan pfn range [start,end) to find movable/migratable pages (LRU pages,
1242 unsigned long pfn;
1244 for (pfn = start; pfn < end; pfn++) {
1248 if (!pfn_valid(pfn))
1250 page = pfn_to_page(pfn);
1270 skip = compound_nr(head) - (pfn - page_to_pfn(head));
1271 pfn += skip - 1;
1275 *movable_pfn = pfn;
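
scan_movable_pages() (lines 1229-1275) advances one pfn at a time but, on hitting a compound (huge) page, jumps straight past it: the remaining length is the compound size minus the offset of the current pfn from the head page, and one less is added because the loop's own pfn++ supplies the final step (lines 1270-1271). A sketch of that skip arithmetic over a fabricated page layout; head_of() and compound_pages() stand in for the kernel's struct page lookups:

/*
 * Sketch of the compound-page skip in lines 1270-1271.  A single
 * 512-page huge page is faked at pfn 0x400.
 */
#include <stdio.h>

#define HUGE_HEAD       0x400UL
#define HUGE_NR         512UL

static unsigned long head_of(unsigned long pfn)
{
        return (pfn >= HUGE_HEAD && pfn < HUGE_HEAD + HUGE_NR) ? HUGE_HEAD : pfn;
}

static unsigned long compound_pages(unsigned long head)
{
        return (head == HUGE_HEAD) ? HUGE_NR : 1;
}

int main(void)
{
        unsigned long pfn, visited = 0;

        for (pfn = 0x3f0; pfn < 0x700; pfn++) {
                unsigned long head = head_of(pfn);

                visited++;
                if (head != pfn || compound_pages(head) > 1) {
                        /*
                         * Skip the rest of the compound page in one go; -1
                         * because the loop increment adds the last step.
                         */
                        unsigned long skip = compound_pages(head) - (pfn - head);

                        pfn += skip - 1;
                }
        }
        printf("iterations: %lu (instead of %lu)\n", visited, 0x700UL - 0x3f0UL);
        return 0;
}
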
1282 unsigned long pfn;
1289 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1290 if (!pfn_valid(pfn))
1292 page = pfn_to_page(pfn);
1296 pfn = page_to_pfn(head) + compound_nr(head) - 1;
1300 pfn = page_to_pfn(head) + thp_nr_pages(page) - 1;
1335 pr_warn("failed to isolate pfn %lx\n", pfn);
1367 pr_warn("migrating pfn %lx failed ret:%d\n",
1465 unsigned long pfn, system_ram_pages = 0;
1526 pfn = start_pfn;
1537 ret = scan_movable_pages(pfn, end_pfn, &pfn);
1543 do_migrate_range(pfn, end_pfn);
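
The offline path (lines 1465-1543) repeats a scan-and-migrate cycle: scan_movable_pages() reports the next pfn that still holds a movable page, do_migrate_range() migrates everything from there to the end of the range, and the loop re-scans until nothing movable remains or the attempt is aborted. A stripped-down model of that retry loop; remaining[] and the helpers are fabricated, where the kernel scans real struct pages and calls the migration core:

/*
 * Stripped-down model of the offline retry loop (lines 1526-1543).
 */
#include <stdio.h>
#include <string.h>

#define RANGE_PAGES 16

static int remaining[RANGE_PAGES];      /* 1 = still holds a movable page */

static int scan_movable(unsigned long start, unsigned long end,
                        unsigned long *movable_pfn)
{
        for (unsigned long pfn = start; pfn < end; pfn++) {
                if (remaining[pfn]) {
                        *movable_pfn = pfn;
                        return 0;
                }
        }
        return -1;              /* nothing left to migrate */
}

static void migrate_range(unsigned long start, unsigned long end)
{
        /* Pretend migration succeeds for every page in the range. */
        for (unsigned long pfn = start; pfn < end; pfn++)
                remaining[pfn] = 0;
        printf("migrated pfns %#lx-%#lx\n", start, end - 1);
}

int main(void)
{
        unsigned long pfn = 0, end_pfn = RANGE_PAGES;

        memset(remaining, 0, sizeof(remaining));
        remaining[3] = remaining[9] = 1;

        /* Keep scanning and migrating until the range is empty. */
        while (scan_movable(pfn, end_pfn, &pfn) == 0)
                migrate_range(pfn, end_pfn);

        printf("range is free of movable pages\n");
        return 0;
}
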