Lines Matching defs:start_pfn
597 unsigned long sp, start_pfn;
601 start_pfn = zone->zone_start_pfn;
610 start_pfn, start_pfn + sp);
1499 unsigned long start_pfn = PFN_DOWN(start);
1502 for (; start_pfn < end_pfn; start_pfn++) {
1503 if (pfn_valid(start_pfn)) {
1504 struct page *page = pfn_to_page(start_pfn);
1506 init_reserved_page(start_pfn);
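
The fragments at 1499-1506 turn a physical address range into a pfn range and then walk it one page frame at a time. A minimal userspace sketch of that conversion follows; PAGE_SHIFT = 12 and the PFN_UP() rounding of the end address are illustrative assumptions, not taken from the listing.

/* Address-to-pfn arithmetic sketch (not kernel code). */
#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PFN_DOWN(x)     ((x) >> PAGE_SHIFT)                    /* round an address down to its pfn */
#define PFN_UP(x)       (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)  /* round an end address up */

int main(void)
{
        unsigned long start = 0x1000800UL;      /* arbitrary physical byte addresses */
        unsigned long end = 0x1003000UL;
        unsigned long start_pfn = PFN_DOWN(start);
        unsigned long end_pfn = PFN_UP(end);
        unsigned long pfn;

        for (pfn = start_pfn; pfn < end_pfn; pfn++)
                printf("would mark pfn %lu reserved\n", pfn);
        return 0;
}
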
1580 unsigned long start_pfn, end_pfn;
1586 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
1588 state->last_start = start_pfn;
1622 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1627 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1637 struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1646 if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
1649 start_page = pfn_to_online_page(start_pfn);
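
The comment at 1622-1627 explains what __pageblock_pfn_to_page() guarantees: the first page of [start_pfn, end_pfn) is handed back only when both ends are valid, online, and in the same zone. A simplified userspace sketch of that shape follows; the struct page, the two stub lookups, and the decrement of end_pfn are stand-ins chosen for illustration, not the kernel's definitions.

/* Simplified model of the [start_pfn, end_pfn) checks (not kernel code). */
#include <stdbool.h>
#include <stdio.h>

struct page { int zone_id; };

static struct page fake_memmap[1024];           /* stand-in for the real memmap */

static bool pfn_valid(unsigned long pfn)        /* stub: pfns below 1024 "exist" */
{
        return pfn < 1024;
}

static struct page *pfn_to_online_page(unsigned long pfn)      /* stub: valid means online */
{
        return pfn_valid(pfn) ? &fake_memmap[pfn] : NULL;
}

static struct page *pageblock_first_page(unsigned long start_pfn, unsigned long end_pfn)
{
        struct page *start_page, *end_page;

        end_pfn--;                              /* end_pfn is one past the range */

        if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
                return NULL;

        start_page = pfn_to_online_page(start_pfn);
        if (!start_page)
                return NULL;

        end_page = pfn_to_online_page(end_pfn);
        if (!end_page || end_page->zone_id != start_page->zone_id)
                return NULL;                    /* range straddles a zone boundary */

        return start_page;                      /* safe for the caller to scan */
}

int main(void)
{
        printf("%s\n", pageblock_first_page(0, 512) ? "scan it" : "skip it");
        return 0;
}
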
1844 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
1847 unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
1848 unsigned long spfn = *start_pfn, epfn = *end_pfn;
1853 for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
1856 if (mo_pfn <= *start_pfn)
1860 nr_pages += deferred_init_pages(zone, *start_pfn, t);
1863 *start_pfn = mo_pfn;
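
The ALIGN() at 1847 caps each deferred_init_maxorder() step at the next MAX_ORDER-aligned pfn, so a single step never spans two max-order blocks. A small sketch of that rounding; MAX_ORDER_NR_PAGES = 1024 is an assumed value, and ALIGN() only works for power-of-two alignments.

/* Round-up-to-boundary sketch (not kernel code). */
#include <stdio.h>

#define MAX_ORDER_NR_PAGES      1024UL
#define ALIGN(x, a)             (((x) + (a) - 1) & ~((a) - 1))  /* a must be a power of two */

int main(void)
{
        unsigned long start_pfn = 3000, end_pfn = 10000;
        unsigned long mo_pfn = ALIGN(start_pfn + 1, MAX_ORDER_NR_PAGES);
        unsigned long t = mo_pfn < end_pfn ? mo_pfn : end_pfn;

        printf("initialize [%lu, %lu), then continue from %lu\n", start_pfn, t, mo_pfn);
        return 0;
}
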
1888 deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
1895 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
2373 unsigned long start_pfn, unsigned long end_pfn,
2381 for (pfn = start_pfn; pfn <= end_pfn;) {
2417 unsigned long start_pfn, end_pfn, pfn;
2423 start_pfn = pfn & ~(pageblock_nr_pages - 1);
2424 end_pfn = start_pfn + pageblock_nr_pages - 1;
2427 if (!zone_spans_pfn(zone, start_pfn))
2428 start_pfn = pfn;
2432 return move_freepages(zone, start_pfn, end_pfn, migratetype,
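
Lines 2423-2424 round an arbitrary pfn down to the first pfn of its pageblock and take the block's last pfn before the range is handed to move_freepages(). A minimal sketch of that mask arithmetic; pageblock_nr_pages = 512 is an assumed value and only needs to be a power of two.

/* Pageblock alignment sketch (not kernel code). */
#include <stdio.h>

#define pageblock_nr_pages      512UL

int main(void)
{
        unsigned long pfn = 123456;
        unsigned long start_pfn = pfn & ~(pageblock_nr_pages - 1);      /* first pfn of the block */
        unsigned long end_pfn = start_pfn + pageblock_nr_pages - 1;     /* last pfn of the block */

        printf("pfn %lu lives in pageblock [%lu, %lu]\n", pfn, start_pfn, end_pfn);
        return 0;
}
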
6212 unsigned long start_pfn, unsigned long zone_end_pfn,
6216 unsigned long pfn, end_pfn = start_pfn + size;
6234 if (start_pfn == altmap->base_pfn)
6235 start_pfn += altmap->reserve;
6240 for (pfn = start_pfn; pfn < end_pfn; ) {
6272 unsigned long start_pfn,
6276 unsigned long pfn, end_pfn = start_pfn + nr_pages;
6292 start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
6293 nr_pages = end_pfn - start_pfn;
6296 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
6402 unsigned long start_pfn,
6410 start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
6413 if (start_pfn >= end_pfn)
6416 memmap_init_zone(end_pfn - start_pfn, nid, zone_id, start_pfn,
6419 if (*hole_pfn < start_pfn)
6420 init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);
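
Lines 6410-6420 clamp a memory range to the zone's span, initialize the memmap for whatever survives the clamp, and treat the gap before it as an unavailable hole. A userspace sketch of that clamping and hole tracking; clamp() here is a plain min/max helper and all pfn values are made up.

/* Clamp-to-zone and hole-tracking sketch (not kernel code). */
#include <stdio.h>

static unsigned long clamp(unsigned long v, unsigned long lo, unsigned long hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
        unsigned long zone_start_pfn = 4096, zone_end_pfn = 1048576;
        unsigned long start_pfn = 1024, end_pfn = 65536;        /* one memory range */
        unsigned long hole_pfn = 0;                             /* nothing initialized yet */

        start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
        end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
        if (start_pfn >= end_pfn)
                return 0;                                       /* range lies outside the zone */

        if (hole_pfn < start_pfn)
                printf("hole: [%lu, %lu)\n", hole_pfn, start_pfn);
        printf("init memmap for [%lu, %lu)\n", start_pfn, end_pfn);
        hole_pfn = end_pfn;                                     /* the next range's hole starts here */
        return 0;
}
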
6427 unsigned long start_pfn, end_pfn;
6431 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
6440 memmap_init_zone_range(zone, start_pfn, end_pfn,
6682 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
6691 unsigned long *start_pfn, unsigned long *end_pfn)
6696 *start_pfn = -1UL;
6700 *start_pfn = min(*start_pfn, this_start_pfn);
6704 if (*start_pfn == -1UL)
6705 *start_pfn = 0;
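
The kernel-doc at 6682 and the fragments at 6696-6705 show how a node's pfn span is found: start from the -1UL sentinel, widen the window over every memory range of the node, and fall back to [0, 0) if the node has no memory. A standalone sketch of that accumulation; the ranges[] table stands in for the memblock iteration and is made up.

/* Min/max accumulation sketch (not kernel code). */
#include <stdio.h>

struct range { unsigned long start, end; };

int main(void)
{
        static const struct range ranges[] = {
                { 0x100, 0x400 },
                { 0x800, 0x900 },
        };
        unsigned long start_pfn = -1UL, end_pfn = 0;    /* sentinel: nothing seen yet */
        unsigned int i;

        for (i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++) {
                if (ranges[i].start < start_pfn)
                        start_pfn = ranges[i].start;
                if (ranges[i].end > end_pfn)
                        end_pfn = ranges[i].end;
        }
        if (start_pfn == -1UL)
                start_pfn = 0;                          /* memoryless node: report [0, 0) */

        printf("node spans pfns [%#lx, %#lx)\n", start_pfn, end_pfn);
        return 0;
}
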
6811 unsigned long start_pfn, end_pfn;
6814 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
6815 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
6817 nr_absent -= end_pfn - start_pfn;
6824 * @start_pfn: The start PFN to start searching for holes
6829 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
6832 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
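
__absent_pages_in_range() (fragments at 6811-6832) counts holes by assuming the whole window is absent and then subtracting every present range after clamping it into the window. A standalone sketch of that subtraction; the present[] table and the window are made-up values.

/* Hole-counting sketch (not kernel code). */
#include <stdio.h>

struct range { unsigned long start, end; };

static unsigned long clamp(unsigned long v, unsigned long lo, unsigned long hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
        static const struct range present[] = { { 100, 200 }, { 300, 350 } };
        unsigned long range_start_pfn = 0, range_end_pfn = 400;
        unsigned long nr_absent = range_end_pfn - range_start_pfn;      /* assume it is all hole */
        unsigned int i;

        for (i = 0; i < sizeof(present) / sizeof(present[0]); i++) {
                unsigned long start_pfn = clamp(present[i].start, range_start_pfn, range_end_pfn);
                unsigned long end_pfn = clamp(present[i].end, range_start_pfn, range_end_pfn);

                nr_absent -= end_pfn - start_pfn;       /* give back what is present */
        }
        printf("%lu absent pages in [%lu, %lu)\n", nr_absent, range_start_pfn, range_end_pfn);
        return 0;
}
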
6864 unsigned long start_pfn, end_pfn;
6868 start_pfn = clamp(memblock_region_memory_base_pfn(r),
6875 nr_absent += end_pfn - start_pfn;
6879 nr_absent += end_pfn - start_pfn;
7237 unsigned long start_pfn = 0;
7243 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
7246 pgdat->node_start_pfn = start_pfn;
7250 (u64)start_pfn << PAGE_SHIFT,
7252 calculate_node_totalpages(pgdat, start_pfn, end_pfn);
7347 unsigned long start_pfn, end_pfn;
7350 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
7351 unsigned long pages = end_pfn - start_pfn;
7478 unsigned long start_pfn, end_pfn;
7496 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
7499 start_pfn = max(start_pfn, zone_movable_pfn[nid]);
7500 if (start_pfn >= end_pfn)
7504 if (start_pfn < usable_startpfn) {
7507 - start_pfn;
7526 start_pfn = usable_startpfn;
7531 * start_pfn->end_pfn. Calculate size_pages as the
7534 size_pages = end_pfn - start_pfn;
7537 zone_movable_pfn[nid] = start_pfn + size_pages;
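
Lines 7499-7537 decide where ZONE_MOVABLE starts inside each node: low pages are left to the kernel zones until the requested amount has been set aside, and zone_movable_pfn[nid] lands right after them. The sketch below collapses the real per-node, per-range loop into a single range purely for illustration; the kernelcore budget, the clamp against it, and all numbers are assumptions, not taken from the listing.

/* Deliberately simplified kernelcore/ZONE_MOVABLE split (not kernel code). */
#include <stdio.h>

int main(void)
{
        unsigned long start_pfn = 0x1000, end_pfn = 0x41000;    /* one usable range in the node */
        unsigned long kernelcore_remaining = 0x20000;           /* pages still owed to kernel zones */
        unsigned long size_pages = end_pfn - start_pfn;
        unsigned long zone_movable_pfn;

        if (size_pages > kernelcore_remaining)
                size_pages = kernelcore_remaining;              /* only take what is still owed */
        kernelcore_remaining -= size_pages;

        zone_movable_pfn = start_pfn + size_pages;              /* ZONE_MOVABLE begins here */
        printf("kernel zones get [%#lx, %#lx), ZONE_MOVABLE starts at %#lx\n",
               start_pfn, zone_movable_pfn, zone_movable_pfn);
        return 0;
}
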
7565 unsigned long start_pfn, end_pfn;
7570 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
7621 unsigned long start_pfn, end_pfn;
7631 start_pfn = find_min_pfn_with_active_regions();
7643 end_pfn = max(max_zone_pfn[zone], start_pfn);
7644 arch_zone_lowest_possible_pfn[zone] = start_pfn;
7647 start_pfn = end_pfn;
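
Lines 7643-7647 turn the per-zone limits in max_zone_pfn[] into non-overlapping pfn windows: each zone starts where the previous one ended, and max() keeps an empty zone from running backwards. A standalone sketch with made-up zone names and limits:

/* Zone boundary derivation sketch (not kernel code). */
#include <stdio.h>

#define NR_ZONES        3

int main(void)
{
        const char *name[NR_ZONES] = { "zone0", "zone1", "zone2" };
        unsigned long max_zone_pfn[NR_ZONES] = { 0x100000, 0x100000, 0x400000 };
        unsigned long start_pfn = 0x1000;       /* lowest pfn with any memory */
        int zone;

        for (zone = 0; zone < NR_ZONES; zone++) {
                unsigned long end_pfn = max_zone_pfn[zone] > start_pfn ?
                                        max_zone_pfn[zone] : start_pfn;

                printf("%s: [%#lx, %#lx)%s\n", name[zone], start_pfn, end_pfn,
                       start_pfn == end_pfn ? " (empty)" : "");
                start_pfn = end_pfn;            /* next zone begins where this one ends */
        }
        return 0;
}
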
7685 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
7687 (u64)start_pfn << PAGE_SHIFT,
7689 subsection_map_init(start_pfn, end_pfn - start_pfn);
8791 static int __alloc_contig_pages(unsigned long start_pfn,
8794 unsigned long end_pfn = start_pfn + nr_pages;
8796 return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
8800 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
8803 unsigned long i, end_pfn = start_pfn + nr_pages;
8806 for (i = start_pfn; i < end_pfn; i++) {
8827 unsigned long start_pfn, unsigned long nr_pages)
8829 unsigned long last_pfn = start_pfn + nr_pages - 1;
8942 void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
8944 unsigned long pfn = start_pfn;