/kernel/linux/linux-6.6/arch/x86/mm/ |
H A D | highmem_32.c |
  19  unsigned long zone_start_pfn, zone_end_pfn;  in set_highmem_pages_init() local
  24  zone_start_pfn = zone->zone_start_pfn;  in set_highmem_pages_init()
  25  zone_end_pfn = zone_start_pfn + zone->spanned_pages;  in set_highmem_pages_init()
  29  zone->name, nid, zone_start_pfn, zone_end_pfn);  in set_highmem_pages_init()
  31  add_highpages_with_active_regions(nid, zone_start_pfn,  in set_highmem_pages_init()
|
/kernel/linux/linux-5.10/arch/x86/mm/ |
H A D | highmem_32.c |
  77  unsigned long zone_start_pfn, zone_end_pfn;  in set_highmem_pages_init() local
  82  zone_start_pfn = zone->zone_start_pfn;  in set_highmem_pages_init()
  83  zone_end_pfn = zone_start_pfn + zone->spanned_pages;  in set_highmem_pages_init()
  87  zone->name, nid, zone_start_pfn, zone_end_pfn);  in set_highmem_pages_init()
  89  add_highpages_with_active_regions(nid, zone_start_pfn,  in set_highmem_pages_init()
|
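Both highmem_32.c hits compute the zone's bounds the same way: only zone_start_pfn and spanned_pages are stored, and the (exclusive) end PFN is derived. A minimal user-space sketch of that arithmetic; the struct below is a stripped-down stand-in for the kernel's struct zone, and the PFN values are made up for illustration:

    #include <stdio.h>

    /* Stand-in for the kernel's struct zone: only the fields the
     * snippets above actually touch. */
    struct zone {
        const char *name;
        unsigned long zone_start_pfn;  /* first PFN the zone spans */
        unsigned long spanned_pages;   /* PFNs spanned, holes included */
    };

    int main(void)
    {
        struct zone highmem = { "HighMem", 0x38000, 0x8000 };

        /* Same derivation as set_highmem_pages_init(): the end PFN is
         * exclusive, i.e. the first PFN past the zone. */
        unsigned long zone_start_pfn = highmem.zone_start_pfn;
        unsigned long zone_end_pfn = zone_start_pfn + highmem.spanned_pages;

        printf("Initializing %s (%08lx:%08lx)\n",
               highmem.name, zone_start_pfn, zone_end_pfn);
        return 0;
    }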
/kernel/linux/linux-6.6/mm/ |
H A D | mm_init.c |
  914   unsigned long zone_start_pfn = zone->zone_start_pfn;  in memmap_init_zone_range() local
  915   unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;  in memmap_init_zone_range()
  918   start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);  in memmap_init_zone_range()
  919   end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);  in memmap_init_zone_range()
  1119  unsigned long *zone_start_pfn,  in adjust_zone_range_for_zone_movable()
  1126  *zone_start_pfn = zone_movable_pfn[nid];  in adjust_zone_range_for_zone_movable()
  1132  *zone_start_pfn < zone_movable_pfn[nid] &&  in adjust_zone_range_for_zone_movable()
  1137  } else if (*zone_start_pfn >= zone_movable_pfn[nid])  in adjust_zone_range_for_zone_movable()
  1138  *zone_start_pfn = zone_movable_pfn[nid];  in adjust_zone_range_for_zone_movable()
  1116  adjust_zone_range_for_zone_movable(int nid, unsigned long zone_type, unsigned long node_end_pfn, unsigned long *zone_start_pfn, unsigned long *zone_end_pfn)  adjust_zone_range_for_zone_movable() argument
  1176  zone_absent_pages_in_node(int nid, unsigned long zone_type, unsigned long zone_start_pfn, unsigned long zone_end_pfn)  zone_absent_pages_in_node() argument
  1221  zone_spanned_pages_in_node(int nid, unsigned long zone_type, unsigned long node_start_pfn, unsigned long node_end_pfn, unsigned long *zone_start_pfn, unsigned long *zone_end_pfn)  zone_spanned_pages_in_node() argument
  1276  unsigned long zone_start_pfn, zone_end_pfn;  calculate_node_totalpages() local
  1403  init_currently_empty_zone(struct zone *zone, unsigned long zone_start_pfn, unsigned long size)  init_currently_empty_zone() argument
  1433  usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)  usemap_size() argument
  [all...] |
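memmap_init_zone_range() above clamps each memory region to the zone span before initializing its memmap, so only the overlap is touched. A hedged sketch of that clamping; clamp_ul() is a hand-rolled stand-in for the kernel's clamp() macro, and the PFN values are hypothetical:

    #include <stdio.h>

    /* User-space stand-in for the kernel's clamp() macro. */
    static unsigned long clamp_ul(unsigned long v, unsigned long lo,
                                  unsigned long hi)
    {
        return v < lo ? lo : (v > hi ? hi : v);
    }

    int main(void)
    {
        /* Hypothetical zone span and memory region. */
        unsigned long zone_start_pfn = 0x1000, zone_end_pfn = 0x8000;
        unsigned long start_pfn = 0x0800, end_pfn = 0x9000;

        /* Same clamping as memmap_init_zone_range(): reduce the region
         * to its intersection with [zone_start_pfn, zone_end_pfn). */
        start_pfn = clamp_ul(start_pfn, zone_start_pfn, zone_end_pfn);
        end_pfn = clamp_ul(end_pfn, zone_start_pfn, zone_end_pfn);

        if (start_pfn < end_pfn)
            printf("init memmap for PFNs [%#lx, %#lx)\n", start_pfn, end_pfn);
        return 0;
    }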
H A D | compaction.c |
  226   zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;  in reset_cached_positions()
  227   zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;  in reset_cached_positions()
  335   block_pfn = max(block_pfn, zone->zone_start_pfn);  in __reset_isolation_pfn()
  378   unsigned long migrate_pfn = zone->zone_start_pfn;  in __reset_isolation_suitable()
  730   if (block_start_pfn < cc->zone->zone_start_pfn)  in isolate_freepages_range()
  731   block_start_pfn = cc->zone->zone_start_pfn;  in isolate_freepages_range()
  941   low_pfn == cc->zone->zone_start_pfn)) {  in isolate_migratepages_block()
  1295  if (block_start_pfn < cc->zone->zone_start_pfn)  in isolate_migratepages_range()
  1296  block_start_pfn = cc->zone->zone_start_pfn;  in isolate_migratepages_range()
  1436  start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn);  in fast_isolate_around()
  [all...] |
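The compaction hits share one invariant: scanner positions are always pinned inside [zone_start_pfn, zone_end_pfn). A sketch of reset_cached_positions(), which restarts the migrate scanner at the bottom of the zone and the free scanner at the last pageblock; the 512-page pageblock size is an assumption (the common x86-64 value), and the struct is a stripped-down stand-in:

    #include <stdio.h>

    #define PAGEBLOCK_NR_PAGES 512UL  /* assumed pageblock size */

    struct zone {
        unsigned long zone_start_pfn;
        unsigned long spanned_pages;
        unsigned long compact_cached_migrate_pfn[2]; /* async, sync */
        unsigned long compact_cached_free_pfn;
    };

    static unsigned long zone_end_pfn(const struct zone *z)
    {
        return z->zone_start_pfn + z->spanned_pages;
    }

    /* First PFN of the pageblock containing pfn. */
    static unsigned long pageblock_start_pfn(unsigned long pfn)
    {
        return pfn & ~(PAGEBLOCK_NR_PAGES - 1);
    }

    /* Sketch of reset_cached_positions(): migrate scanner restarts at
     * the bottom of the zone, free scanner at the top. */
    static void reset_cached_positions(struct zone *zone)
    {
        zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
        zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
        zone->compact_cached_free_pfn =
            pageblock_start_pfn(zone_end_pfn(zone) - 1);
    }

    int main(void)
    {
        struct zone z = { 0x1000, 0x4000, { 0, 0 }, 0 };
        reset_cached_positions(&z);
        printf("migrate=%#lx free=%#lx\n",
               z.compact_cached_migrate_pfn[0], z.compact_cached_free_pfn);
        return 0;
    }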
H A D | shuffle.c |
  83   unsigned long start_pfn = z->zone_start_pfn;  in __shuffle_zone()
  98   * page_j randomly selected in the span @zone_start_pfn to  in __shuffle_zone()
  112  j = z->zone_start_pfn +  in __shuffle_zone()
|
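__shuffle_zone() above picks its swap partner page_j uniformly from the zone span. A user-space sketch of that selection; rand() stands in for the kernel's get_random_long(), and order_pages (the shuffle granularity) is an assumed value rather than the kernel's configured shuffle order:

    #include <stdio.h>
    #include <stdlib.h>

    /* Round x down to a multiple of a (a must be a power of two). */
    #define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

    int main(void)
    {
        /* Hypothetical zone span and shuffle granularity. */
        unsigned long zone_start_pfn = 0x1000, spanned_pages = 0x4000;
        unsigned long order_pages = 1UL << 10;

        /* Same selection shape as __shuffle_zone(): page_j lies in the
         * span [zone_start_pfn, zone_start_pfn + spanned_pages), aligned
         * down to the shuffle order. */
        unsigned long j = zone_start_pfn +
            ALIGN_DOWN((unsigned long)rand() % spanned_pages, order_pages);

        printf("swap candidate at PFN %#lx\n", j);
        return 0;
    }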
H A D | memory_hotplug.c |
  466  if (zone->zone_start_pfn == start_pfn) {  in shrink_zone_span()
  469  * shrink zone->zone_start_pfn and zone->zone_spanned_pages.  in shrink_zone_span()
  477  zone->zone_start_pfn = pfn;  in shrink_zone_span()
  479  zone->zone_start_pfn = 0;  in shrink_zone_span()
  489  pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn,  in shrink_zone_span()
  492  zone->spanned_pages = pfn - zone->zone_start_pfn + 1;  in shrink_zone_span()
  494  zone->zone_start_pfn = 0;  in shrink_zone_span()
  513  node_start_pfn = zone->zone_start_pfn;  in update_pgdat_span()
  520  if (zone->zone_start_pfn < node_start_pfn)  in update_pgdat_span()
  521  node_start_pfn = zone->zone_start_pfn;  in update_pgdat_span()
  [all...] |
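shrink_zone_span() above handles pages being offlined at either end of the span; when the removed range sits at the bottom, zone_start_pfn moves up to the first still-present PFN, or the zone is zeroed out entirely. A sketch of that bottom case only; the first still-present PFN is passed in directly rather than located with the kernel's section-scanning helper, and the spanned_pages update is inferred from the snippets:

    #include <stdio.h>

    struct zone { unsigned long zone_start_pfn, spanned_pages; };

    static unsigned long zone_end_pfn(const struct zone *z)
    {
        return z->zone_start_pfn + z->spanned_pages;
    }

    /* Sketch of the shrink_zone_span() bottom case: the offlined range
     * starts exactly at zone_start_pfn. */
    static void shrink_zone_bottom(struct zone *zone, unsigned long start_pfn,
                                   unsigned long first_present_pfn)
    {
        unsigned long old_end = zone_end_pfn(zone);

        if (zone->zone_start_pfn != start_pfn)
            return; /* removed range not at the bottom; not handled here */

        if (first_present_pfn) {
            /* shrink zone->zone_start_pfn and zone->spanned_pages,
             * keeping the end of the span where it was */
            zone->zone_start_pfn = first_present_pfn;
            zone->spanned_pages = old_end - first_present_pfn;
        } else {
            /* nothing present remains: the zone becomes empty */
            zone->zone_start_pfn = 0;
            zone->spanned_pages = 0;
        }
    }

    int main(void)
    {
        struct zone z = { 0x1000, 0x4000 };
        shrink_zone_bottom(&z, 0x1000, 0x1800);
        printf("start=%#lx spanned=%#lx\n", z.zone_start_pfn, z.spanned_pages);
        return 0;
    }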
H A D | page_owner.c |
  288  pfn = zone->zone_start_pfn;  in pagetypeinfo_showmixedcount_print()
  623  unsigned long pfn = zone->zone_start_pfn;  in init_pages_in_zone()
|
H A D | page_isolation.c |
  330  zone->zone_start_pfn);  in isolate_single_pageblock()
|
H A D | vmstat.c |
  1581  unsigned long start_pfn = zone->zone_start_pfn;  in pagetypeinfo_showblockcount_print()
  1782  zone->zone_start_pfn);  in zoneinfo_show_print()
|
H A D | memblock.c |
  1337  if (zone->zone_start_pfn < epfn && spfn < epfn) {  in __next_mem_pfn_range_in_zone()
  1345  *out_spfn = max(zone->zone_start_pfn, spfn);  in __next_mem_pfn_range_in_zone()
|
H A D | kmemleak.c |
  1580  unsigned long start_pfn = zone->zone_start_pfn;  in kmemleak_scan()
|
/kernel/linux/linux-5.10/mm/ |
H A D | page_alloc.c |
  502   pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);  in pfn_to_bitidx()
  601   start_pfn = zone->zone_start_pfn;  in page_outside_zone_boundaries()
  1480  if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))  in init_reserved_page()
  1667  unsigned long block_start_pfn = zone->zone_start_pfn;  in set_zone_contiguous()
  3153  for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)  in mark_free_pages()
  6406  unsigned long zone_start_pfn = zone->zone_start_pfn;  in memmap_init_zone_range() local
  6407  unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;  in memmap_init_zone_range()
  6410  start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);  in memmap_init_zone_range()
  6411  end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);  in memmap_init_zone_range()
  6657  init_currently_empty_zone(struct zone *zone, unsigned long zone_start_pfn, unsigned long size)  init_currently_empty_zone() argument
  6739  adjust_zone_range_for_zone_movable(int nid, unsigned long zone_type, unsigned long node_start_pfn, unsigned long node_end_pfn, unsigned long *zone_start_pfn, unsigned long *zone_end_pfn)  adjust_zone_range_for_zone_movable() argument
  6770  zone_spanned_pages_in_node(int nid, unsigned long zone_type, unsigned long node_start_pfn, unsigned long node_end_pfn, unsigned long *zone_start_pfn, unsigned long *zone_end_pfn)  zone_spanned_pages_in_node() argument
  6843  unsigned long zone_start_pfn, zone_end_pfn;  zone_absent_pages_in_node() local
  6895  unsigned long zone_start_pfn, zone_end_pfn;  calculate_node_totalpages() local
  6936  usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)  usemap_size() argument
  6949  setup_usemap(struct pglist_data *pgdat, struct zone *zone, unsigned long zone_start_pfn, unsigned long zonesize)  setup_usemap() argument
  6966  setup_usemap(struct pglist_data *pgdat, struct zone *zone, unsigned long zone_start_pfn, unsigned long zonesize)  setup_usemap() argument
  7121  unsigned long zone_start_pfn = zone->zone_start_pfn;  in free_area_init_core() local
  [all...] |
H A D | memory_hotplug.c |
  407  if (zone->zone_start_pfn == start_pfn) {  in shrink_zone_span()
  410  * shrink zone->zone_start_pfn and zone->zone_spanned_pages.  in shrink_zone_span()
  418  zone->zone_start_pfn = pfn;  in shrink_zone_span()
  420  zone->zone_start_pfn = 0;  in shrink_zone_span()
  430  pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn,  in shrink_zone_span()
  433  zone->spanned_pages = pfn - zone->zone_start_pfn + 1;  in shrink_zone_span()
  435  zone->zone_start_pfn = 0;  in shrink_zone_span()
  449  unsigned long zone_end_pfn = zone->zone_start_pfn +  in update_pgdat_span()
  456  node_start_pfn = zone->zone_start_pfn;  in update_pgdat_span()
  463  if (zone->zone_start_pfn < node_start_pfn)  in update_pgdat_span()
  [all...] |
H A D | compaction.c |
  233   zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;  in reset_cached_positions()
  234   zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;  in reset_cached_positions()
  290   block_pfn = max(block_pfn, zone->zone_start_pfn);  in __reset_isolation_pfn()
  336   unsigned long migrate_pfn = zone->zone_start_pfn;  in __reset_isolation_suitable()
  708   if (block_start_pfn < cc->zone->zone_start_pfn)  in isolate_freepages_range()
  709   block_start_pfn = cc->zone->zone_start_pfn;  in isolate_freepages_range()
  1119  if (block_start_pfn < cc->zone->zone_start_pfn)  in isolate_migratepages_range()
  1120  block_start_pfn = cc->zone->zone_start_pfn;  in isolate_migratepages_range()
  1262  start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn);  in fast_isolate_around()
  1362  cc->zone->zone_start_pfn);  in fast_isolate_freepages()
  [all...] |
H A D | shuffle.c |
  84   unsigned long start_pfn = z->zone_start_pfn;  in __shuffle_zone()
  99   * page_j randomly selected in the span @zone_start_pfn to  in __shuffle_zone()
  113  j = z->zone_start_pfn +  in __shuffle_zone()
|
H A D | page_owner.c |
  270  unsigned long pfn = zone->zone_start_pfn, block_end_pfn;  in pagetypeinfo_showmixedcount_print()
  277  pfn = zone->zone_start_pfn;  in pagetypeinfo_showmixedcount_print()
  560  unsigned long pfn = zone->zone_start_pfn;  in init_pages_in_zone()
|
H A D | memblock.c |
  1269  if (zone->zone_start_pfn < epfn && spfn < epfn) {  in __next_mem_pfn_range_in_zone()
  1277  *out_spfn = max(zone->zone_start_pfn, spfn);  in __next_mem_pfn_range_in_zone()
|
H A D | vmstat.c |
  1524  unsigned long start_pfn = zone->zone_start_pfn;  in pagetypeinfo_showblockcount_print()
  1717  zone->zone_start_pfn);  in zoneinfo_show_print()
|
H A D | kmemleak.c |
  1451  unsigned long start_pfn = zone->zone_start_pfn;  in kmemleak_scan()
|
/kernel/linux/linux-5.10/include/linux/ |
H A D | mmzone.h |
  493  /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
  494  unsigned long zone_start_pfn;  member
  499  * spanned_pages = zone_end_pfn - zone_start_pfn;
  518  * zone_start_pfn and spanned_pages are protected by span_seqlock.
  633  return zone->zone_start_pfn + zone->spanned_pages;  in zone_end_pfn()
  638  return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);  in zone_spans_pfn()
  661  start_pfn + nr_pages <= zone->zone_start_pfn)  in zone_intersects()
|
/kernel/linux/linux-6.6/include/linux/ |
H A D | mmzone.h |
  883   /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
  884   unsigned long zone_start_pfn;  member
  889   * spanned_pages = zone_end_pfn - zone_start_pfn;
  915   * zone_start_pfn and spanned_pages are protected by span_seqlock.
  1051  return zone->zone_start_pfn + zone->spanned_pages;  in zone_end_pfn()
  1056  return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);  in zone_spans_pfn()
  1190  start_pfn + nr_pages <= zone->zone_start_pfn)  in zone_intersects()
|
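Both header versions define the same three helpers over zone_start_pfn and spanned_pages. A self-contained restatement of that arithmetic over a minimal struct stand-in (not the kernel header); note that zone_end_pfn() is an exclusive bound:

    #include <stdbool.h>
    #include <stdio.h>

    struct zone {
        unsigned long zone_start_pfn;
        unsigned long spanned_pages;  /* includes holes */
    };

    /* zone_end_pfn(): first PFN past the zone (exclusive bound). */
    static unsigned long zone_end_pfn(const struct zone *zone)
    {
        return zone->zone_start_pfn + zone->spanned_pages;
    }

    /* zone_spans_pfn(): does [zone_start_pfn, zone_end_pfn) cover pfn? */
    static bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
    {
        return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
    }

    /* zone_intersects(): does [start_pfn, start_pfn + nr_pages) overlap
     * the zone at all? An empty zone intersects nothing. */
    static bool zone_intersects(const struct zone *zone,
                                unsigned long start_pfn,
                                unsigned long nr_pages)
    {
        if (zone->spanned_pages == 0)
            return false;
        if (start_pfn >= zone_end_pfn(zone) ||
            start_pfn + nr_pages <= zone->zone_start_pfn)
            return false;
        return true;
    }

    int main(void)
    {
        struct zone z = { 0x1000, 0x4000 };
        printf("end=%#lx spans(0x2000)=%d intersects=%d\n",
               zone_end_pfn(&z), zone_spans_pfn(&z, 0x2000),
               zone_intersects(&z, 0x4800, 0x1000));
        return 0;
    }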
/kernel/linux/linux-6.6/kernel/power/ |
H A D | snapshot.c |
  636   zone_start = zone->zone_start_pfn;  in create_mem_extents()
  1259  for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)  in mark_free_pages()
  1360  for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)  in count_highmem_pages()
  1427  for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)  in count_data_pages()
  1540  for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)  in copy_data_pages()
|
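The snapshot.c hits above all share one loop shape: walk every PFN the zone spans and filter each one, because spanned_pages may include holes. A runnable sketch of that shape; pfn_valid() here is a fake with one hypothetical hole, where the kernel's version checks the memory map (and the real loops additionally check page ownership and flags):

    #include <stdbool.h>
    #include <stdio.h>

    struct zone { unsigned long zone_start_pfn, spanned_pages; };

    static unsigned long zone_end_pfn(const struct zone *z)
    {
        return z->zone_start_pfn + z->spanned_pages;
    }

    /* Stand-in for the kernel's pfn_valid(): pretend PFNs in one
     * hypothetical hole have no memmap. */
    static bool pfn_valid(unsigned long pfn)
    {
        return pfn < 0x2000 || pfn >= 0x2100;
    }

    int main(void)
    {
        struct zone z = { 0x1000, 0x4000 };
        unsigned long pfn, count = 0;
        unsigned long max_zone_pfn = zone_end_pfn(&z);

        /* Same loop shape as count_data_pages()/mark_free_pages(): walk
         * the whole span, skipping holes. */
        for (pfn = z.zone_start_pfn; pfn < max_zone_pfn; pfn++) {
            if (!pfn_valid(pfn))
                continue;
            count++;
        }
        printf("%lu valid PFNs of %lu spanned\n", count, z.spanned_pages);
        return 0;
    }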
/kernel/linux/linux-6.6/arch/arm64/kernel/ |
H A D | hibernate.c |
  267  for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {  in swsusp_mte_save_tags()
|
/kernel/linux/linux-5.10/kernel/power/ |
H A D | snapshot.c |
  596   zone_start = zone->zone_start_pfn;  in create_mem_extents()
  1252  for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)  in count_highmem_pages()
  1319  for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)  in count_data_pages()
  1413  for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)  in copy_data_pages()
|
/kernel/linux/linux-5.10/arch/arm64/kernel/ |
H A D | hibernate.c |
  339  for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {  in swsusp_mte_save_tags()
|