/kernel/linux/linux-6.6/arch/x86/mm/ |
highmem_32.c |
    25  zone_end_pfn = zone_start_pfn + zone->spanned_pages;  in set_highmem_pages_init()
|
/kernel/linux/linux-5.10/mm/ |
memory_hotplug.c |
    417  zone->spanned_pages = zone_end_pfn(zone) - pfn;  in shrink_zone_span()
    421  zone->spanned_pages = 0;  in shrink_zone_span()
    426  * shrink zone->spanned_pages.  in shrink_zone_span()
    433  zone->spanned_pages = pfn - zone->zone_start_pfn + 1;  in shrink_zone_span()
    436  zone->spanned_pages = 0;  in shrink_zone_span()
    450  zone->spanned_pages;  in update_pgdat_span()
    453  if (!zone->spanned_pages)  in update_pgdat_span()
    669  zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;  in resize_zone_range()
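The shrink_zone_span() / resize_zone_range() hits above are the memory hot(un)plug paths that keep zone->spanned_pages in step with the pfn range the zone actually covers. Below is a minimal userspace sketch of the growth case, mirroring the arithmetic visible at line 669; the cut-down struct and the hard-coded pfn values are stand-ins for illustration, not kernel code.

#include <stdio.h>

/* Stand-in for the span-related fields of struct zone. */
struct zone_span {
    unsigned long zone_start_pfn;
    unsigned long spanned_pages;   /* zone_end_pfn - zone_start_pfn */
};

static unsigned long zone_end_pfn(const struct zone_span *z)
{
    return z->zone_start_pfn + z->spanned_pages;
}

/*
 * Grow the span to cover a hot-added pfn range, mirroring the arithmetic
 * in resize_zone_range(): the start may only move down, and the new span
 * runs to whichever end pfn is larger.
 */
static void resize_zone_range(struct zone_span *z, unsigned long start_pfn,
                              unsigned long nr_pages)
{
    unsigned long old_end_pfn = zone_end_pfn(z);

    if (z->spanned_pages == 0 || start_pfn < z->zone_start_pfn)
        z->zone_start_pfn = start_pfn;

    z->spanned_pages = (start_pfn + nr_pages > old_end_pfn ?
                        start_pfn + nr_pages : old_end_pfn) - z->zone_start_pfn;
}

int main(void)
{
    struct zone_span z = { .zone_start_pfn = 0x10000, .spanned_pages = 0x8000 };

    resize_zone_range(&z, 0x20000, 0x4000);      /* hot-add a range above the old end */
    printf("start=%#lx spanned=%#lx end=%#lx\n",
           z.zone_start_pfn, z.spanned_pages, zone_end_pfn(&z));
    return 0;
}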
|
shuffle.c |
    100  * @spanned_pages.  in __shuffle_zone()
    114  ALIGN_DOWN(get_random_long() % z->spanned_pages,  in __shuffle_zone()
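__shuffle_zone() above draws random positions inside the zone span while randomizing the free lists. The sketch below illustrates that step, picking a random, order-aligned offset within spanned_pages; ALIGN_DOWN is re-derived here for power-of-two alignment, and the random source and shuffle order are portable stand-ins for the kernel's.

#include <stdio.h>
#include <stdlib.h>

/* Same rounding the kernel's ALIGN_DOWN() gives for power-of-two alignment. */
#define ALIGN_DOWN(x, a)   ((x) & ~((unsigned long)(a) - 1))

#define SHUFFLE_ORDER 10                       /* assumption: order-10 (4 MiB) blocks */
#define ORDER_PAGES   (1UL << SHUFFLE_ORDER)

int main(void)
{
    unsigned long zone_start_pfn = 0x100000;   /* made-up zone span */
    unsigned long spanned_pages  = 0x40000;

    /* Random pfn inside [zone_start_pfn, zone_end_pfn), aligned to the shuffle order. */
    for (int i = 0; i < 4; i++) {
        unsigned long off = ALIGN_DOWN((unsigned long)random() % spanned_pages,
                                       ORDER_PAGES);
        printf("candidate pfn %#lx\n", zone_start_pfn + off);
    }
    return 0;
}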
|
page_owner.c |
    271  unsigned long end_pfn = pfn + zone->spanned_pages;  in pagetypeinfo_showmixedcount_print()
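pagetypeinfo_showmixedcount_print() walks every pfn from zone_start_pfn up to zone_start_pfn + spanned_pages, so it has to skip pfns that are spanned but absent (holes). A schematic version of that walk; pfn_is_present() is a made-up stand-in for the kernel's validity checks, not a real API.

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical hole test: pretend pfns 0x1200-0x13ff are a hole in the span. */
static bool pfn_is_present(unsigned long pfn)
{
    return pfn < 0x1200 || pfn >= 0x1400;
}

int main(void)
{
    unsigned long zone_start_pfn = 0x1000;     /* made-up zone */
    unsigned long spanned_pages  = 0x1000;
    unsigned long end_pfn = zone_start_pfn + spanned_pages;   /* as at page_owner.c:271 */
    unsigned long counted = 0;

    for (unsigned long pfn = zone_start_pfn; pfn < end_pfn; pfn++) {
        if (!pfn_is_present(pfn))
            continue;           /* spanned but absent: skip the hole */
        counted++;              /* the real code inspects struct page here */
    }

    printf("spanned=%lu walked(present)=%lu\n", spanned_pages, counted);
    return 0;
}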
|
page_alloc.c |
    602   sp = zone->spanned_pages;  in page_outside_zone_boundaries()
    6407  unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;  in memmap_init_zone_range()
    6915  zone->spanned_pages = size;  in calculate_node_totalpages()
    7007  static unsigned long __init calc_memmap_size(unsigned long spanned_pages,  in calc_memmap_size() argument
    7010  unsigned long pages = spanned_pages;  in calc_memmap_size()
    7020  if (spanned_pages > present_pages + (present_pages >> 4) &&  in calc_memmap_size()
    7123  size = zone->spanned_pages;  in free_area_init_core()
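calc_memmap_size() decides whether a zone's memmap (its struct page array) is charged against spanned_pages or only against present_pages: when the span exceeds the present pages by more than about 1/16, the zone is treated as hole-heavy and only present pages are charged (on SPARSEMEM builds). A self-contained rendering of that heuristic; the page size and struct page size are assumptions for the example.

#include <stdio.h>

#define PAGE_SHIFT        12                    /* assumption: 4 KiB pages */
#define PAGE_SIZE         (1UL << PAGE_SHIFT)
#define STRUCT_PAGE_SIZE  64UL                  /* assumption: 64-byte struct page */
#define PAGE_ALIGN(x)     (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/*
 * Memmap pages charged to a zone, following the heuristic visible at line
 * 7020 above: if the span is more than ~1/16 larger than the present pages
 * (a hole-heavy zone on a sparse memmap), charge only present_pages.
 */
static unsigned long calc_memmap_size(unsigned long spanned_pages,
                                      unsigned long present_pages)
{
    unsigned long pages = spanned_pages;

    if (spanned_pages > present_pages + (present_pages >> 4))
        pages = present_pages;   /* holes carry no struct pages in this case */

    return PAGE_ALIGN(pages * STRUCT_PAGE_SIZE) >> PAGE_SHIFT;
}

int main(void)
{
    /* 1 GiB span with only 512 MiB present: the hole-heavy branch is taken. */
    printf("memmap pages = %lu\n", calc_memmap_size(0x40000, 0x20000));
    return 0;
}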
|
vmstat.c |
    1667  zone->spanned_pages,  in zoneinfo_show_print()
|
/kernel/linux/linux-5.10/arch/x86/mm/ |
highmem_32.c |
    83  zone_end_pfn = zone_start_pfn + zone->spanned_pages;  in set_highmem_pages_init()
|
/kernel/linux/linux-6.6/mm/ |
shuffle.c |
    99   * @spanned_pages.  in __shuffle_zone()
    113  ALIGN_DOWN(get_random_long() % z->spanned_pages,  in __shuffle_zone()
|
mm_init.c |
    915   unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;  in memmap_init_zone_range()
    1255  z->spanned_pages = 0;  in reset_memoryless_node_totalpages()
    1295  zone->spanned_pages = spanned;  in calculate_node_totalpages()
    1310  static unsigned long __init calc_memmap_size(unsigned long spanned_pages,  in calc_memmap_size() argument
    1313  unsigned long pages = spanned_pages;  in calc_memmap_size()
    1323  if (spanned_pages > present_pages + (present_pages >> 4) &&  in calc_memmap_size()
    1449  zone->spanned_pages);  in setup_usemap()
    1572  size = zone->spanned_pages;  in free_area_init_core()
|
memory_hotplug.c |
    476  zone->spanned_pages = zone_end_pfn(zone) - pfn;  in shrink_zone_span()
    480  zone->spanned_pages = 0;  in shrink_zone_span()
    485  * shrink zone->spanned_pages.  in shrink_zone_span()
    492  zone->spanned_pages = pfn - zone->zone_start_pfn + 1;  in shrink_zone_span()
    495  zone->spanned_pages = 0;  in shrink_zone_span()
    510  if (!zone->spanned_pages)  in update_pgdat_span()
    713  zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;  in resize_zone_range()
|
vmstat.c |
    1729  zone->spanned_pages,  in zoneinfo_show_print()
|
page_alloc.c |
    458  sp = zone->spanned_pages;  in page_outside_zone_boundaries()
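page_outside_zone_boundaries() samples zone_start_pfn and spanned_pages under the zone's span seqlock (the mmzone.h comments below note both fields are protected by span_seqlock) and retries if a hotplug resize raced with the read. The toy below only shows the shape of that retry loop; the counter is a plain variable, not the kernel's seqlock API, so it is not a real concurrency primitive.

#include <stdio.h>
#include <stdbool.h>

/* Toy zone with an even/odd sequence counter guarding its span fields. */
struct zone {
    unsigned long zone_start_pfn;
    unsigned long spanned_pages;
    unsigned long span_seq;       /* odd while a writer is resizing the span */
};

static bool page_outside_zone_boundaries(const struct zone *zone, unsigned long pfn)
{
    unsigned long seq, start, sp;
    bool ret;

    do {
        seq   = zone->span_seq;          /* begin: snapshot the sequence */
        start = zone->zone_start_pfn;
        sp    = zone->spanned_pages;
        ret   = pfn < start || pfn >= start + sp;
    } while (seq & 1 || seq != zone->span_seq);   /* retry if a resize raced */

    return ret;
}

int main(void)
{
    struct zone z = { .zone_start_pfn = 0x1000, .spanned_pages = 0x2000, .span_seq = 0 };

    printf("pfn 0x1800 outside? %d\n", page_outside_zone_boundaries(&z, 0x1800));
    printf("pfn 0x4000 outside? %d\n", page_outside_zone_boundaries(&z, 0x4000));
    return 0;
}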
|
/kernel/linux/linux-5.10/include/linux/ |
mmzone.h |
    497  * spanned_pages is the total pages spanned by the zone, including
    499  * spanned_pages = zone_end_pfn - zone_start_pfn;
    503  * present_pages = spanned_pages - absent_pages(pages in holes);
    518  * zone_start_pfn and spanned_pages are protected by span_seqlock.
    532  unsigned long spanned_pages;  member
    633  return zone->zone_start_pfn + zone->spanned_pages;  in zone_end_pfn()
    648  return zone->spanned_pages == 0;  in zone_is_empty()
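The mmzone.h comment above is the canonical definition: spanned_pages counts every pfn between the zone's first and last page, holes included, while present_pages subtracts the pages in holes. A small numeric illustration of those invariants and of the two inline helpers matched above, using made-up values:

#include <stdio.h>
#include <stdbool.h>

/* Only the span-related fields of struct zone, for illustration. */
struct zone {
    unsigned long zone_start_pfn;
    unsigned long spanned_pages;   /* zone_end_pfn - zone_start_pfn, holes included */
    unsigned long present_pages;   /* spanned_pages - absent_pages (pages in holes) */
};

/* Mirrors the two inline helpers matched at lines 633 and 648. */
static unsigned long zone_end_pfn(const struct zone *zone)
{
    return zone->zone_start_pfn + zone->spanned_pages;
}

static bool zone_is_empty(const struct zone *zone)
{
    return zone->spanned_pages == 0;
}

int main(void)
{
    /* Made-up zone: pfns 0x100000..0x140000 with a 0x4000-pfn hole inside. */
    struct zone z = {
        .zone_start_pfn = 0x100000,
        .spanned_pages  = 0x40000,
        .present_pages  = 0x40000 - 0x4000,
    };

    printf("end_pfn=%#lx empty=%d absent=%#lx\n",
           zone_end_pfn(&z), zone_is_empty(&z),
           z.spanned_pages - z.present_pages);
    return 0;
}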
|
/kernel/linux/linux-6.6/include/linux/ |
mmzone.h |
    887   * spanned_pages is the total pages spanned by the zone, including
    889   * spanned_pages = zone_end_pfn - zone_start_pfn;
    893   * present_pages = spanned_pages - absent_pages(pages in holes);
    915   * zone_start_pfn and spanned_pages are protected by span_seqlock.
    929   unsigned long spanned_pages;  member
    1051  return zone->zone_start_pfn + zone->spanned_pages;  in zone_end_pfn()
    1066  return zone->spanned_pages == 0;  in zone_is_empty()
|
/kernel/linux/linux-5.10/arch/powerpc/mm/ |
numa.c |
    1096  u64 spanned_pages = end_pfn - start_pfn;  in setup_node_data() local
    1120  NODE_DATA(nid)->node_spanned_pages = spanned_pages;  in setup_node_data()
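setup_node_data() applies the same subtraction at node (pgdat) level: node_spanned_pages is the distance from the node's first to its last pfn, with any gaps between the node's memory ranges still counted in the span. A hedged sketch that derives that span from a list of per-node ranges; the range table is invented for the example.

#include <stdio.h>

struct mem_range { unsigned long start_pfn, end_pfn; };   /* [start, end) */

int main(void)
{
    /* Invented memory layout for one NUMA node: two ranges with a gap. */
    struct mem_range node_ranges[] = {
        { 0x100000, 0x180000 },
        { 0x1c0000, 0x200000 },
    };
    int n = sizeof(node_ranges) / sizeof(node_ranges[0]);

    unsigned long start_pfn = node_ranges[0].start_pfn;
    unsigned long end_pfn   = node_ranges[0].end_pfn;

    for (int i = 1; i < n; i++) {
        if (node_ranges[i].start_pfn < start_pfn)
            start_pfn = node_ranges[i].start_pfn;
        if (node_ranges[i].end_pfn > end_pfn)
            end_pfn = node_ranges[i].end_pfn;
    }

    /* As in setup_node_data(): the gap between the ranges is still spanned. */
    unsigned long spanned_pages = end_pfn - start_pfn;

    printf("node_start_pfn=%#lx node_spanned_pages=%#lx\n", start_pfn, spanned_pages);
    return 0;
}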
|
/kernel/linux/linux-6.6/arch/powerpc/mm/ |
numa.c |
    1085  u64 spanned_pages = end_pfn - start_pfn;  in setup_node_data() local
    1109  NODE_DATA(nid)->node_spanned_pages = spanned_pages;  in setup_node_data()
|
/kernel/linux/linux-5.10/kernel/ |
crash_core.c |
    494  VMCOREINFO_OFFSET(zone, spanned_pages);  in crash_save_vmcoreinfo_init()
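crash_save_vmcoreinfo_init() records the byte offset of spanned_pages within struct zone in the vmcoreinfo note, so that dump tools such as makedumpfile can read the field from a crash image without debug symbols. A rough illustration of what VMCOREINFO_OFFSET boils down to, using a trimmed stand-in struct; the printed offset depends entirely on the made-up layout here, not on any real kernel configuration.

#include <stdio.h>
#include <stddef.h>

/* Trimmed stand-in; the real struct zone has many more fields. */
struct zone {
    unsigned long _watermark[4];
    unsigned long zone_start_pfn;
    unsigned long spanned_pages;
    unsigned long present_pages;
};

/* The kernel's macro appends a line of this shape to the vmcoreinfo note. */
#define VMCOREINFO_OFFSET(name, field) \
    printf("OFFSET(%s.%s)=%zu\n", #name, #field, offsetof(struct name, field))

int main(void)
{
    VMCOREINFO_OFFSET(zone, spanned_pages);   /* mirrors the call at line 494 */
    return 0;
}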
|
/kernel/linux/linux-6.6/kernel/ |
crash_core.c |
    657  VMCOREINFO_OFFSET(zone, spanned_pages);  in crash_save_vmcoreinfo_init()
|
/kernel/linux/linux-5.10/kernel/power/ |
snapshot.c |
    1174  rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);  in snapshot_additional_pages()
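snapshot_additional_pages() sizes the hibernation memory bitmap from the zone span: one bit per spanned pfn, grouped into page-sized blocks, so the leaf block count is DIV_ROUND_UP(spanned_pages, BM_BITS_PER_BLOCK) as matched above (the real function then accounts for additional radix-tree levels on top). A worked version of that arithmetic with a 4 KiB page size assumed:

#include <stdio.h>

#define PAGE_SIZE          4096UL                       /* assumption: 4 KiB pages */
#define BITS_PER_BYTE      8
#define BM_BITS_PER_BLOCK  (PAGE_SIZE * BITS_PER_BYTE)  /* bits held by one bitmap page */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    /* Made-up zone spanning 4 GiB worth of pfns, holes included. */
    unsigned long spanned_pages = 0x100000;

    /* One leaf bitmap block per BM_BITS_PER_BLOCK spanned pfns, as at line 1174. */
    unsigned long blocks = DIV_ROUND_UP(spanned_pages, BM_BITS_PER_BLOCK);

    printf("spanned=%lu pfns -> %lu bitmap pages\n", spanned_pages, blocks);
    return 0;
}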
|
/kernel/linux/linux-6.6/kernel/power/ |
snapshot.c |
    1230  rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);  in snapshot_additional_pages()
|