Searched refs: present_pages (Results 1 - 13 of 13), sorted by relevance
/kernel/linux/linux-5.10/lib/

show_mem.c
    27  total += zone->present_pages;  in show_mem()
    28  reserved += zone->present_pages - zone_managed_pages(zone);  in show_mem()
    31  highmem += zone->present_pages;  in show_mem()
/kernel/linux/linux-5.10/mm/
memory_hotplug.c
   829  zone->present_pages += nr_pages;  in online_pages()
   881  z->present_pages = 0;  in reset_node_present_pages()
   933  * clear all zones' present_pages because they will be updated in…  in hotadd_new_pgdat()
  1391  unsigned long present_pages = 0;  in node_states_check_changes_offline() (local)
  1407  present_pages += pgdat->node_zones[zt].present_pages;  in node_states_check_changes_offline()
  1408  if (zone_idx(zone) <= ZONE_NORMAL && nr_pages >= present_pages)  in node_states_check_changes_offline()
  1415  * Here we add the present_pages belonging to ZONE_HIGHMEM.  in node_states_check_changes_offline()
  1420  present_pages += pgdat->node_zones[ZONE_HIGHMEM].present_pages;  in node_states_check_changes_offline()
  (additional matches not shown)
page_alloc.c
  5719  K(zone->present_pages),  in show_free_areas()
  6653  zone->name, zone->present_pages,  in zone_pcp_init()
  6768  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
  6916  zone->present_pages = real_size;  in calculate_node_totalpages()
  7007  calc_memmap_size(unsigned long spanned_pages, unsigned long present_pages)  calc_memmap_size() (argument)
  7008  unsigned long present_pages)  in calc_memmap_size()
  7018  * So the (present_pages >> 4) heuristic is a tradeoff for that.  in calc_memmap_size()
  7020  if (spanned_pages > present_pages + (present_pages >> 4) &&  in calc_memmap_size()
  7022  pages = present_pages;  in calc_memmap_size()
  7124  freesize = zone->present_pages;  in free_area_init_core()
  (additional matches not shown)
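A sparse zone can span far more page frames than it actually backs, and the calc_memmap_size() hits above show the heuristic used to charge memmap overhead in that case: if spanned_pages exceeds present_pages by more than ~6% (present_pages >> 4), only the present pages are charged. Below is a minimal user-space sketch of that arithmetic, assuming illustrative PAGE_SHIFT and STRUCT_PAGE_SIZE values (the kernel uses sizeof(struct page) and PAGE_ALIGN()); the in-tree check also has a second condition that is cut off in the snippet and omitted here.

#include <stdio.h>

#define PAGE_SHIFT 12UL          /* assumed: 4 KiB pages */
#define STRUCT_PAGE_SIZE 64UL    /* assumed: stand-in for sizeof(struct page) */

/*
 * Sketch of the calc_memmap_size() heuristic from the hits above:
 * if the zone spans far more pages than are present (lots of holes),
 * assume the memmap only covers the present pages.  The
 * (present_pages >> 4) slack tolerates roughly 6% of holes.
 */
static unsigned long calc_memmap_size(unsigned long spanned_pages,
                                      unsigned long present_pages)
{
    unsigned long pages = spanned_pages;

    if (spanned_pages > present_pages + (present_pages >> 4))
        pages = present_pages;   /* sparse zone: charge present pages only */

    /* bytes of memmap, rounded up to whole pages, returned in pages */
    return (pages * STRUCT_PAGE_SIZE + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT;
}

int main(void)
{
    /* Zone with large holes: spans 1M pages, only 256K present. */
    printf("memmap pages: %lu\n", calc_memmap_size(1UL << 20, 1UL << 18));
    return 0;
}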
vmstat.c
  1668  zone->present_pages,  in zoneinfo_show_print()
compaction.c
  1898  score = zone->present_pages *…  in fragmentation_score_zone()
/kernel/linux/linux-6.6/mm/
show_mem.c
   343  K(zone->present_pages),  in show_free_areas()
   413  total += zone->present_pages;  in __show_mem()
   414  reserved += zone->present_pages - zone_managed_pages(zone);  in __show_mem()
   417  highmem += zone->present_pages;  in __show_mem()
memory_hotplug.c
   798  stats->movable_pages += zone->present_pages;  in auto_movable_stats_account_zone()
  1080  zone->present_pages += nr_pages;  in adjust_present_page_count()
  1820  unsigned long present_pages = 0;  in node_states_check_changes_offline() (local)
  1835  present_pages += pgdat->node_zones[zt].present_pages;  in node_states_check_changes_offline()
  1836  if (zone_idx(zone) <= ZONE_NORMAL && nr_pages >= present_pages)  in node_states_check_changes_offline()
  1848  present_pages += pgdat->node_zones[ZONE_MOVABLE].present_pages;  in node_states_check_changes_offline()
  1850  if (nr_pages >= present_pages)  in node_states_check_changes_offline()
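The node_states_check_changes_offline() hits in both trees follow one pattern: accumulate present_pages over the zones at or below a given zone index, then compare against the number of pages being offlined to decide whether the node is about to lose that class of memory. Here is a simplified sketch of that accumulation, with a flat zone array standing in for pgdat->node_zones and an illustrative, shortened zone_type enum (the kernel's enum has more entries, and the real function records the result in node-state change arguments rather than returning a bool):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative zone indices; the kernel's enum zone_type is longer. */
enum zone_type { ZONE_DMA, ZONE_NORMAL, ZONE_MOVABLE, MAX_NR_ZONES };

struct zone { unsigned long present_pages; };

/*
 * Sketch of the accumulation in node_states_check_changes_offline():
 * walk every zone at or below the target zone index and sum its
 * present_pages.  If offlining nr_pages removes at least that many,
 * the node no longer has memory in that zone range.
 */
static bool node_loses_zone_range(struct zone zones[], enum zone_type target,
                                  unsigned long nr_pages)
{
    unsigned long present_pages = 0;
    enum zone_type zt;

    for (zt = 0; zt <= target; zt++)
        present_pages += zones[zt].present_pages;

    return nr_pages >= present_pages;
}

int main(void)
{
    struct zone zones[MAX_NR_ZONES] = {
        [ZONE_DMA]     = { .present_pages = 4096  },
        [ZONE_NORMAL]  = { .present_pages = 65536 },
        [ZONE_MOVABLE] = { .present_pages = 32768 },
    };

    /* Offlining 70000 pages empties everything up to ZONE_NORMAL. */
    printf("loses normal memory: %d\n",
           node_loses_zone_range(zones, ZONE_NORMAL, 70000));
    return 0;
}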
mm_init.c
  1219  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
  1256  z->present_pages = 0;  in reset_memoryless_node_totalpages()
  1296  zone->present_pages = real_size;  in calculate_node_totalpages()
  1310  calc_memmap_size(unsigned long spanned_pages, unsigned long present_pages)  calc_memmap_size() (argument)
  1311  unsigned long present_pages)  in calc_memmap_size()
  1321  * So the (present_pages >> 4) heuristic is a tradeoff for that.  in calc_memmap_size()
  1323  if (spanned_pages > present_pages + (present_pages >> 4) &&  in calc_memmap_size()
  1325  pages = present_pages;  in calc_memmap_size()
  1539  * clear all zones' present_pages and managed_pages because they will…  in free_area_init_core_hotplug()
  1545  zone->present_pages…  in free_area_init_core_hotplug()
  (additional matches not shown)
vmstat.c
  1730  zone->present_pages,  in zoneinfo_show_print()
compaction.c
  2117  score = zone->present_pages * fragmentation_score_zone(zone);  in fragmentation_score_zone_weighted()
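In 6.6 the per-zone fragmentation score is weighted by zone size before it feeds the node-wide score, as the fragmentation_score_zone_weighted() hit shows. The sketch below reproduces that weighting: the stand-in fragmentation_score_zone() just returns a stored percentage, and the final division by the node's present pages (+1 to avoid dividing by zero) is my reading of the surrounding code, not part of the snippet.

#include <stdio.h>

/* Illustrative subset; node_present_pages stands in for
 * zone->zone_pgdat->node_present_pages in the real struct. */
struct zone {
    unsigned long present_pages;
    unsigned long node_present_pages;
    unsigned int  frag_score;   /* stand-in per-zone score, 0..100 */
};

/* Stand-in for the real per-zone score (an external-fragmentation %). */
static unsigned int fragmentation_score_zone(const struct zone *zone)
{
    return zone->frag_score;
}

/*
 * Weight a zone's fragmentation score by its share of the node's
 * present pages, mirroring the hit above:
 *     score = zone->present_pages * fragmentation_score_zone(zone);
 * The divide-by-node-total step is an assumption about the code
 * around the snippet, not shown in it.
 */
static unsigned int fragmentation_score_zone_weighted(const struct zone *zone)
{
    unsigned long score = zone->present_pages * fragmentation_score_zone(zone);

    return (unsigned int)(score / (zone->node_present_pages + 1));
}

int main(void)
{
    struct zone z = { .present_pages      = 1UL << 18,
                      .node_present_pages = 1UL << 20,
                      .frag_score         = 80 };

    /* Zone holds 1/4 of the node, so a score of 80 contributes
     * about 20 (prints 19: integer division plus the +1 round down). */
    printf("weighted score: %u\n", fragmentation_score_zone_weighted(&z));
    return 0;
}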
page_alloc.c
  5501  zone->present_pages, zone_batchsize(zone));  in zone_pcp_init()
/kernel/linux/linux-5.10/include/linux/
mmzone.h
   501  * present_pages is physical pages existing within the zone, which
   503  * present_pages = spanned_pages - absent_pages(pages in holes);
   508  * managed_pages = present_pages - reserved_pages;
   510  * So present_pages may be used by memory hotplug or memory power
   512  * (present_pages - managed_pages). And managed_pages should be used
   527  * Write access to present_pages at runtime should be protected by
   529  * present_pages should get_online_mems() to get a stable value.
   533  unsigned long present_pages;  (member)
   547  /* see spanned/present_pages for more description */
   945  return zone->present_pages;  in populated_zone()
  (additional matches not shown)
/kernel/linux/linux-6.6/include/linux/
mmzone.h
   891  * present_pages is physical pages existing within the zone, which
   893  * present_pages = spanned_pages - absent_pages(pages in holes);
   902  * managed_pages = present_pages - reserved_pages;
   907  * So present_pages may be used by memory hotplug or memory power
   909  * (present_pages - managed_pages). And managed_pages should be used
   924  * Write access to present_pages at runtime should be protected by
   926  * present_pages should use get_online_mems() to get a stable value.
   930  unsigned long present_pages;  (member)
   950  /* see spanned/present_pages for more description */
  1539  return zone->present_pages;  in populated_zone()
  (additional matches not shown)
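Both mmzone.h excerpts state the same accounting identities: present_pages = spanned_pages - absent_pages (pages in holes) and managed_pages = present_pages - reserved_pages, with populated_zone() keying off present_pages. Below is a small self-checking sketch of those identities on made-up numbers; the struct is an illustrative subset (in the kernel, managed_pages is an atomic_long_t read through zone_managed_pages(), as the show_mem.c hits use).

#include <assert.h>
#include <stdio.h>

/* Illustrative subset of struct zone; field meanings follow mmzone.h. */
struct zone {
    unsigned long spanned_pages;  /* pfn range covered, holes included  */
    unsigned long present_pages;  /* spanned_pages minus pages in holes */
    unsigned long managed_pages;  /* present_pages minus reserved pages */
};

/* Mirrors populated_zone() from the hits: a zone with present pages. */
static int populated_zone(const struct zone *zone)
{
    return zone->present_pages != 0;
}

int main(void)
{
    /* Hypothetical zone: spans 1M pages, 64K in holes, 16K reserved. */
    unsigned long spanned  = 1UL << 20;
    unsigned long absent   = 1UL << 16;
    unsigned long reserved = 1UL << 14;

    struct zone z = {
        .spanned_pages = spanned,
        .present_pages = spanned - absent,
        .managed_pages = spanned - absent - reserved,
    };

    /* The two identities quoted in the mmzone.h comments. */
    assert(z.present_pages == z.spanned_pages - absent);
    assert(z.managed_pages == z.present_pages - reserved);
    assert(populated_zone(&z));

    printf("spanned=%lu present=%lu managed=%lu\n",
           z.spanned_pages, z.present_pages, z.managed_pages);
    return 0;
}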
Completed in 46 milliseconds