Lines Matching defs:zone

39 /* zero numa counters within a zone */
40 static void zero_zone_numa_counters(struct zone *zone)
45 atomic_long_set(&zone->vm_numa_stat[item], 0);
47 per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item]
55 struct zone *zone;
57 for_each_populated_zone(zone)
58 zero_zone_numa_counters(zone);
159 * Manage combined zone based / global counters
172 int calculate_pressure_threshold(struct zone *zone)
185 watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
196 int calculate_normal_threshold(struct zone *zone)
203 * of memory per zone. More memory means that we can defer updates for
231 mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT);
244 * Refresh the thresholds for each zone.
249 struct zone *zone;
260 for_each_populated_zone(zone) {
261 struct pglist_data *pgdat = zone->zone_pgdat;
264 threshold = calculate_normal_threshold(zone);
269 per_cpu_ptr(zone->pageset, cpu)->stat_threshold
272 /* Base nodestat threshold on the largest populated zone. */
283 tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
286 zone->percpu_drift_mark = high_wmark_pages(zone) +
292 int (*calculate_pressure)(struct zone *))
294 struct zone *zone;
300 zone = &pgdat->node_zones[i];
301 if (!zone->percpu_drift_mark)
304 threshold = (*calculate_pressure)(zone);
306 per_cpu_ptr(zone->pageset, cpu)->stat_threshold
316 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
319 struct per_cpu_pageset __percpu *pcp = zone->pageset;
329 zone_page_state_add(x, zone, item);
365 * argument instead of struct zone *. This allows the inclusion of the code
378 * However, the code must first determine the differential location in a zone
384 void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
386 struct per_cpu_pageset __percpu *pcp = zone->pageset;
395 zone_page_state_add(v + overstep, zone, item);
430 void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
432 struct per_cpu_pageset __percpu *pcp = zone->pageset;
441 zone_page_state_add(v - overstep, zone, item);
481 * mod_state() modifies the zone counter state through atomic per cpu
489 static inline void mod_zone_state(struct zone *zone,
492 struct per_cpu_pageset __percpu *pcp = zone->pageset;
497 z = 0; /* overflow to zone counters */
507 * for all cpus in a zone.
517 /* Overflow must be added to zone counters */
524 zone_page_state_add(z, zone, item);
527 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
530 mod_zone_state(zone, item, delta, 0);
616 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
622 __mod_zone_page_state(zone, item, delta);
630 struct zone *zone;
632 zone = page_zone(page);
634 __inc_zone_state(zone, item);
744 * Update the zone counters for the current cpu.
753 * statistics in the remote zone struct as well as the global cachelines
762 struct zone *zone;
771 for_each_populated_zone(zone) {
772 struct per_cpu_pageset __percpu *p = zone->pageset;
780 atomic_long_add(v, &zone->vm_stat[i]);
795 atomic_long_add(v, &zone->vm_numa_stat[i]);
817 if (zone_to_nid(zone) == numa_node_id()) {
826 drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
864 struct zone *zone;
872 for_each_populated_zone(zone) {
875 p = per_cpu_ptr(zone->pageset, cpu);
883 atomic_long_add(v, &zone->vm_stat[i]);
894 atomic_long_add(v, &zone->vm_numa_stat[i]);
924 * this is only called if !populated_zone(zone), which implies no other users of
927 void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
935 atomic_long_add(v, &zone->vm_stat[i]);
945 atomic_long_add(v, &zone->vm_numa_stat[i]);
953 void __inc_numa_state(struct zone *zone,
956 struct per_cpu_pageset __percpu *pcp = zone->pageset;
963 zone_numa_state_add(v, zone, item);
976 struct zone *zones = NODE_DATA(node)->node_zones;
993 struct zone *zones = NODE_DATA(node)->node_zones;
1035 * Calculate the number of free pages in a zone, how many contiguous
1042 static void fill_contig_page_info(struct zone *zone,
1056 blocks = zone->free_area[order].nr_free;
1100 * Calculates external fragmentation within a zone wrt the given order.
1104 unsigned int extfrag_for_order(struct zone *zone, unsigned int order)
1108 fill_contig_page_info(zone, order, &info);
1118 int fragmentation_index(struct zone *zone, unsigned int order)
1122 fill_contig_page_info(zone, order, &info);
1419 void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
1421 struct zone *zone;
1422 struct zone *node_zones = pgdat->node_zones;
1425 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
1426 if (assert_populated && !populated_zone(zone))
1430 spin_lock_irqsave(&zone->lock, flags);
1431 print(m, pgdat, zone);
1433 spin_unlock_irqrestore(&zone->lock, flags);
1440 struct zone *zone)
1444 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1446 seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
1451 * This walks the free areas for each zone.
1461 pg_data_t *pgdat, struct zone *zone)
1466 seq_printf(m, "Node %4d, zone %8s, type %12s ",
1468 zone->name,
1476 area = &(zone->free_area[order]);
1494 spin_unlock_irq(&zone->lock);
1496 spin_lock_irq(&zone->lock);
1520 pg_data_t *pgdat, struct zone *zone)
1524 unsigned long start_pfn = zone->zone_start_pfn;
1525 unsigned long end_pfn = zone_end_pfn(zone);
1535 if (page_zone(page) != zone)
1545 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1629 static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
1634 struct zone *compare = &pgdat->node_zones[zid];
1637 return zone == compare;
1644 struct zone *zone)
1647 seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
1648 if (is_zone_first_populated(pgdat, zone)) {
1663 zone_page_state(zone, NR_FREE_PAGES),
1664 min_wmark_pages(zone),
1665 low_wmark_pages(zone),
1666 high_wmark_pages(zone),
1667 zone->spanned_pages,
1668 zone->present_pages,
1669 zone_managed_pages(zone));
1673 zone->lowmem_reserve[0]);
1674 for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
1675 seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
1679 if (!populated_zone(zone)) {
1686 zone_page_state(zone, i));
1691 zone_numa_state_snapshot(zone, i));
1698 pageset = per_cpu_ptr(zone->pageset, i);
1717 zone->zone_start_pfn);
1914 struct zone *zone;
1916 for_each_populated_zone(zone) {
1917 struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu);
2105 pg_data_t *pgdat, struct zone *zone)
2111 seq_printf(m, "Node %d, zone %8s ",
2113 zone->name);
2115 fill_contig_page_info(zone, order, &info);
2155 pg_data_t *pgdat, struct zone *zone)
2160 /* Alloc on stack as interrupts are disabled for zone walk */
2163 seq_printf(m, "Node %d, zone %8s ",
2165 zone->name);
2167 fill_contig_page_info(zone, order, &info);
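
The matches above mostly orbit one mechanism: each zone keeps small per-CPU differentials (vm_stat_diff, vm_numa_stat_diff) that absorb frequent counter updates, and only when a differential exceeds the per-pageset stat_threshold (sized by calculate_normal_threshold() from zone_managed_pages()) is it folded into the shared atomic zone->vm_stat counters, keeping the hot path off the global cachelines. Below is a minimal userspace sketch of that fold-on-threshold idea for a single stat item; it is not the kernel implementation, and FOLD_THRESHOLD, cpu_diff, global_stat and mod_stat() are hypothetical stand-ins for pcp->stat_threshold, pcp->vm_stat_diff[item], zone->vm_stat[item] and __mod_zone_page_state().

/*
 * Userspace sketch (assumption: NOT the kernel code) of the
 * fold-on-threshold pattern used by the vmstat counters above.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define FOLD_THRESHOLD 32                /* the kernel sizes this per zone */

static atomic_long global_stat = 0;      /* shared counter, rarely touched */
static _Thread_local long cpu_diff;      /* cheap per-thread differential */

static void mod_stat(long delta)
{
	long x = cpu_diff + delta;

	if (labs(x) > FOLD_THRESHOLD) {
		/* differential overflowed: fold it into the shared counter */
		atomic_fetch_add(&global_stat, x);
		x = 0;
	}
	cpu_diff = x;
}

int main(void)
{
	for (int i = 0; i < 1000; i++)
		mod_stat(+1);

	/* drain the leftover differential, as refresh_cpu_vm_stats() does */
	atomic_fetch_add(&global_stat, cpu_diff);
	cpu_diff = 0;

	printf("global_stat = %ld\n", atomic_load(&global_stat));
	return 0;
}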