Lines Matching defs:zone (mm/compaction.c; the leading number on each line is that file's line number)
61 * the "fragmentation score" of a node/zone.
160 void defer_compaction(struct zone *zone, int order)
162 zone->compact_considered = 0;
163 zone->compact_defer_shift++;
165 if (order < zone->compact_order_failed)
166 zone->compact_order_failed = order;
168 if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
169 zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
171 trace_mm_compaction_defer_compaction(zone, order);
175 bool compaction_deferred(struct zone *zone, int order)
177 unsigned long defer_limit = 1UL << zone->compact_defer_shift;
179 if (order < zone->compact_order_failed)
183 if (++zone->compact_considered >= defer_limit) {
184 zone->compact_considered = defer_limit;
188 trace_mm_compaction_deferred(zone, order);
198 void compaction_defer_reset(struct zone *zone, int order,
202 zone->compact_considered = 0;
203 zone->compact_defer_shift = 0;
205 if (order >= zone->compact_order_failed)
206 zone->compact_order_failed = order + 1;
208 trace_mm_compaction_defer_reset(zone, order);
212 bool compaction_restarting(struct zone *zone, int order)
214 if (order < zone->compact_order_failed)
217 return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
218 zone->compact_considered >= 1UL << zone->compact_defer_shift;
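Together, defer_compaction(), compaction_deferred(), compaction_defer_reset() and compaction_restarting() implement an exponential backoff: each failure doubles how many subsequent attempts are skipped (capped at 1 << COMPACT_MAX_DEFER_SHIFT), and a success clears the state. A minimal userspace model of that state machine, assuming the usual in-kernel COMPACT_MAX_DEFER_SHIFT of 6 and a simplified stand-in for struct zone:

#include <stdbool.h>
#include <stdio.h>

#define COMPACT_MAX_DEFER_SHIFT 6	/* assumption: the usual in-kernel limit */

/* Simplified stand-in for the deferral fields of struct zone. */
struct zone_model {
	unsigned long compact_considered;
	unsigned int compact_defer_shift;
	int compact_order_failed;
};

/* After a failed compaction: skip the next 1 << defer_shift attempts. */
static void defer_compaction(struct zone_model *zone, int order)
{
	zone->compact_considered = 0;
	if (zone->compact_defer_shift < COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift++;
	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;
}

/* Should this allocation attempt skip direct compaction? */
static bool compaction_deferred(struct zone_model *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;			/* this order has not failed recently */
	if (++zone->compact_considered >= defer_limit) {
		zone->compact_considered = defer_limit;
		return false;			/* enough attempts skipped, try again */
	}
	return true;
}

/* After a successful compaction: drop the backoff for this order. */
static void compaction_defer_reset(struct zone_model *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift = 0;
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;
}

int main(void)
{
	struct zone_model z = { .compact_order_failed = 9 };
	int i;

	defer_compaction(&z, 9);		/* two failures at order 9 ... */
	defer_compaction(&z, 9);		/* ... so the next three attempts are skipped */
	for (i = 0; i < 5; i++)
		printf("attempt %d deferred: %d\n", i, compaction_deferred(&z, 9));
	compaction_defer_reset(&z, 9);		/* success clears the backoff */
	return 0;
}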
231 static void reset_cached_positions(struct zone *zone)
233 zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
234 zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
235 zone->compact_cached_free_pfn =
236 pageblock_start_pfn(zone_end_pfn(zone) - 1);
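reset_cached_positions() re-arms the two scanners at opposite ends of the zone: the migration scanner at zone_start_pfn and the free scanner at the start of the zone's last pageblock. A small sketch of the pageblock rounding this relies on, assuming the common pageblock_order of 9 (512 base pages):

#include <stdio.h>

/* Assumption: pageblock_order 9, i.e. 512-page (2 MiB with 4 KiB pages) blocks. */
#define PAGEBLOCK_ORDER		9
#define PAGEBLOCK_NR_PAGES	(1UL << PAGEBLOCK_ORDER)

/* Round a PFN down to the start of its pageblock, as pageblock_start_pfn() does. */
static unsigned long pageblock_start_pfn(unsigned long pfn)
{
	return pfn & ~(PAGEBLOCK_NR_PAGES - 1);
}

int main(void)
{
	unsigned long zone_start_pfn = 0x10000;
	unsigned long zone_end_pfn = 0x4f3a0;	/* a zone need not end on a block boundary */

	/* The migration scanner restarts at the bottom of the zone... */
	printf("migrate scanner: %#lx\n", zone_start_pfn);
	/* ...while the free scanner restarts at the start of the last pageblock. */
	printf("free scanner:    %#lx\n", pageblock_start_pfn(zone_end_pfn - 1));
	return 0;
}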
258 __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
268 if (zone != page_zone(page))
288 /* Ensure the start of the pageblock or zone is online and valid */
290 block_pfn = max(block_pfn, zone->zone_start_pfn);
297 /* Ensure the end of the pageblock or zone is online and valid */
299 block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
334 static void __reset_isolation_suitable(struct zone *zone)
336 unsigned long migrate_pfn = zone->zone_start_pfn;
337 unsigned long free_pfn = zone_end_pfn(zone) - 1;
343 if (!zone->compact_blockskip_flush)
346 zone->compact_blockskip_flush = false;
349 * Walk the zone and update pageblock skip information. Source looks
359 if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) &&
363 zone->compact_init_migrate_pfn = reset_migrate;
364 zone->compact_cached_migrate_pfn[0] = reset_migrate;
365 zone->compact_cached_migrate_pfn[1] = reset_migrate;
369 if (__reset_isolation_pfn(zone, free_pfn, free_set, true) &&
373 zone->compact_init_free_pfn = reset_free;
374 zone->compact_cached_free_pfn = reset_free;
380 zone->compact_cached_migrate_pfn[0] = migrate_pfn;
381 zone->compact_cached_migrate_pfn[1] = migrate_pfn;
382 zone->compact_cached_free_pfn = free_pfn;
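__reset_isolation_suitable() walks the zone from both ends at once, clearing pageblock skip hints, and remembers the first pageblock on each side that it actually reset as the new starting point for the corresponding scanner. A hedged model of that converging walk, with a hypothetical should_reset() standing in for __reset_isolation_pfn():

#include <stdbool.h>
#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512UL	/* assumption: pageblock_order 9 */

/* Hypothetical stand-in for __reset_isolation_pfn(): decide per pageblock. */
static bool should_reset(unsigned long pfn)
{
	return (pfn / PAGEBLOCK_NR_PAGES) % 3 != 0;	/* arbitrary demo pattern */
}

int main(void)
{
	unsigned long zone_start = 0, zone_end = 16 * PAGEBLOCK_NR_PAGES;
	unsigned long migrate_pfn = zone_start, free_pfn = zone_end - 1;
	unsigned long reset_migrate = zone_start, reset_free = zone_end - 1;
	bool source_set = false, free_set = false;

	/* Converge from both ends, one pageblock per side per iteration. */
	while (migrate_pfn < free_pfn) {
		if (!source_set && should_reset(migrate_pfn)) {
			source_set = true;
			reset_migrate = migrate_pfn;
		}
		if (!free_set && should_reset(free_pfn)) {
			free_set = true;
			reset_free = free_pfn;
		}
		migrate_pfn += PAGEBLOCK_NR_PAGES;
		free_pfn -= PAGEBLOCK_NR_PAGES;
	}

	printf("new migrate start: %lu, new free start: %lu\n",
	       reset_migrate, reset_free);
	return 0;
}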
391 struct zone *zone = &pgdat->node_zones[zoneid];
392 if (!populated_zone(zone))
396 if (zone->compact_blockskip_flush)
397 __reset_isolation_suitable(zone);
426 struct zone *zone = cc->zone;
434 if (pfn > zone->compact_cached_migrate_pfn[0])
435 zone->compact_cached_migrate_pfn[0] = pfn;
437 pfn > zone->compact_cached_migrate_pfn[1])
438 zone->compact_cached_migrate_pfn[1] = pfn;
448 struct zone *zone = cc->zone;
459 if (pfn < zone->compact_cached_free_pfn)
460 zone->compact_cached_free_pfn = pfn;
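Between resets the cached positions are monotonic: the migration scanner's cached PFN only moves towards the end of the zone and the free scanner's cached PFN only moves towards the start, so neither scanner is ever pushed back over ground already covered. A trivial illustration with simplified names:

#include <stdio.h>

/* Sketch of the cached-position update rules, with made-up starting values. */
static unsigned long cached_migrate = 0x10000, cached_free = 0x40000;

static void update_cached_migrate(unsigned long pfn)
{
	if (pfn > cached_migrate)	/* only ever advances towards the zone end */
		cached_migrate = pfn;
}

static void update_cached_free(unsigned long pfn)
{
	if (pfn < cached_free)		/* only ever retreats towards the zone start */
		cached_free = pfn;
}

int main(void)
{
	update_cached_migrate(0x12000);
	update_cached_migrate(0x11000);	/* ignored: behind the cached position */
	update_cached_free(0x3e000);
	printf("migrate %#lx, free %#lx\n", cached_migrate, cached_free);
	return 0;
}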
584 && compact_unlock_should_abort(&cc->zone->lock, flags,
619 locked = compact_lock_irqsave(&cc->zone->lock,
656 spin_unlock_irqrestore(&cc->zone->lock, flags);
691 * Non-free pages, invalid PFNs, or zone boundaries within the
708 if (block_start_pfn < cc->zone->zone_start_pfn)
709 block_start_pfn = cc->zone->zone_start_pfn;
732 block_end_pfn, cc->zone))
803 pg_data_t *pgdat = cc->zone->zone_pgdat;
899 * Skip if free. We read page order here without zone lock
1119 if (block_start_pfn < cc->zone->zone_start_pfn)
1120 block_start_pfn = cc->zone->zone_start_pfn;
1130 block_end_pfn, cc->zone))
1175 * We are checking page_order without zone->lock taken. But
1262 start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn);
1263 end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone));
1265 page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone);
1316 if (cc->free_pfn >= cc->zone->compact_init_free_pfn) {
1341 struct free_area *area = &cc->zone->free_area[order];
1351 spin_lock_irqsave(&cc->zone->lock, flags);
1362 cc->zone->zone_start_pfn);
1408 spin_unlock_irqrestore(&cc->zone->lock, flags);
1433 zone_end_pfn(cc->zone)),
1434 cc->zone);
1441 if (highest && highest >= cc->zone->compact_cached_free_pfn) {
1443 cc->zone->compact_cached_free_pfn = highest;
1461 struct zone *zone = cc->zone;
1477 * successfully isolated from, zone-cached value, or the end of the
1478 * zone when isolating for the first time. For looping we also need
1482 * zone which ends in the middle of a pageblock.
1489 zone_end_pfn(zone));
1505 * This can iterate a massively long zone without finding any
1512 zone);
1598 * freelist. All pages on the freelist are from the same zone, so there is no
1670 * If the migrate_pfn is not at the start of a zone or the start
1674 if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn))
1701 if (cc->migrate_pfn != cc->zone->zone_start_pfn)
1708 struct free_area *area = &cc->zone->free_area[order];
1716 spin_lock_irqsave(&cc->zone->lock, flags);
1742 if (pfn < cc->zone->zone_start_pfn)
1743 pfn = cc->zone->zone_start_pfn;
1750 spin_unlock_irqrestore(&cc->zone->lock, flags);
1783 * Start at where we last stopped, or beginning of the zone as
1789 if (block_start_pfn < cc->zone->zone_start_pfn)
1790 block_start_pfn = cc->zone->zone_start_pfn;
1813 * This can potentially iterate a massively long zone with
1821 block_end_pfn, cc->zone);
1885 * A zone's fragmentation score is the external fragmentation w.r.t. the
1886 * COMPACTION_HPAGE_ORDER scaled by the zone's size. It returns a value
1894 static unsigned int fragmentation_score_zone(struct zone *zone)
1898 score = zone->present_pages *
1899 extfrag_for_order(zone, COMPACTION_HPAGE_ORDER);
1900 return div64_ul(score, zone->zone_pgdat->node_present_pages + 1);
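The score is the zone's external fragmentation at COMPACTION_HPAGE_ORDER weighted by the zone's share of the node: present_pages * extfrag / (node_present_pages + 1), where div64_ul() is just a 64-bit division helper. A worked example with hypothetical numbers (a 4 GiB zone in an 8 GiB node, 30% external fragmentation at the huge-page order):

#include <stdio.h>

int main(void)
{
	/* Hypothetical numbers: 4 GiB zone in an 8 GiB node, 4 KiB pages. */
	unsigned long zone_present_pages = 4UL << 18;	/* 1048576 pages */
	unsigned long node_present_pages = 8UL << 18;	/* 2097152 pages */
	unsigned long extfrag = 30;	/* extfrag_for_order() result, range 0..100 */

	/* Same arithmetic as fragmentation_score_zone(): weight by zone share. */
	unsigned long score = zone_present_pages * extfrag
			/ (node_present_pages + 1);

	printf("zone fragmentation score: %lu\n", score);	/* prints 14: about half the zone's extfrag */
	return 0;
}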
1916 struct zone *zone;
1918 zone = &pgdat->node_zones[zoneid];
1919 score += fragmentation_score_zone(zone);
1958 reset_cached_positions(cc->zone);
1967 cc->zone->compact_blockskip_flush = true;
1979 pgdat = cc->zone->zone_pgdat;
1983 score = fragmentation_score_zone(cc->zone);
2009 struct free_area *area = &cc->zone->free_area[order];
2064 trace_mm_compaction_finished(cc->zone, cc->order, ret);
2072 * compaction_suitable: Is this suitable to run compaction on this zone now?
2078 static enum compact_result __compaction_suitable(struct zone *zone, int order,
2088 watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
2093 if (zone_watermark_ok(zone, order, watermark, highest_zoneidx,
2112 low_wmark_pages(zone) : min_wmark_pages(zone);
2114 if (!__zone_watermark_ok(zone, 0, watermark, highest_zoneidx,
2121 enum compact_result compaction_suitable(struct zone *zone, int order,
2128 ret = __compaction_suitable(zone, order, alloc_flags, highest_zoneidx,
2129 zone_page_state(zone, NR_FREE_PAGES));
2147 fragindex = fragmentation_index(zone, order);
2152 trace_mm_compaction_suitable(zone, order, ret);
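compaction_suitable() answers three questions in order: is the allocation already going to succeed (nothing to do), is there enough order-0 headroom for the migration scanner to work with (if not, compaction cannot run), and, via the fragmentation index, is the failure actually caused by fragmentation (an index at or below the extfrag threshold means reclaim, not compaction, is needed). A hedged sketch of that decision with the watermark and index helpers reduced to plain parameters:

#include <stdio.h>

/* Return values modelled after the kernel's enum compact_result. */
enum compact_result {
	COMPACT_SKIPPED,
	COMPACT_NOT_SUITABLE_ZONE,
	COMPACT_CONTINUE,
	COMPACT_SUCCESS,
};

/* Sketch of the compaction_suitable() decision; inputs are precomputed flags. */
static enum compact_result suitable(int alloc_would_succeed, int order0_room,
				    int fragindex, int extfrag_threshold)
{
	if (alloc_would_succeed)
		return COMPACT_SUCCESS;		/* the watermark is already met */
	if (!order0_room)
		return COMPACT_SKIPPED;		/* no base pages to migrate into */
	if (fragindex >= 0 && fragindex <= extfrag_threshold)
		return COMPACT_NOT_SUITABLE_ZONE; /* failure is lack of memory, not fragmentation */
	return COMPACT_CONTINUE;
}

int main(void)
{
	/* A high-order request failing for fragmentation reasons: keep compacting. */
	printf("%d\n", suitable(0, 1, 650, 500) == COMPACT_CONTINUE);
	return 0;
}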
2162 struct zone *zone;
2166 * Make sure at least one zone would pass __compaction_suitable if we continue
2169 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
2180 available = zone_reclaimable_pages(zone) / order;
2181 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
2182 compact_result = __compaction_suitable(zone, order, alloc_flags,
2195 unsigned long start_pfn = cc->zone->zone_start_pfn;
2196 unsigned long end_pfn = zone_end_pfn(cc->zone);
2202 * These counters track activities during zone compaction. Initialize
2203 * them before compacting a new zone.
2213 ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,
2226 if (compaction_restarting(cc->zone, cc->order))
2227 __reset_isolation_suitable(cc->zone);
2230 * Setup to move all movable pages to the end of the zone. Used cached
2232 * want to compact the whole zone), but check that it is initialised
2233 * by ensuring the values are within zone boundaries.
2240 cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync];
2241 cc->free_pfn = cc->zone->compact_cached_free_pfn;
2244 cc->zone->compact_cached_free_pfn = cc->free_pfn;
2248 cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
2249 cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
2252 if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn)
2267 cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1];
2300 cc->zone->compact_cached_migrate_pfn[1] =
2301 cc->zone->compact_cached_migrate_pfn[0];
2361 lru_add_drain_cpu_zone(cc->zone);
2388 * already reset to zone end in compact_finished()
2390 if (free_pfn > cc->zone->compact_cached_free_pfn)
2391 cc->zone->compact_cached_free_pfn = free_pfn;
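compact_zone() seeds its scanners from the cached positions but validates them first: values that have drifted outside the zone span (for example after the span changed) are pushed back to the zone boundaries and written back to the cache. A small sketch of that clamping with hypothetical field names:

#include <stdio.h>

/* Hedged sketch: validate the cached scanner positions against the zone span. */
struct zone_span {
	unsigned long start_pfn;
	unsigned long end_pfn;		/* one past the last valid PFN */
	unsigned long cached_migrate_pfn;
	unsigned long cached_free_pfn;
};

static void init_scanners(struct zone_span *z,
			  unsigned long *migrate_pfn, unsigned long *free_pfn)
{
	*migrate_pfn = z->cached_migrate_pfn;
	*free_pfn = z->cached_free_pfn;

	/* Stale free position: restart the free scanner from the top of the zone. */
	if (*free_pfn < z->start_pfn || *free_pfn >= z->end_pfn) {
		*free_pfn = z->end_pfn - 1;
		z->cached_free_pfn = *free_pfn;
	}
	/* Stale migrate position: restart the migration scanner from the bottom. */
	if (*migrate_pfn < z->start_pfn || *migrate_pfn >= z->end_pfn) {
		*migrate_pfn = z->start_pfn;
		z->cached_migrate_pfn = *migrate_pfn;
	}
}

int main(void)
{
	struct zone_span z = { 0x10000, 0x50000, 0x48000, 0x9000 };
	unsigned long migrate_pfn, free_pfn;

	init_scanners(&z, &migrate_pfn, &free_pfn);
	printf("migrate %#lx, free %#lx\n", migrate_pfn, free_pfn); /* 0x48000, 0x4ffff */
	return 0;
}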
2403 static enum compact_result compact_zone_order(struct zone *zone, int order,
2413 .zone = zone,
2471 struct zone *zone;
2483 /* Compact each zone in the list */
2484 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
2489 && compaction_deferred(zone, order)) {
2494 status = compact_zone_order(zone, order, gfp_mask, prio,
2501 * We think the allocation will succeed in this zone,
2504 * succeeds in this zone.
2506 compaction_defer_reset(zone, order, false);
2514 * We think that allocation won't succeed in this zone
2518 defer_compaction(zone, order);
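try_to_compact_pages() applies the deferral machinery per zone while walking the zonelist: a zone currently deferred is skipped, a zone that compacts well enough for the allocation to succeed has its deferral state reset, and a zone where even synchronous compaction failed is deferred further. A hedged outline of that loop, with boolean stand-ins for the real checks:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical per-zone state for one pass over the zonelist. */
struct candidate {
	const char *name;
	bool deferred;			/* what compaction_deferred() would return */
	bool alloc_will_succeed;	/* did compact_zone_order() do well enough? */
};

int main(void)
{
	struct candidate zones[] = {
		{ "DMA32",  true,  false },
		{ "Normal", false, true  },
	};
	unsigned int i;

	for (i = 0; i < sizeof(zones) / sizeof(zones[0]); i++) {
		struct candidate *z = &zones[i];

		if (z->deferred) {			/* compaction_deferred(): skip */
			printf("%s: deferred, skipping\n", z->name);
			continue;
		}
		if (z->alloc_will_succeed) {		/* compaction_defer_reset() */
			printf("%s: success, deferral reset\n", z->name);
			break;				/* allocation is expected to succeed here */
		}
		/* defer_compaction(): back off further after a sync failure */
		printf("%s: failed, deferring\n", z->name);
	}
	return 0;
}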
2534 * Compact all zones within a node till each zone's fragmentation score
2540 * per-zone locks.
2545 struct zone *zone;
2556 zone = &pgdat->node_zones[zoneid];
2557 if (!populated_zone(zone))
2560 cc.zone = zone;
2574 struct zone *zone;
2586 zone = &pgdat->node_zones[zoneid];
2587 if (!populated_zone(zone))
2590 cc.zone = zone;
2671 struct zone *zone;
2675 zone = &pgdat->node_zones[zoneid];
2677 if (!populated_zone(zone))
2680 if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
2695 struct zone *zone;
2711 zone = &pgdat->node_zones[zoneid];
2712 if (!populated_zone(zone))
2715 if (compaction_deferred(zone, cc.order))
2718 if (compaction_suitable(zone, cc.order, 0, zoneid) !=
2725 cc.zone = zone;
2729 compaction_defer_reset(zone, cc.order, false);
2733 * otherwise coalesce on the zone's free area for
2737 drain_all_pages(zone);
2743 defer_compaction(zone, cc.order);