Lines matching refs: cc (struct compact_control, mm/compaction.c)
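
Every match below dereferences the per-attempt compaction state passed around as `cc`. As an orientation aid, here is an abridged sketch of struct compact_control restricted to the fields the listed lines actually touch. The field names are taken from the dereferences below; the types, ordering and comments are approximations (the authoritative definition lives in mm/internal.h and varies between kernel versions).

/* Abridged sketch, not a verbatim copy of mm/internal.h. */
struct compact_control {
	struct list_head freepages;	/* isolated free pages, handed out by compaction_alloc() */
	struct list_head migratepages;	/* isolated source pages awaiting migrate_pages() */
	unsigned int nr_freepages;
	unsigned int nr_migratepages;
	unsigned long free_pfn;		/* free-page scanner position (moves toward lower pfns) */
	unsigned long migrate_pfn;	/* migration scanner position (moves toward higher pfns) */
	unsigned long fast_start_pfn;	/* lowest pfn found by fast_find_migrateblock() */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned int fast_search_fail;	/* consecutive failed free-list searches */
	int search_order;		/* order to resume the free-list search from */
	gfp_t gfp_mask;
	int order;			/* target allocation order; -1 means whole-zone compaction */
	int migratetype;		/* derived from gfp_mask via gfp_migratetype() */
	unsigned int alloc_flags;
	int highest_zoneidx;
	enum migrate_mode mode;		/* MIGRATE_ASYNC, MIGRATE_SYNC_LIGHT or MIGRATE_SYNC */
	bool ignore_skip_hint;		/* scan pageblocks even if marked to be skipped */
	bool no_set_skip_hint;		/* never mark pageblocks as skipped */
	bool ignore_block_suitable;	/* take free pages from any pageblock type */
	bool direct_compaction;		/* invoked from the page allocator, not kcompactd */
	bool proactive_compaction;
	bool whole_zone;		/* scan the whole zone instead of cached positions */
	bool contended;			/* aborted due to lock contention or a fatal signal */
	bool rescan;			/* rescanning a partially compacted pageblock */
	bool alloc_contig;		/* alloc_contig_range() allocation */
};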
222 static inline bool isolation_suitable(struct compact_control *cc,
225 if (cc->ignore_skip_hint)
405 static bool test_and_set_skip(struct compact_control *cc, struct page *page,
411 if (cc->ignore_skip_hint)
418 if (!skip && !cc->no_set_skip_hint)
424 static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
426 struct zone *zone = cc->zone;
431 if (cc->no_set_skip_hint)
436 if (cc->mode != MIGRATE_ASYNC &&
445 static void update_pageblock_skip(struct compact_control *cc,
448 struct zone *zone = cc->zone;
450 if (cc->no_set_skip_hint)
463 static inline bool isolation_suitable(struct compact_control *cc,
474 static inline void update_pageblock_skip(struct compact_control *cc,
479 static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
483 static bool test_and_set_skip(struct compact_control *cc, struct page *page,
500 struct compact_control *cc)
504 if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
508 cc->contended = true;
531 unsigned long flags, bool *locked, struct compact_control *cc)
539 cc->contended = true;
553 static unsigned long isolate_freepages_block(struct compact_control *cc,
584 && compact_unlock_should_abort(&cc->zone->lock, flags,
585 &locked, cc))
619 locked = compact_lock_irqsave(&cc->zone->lock,
620 &flags, cc);
635 cc->nr_freepages += isolated;
638 if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
656 spin_unlock_irqrestore(&cc->zone->lock, flags);
679 cc->total_free_scanned += nr_scanned;
687 * @cc: Compaction control structure.
700 isolate_freepages_range(struct compact_control *cc,
708 if (block_start_pfn < cc->zone->zone_start_pfn)
709 block_start_pfn = cc->zone->zone_start_pfn;
732 block_end_pfn, cc->zone))
735 isolated = isolate_freepages_block(cc, &isolate_start_pfn,
784 * @cc: Compaction control structure.
795 * The pages are isolated on cc->migratepages list (not required to be empty),
796 * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
800 isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
803 pg_data_t *pgdat = cc->zone->zone_pgdat;
821 if (cc->nr_migratepages)
825 if (cc->mode == MIGRATE_ASYNC)
836 if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
838 next_skip_pfn = block_end_pfn(low_pfn, cc->order);
863 next_skip_pfn = block_end_pfn(low_pfn, cc->order);
873 flags, &locked, cc)) {
891 if (!cc->ignore_skip_hint && get_pageblock_skip(page)) {
925 if (PageCompound(page) && !cc->alloc_contig) {
971 if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))
977 &flags, cc);
982 if (test_and_set_skip(cc, page, low_pfn))
995 if (unlikely(PageCompound(page) && !cc->alloc_contig)) {
1018 list_add(&page->lru, &cc->migratepages);
1019 cc->nr_migratepages += compound_nr(page);
1028 if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX &&
1029 !cc->rescan && !cc->contended) {
1041 * instead of migrating, as we cannot form the cc->order buddy
1049 putback_movable_pages(&cc->migratepages);
1050 cc->nr_migratepages = 0;
1060 next_skip_pfn += 1UL << cc->order;
1083 if (low_pfn == end_pfn && (!nr_isolated || cc->rescan)) {
1086 update_cached_migrate(cc, low_pfn);
1093 cc->total_migrate_scanned += nr_scanned;
1102 * @cc: Compaction control structure.
1111 isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
1119 if (block_start_pfn < cc->zone->zone_start_pfn)
1120 block_start_pfn = cc->zone->zone_start_pfn;
1130 block_end_pfn, cc->zone))
1133 pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
1139 if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX)
1149 static bool suitable_migration_source(struct compact_control *cc,
1157 if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction)
1162 if (cc->migratetype == MIGRATE_MOVABLE)
1165 return block_mt == cc->migratetype;
1169 static bool suitable_migration_target(struct compact_control *cc,
1183 if (cc->ignore_block_suitable)
1195 freelist_scan_limit(struct compact_control *cc)
1199 return (COMPACT_CLUSTER_MAX >> min(shift, cc->fast_search_fail)) + 1;
1206 static inline bool compact_scanners_met(struct compact_control *cc)
1208 return (cc->free_pfn >> pageblock_order)
1209 <= (cc->migrate_pfn >> pageblock_order);
1248 fast_isolate_around(struct compact_control *cc, unsigned long pfn)
1254 if (cc->nr_freepages >= cc->nr_migratepages)
1258 if (cc->direct_compaction && cc->mode == MIGRATE_ASYNC)
1262 start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn);
1263 end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone));
1265 page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone);
1269 isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false);
1272 if (cc->nr_freepages < cc->nr_migratepages)
1279 static int next_search_order(struct compact_control *cc, int order)
1283 order = cc->order - 1;
1286 if (order == cc->search_order) {
1287 cc->search_order--;
1288 if (cc->search_order < 0)
1289 cc->search_order = cc->order - 1;
1297 fast_isolate_freepages(struct compact_control *cc)
1299 unsigned int limit = min(1U, freelist_scan_limit(cc) >> 1);
1309 if (cc->order <= 0)
1310 return cc->free_pfn;
1316 if (cc->free_pfn >= cc->zone->compact_init_free_pfn) {
1325 distance = (cc->free_pfn - cc->migrate_pfn);
1326 low_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 2));
1327 min_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 1));
1336 cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order);
1338 for (order = cc->search_order;
1340 order = next_search_order(cc, order)) {
1341 struct free_area *area = &cc->zone->free_area[order];
1351 spin_lock_irqsave(&cc->zone->lock, flags);
1362 cc->zone->zone_start_pfn);
1365 cc->fast_search_fail = 0;
1366 cc->search_order = order;
1398 cc->nr_freepages += nr_isolated;
1399 list_add_tail(&page->lru, &cc->freepages);
1403 order = cc->search_order + 1;
1408 spin_unlock_irqrestore(&cc->zone->lock, flags);
1419 cc->fast_search_fail++;
1428 cc->free_pfn = highest;
1430 if (cc->direct_compaction && pfn_valid(min_pfn)) {
1433 zone_end_pfn(cc->zone)),
1434 cc->zone);
1435 cc->free_pfn = min_pfn;
1441 if (highest && highest >= cc->zone->compact_cached_free_pfn) {
1443 cc->zone->compact_cached_free_pfn = highest;
1446 cc->total_free_scanned += nr_scanned;
1448 return cc->free_pfn;
1451 fast_isolate_around(cc, low_pfn);
1459 static void isolate_freepages(struct compact_control *cc)
1461 struct zone *zone = cc->zone;
1467 struct list_head *freelist = &cc->freepages;
1471 isolate_start_pfn = fast_isolate_freepages(cc);
1472 if (cc->nr_freepages)
1486 isolate_start_pfn = cc->free_pfn;
1490 low_pfn = pageblock_end_pfn(cc->migrate_pfn);
1491 stride = cc->mode == MIGRATE_ASYNC ? COMPACT_CLUSTER_MAX : 1;
1495 * pages on cc->migratepages. We stop searching if the migrate
1517 if (!suitable_migration_target(cc, page))
1521 if (!isolation_suitable(cc, page))
1525 nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn,
1530 update_pageblock_skip(cc, page, block_start_pfn);
1533 if (cc->nr_freepages >= cc->nr_migratepages) {
1565 cc->free_pfn = isolate_start_pfn;
1579 struct compact_control *cc = (struct compact_control *)data;
1582 if (list_empty(&cc->freepages)) {
1583 isolate_freepages(cc);
1585 if (list_empty(&cc->freepages))
1589 freepage = list_entry(cc->freepages.next, struct page, lru);
1591 cc->nr_freepages--;
1603 struct compact_control *cc = (struct compact_control *)data;
1605 list_add(&page->lru, &cc->freepages);
1606 cc->nr_freepages++;
1627 update_fast_start_pfn(struct compact_control *cc, unsigned long pfn)
1629 if (cc->fast_start_pfn == ULONG_MAX)
1632 if (!cc->fast_start_pfn)
1633 cc->fast_start_pfn = pfn;
1635 cc->fast_start_pfn = min(cc->fast_start_pfn, pfn);
1639 reinit_migrate_pfn(struct compact_control *cc)
1641 if (!cc->fast_start_pfn || cc->fast_start_pfn == ULONG_MAX)
1642 return cc->migrate_pfn;
1644 cc->migrate_pfn = cc->fast_start_pfn;
1645 cc->fast_start_pfn = ULONG_MAX;
1647 return cc->migrate_pfn;
1655 static unsigned long fast_find_migrateblock(struct compact_control *cc)
1657 unsigned int limit = freelist_scan_limit(cc);
1660 unsigned long pfn = cc->migrate_pfn;
1666 if (cc->ignore_skip_hint)
1674 if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn))
1682 if (cc->order <= PAGE_ALLOC_COSTLY_ORDER)
1691 if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE)
1700 distance = (cc->free_pfn - cc->migrate_pfn) >> 1;
1701 if (cc->migrate_pfn != cc->zone->zone_start_pfn)
1703 high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance);
1705 for (order = cc->order - 1;
1708 struct free_area *area = &cc->zone->free_area[order];
1716 spin_lock_irqsave(&cc->zone->lock, flags);
1740 update_fast_start_pfn(cc, free_pfn);
1742 if (pfn < cc->zone->zone_start_pfn)
1743 pfn = cc->zone->zone_start_pfn;
1744 cc->fast_search_fail = 0;
1750 spin_unlock_irqrestore(&cc->zone->lock, flags);
1753 cc->total_migrate_scanned += nr_scanned;
1760 cc->fast_search_fail++;
1761 pfn = reinit_migrate_pfn(cc);
1771 static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
1779 (cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
1787 low_pfn = fast_find_migrateblock(cc);
1789 if (block_start_pfn < cc->zone->zone_start_pfn)
1790 block_start_pfn = cc->zone->zone_start_pfn;
1797 fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail;
1806 for (; block_end_pfn <= cc->free_pfn;
1821 block_end_pfn, cc->zone);
1833 !fast_find_block && !isolation_suitable(cc, page))
1844 if (!suitable_migration_source(cc, page)) {
1845 update_cached_migrate(cc, block_end_pfn);
1850 low_pfn = isolate_migratepages_block(cc, low_pfn,
1865 cc->migrate_pfn = low_pfn;
1867 return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
1949 static enum compact_result __compact_finished(struct compact_control *cc)
1952 const int migratetype = cc->migratetype;
1956 if (compact_scanners_met(cc)) {
1958 reset_cached_positions(cc->zone);
1966 if (cc->direct_compaction)
1967 cc->zone->compact_blockskip_flush = true;
1969 if (cc->whole_zone)
1975 if (cc->proactive_compaction) {
1979 pgdat = cc->zone->zone_pgdat;
1983 score = fragmentation_score_zone(cc->zone);
1994 if (is_via_compact_memory(cc->order))
2003 if (!IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages))
2008 for (order = cc->order; order < MAX_ORDER; order++) {
2009 struct free_area *area = &cc->zone->free_area[order];
2041 if (cc->mode == MIGRATE_ASYNC ||
2042 IS_ALIGNED(cc->migrate_pfn,
2053 if (cc->contended || fatal_signal_pending(current))
2059 static enum compact_result compact_finished(struct compact_control *cc)
2063 ret = __compact_finished(cc);
2064 trace_mm_compaction_finished(cc->zone, cc->order, ret);
2192 compact_zone(struct compact_control *cc, struct capture_control *capc)
2195 unsigned long start_pfn = cc->zone->zone_start_pfn;
2196 unsigned long end_pfn = zone_end_pfn(cc->zone);
2198 const bool sync = cc->mode != MIGRATE_ASYNC;
2205 cc->total_migrate_scanned = 0;
2206 cc->total_free_scanned = 0;
2207 cc->nr_migratepages = 0;
2208 cc->nr_freepages = 0;
2209 INIT_LIST_HEAD(&cc->freepages);
2210 INIT_LIST_HEAD(&cc->migratepages);
2212 cc->migratetype = gfp_migratetype(cc->gfp_mask);
2213 ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,
2214 cc->highest_zoneidx);
2226 if (compaction_restarting(cc->zone, cc->order))
2227 __reset_isolation_suitable(cc->zone);
2235 cc->fast_start_pfn = 0;
2236 if (cc->whole_zone) {
2237 cc->migrate_pfn = start_pfn;
2238 cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
2240 cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync];
2241 cc->free_pfn = cc->zone->compact_cached_free_pfn;
2242 if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
2243 cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
2244 cc->zone->compact_cached_free_pfn = cc->free_pfn;
2246 if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
2247 cc->migrate_pfn = start_pfn;
2248 cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
2249 cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
2252 if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn)
2253 cc->whole_zone = true;
2267 cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1];
2269 trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
2270 cc->free_pfn, end_pfn, sync);
2274 while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) {
2276 unsigned long start_pfn = cc->migrate_pfn;
2286 cc->rescan = false;
2289 cc->rescan = true;
2292 switch (isolate_migratepages(cc)) {
2295 putback_movable_pages(&cc->migratepages);
2296 cc->nr_migratepages = 0;
2300 cc->zone->compact_cached_migrate_pfn[1] =
2301 cc->zone->compact_cached_migrate_pfn[0];
2307 * previous cc->order aligned block.
2316 err = migrate_pages(&cc->migratepages, compaction_alloc,
2317 compaction_free, (unsigned long)cc, cc->mode,
2320 trace_mm_compaction_migratepages(cc->nr_migratepages, err,
2321 &cc->migratepages);
2324 cc->nr_migratepages = 0;
2326 putback_movable_pages(&cc->migratepages);
2331 if (err == -ENOMEM && !compact_scanners_met(cc)) {
2339 if (cc->direct_compaction &&
2340 (cc->mode == MIGRATE_ASYNC)) {
2341 cc->migrate_pfn = block_end_pfn(
2342 cc->migrate_pfn - 1, cc->order);
2351 * cc->order aligned block where we migrated from? If yes,
2356 if (cc->order > 0 && last_migrated_pfn) {
2358 block_start_pfn(cc->migrate_pfn, cc->order);
2361 lru_add_drain_cpu_zone(cc->zone);
2379 if (cc->nr_freepages > 0) {
2380 unsigned long free_pfn = release_freepages(&cc->freepages);
2382 cc->nr_freepages = 0;
2390 if (free_pfn > cc->zone->compact_cached_free_pfn)
2391 cc->zone->compact_cached_free_pfn = free_pfn;
2394 count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned);
2395 count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned);
2397 trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
2398 cc->free_pfn, end_pfn, sync, ret);
2409 struct compact_control cc = {
2424 .cc = &cc,
2436 ret = compact_zone(&cc, &capc);
2438 VM_BUG_ON(!list_empty(&cc.freepages));
2439 VM_BUG_ON(!list_empty(&cc.migratepages));
2546 struct compact_control cc = {
2560 cc.zone = zone;
2562 compact_zone(&cc, NULL);
2564 VM_BUG_ON(!list_empty(&cc.freepages));
2565 VM_BUG_ON(!list_empty(&cc.migratepages));
2575 struct compact_control cc = {
2590 cc.zone = zone;
2592 compact_zone(&cc, NULL);
2594 VM_BUG_ON(!list_empty(&cc.freepages));
2595 VM_BUG_ON(!list_empty(&cc.migratepages));
2696 struct compact_control cc = {
2704 trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
2705 cc.highest_zoneidx);
2708 for (zoneid = 0; zoneid <= cc.highest_zoneidx; zoneid++) {
2715 if (compaction_deferred(zone, cc.order))
2718 if (compaction_suitable(zone, cc.order, 0, zoneid) !=
2725 cc.zone = zone;
2726 status = compact_zone(&cc, NULL);
2729 compaction_defer_reset(zone, cc.order, false);
2734 * order >= cc.order. This is ratelimited by the
2743 defer_compaction(zone, cc.order);
2747 cc.total_migrate_scanned);
2749 cc.total_free_scanned);
2751 VM_BUG_ON(!list_empty(&cc.freepages));
2752 VM_BUG_ON(!list_empty(&cc.migratepages));
2760 if (pgdat->kcompactd_max_order <= cc.order)
2762 if (pgdat->kcompactd_highest_zoneidx >= cc.highest_zoneidx)
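
The compact_zone() entries above (source lines 2192-2398) trace out the main driver loop that ties the other matches together. The following is a condensed, non-verbatim sketch of that flow, assembled only from the calls visible in the listing; error paths, tracing, LRU draining, async-mode skipping and the capture path are omitted, and the "_sketch" name is purely illustrative.

static enum compact_result compact_zone_sketch(struct compact_control *cc)
{
	enum compact_result ret;
	int err;

	cc->nr_migratepages = 0;
	cc->nr_freepages = 0;
	INIT_LIST_HEAD(&cc->freepages);
	INIT_LIST_HEAD(&cc->migratepages);
	cc->migratetype = gfp_migratetype(cc->gfp_mask);

	/* Bail out early if compaction for cc->order is pointless or unnecessary. */
	ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,
				  cc->highest_zoneidx);
	if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED)
		return ret;

	/*
	 * Scanner start positions come from the per-zone cache unless
	 * cc->whole_zone forces a full-zone scan (source lines 2235-2253).
	 */

	while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) {
		switch (isolate_migratepages(cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_CONTENDED;
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			goto out;
		case ISOLATE_NONE:
			continue;
		case ISOLATE_SUCCESS:
			break;
		}

		err = migrate_pages(&cc->migratepages, compaction_alloc,
				    compaction_free, (unsigned long)cc,
				    cc->mode, MR_COMPACTION);

		/* All pages were either migrated or will be released. */
		cc->nr_migratepages = 0;
		if (err) {
			putback_movable_pages(&cc->migratepages);
			/*
			 * -ENOMEM before the scanners meet means the free
			 * scanner genuinely found no targets: give up.
			 */
			if (err == -ENOMEM && !compact_scanners_met(cc)) {
				ret = COMPACT_CONTENDED;
				goto out;
			}
		}
	}

out:
	/*
	 * Return any unused isolated free pages and remember how far the
	 * free scanner got, so the next attempt can resume from there.
	 */
	if (cc->nr_freepages > 0) {
		unsigned long free_pfn = release_freepages(&cc->freepages);

		cc->nr_freepages = 0;
		if (free_pfn > cc->zone->compact_cached_free_pfn)
			cc->zone->compact_cached_free_pfn = free_pfn;
	}
	return ret;
}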