Lines matching refs: cc (references to struct compact_control * in mm/compaction.c)

215 static inline bool isolation_suitable(struct compact_control *cc,
218 if (cc->ignore_skip_hint)
447 static bool test_and_set_skip(struct compact_control *cc, struct page *page)
452 if (cc->ignore_skip_hint)
456 if (!skip && !cc->no_set_skip_hint)
462 static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
464 struct zone *zone = cc->zone;
467 if (cc->no_set_skip_hint)
475 if (cc->mode != MIGRATE_ASYNC &&
484 static void update_pageblock_skip(struct compact_control *cc,
487 struct zone *zone = cc->zone;
489 if (cc->no_set_skip_hint)
498 static inline bool isolation_suitable(struct compact_control *cc,
509 static inline void update_pageblock_skip(struct compact_control *cc,
514 static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
518 static bool test_and_set_skip(struct compact_control *cc, struct page *page)
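The cluster above is the pageblock skip-hint machinery: isolation_suitable() and test_and_set_skip() honor cc->ignore_skip_hint on the read side, the update_* helpers honor cc->no_set_skip_hint on the write side, and 498-518 are the no-op stubs used when CONFIG_COMPACTION is off. A minimal userspace model of that gating; the two structs and the plain bool standing in for the per-pageblock skip bit are simplifications, not the kernel layout:

#include <stdbool.h>
#include <stdio.h>

/*
 * Simplified stand-ins: the real skip bit lives in the zone's pageblock
 * flags, and struct compact_control has many more fields.
 */
struct page_model { bool skip; };
struct cc_model {
        bool ignore_skip_hint;  /* read side: scan even marked blocks */
        bool no_set_skip_hint;  /* write side: never mark blocks */
};

/* Mirrors isolation_suitable() at line 215. */
static bool isolation_suitable(struct cc_model *cc, struct page_model *page)
{
        if (cc->ignore_skip_hint)
                return true;
        return !page->skip;
}

/* Mirrors test_and_set_skip() at line 447. */
static bool test_and_set_skip(struct cc_model *cc, struct page_model *page)
{
        bool skip = page->skip;

        if (cc->ignore_skip_hint)
                return false;
        if (!skip && !cc->no_set_skip_hint)
                page->skip = true;      /* later scans skip this block */
        return skip;
}

int main(void)
{
        struct cc_model cc = { 0 };
        struct page_model block = { 0 };

        printf("before: suitable=%d\n", isolation_suitable(&cc, &block));
        test_and_set_skip(&cc, &block);
        printf("after:  suitable=%d\n", isolation_suitable(&cc, &block));
        return 0;
}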
534 struct compact_control *cc)
538 if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
542 cc->contended = true;
562 unsigned long flags, bool *locked, struct compact_control *cc)
570 cc->contended = true;
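Lines 534-570 are the scan-time lock helpers: in MIGRATE_ASYNC mode compact_lock_irqsave() trylocks first and records contention in cc->contended (538-542) before taking the lock anyway, while compact_unlock_should_abort() drops the lock and tells the scanner to bail (570). A sketch with a pthread mutex standing in for the zone spinlock and a flag for the resched/fatal-signal check; both substitutions are assumptions of this model:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct cc_model {
        bool mode_async;    /* cc->mode == MIGRATE_ASYNC */
        bool contended;     /* cc->contended */
        bool need_resched;  /* stands in for resched/fatal-signal checks */
};

/* Modeled on compact_lock_irqsave() (line 534): async mode trylocks
 * first and records contention, but still ends up holding the lock. */
static void cc_lock(pthread_mutex_t *lock, struct cc_model *cc)
{
        if (cc->mode_async && !cc->contended) {
                if (pthread_mutex_trylock(lock) == 0)
                        return;
                cc->contended = true;   /* noted; callers back off later */
        }
        pthread_mutex_lock(lock);
}

/* Modeled on compact_unlock_should_abort() (line 562): drop the lock
 * and tell the scanner to abort when it should yield the CPU. */
static bool cc_unlock_should_abort(pthread_mutex_t *lock, struct cc_model *cc)
{
        pthread_mutex_unlock(lock);
        if (cc->need_resched) {
                cc->contended = true;
                return true;
        }
        return false;
}

int main(void)
{
        pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
        struct cc_model cc = { .mode_async = true, .need_resched = true };

        cc_lock(&lock, &cc);
        bool abort = cc_unlock_should_abort(&lock, &cc);
        printf("abort=%d contended=%d\n", abort, cc.contended);
        return 0;
}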
584 static unsigned long isolate_freepages_block(struct compact_control *cc,
614 && compact_unlock_should_abort(&cc->zone->lock, flags,
615 &locked, cc))
642 locked = compact_lock_irqsave(&cc->zone->lock,
643 &flags, cc);
659 cc->nr_freepages += isolated;
662 if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
678 spin_unlock_irqrestore(&cc->zone->lock, flags);
701 cc->total_free_scanned += nr_scanned;
709 * @cc: Compaction control structure.
722 isolate_freepages_range(struct compact_control *cc,
730 if (block_start_pfn < cc->zone->zone_start_pfn)
731 block_start_pfn = cc->zone->zone_start_pfn;
753 block_end_pfn, cc->zone))
756 isolated = isolate_freepages_block(cc, &isolate_start_pfn,
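isolate_freepages_range() (722 onward) walks the requested range one pageblock at a time, clamping the first block to cc->zone->zone_start_pfn (730-731) before handing each piece to isolate_freepages_block() (756). A self-contained sketch of just that walk; the pageblock size is illustrative and printf stands in for the block scanner:

#include <stdio.h>

#define PAGEBLOCK_ORDER 9UL                   /* illustrative, per-arch */
#define PAGEBLOCK_NR    (1UL << PAGEBLOCK_ORDER)

static unsigned long pageblock_start_pfn(unsigned long pfn)
{
        return pfn & ~(PAGEBLOCK_NR - 1);
}

static unsigned long pageblock_end_pfn(unsigned long pfn)
{
        return pageblock_start_pfn(pfn) + PAGEBLOCK_NR;
}

static void walk_range(unsigned long start_pfn, unsigned long end_pfn,
                       unsigned long zone_start_pfn)
{
        unsigned long block_start_pfn = pageblock_start_pfn(start_pfn);
        unsigned long block_end_pfn;

        if (block_start_pfn < zone_start_pfn)   /* the clamp at 730-731 */
                block_start_pfn = zone_start_pfn;
        block_end_pfn = pageblock_end_pfn(start_pfn);

        for (; block_start_pfn < end_pfn;
             block_start_pfn = block_end_pfn,
             block_end_pfn += PAGEBLOCK_NR) {
                if (block_end_pfn > end_pfn)
                        block_end_pfn = end_pfn;
                printf("scan block [%lu, %lu)\n",
                       block_start_pfn, block_end_pfn);
        }
}

int main(void)
{
        walk_range(700, 2000, 600);   /* zone starts mid-block at pfn 600 */
        return 0;
}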
788 static bool too_many_isolated(struct compact_control *cc)
790 pg_data_t *pgdat = cc->zone->zone_pgdat;
808 if (cc->gfp_mask & __GFP_FS) {
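too_many_isolated() (788-808) throttles the migrate scanner when too many pages sit isolated off the LRU, with a separate budget for __GFP_FS callers. The threshold below (half of inactive plus active, with the FS branch tightening the counters so GFP_NOFS compactors that may hold filesystem locks are not throttled behind FS-capable ones) is recalled from the upstream function and may differ in detail:

#include <stdbool.h>
#include <stdio.h>

/*
 * Model of too_many_isolated() at line 788. The node LRU counters are
 * plain parameters here; the kernel sums them with node_page_state().
 */
static bool too_many_isolated(unsigned long inactive, unsigned long active,
                              unsigned long isolated, bool gfp_fs)
{
        if (gfp_fs) {           /* tighter budget for FS-capable callers */
                inactive >>= 3;
                active >>= 3;
        }
        return isolated > (inactive + active) / 2;
}

int main(void)
{
        printf("%d\n", too_many_isolated(4096, 4096, 3000, false)); /* 0 */
        printf("%d\n", too_many_isolated(4096, 4096, 3000, true));  /* 1 */
        return 0;
}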
823 * @cc: Compaction control structure.
832 * cc->migrate_pfn will contain the next pfn to scan.
834 * The pages are isolated on cc->migratepages list (not required to be empty),
835 * and cc->nr_migratepages is updated accordingly.
838 isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
841 pg_data_t *pgdat = cc->zone->zone_pgdat;
855 cc->migrate_pfn = low_pfn;
862 while (unlikely(too_many_isolated(cc))) {
864 if (cc->nr_migratepages)
868 if (cc->mode == MIGRATE_ASYNC)
879 if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
881 next_skip_pfn = block_end_pfn(low_pfn, cc->order);
906 next_skip_pfn = block_end_pfn(low_pfn, cc->order);
921 cc->contended = true;
941 low_pfn == cc->zone->zone_start_pfn)) {
942 if (!isolation_suitable(cc, page)) {
950 if (PageHuge(page) && cc->alloc_contig) {
956 ret = isolate_or_dissolve_huge_page(page, &cc->migratepages);
974 * on the cc->migratepages list.
1018 if (PageCompound(page) && !cc->alloc_contig) {
1076 if (!(cc->gfp_mask & __GFP_FS) && mapping)
1130 compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);
1142 if (test_and_set_skip(cc, valid_page) &&
1143 !cc->finish_pageblock) {
1153 if (unlikely(folio_test_large(folio) && !cc->alloc_contig)) {
1172 list_add(&folio->lru, &cc->migratepages);
1174 cc->nr_migratepages += folio_nr_pages(folio);
1184 if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX &&
1185 !cc->finish_pageblock && !cc->contended) {
1206 * instead of migrating, as we cannot form the cc->order buddy
1214 putback_movable_pages(&cc->migratepages);
1215 cc->nr_migratepages = 0;
1225 next_skip_pfn += 1UL << cc->order;
1257 if (low_pfn == end_pfn && (!nr_isolated || cc->finish_pageblock)) {
1258 if (!cc->no_set_skip_hint && valid_page && !skip_updated)
1260 update_cached_migrate(cc, low_pfn);
1267 cc->total_migrate_scanned += nr_scanned;
1271 cc->migrate_pfn = low_pfn;
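Lines 879-906 and 1206-1225 show the skip-on-failure policy of isolate_migratepages_block(): for async direct compaction, one failed isolation inside a cc->order aligned block puts the already-isolated pages back (1214-1215) and jumps low_pfn to next_skip_pfn, since a partly-migrated block can never form the cc->order buddy (1206). The jump target is block_end_pfn(pfn, order); a sketch of that arithmetic with ALIGN_UP written out:

#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

/* The kernel's block_end_pfn(): end of the order-aligned block of pfn. */
static unsigned long block_end_pfn(unsigned long pfn, int order)
{
        return ALIGN_UP(pfn + 1, 1UL << order);
}

int main(void)
{
        unsigned long pfn = 1000;

        /* next_skip_pfn as computed at lines 881 and 906 */
        printf("order-4 block ends at %lu\n", block_end_pfn(pfn, 4)); /* 1008 */
        printf("order-9 block ends at %lu\n", block_end_pfn(pfn, 9)); /* 1024 */
        return 0;
}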
1278 * @cc: Compaction control structure.
1286 isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
1295 if (block_start_pfn < cc->zone->zone_start_pfn)
1296 block_start_pfn = cc->zone->zone_start_pfn;
1306 block_end_pfn, cc->zone))
1309 ret = isolate_migratepages_block(cc, pfn, block_end_pfn,
1315 if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX)
1325 static bool suitable_migration_source(struct compact_control *cc,
1333 if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction)
1338 if (cc->migratetype == MIGRATE_MOVABLE)
1341 return block_mt == cc->migratetype;
1345 static bool suitable_migration_target(struct compact_control *cc,
1359 if (cc->ignore_block_suitable)
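suitable_migration_source() (1325-1341) makes only async direct compaction picky about which pageblocks to migrate out of: the block's migratetype must already match the allocation's, while suitable_migration_target() (1345-1359) applies the mirror-image check to the free scanner unless cc->ignore_block_suitable short-circuits it. A model of the source-side policy; the CMA wrinkle behind the kernel's is_migrate_movable() is folded into a plain compare here:

#include <stdbool.h>
#include <stdio.h>

enum mt { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RECLAIMABLE };

struct cc_model {
        bool mode_async;         /* cc->mode == MIGRATE_ASYNC */
        bool direct_compaction;  /* cc->direct_compaction */
        enum mt migratetype;     /* migratetype of the pending allocation */
};

static bool suitable_migration_source(struct cc_model *cc, enum mt block_mt)
{
        if (!cc->mode_async || !cc->direct_compaction)
                return true;     /* sync and kcompactd take any block */

        if (cc->migratetype == MIGRATE_MOVABLE)
                return block_mt == MIGRATE_MOVABLE; /* kernel also takes CMA */

        return block_mt == cc->migratetype;
}

int main(void)
{
        struct cc_model cc = {
                .mode_async = true, .direct_compaction = true,
                .migratetype = MIGRATE_MOVABLE,
        };

        printf("%d\n", suitable_migration_source(&cc, MIGRATE_MOVABLE));   /* 1 */
        printf("%d\n", suitable_migration_source(&cc, MIGRATE_UNMOVABLE)); /* 0 */
        return 0;
}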
1371 freelist_scan_limit(struct compact_control *cc)
1375 return (COMPACT_CLUSTER_MAX >> min(shift, cc->fast_search_fail)) + 1;
1382 static inline bool compact_scanners_met(struct compact_control *cc)
1384 return (cc->free_pfn >> pageblock_order)
1385 <= (cc->migrate_pfn >> pageblock_order);
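compact_scanners_met() (1382-1385) is the loop-termination test: the zone is fully scanned once the free scanner, walking down, lands in the same pageblock as the migrate scanner walking up. The same comparison, runnable, with an illustrative pageblock order:

#include <stdbool.h>
#include <stdio.h>

#define PAGEBLOCK_ORDER 9   /* illustrative; fixed per-arch in the kernel */

/* Same comparison as lines 1384-1385. */
static bool compact_scanners_met(unsigned long free_pfn,
                                 unsigned long migrate_pfn)
{
        return (free_pfn >> PAGEBLOCK_ORDER) <=
               (migrate_pfn >> PAGEBLOCK_ORDER);
}

int main(void)
{
        printf("%d\n", compact_scanners_met(5120, 4096)); /* 0: blocks 10, 8 */
        printf("%d\n", compact_scanners_met(4300, 4400)); /* 1: both block 8 */
        return 0;
}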
1422 fast_isolate_around(struct compact_control *cc, unsigned long pfn)
1428 if (cc->nr_freepages >= cc->nr_migratepages)
1432 if (cc->direct_compaction && cc->mode == MIGRATE_ASYNC)
1436 start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn);
1437 end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone));
1439 page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone);
1443 isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false);
1446 if (start_pfn == end_pfn && !cc->no_set_skip_hint)
1451 static int next_search_order(struct compact_control *cc, int order)
1455 order = cc->order - 1;
1458 if (order == cc->search_order) {
1459 cc->search_order--;
1460 if (cc->search_order < 0)
1461 cc->search_order = cc->order - 1;
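next_search_order() (1451-1461) rotates the fast free-page search across free-list orders: walk downward from cc->order - 1, wrap around, and stop once the walk returns to cc->search_order, which is then advanced so the next call starts elsewhere. The lines elided from the listing are recalled from the upstream helper and may differ in detail:

#include <stdio.h>

struct cc_model {
        int order;         /* cc->order: the allocation we compact for */
        int search_order;  /* cc->search_order: where the rotation stands */
};

static int next_search_order(struct cc_model *cc, int order)
{
        order--;
        if (order < 0)
                order = cc->order - 1;

        /* Search wrapped around? (lines 1458-1461) */
        if (order == cc->search_order) {
                cc->search_order--;
                if (cc->search_order < 0)
                        cc->search_order = cc->order - 1;
                return -1;
        }

        return order;
}

int main(void)
{
        struct cc_model cc = { .order = 4, .search_order = 2 };
        int order;

        /* same loop shape as lines 1509-1511 */
        for (order = cc.search_order; order >= 0;
             order = next_search_order(&cc, order))
                printf("scan free_area[%d]\n", order);   /* 2, 1, 0, 3 */
        return 0;
}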
1468 static void fast_isolate_freepages(struct compact_control *cc)
1470 unsigned int limit = max(1U, freelist_scan_limit(cc) >> 1);
1480 if (cc->order <= 0)
1487 if (cc->free_pfn >= cc->zone->compact_init_free_pfn) {
1496 distance = (cc->free_pfn - cc->migrate_pfn);
1497 low_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 2));
1498 min_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 1));
1507 cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order);
1509 for (order = cc->search_order;
1511 order = next_search_order(cc, order)) {
1512 struct free_area *area = &cc->zone->free_area[order];
1522 spin_lock_irqsave(&cc->zone->lock, flags);
1533 cc->zone->zone_start_pfn);
1536 cc->fast_search_fail = 0;
1537 cc->search_order = order;
1571 cc->nr_freepages += nr_isolated;
1572 list_add_tail(&page->lru, &cc->freepages);
1576 order = cc->search_order + 1;
1581 spin_unlock_irqrestore(&cc->zone->lock, flags);
1584 if (cc->nr_freepages >= cc->nr_migratepages)
1595 trace_mm_compaction_fast_isolate_freepages(min_pfn, cc->free_pfn,
1599 cc->fast_search_fail++;
1608 cc->free_pfn = highest;
1610 if (cc->direct_compaction && pfn_valid(min_pfn)) {
1613 zone_end_pfn(cc->zone)),
1614 cc->zone);
1615 cc->free_pfn = min_pfn;
1621 if (highest && highest >= cc->zone->compact_cached_free_pfn) {
1623 cc->zone->compact_cached_free_pfn = highest;
1626 cc->total_free_scanned += nr_scanned;
1631 fast_isolate_around(cc, low_pfn);
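fast_isolate_freepages() (1468 onward) does not scan linearly from cc->free_pfn; lines 1496-1498 aim it at the last quarter of the span still separating the two scanners, tolerating hits down to the last half. The targeting math, runnable with an illustrative pageblock size:

#include <stdio.h>

#define PAGEBLOCK_NR 512UL   /* illustrative pageblock size in pages */

static unsigned long pageblock_start_pfn(unsigned long pfn)
{
        return pfn & ~(PAGEBLOCK_NR - 1);
}

int main(void)
{
        unsigned long migrate_pfn = 20000, free_pfn = 100000;

        /* the math from lines 1496-1498 */
        unsigned long distance = free_pfn - migrate_pfn;
        unsigned long low_pfn = pageblock_start_pfn(free_pfn - (distance >> 2));
        unsigned long min_pfn = pageblock_start_pfn(free_pfn - (distance >> 1));

        printf("prefer pfn >= %lu, accept pfn >= %lu\n",
               low_pfn, min_pfn);   /* 79872, 59904 */
        return 0;
}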
1638 static void isolate_freepages(struct compact_control *cc)
1640 struct zone *zone = cc->zone;
1646 struct list_head *freelist = &cc->freepages;
1650 fast_isolate_freepages(cc);
1651 if (cc->nr_freepages)
1665 isolate_start_pfn = cc->free_pfn;
1669 low_pfn = pageblock_end_pfn(cc->migrate_pfn);
1670 stride = cc->mode == MIGRATE_ASYNC ? COMPACT_CLUSTER_MAX : 1;
1674 * pages on cc->migratepages. We stop searching if the migrate
1703 if (!suitable_migration_target(cc, page))
1707 if (!isolation_suitable(cc, page))
1711 nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn,
1716 update_pageblock_skip(cc, page, block_start_pfn -
1720 if (cc->nr_freepages >= cc->nr_migratepages) {
1752 cc->free_pfn = isolate_start_pfn;
1765 struct compact_control *cc = (struct compact_control *)data;
1768 if (list_empty(&cc->freepages)) {
1769 isolate_freepages(cc);
1771 if (list_empty(&cc->freepages))
1775 dst = list_entry(cc->freepages.next, struct folio, lru);
1777 cc->nr_freepages--;
1789 struct compact_control *cc = (struct compact_control *)data;
1791 list_add(&dst->lru, &cc->freepages);
1792 cc->nr_freepages++;
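compaction_alloc() and compaction_free() (1765-1792) are the callback pair handed to migrate_pages() at 2515-2516: alloc pops a pre-isolated target folio off cc->freepages (refilling via isolate_freepages() when the list runs dry), and free pushes it back when a migration fails. A minimal model; the singly linked list stands in for the kernel's list_head of folios, and the refill step is elided:

#include <stdio.h>

struct folio_model { struct folio_model *next; };

struct cc_model {
        struct folio_model *freepages;  /* models cc->freepages */
        int nr_freepages;               /* models cc->nr_freepages */
};

/* compaction_alloc(): hand out a pre-isolated page as migration target. */
static struct folio_model *compaction_alloc(struct cc_model *cc)
{
        struct folio_model *dst = cc->freepages;

        if (!dst)
                return NULL;    /* kernel would call isolate_freepages() */
        cc->freepages = dst->next;
        cc->nr_freepages--;
        return dst;
}

/* compaction_free(): migration failed, return the target to the pool. */
static void compaction_free(struct cc_model *cc, struct folio_model *dst)
{
        dst->next = cc->freepages;
        cc->freepages = dst;
        cc->nr_freepages++;
}

int main(void)
{
        struct folio_model pages[2] = { { &pages[1] }, { NULL } };
        struct cc_model cc = { .freepages = &pages[0], .nr_freepages = 2 };

        struct folio_model *dst = compaction_alloc(&cc);
        compaction_free(&cc, dst);      /* pretend the migration failed */
        printf("nr_freepages=%d\n", cc.nr_freepages);   /* back to 2 */
        return 0;
}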
1817 update_fast_start_pfn(struct compact_control *cc, unsigned long pfn)
1819 if (cc->fast_start_pfn == ULONG_MAX)
1822 if (!cc->fast_start_pfn)
1823 cc->fast_start_pfn = pfn;
1825 cc->fast_start_pfn = min(cc->fast_start_pfn, pfn);
1829 reinit_migrate_pfn(struct compact_control *cc)
1831 if (!cc->fast_start_pfn || cc->fast_start_pfn == ULONG_MAX)
1832 return cc->migrate_pfn;
1834 cc->migrate_pfn = cc->fast_start_pfn;
1835 cc->fast_start_pfn = ULONG_MAX;
1837 return cc->migrate_pfn;
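update_fast_start_pfn() and reinit_migrate_pfn() (1817-1837) let a failed fast search rewind: track the lowest pfn the fast path handed out, and on failure pull cc->migrate_pfn back to it, with ULONG_MAX serving as the "already rewound" sentinel. Assembled from the fragments above into a runnable form:

#include <limits.h>
#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

struct cc_model {
        unsigned long fast_start_pfn;  /* 0 = unset, ULONG_MAX = disabled */
        unsigned long migrate_pfn;
};

static void update_fast_start_pfn(struct cc_model *cc, unsigned long pfn)
{
        if (cc->fast_start_pfn == ULONG_MAX)
                return;                 /* tracking disabled (line 1819) */

        if (!cc->fast_start_pfn)
                cc->fast_start_pfn = pfn;

        cc->fast_start_pfn = min(cc->fast_start_pfn, pfn);
}

static unsigned long reinit_migrate_pfn(struct cc_model *cc)
{
        if (!cc->fast_start_pfn || cc->fast_start_pfn == ULONG_MAX)
                return cc->migrate_pfn;

        cc->migrate_pfn = cc->fast_start_pfn;
        cc->fast_start_pfn = ULONG_MAX; /* one rewind only */

        return cc->migrate_pfn;
}

int main(void)
{
        struct cc_model cc = { .fast_start_pfn = 0, .migrate_pfn = 9000 };

        update_fast_start_pfn(&cc, 5000);
        update_fast_start_pfn(&cc, 3000);
        printf("rewound migrate_pfn=%lu\n", reinit_migrate_pfn(&cc)); /* 3000 */
        return 0;
}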
1845 static unsigned long fast_find_migrateblock(struct compact_control *cc)
1847 unsigned int limit = freelist_scan_limit(cc);
1850 unsigned long pfn = cc->migrate_pfn;
1856 if (cc->ignore_skip_hint)
1863 if (cc->finish_pageblock)
1871 if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn))
1879 if (cc->order <= PAGE_ALLOC_COSTLY_ORDER)
1888 if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE)
1897 distance = (cc->free_pfn - cc->migrate_pfn) >> 1;
1898 if (cc->migrate_pfn != cc->zone->zone_start_pfn)
1900 high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance);
1902 for (order = cc->order - 1;
1905 struct free_area *area = &cc->zone->free_area[order];
1913 spin_lock_irqsave(&cc->zone->lock, flags);
1937 update_fast_start_pfn(cc, free_pfn);
1939 if (pfn < cc->zone->zone_start_pfn)
1940 pfn = cc->zone->zone_start_pfn;
1941 cc->fast_search_fail = 0;
1946 spin_unlock_irqrestore(&cc->zone->lock, flags);
1949 cc->total_migrate_scanned += nr_scanned;
1956 cc->fast_search_fail++;
1957 pfn = reinit_migrate_pfn(cc);
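fast_find_migrateblock() (1845 onward) caps how far ahead of cc->migrate_pfn it will hunt for a movable pageblock: half the span to the free scanner, shrunk further once the scanner has left the zone start (1897-1900). The second shift below is recalled from the upstream function and is an assumption of this sketch:

#include <stdio.h>

#define PAGEBLOCK_NR 512UL   /* illustrative */

static unsigned long pageblock_start_pfn(unsigned long pfn)
{
        return pfn & ~(PAGEBLOCK_NR - 1);
}

int main(void)
{
        unsigned long zone_start = 0, migrate_pfn = 8192, free_pfn = 73728;

        /* lines 1897-1900; ">>= 2" recalled, not shown in the listing */
        unsigned long distance = (free_pfn - migrate_pfn) >> 1;

        if (migrate_pfn != zone_start)
                distance >>= 2;
        printf("search for a source block below pfn %lu\n",
               pageblock_start_pfn(migrate_pfn + distance));  /* 16384 */
        return 0;
}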
1967 static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
1975 (cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
1983 low_pfn = fast_find_migrateblock(cc);
1985 if (block_start_pfn < cc->zone->zone_start_pfn)
1986 block_start_pfn = cc->zone->zone_start_pfn;
1993 fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail;
2002 for (; block_end_pfn <= cc->free_pfn;
2004 cc->migrate_pfn = low_pfn = block_end_pfn,
2017 block_end_pfn, cc->zone);
2023 block_end_pfn = min(next_pfn, cc->free_pfn);
2035 low_pfn == cc->zone->zone_start_pfn) &&
2036 !fast_find_block && !isolation_suitable(cc, page))
2047 if (!suitable_migration_source(cc, page)) {
2048 update_cached_migrate(cc, block_end_pfn);
2053 if (isolate_migratepages_block(cc, low_pfn, block_end_pfn,
2065 return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
2169 static enum compact_result __compact_finished(struct compact_control *cc)
2172 const int migratetype = cc->migratetype;
2176 if (compact_scanners_met(cc)) {
2178 reset_cached_positions(cc->zone);
2186 if (cc->direct_compaction)
2187 cc->zone->compact_blockskip_flush = true;
2189 if (cc->whole_zone)
2195 if (cc->proactive_compaction) {
2199 pgdat = cc->zone->zone_pgdat;
2203 score = fragmentation_score_zone(cc->zone);
2214 if (is_via_compact_memory(cc->order))
2223 if (!pageblock_aligned(cc->migrate_pfn))
2228 for (order = cc->order; order <= MAX_ORDER; order++) {
2229 struct free_area *area = &cc->zone->free_area[order];
2260 if (cc->contended || fatal_signal_pending(current))
2266 static enum compact_result compact_finished(struct compact_control *cc)
2270 ret = __compact_finished(cc);
2271 trace_mm_compaction_finished(cc->zone, cc->order, ret);
2381 compact_zone(struct compact_control *cc, struct capture_control *capc)
2384 unsigned long start_pfn = cc->zone->zone_start_pfn;
2385 unsigned long end_pfn = zone_end_pfn(cc->zone);
2387 const bool sync = cc->mode != MIGRATE_ASYNC;
2395 cc->total_migrate_scanned = 0;
2396 cc->total_free_scanned = 0;
2397 cc->nr_migratepages = 0;
2398 cc->nr_freepages = 0;
2399 INIT_LIST_HEAD(&cc->freepages);
2400 INIT_LIST_HEAD(&cc->migratepages);
2402 cc->migratetype = gfp_migratetype(cc->gfp_mask);
2404 if (!is_via_compact_memory(cc->order)) {
2408 watermark = wmark_pages(cc->zone,
2409 cc->alloc_flags & ALLOC_WMARK_MASK);
2410 if (zone_watermark_ok(cc->zone, cc->order, watermark,
2411 cc->highest_zoneidx, cc->alloc_flags))
2415 if (!compaction_suitable(cc->zone, cc->order,
2416 cc->highest_zoneidx))
2424 if (compaction_restarting(cc->zone, cc->order))
2425 __reset_isolation_suitable(cc->zone);
2433 cc->fast_start_pfn = 0;
2434 if (cc->whole_zone) {
2435 cc->migrate_pfn = start_pfn;
2436 cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
2438 cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync];
2439 cc->free_pfn = cc->zone->compact_cached_free_pfn;
2440 if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
2441 cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
2442 cc->zone->compact_cached_free_pfn = cc->free_pfn;
2444 if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
2445 cc->migrate_pfn = start_pfn;
2446 cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
2447 cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
2450 if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn)
2451 cc->whole_zone = true;
2465 cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1];
2467 trace_mm_compaction_begin(cc, start_pfn, end_pfn, sync);
2472 while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) {
2474 unsigned long iteration_start_pfn = cc->migrate_pfn;
2484 cc->finish_pageblock = false;
2487 cc->finish_pageblock = true;
2491 switch (isolate_migratepages(cc)) {
2494 putback_movable_pages(&cc->migratepages);
2495 cc->nr_migratepages = 0;
2499 cc->zone->compact_cached_migrate_pfn[1] =
2500 cc->zone->compact_cached_migrate_pfn[0];
2506 * previous cc->order aligned block.
2511 last_migrated_pfn = max(cc->zone->zone_start_pfn,
2512 pageblock_start_pfn(cc->migrate_pfn - 1));
2515 err = migrate_pages(&cc->migratepages, compaction_alloc,
2516 compaction_free, (unsigned long)cc, cc->mode,
2519 trace_mm_compaction_migratepages(cc, nr_succeeded);
2522 cc->nr_migratepages = 0;
2524 putback_movable_pages(&cc->migratepages);
2529 if (err == -ENOMEM && !compact_scanners_met(cc)) {
2544 if (!pageblock_aligned(cc->migrate_pfn) &&
2545 !cc->ignore_skip_hint && !cc->finish_pageblock &&
2546 (cc->mode < MIGRATE_SYNC)) {
2547 cc->finish_pageblock = true;
2554 if (cc->order == COMPACTION_HPAGE_ORDER)
2570 * cc->order aligned block where we migrated from? If yes,
2575 if (cc->order > 0 && last_migrated_pfn) {
2577 block_start_pfn(cc->migrate_pfn, cc->order);
2580 lru_add_drain_cpu_zone(cc->zone);
2592 if (cc->nr_freepages > 0) {
2593 unsigned long free_pfn = release_freepages(&cc->freepages);
2595 cc->nr_freepages = 0;
2603 if (free_pfn > cc->zone->compact_cached_free_pfn)
2604 cc->zone->compact_cached_free_pfn = free_pfn;
2607 count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned);
2608 count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned);
2610 trace_mm_compaction_end(cc, start_pfn, end_pfn, sync, ret);
2612 VM_BUG_ON(!list_empty(&cc->freepages));
2613 VM_BUG_ON(!list_empty(&cc->migratepages));
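compact_zone() (2381-2613) is the driver: initialize the scanners from the cached positions or the whole zone (2433-2451), then loop over compact_finished(), isolate_migratepages(), and migrate_pages() until the scanners meet or the allocation can succeed. A control-flow skeleton with hypothetical stubs standing in for every helper; error, skip-hint, and capture handling is elided:

#include <stdio.h>

enum compact_result { COMPACT_CONTINUE, COMPACT_COMPLETE, COMPACT_CONTENDED };
enum isolate_result { ISOLATE_ABORT, ISOLATE_NONE, ISOLATE_SUCCESS };

/* Hypothetical stubs so the control flow compiles and runs. */
static int batches;
static enum compact_result compact_finished_stub(void)
{
        return batches < 3 ? COMPACT_CONTINUE : COMPACT_COMPLETE;
}
static enum isolate_result isolate_migratepages_stub(void)
{
        return ISOLATE_SUCCESS;
}
static int migrate_pages_stub(void) { return 0; }  /* 0: all pages moved */

int main(void)
{
        enum compact_result ret;

        /* mirrors the loop at line 2472 */
        while ((ret = compact_finished_stub()) == COMPACT_CONTINUE) {
                batches++;
                switch (isolate_migratepages_stub()) {
                case ISOLATE_ABORT:  /* contended: pages put back (2494-2495) */
                        return COMPACT_CONTENDED;
                case ISOLATE_NONE:   /* empty block: advance and retry */
                        continue;
                case ISOLATE_SUCCESS:
                        break;
                }
                if (migrate_pages_stub()) {
                        /* kernel: putback_movable_pages(); -ENOMEM may end
                         * the run early unless the scanners already met */
                }
        }
        printf("result %d after %d batches\n", ret, batches);
        return 0;
}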
2624 struct compact_control cc = {
2639 .cc = &cc,
2651 ret = compact_zone(&cc, &capc);
2764 struct compact_control cc = {
2778 cc.zone = zone;
2780 compact_zone(&cc, NULL);
2783 cc.total_migrate_scanned);
2785 cc.total_free_scanned);
2795 struct compact_control cc = {
2810 cc.zone = zone;
2812 compact_zone(&cc, NULL);
2945 struct compact_control cc = {
2953 trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
2954 cc.highest_zoneidx);
2957 for (zoneid = 0; zoneid <= cc.highest_zoneidx; zoneid++) {
2964 if (compaction_deferred(zone, cc.order))
2968 if (zone_watermark_ok(zone, cc.order,
2972 if (!compaction_suitable(zone, cc.order, zoneid))
2978 cc.zone = zone;
2979 status = compact_zone(&cc, NULL);
2982 compaction_defer_reset(zone, cc.order, false);
2987 * order >= cc.order. This is ratelimited by the
2996 defer_compaction(zone, cc.order);
3000 cc.total_migrate_scanned);
3002 cc.total_free_scanned);
3010 if (pgdat->kcompactd_max_order <= cc.order)
3012 if (pgdat->kcompactd_highest_zoneidx >= cc.highest_zoneidx)
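The remaining matches (2624 onward) are the callers: each builds a compact_control on the stack with designated initializers, then repoints cc.zone at one candidate zone after another before calling compact_zone(), as compact_node() and kcompactd_do_work() do at 2778-2780 and 2978-2979. A simplified mirror of that usage pattern, with a stub in place of compact_zone():

#include <stdio.h>

struct zone_model { const char *name; };

struct cc_model {
        int order;
        struct zone_model *zone;
        unsigned long total_migrate_scanned;
        unsigned long total_free_scanned;
};

/* Stub: the real compact_zone() (line 2381) does the actual work. */
static void compact_zone_stub(struct cc_model *cc)
{
        cc->total_migrate_scanned += 100;   /* pretend pages were scanned */
        printf("compacting %s at order %d\n", cc->zone->name, cc->order);
}

int main(void)
{
        struct zone_model zones[] = { { "DMA32" }, { "Normal" } };
        struct cc_model cc = { .order = 3 };   /* on stack, like line 2764 */
        int i;

        for (i = 0; i < 2; i++) {
                cc.zone = &zones[i];           /* matches "cc.zone = zone;" */
                compact_zone_stub(&cc);
        }
        printf("total scanned: %lu\n", cc.total_migrate_scanned);
        return 0;
}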