Lines Matching defs:order
48 #define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order))
49 #define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order))
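These two macros clamp a pfn to the start and end of the order-sized block containing it. A minimal userspace sketch of the same arithmetic; round_down() and ALIGN() are simplified stand-ins here, since the kernel versions live in other headers:

    #include <stdio.h>

    /* Simplified stand-ins for the kernel helpers; both assume a power-of-two step. */
    #define round_down(x, y) ((x) & ~((unsigned long)(y) - 1))
    #define ALIGN(x, a)      (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    #define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order))
    #define block_end_pfn(pfn, order)   ALIGN((pfn) + 1, 1UL << (order))

    int main(void)
    {
        unsigned long pfn = 1000005UL;
        unsigned int order = 9; /* a 512-page block */

        /* Half-open [start, end) pfn range of the block containing pfn. */
        printf("[%lu, %lu)\n", block_start_pfn(pfn, order),
               block_end_pfn(pfn, order));
        return 0;
    }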
59 * Page order with-respect-to which proactive compaction
89 unsigned int i, order, nr_pages;
96 order = page_private(page);
97 nr_pages = 1 << order;
99 post_alloc_hook(page, order, __GFP_MOVABLE);
100 if (order)
101 split_page(page, order);
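The order recovered here from page_private() was stashed by the free scanner with set_page_private() (line 632 below). A toy model of that handoff, assuming nothing about struct page beyond a single private field used to carry the order:

    #include <stdio.h>

    /* Toy stand-in for struct page: only the private field matters here. */
    struct toy_page {
        unsigned long private;
    };

    static void set_page_private(struct toy_page *page, unsigned long val)
    {
        page->private = val;
    }

    static unsigned long page_private(const struct toy_page *page)
    {
        return page->private;
    }

    int main(void)
    {
        struct toy_page page;
        unsigned int order, nr_pages;

        /* Free scanner side: remember the buddy order of the isolated page. */
        set_page_private(&page, 3);

        /* split_map_pages() side: recover the order and expand to base pages. */
        order = page_private(&page);
        nr_pages = 1U << order;
        printf("order %u -> %u order-0 pages\n", order, nr_pages);
        return 0;
    }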
160 void defer_compaction(struct zone *zone, int order)
165 if (order < zone->compact_order_failed)
166 zone->compact_order_failed = order;
171 trace_mm_compaction_defer_compaction(zone, order);
175 bool compaction_deferred(struct zone *zone, int order)
179 if (order < zone->compact_order_failed)
188 trace_mm_compaction_deferred(zone, order);
194 * Update defer tracking counters after successful compaction of given order,
198 void compaction_defer_reset(struct zone *zone, int order,
205 if (order >= zone->compact_order_failed)
206 zone->compact_order_failed = order + 1;
208 trace_mm_compaction_defer_reset(zone, order);
212 bool compaction_restarting(struct zone *zone, int order)
214 if (order < zone->compact_order_failed)
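Lines 160-214 are the per-zone deferral backoff: after a failure at a given order, up to 1 << compact_defer_shift later attempts at that order or above are skipped, and the window grows with each failure up to a cap. A userspace sketch of the state machine, assuming COMPACT_MAX_DEFER_SHIFT == 6 and leaving out locking and tracepoints:

    #include <stdbool.h>
    #include <stdio.h>

    #define COMPACT_MAX_DEFER_SHIFT 6

    /* The three per-zone fields the deferral logic uses. */
    struct zone_defer {
        unsigned int compact_considered;
        unsigned int compact_defer_shift;
        int compact_order_failed;
    };

    /* Record a failure: reset the attempt counter and widen the backoff window. */
    static void defer_compaction(struct zone_defer *z, int order)
    {
        z->compact_considered = 0;
        if (z->compact_defer_shift < COMPACT_MAX_DEFER_SHIFT)
            z->compact_defer_shift++;
        if (order < z->compact_order_failed)
            z->compact_order_failed = order;
    }

    /* Should this attempt be skipped? Orders below the failed one never are. */
    static bool compaction_deferred(struct zone_defer *z, int order)
    {
        unsigned int defer_limit = 1U << z->compact_defer_shift;

        if (order < z->compact_order_failed)
            return false;
        if (++z->compact_considered >= defer_limit) {
            z->compact_considered = defer_limit;
            return false;
        }
        return true;
    }

    int main(void)
    {
        struct zone_defer z = { 0, 0, 9 };
        int i, skipped = 0;

        defer_compaction(&z, 5); /* first failure at order 5 */
        defer_compaction(&z, 5); /* second failure: defer_limit is now 1 << 2 */
        for (i = 0; i < 10; i++)
            skipped += compaction_deferred(&z, 5);
        printf("skipped %d of 10 attempts\n", skipped);
        return 0;
    }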
241 * released. It is always pointless to compact pages of such order (if they are
565 unsigned int order;
599 const unsigned int order = compound_order(page);
601 if (likely(order < MAX_ORDER)) {
602 blockpfn += (1UL << order) - 1;
603 cursor += (1UL << order) - 1;
627 /* Found a free page, will break it into order-0 pages */
628 order = buddy_order(page);
629 isolated = __isolate_free_page(page, order);
632 set_page_private(page, order);
722 * is more than pageblock order. In this case, we adjust
748 * pageblock_nr_pages for some non-negative n. (Max order
838 next_skip_pfn = block_end_pfn(low_pfn, cc->order);
847 * previous order-aligned block, and did not skip it due
855 * We failed to isolate in the previous order-aligned
858 * next_skip_pfn by 1 << order, as low_pfn might have
860 * a compound or a high-order buddy page in the
863 next_skip_pfn = block_end_pfn(low_pfn, cc->order);
899 * Skip if free. We read page order here without zone lock
909 * a valid page order. Consider only values in the
910 * valid order range to prevent low_pfn overflow.
926 const unsigned int order = compound_order(page);
928 if (likely(order < MAX_ORDER))
929 low_pfn += (1UL << order) - 1;
992 * and it's on LRU. It can only be a THP so the order
1041 * instead of migrating, as we cannot form the cc->order buddy
1060 next_skip_pfn += 1UL << cc->order;
1177 * pageblock, so it's not worth to check order for valid range.
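Around lines 899-929 the migration scanner reads the order of a free or compound page without the zone lock, so the value may be stale or garbage; it is only trusted when it falls inside the valid order range before low_pfn is advanced past the page. A small sketch of the clamped skip, taking MAX_ORDER as 11 purely for illustration:

    #include <stdio.h>

    #define MAX_ORDER 11 /* illustrative value; the real one is per-config */

    /*
     * Advance the scan cursor past a page whose order was read racily.
     * Bogus orders are ignored so low_pfn cannot jump far or overflow.
     */
    static unsigned long skip_past(unsigned long low_pfn, unsigned int order)
    {
        if (order < MAX_ORDER)
            low_pfn += (1UL << order) - 1;
        return low_pfn; /* the loop's pfn++ finishes the jump */
    }

    int main(void)
    {
        printf("%lu\n", skip_past(1024, 9));  /* sane order: skip the block */
        printf("%lu\n", skip_past(1024, 57)); /* garbage order: no jump */
        return 0;
    }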
1279 static int next_search_order(struct compact_control *cc, int order)
1281 order--;
1282 if (order < 0)
1283 order = cc->order - 1;
1286 if (order == cc->search_order) {
1289 cc->search_order = cc->order - 1;
1293 return order;
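next_search_order() (lines 1279-1293) and the loop at lines 1336-1340 walk the free-list orders below cc->order, starting from the last successful order and wrapping around so each order gets one chance. A sketch of that visiting sequence, written independently of the exact kernel body; walk_search_orders(), target_order and start_order are names made up for this example:

    #include <stdio.h>

    /*
     * Print the order visiting sequence: start_order, start_order-1, ...,
     * wrap to target_order-1, and stop once every order below target_order
     * has been tried once.
     */
    static void walk_search_orders(int target_order, int start_order)
    {
        int order = start_order;
        int visited;

        for (visited = 0; visited < target_order; visited++) {
            printf("%d ", order);
            order--;
            if (order < 0)
                order = target_order - 1; /* wrap around */
        }
        printf("\n");
    }

    int main(void)
    {
        walk_search_orders(5, 2); /* e.g. cc->order == 5, last success at order 2 */
        return 0;
    }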
1306 int order;
1308 /* Full compaction passes in a negative order */
1309 if (cc->order <= 0)
1333 * Search starts from the last successful isolation order or the next
1334 * order to search after a previous failure
1336 cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order);
1338 for (order = cc->search_order;
1339 !page && order >= 0;
1340 order = next_search_order(cc, order)) {
1341 struct free_area *area = &cc->zone->free_area[order];
1366 cc->search_order = order;
1395 if (__isolate_free_page(page, order)) {
1396 set_page_private(page, order);
1397 nr_isolated = 1 << order;
1403 order = cc->search_order + 1;
1411 * Smaller scan on next order so the total scan is related
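The comment at line 1411 is about bounding the total work of the fast search: the per-order scan budget shrinks each time the search moves to another order, so the sum over all orders stays proportional to the initial freelist_scan_limit. A sketch assuming a halve-with-a-floor-of-one schedule; the kernel's exact schedule may differ:

    #include <stdio.h>

    int main(void)
    {
        unsigned int limit = 32; /* illustrative initial scan budget */
        unsigned int total = 0;
        int order;

        for (order = 9; order >= 0; order--) {
            total += limit;                     /* pages examined at this order */
            limit = limit > 1 ? limit >> 1 : 1; /* smaller scan on next order */
        }
        /* The geometric part is bounded by 2 * limit; the floor adds one per leftover order. */
        printf("total scanned: %u (initial limit 32)\n", total);
        return 0;
    }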
1662 int order;
1682 if (cc->order <= PAGE_ALLOC_COSTLY_ORDER)
1705 for (order = cc->order - 1;
1706 order >= PAGE_ALLOC_COSTLY_ORDER && !found_block && nr_scanned < limit;
1707 order--) {
1708 struct free_area *area = &cc->zone->free_area[order];
1871 * order == -1 is expected when compacting via
1874 static inline bool is_via_compact_memory(int order)
1876 return order == -1;
1951 unsigned int order;
1994 if (is_via_compact_memory(cc->order))
2008 for (order = cc->order; order < MAX_ORDER; order++) {
2009 struct free_area *area = &cc->zone->free_area[order];
2026 if (find_suitable_fallback(area, order, migratetype,
2064 trace_mm_compaction_finished(cc->zone, cc->order, ret);
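compact_finished() (lines 1951-2064) declares success when free_area[order] holds a usable page for any order from cc->order upward, either of the requested migratetype or of a suitable fallback (line 2026). A toy version of that scan; free_count and order_is_allocatable are invented for the example and stand in for struct free_area and the fallback check:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_ORDER     11 /* illustrative */
    #define MIGRATE_TYPES 3  /* toy: 0 = UNMOVABLE, 1 = MOVABLE, 2 = RECLAIMABLE */

    /* Toy free_area: how many free blocks of each migratetype at each order. */
    static unsigned long free_count[MAX_ORDER][MIGRATE_TYPES];

    /* Is an allocation of this order and migratetype now satisfiable? */
    static bool order_is_allocatable(unsigned int order, int migratetype)
    {
        for (; order < MAX_ORDER; order++) {
            if (free_count[order][migratetype])
                return true;
            /* The real code also accepts suitable fallback migratetypes here. */
        }
        return false;
    }

    int main(void)
    {
        free_count[5][1] = 1; /* one free order-5 MOVABLE block */
        printf("order 3 movable: %s\n", order_is_allocatable(3, 1) ? "yes" : "no");
        printf("order 7 movable: %s\n", order_is_allocatable(7, 1) ? "yes" : "no");
        return 0;
    }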
2078 static enum compact_result __compaction_suitable(struct zone *zone, int order,
2085 if (is_via_compact_memory(order))
2090 * If watermarks for high-order allocation are already met, there
2093 if (zone_watermark_ok(zone, order, watermark, highest_zoneidx,
2098 * Watermarks for order-0 must be met for compaction to be able to
2111 watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
2113 watermark += compact_gap(order);
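Lines 2078-2113: compaction needs spare order-0 pages for the scanners to work with, so the zone's base watermark is raised by compact_gap(order), twice the requested block size, before the order-0 check. A sketch of that arithmetic with made-up watermark numbers; the real check goes through __zone_watermark_ok() with the zone's actual counters:

    #include <stdbool.h>
    #include <stdio.h>

    /* Twice the requested block size, matching the kernel's compact_gap(). */
    static unsigned long compact_gap(unsigned int order)
    {
        return 2UL << order;
    }

    /* Toy order-0 check: is enough memory free above the raised watermark? */
    static bool suitable(unsigned long free_pages, unsigned long watermark,
                         unsigned int order)
    {
        return free_pages > watermark + compact_gap(order);
    }

    int main(void)
    {
        /* min watermark of 1024 pages, order-9 request: need 1024 + 1024 free. */
        printf("free=1500: %s\n", suitable(1500, 1024, 9) ? "ok" : "skip");
        printf("free=3000: %s\n", suitable(3000, 1024, 9) ? "ok" : "skip");
        return 0;
    }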
2121 enum compact_result compaction_suitable(struct zone *zone, int order,
2128 ret = __compaction_suitable(zone, order, alloc_flags, highest_zoneidx,
2135 * watermarks, but we already failed the high-order watermark check
2146 if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) {
2147 fragindex = fragmentation_index(zone, order);
2152 trace_mm_compaction_suitable(zone, order, ret);
2159 bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
2176 * want to trash just for a single high order allocation which
2180 available = zone_reclaimable_pages(zone) / order;
2182 compact_result = __compaction_suitable(zone, order, alloc_flags,
2213 ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,
2226 if (compaction_restarting(cc->zone, cc->order))
2307 * previous cc->order aligned block.
2337 * order-aligned block, so skip the rest of it.
2342 cc->migrate_pfn - 1, cc->order);
2351 * cc->order aligned block where we migrated from? If yes,
2356 if (cc->order > 0 && last_migrated_pfn) {
2358 block_start_pfn(cc->migrate_pfn, cc->order);
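Lines 2307-2358: after each migration batch, compact_zone() checks whether the migration scanner has moved past the cc->order aligned block it last freed pages in; if so, locally cached pages are drained so they can merge into a block of the requested order. A sketch of just the block-crossing test; left_last_block() is a name invented for the example:

    #include <stdbool.h>
    #include <stdio.h>

    #define block_start_pfn(pfn, order) ((pfn) & ~((1UL << (order)) - 1))

    /* Has the migration scanner left the order-aligned block it last freed in? */
    static bool left_last_block(unsigned long migrate_pfn,
                                unsigned long last_migrated_pfn, int order)
    {
        if (order <= 0 || !last_migrated_pfn)
            return false;
        return last_migrated_pfn < block_start_pfn(migrate_pfn, order);
    }

    int main(void)
    {
        /* order-9 blocks: pfn 1536 starts a new block relative to pfn 1300. */
        printf("%d\n", left_last_block(1536, 1300, 9)); /* 1: time to drain */
        printf("%d\n", left_last_block(1400, 1300, 9)); /* 0: same block */
        return 0;
    }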
2403 static enum compact_result compact_zone_order(struct zone *zone, int order,
2410 .order = order,
2411 .search_order = order,
2455 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
2457 * @order: The order of the current allocation
2465 enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
2481 trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);
2489 && compaction_deferred(zone, order)) {
2494 status = compact_zone_order(zone, order, gfp_mask, prio,
2506 compaction_defer_reset(zone, order, false);
2518 defer_compaction(zone, order);
2547 .order = -1,
2576 .order = -1,
2692 * order is allocatable.
2697 .order = pgdat->kcompactd_max_order,
2704 trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
2715 if (compaction_deferred(zone, cc.order))
2718 if (compaction_suitable(zone, cc.order, 0, zoneid) !=
2729 compaction_defer_reset(zone, cc.order, false);
2734 * order >= cc.order. This is ratelimited by the
2743 defer_compaction(zone, cc.order);
2757 * the requested order/highest_zoneidx in case it was higher/tighter
2760 if (pgdat->kcompactd_max_order <= cc.order)
2766 void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx)
2768 if (!order)
2771 if (pgdat->kcompactd_max_order < order)
2772 pgdat->kcompactd_max_order = order;
2787 trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
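wakeup_kcompactd() (lines 2766-2787) only records the request: order-0 wakeups are ignored, and the node keeps the largest order asked for since kcompactd last ran, which kcompactd_do_work() then picks up as cc.order (line 2697). A toy sketch of that accumulation; toy_pgdat is a stand-in for pg_data_t and the real function also raises highest_zoneidx and wakes the thread:

    #include <stdio.h>

    /* Toy per-node state: the largest order requested since kcompactd last ran. */
    struct toy_pgdat {
        int kcompactd_max_order;
    };

    static void wakeup_kcompactd(struct toy_pgdat *pgdat, int order)
    {
        if (!order)
            return; /* nothing to do for order-0 */
        if (pgdat->kcompactd_max_order < order)
            pgdat->kcompactd_max_order = order;
    }

    int main(void)
    {
        struct toy_pgdat pgdat = { 0 };

        wakeup_kcompactd(&pgdat, 3);
        wakeup_kcompactd(&pgdat, 9);
        wakeup_kcompactd(&pgdat, 2);
        printf("kcompactd will compact for order %d\n", pgdat.kcompactd_max_order);
        return 0;
    }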