Lines Matching refs:order

243  * change gfp_allowed_mask in order to avoid using I/O during memory allocations
282 static void __free_pages_ok(struct page *page, unsigned int order,
362 * many cases very high-order allocations like THP are likely to be
419 static inline void kasan_free_nondeferred_pages(struct page *page, int order)
422 kasan_free_pages(page, order);
683 * Higher-order pages are called "compound pages". They are structured thusly:
693 * The first tail page's ->compound_order holds the order of allocation.
694 * This usage means that zero-order pages may not be compound.
703 void prep_compound_page(struct page *page, unsigned int order)
706 int nr_pages = 1 << order;
717 set_compound_order(page, order);
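
A minimal userspace sketch of the compound-page layout described by prep_compound_page() above: the head page is marked, every tail points back at the head, and the first tail records the order, which is why an order-0 page has nowhere to store it and cannot be compound. The mock_page type and all names here are illustrative, not the kernel's struct page.

#include <stdio.h>
#include <stdbool.h>

struct mock_page {
	bool head;                        /* PageHead() analogue */
	struct mock_page *compound_head;  /* set on every tail page */
	unsigned int compound_order;      /* meaningful on the first tail only */
};

static void mock_prep_compound_page(struct mock_page *page, unsigned int order)
{
	int nr_pages = 1 << order;
	int i;

	page[0].head = true;
	for (i = 1; i < nr_pages; i++)
		page[i].compound_head = &page[0];
	/* The order lives in the first tail page, so order 0 has no room for it. */
	page[1].compound_order = order;
}

int main(void)
{
	struct mock_page pages[8] = { 0 };

	mock_prep_compound_page(pages, 3);   /* order-3 block: 8 base pages */
	printf("order %u -> %d base pages\n",
	       pages[1].compound_order, 1 << pages[1].compound_order);
	return 0;
}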
768 unsigned int order, int migratetype)
773 if (order >= debug_guardpage_minorder())
778 set_page_private(page, order);
780 __mod_zone_freepage_state(zone, -(1 << order), migratetype);
786 unsigned int order, int migratetype)
795 __mod_zone_freepage_state(zone, (1 << order), migratetype);
799 unsigned int order, int migratetype) { return false; }
801 unsigned int order, int migratetype) {}
804 static inline void set_buddy_order(struct page *page, unsigned int order)
806 set_page_private(page, order);
815 * (c) a page and its buddy have the same order &&
821 * For recording page's order, we use page_private(page).
824 unsigned int order)
829 if (buddy_order(buddy) != order)
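
The buddy of a 2^order-aligned block is the block whose pfn differs only in bit 'order', which is the XOR that __find_buddy_pfn() performs, and page_is_buddy() then requires the candidate to be free at that same order (the buddy_order() check above, backed by page_private()). A standalone demo of that arithmetic; plain integers stand in for pfns.

#include <stdio.h>

/* Flip bit 'order' of the pfn to get the buddy's pfn; AND-ing the two
 * gives the start of the merged block one order up. */
static unsigned long find_buddy_pfn(unsigned long pfn, unsigned int order)
{
	return pfn ^ (1UL << order);
}

int main(void)
{
	unsigned long pfn = 0x1234;          /* arbitrary order-2-aligned pfn */
	unsigned int order = 2;
	unsigned long buddy = find_buddy_pfn(pfn, order);
	unsigned long combined = pfn & buddy;  /* start of the merged order+1 block */

	printf("pfn 0x%lx, buddy 0x%lx, merged order-%u block at 0x%lx\n",
	       pfn, buddy, order + 1, combined);
	return 0;
}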
857 int order, int migratetype)
859 if (!capc || order != capc->cc->order)
868 * Do not let lower order allocations pollute a movable pageblock.
871 * have trouble finding a high-order free page.
873 if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
888 int order, int migratetype)
896 unsigned int order, int migratetype)
898 struct free_area *area = &zone->free_area[order];
906 unsigned int order, int migratetype)
908 struct free_area *area = &zone->free_area[order];
920 unsigned int order, int migratetype)
922 struct free_area *area = &zone->free_area[order];
928 unsigned int order)
937 zone->free_area[order].nr_free--;
942 * of the next-highest order is free. If it is, it's possible
946 * as a higher order page
950 struct page *page, unsigned int order)
955 if (order >= MAX_ORDER - 2)
963 buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
967 page_is_buddy(higher_page, higher_buddy, order + 1);
983 * free pages of length of (1 << order) and marked with PageBuddy.
984 * Page's order is recorded in page_private(page) field.
996 struct zone *zone, unsigned int order,
1013 __mod_zone_freepage_state(zone, 1 << order, migratetype);
1015 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
1019 while (order < max_order) {
1020 if (compaction_capture(capc, page, order, migratetype)) {
1021 __mod_zone_freepage_state(zone, -(1 << order),
1025 buddy_pfn = __find_buddy_pfn(pfn, order);
1030 if (!page_is_buddy(page, buddy, order))
1034 * merge with it and move up one order.
1037 clear_page_guard(zone, buddy, order, migratetype);
1039 del_page_from_free_list(buddy, zone, order);
1043 order++;
1045 if (order < MAX_ORDER - 1) {
1046 /* If we are here, it means order is >= pageblock_order.
1052 * low-order merging.
1057 buddy_pfn = __find_buddy_pfn(pfn, order);
1066 max_order = order + 1;
1071 set_buddy_order(page, order);
1075 else if (is_shuffle_order(order))
1078 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);
1081 add_to_free_list_tail(page, zone, order, migratetype);
1083 add_to_free_list(page, zone, order, migratetype);
1087 page_reporting_notify_free(order);
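
The __free_one_page() lines above are the buddy coalescing loop: while the buddy of the block being freed is itself free at the same order, pull it off its free list, merge the pair, and move up one order; buddy_merge_likely() then only decides whether the final block goes to the head or tail of its list. A toy userspace model of just that loop, assuming flat arrays for the per-order free lists and ignoring guard pages, migratetypes, shuffling, and the pageblock-isolation special case.

#include <stdio.h>
#include <stdbool.h>

#define TOY_MAX_ORDER 4
#define TOY_SLOTS     32

static unsigned long freelist[TOY_MAX_ORDER][TOY_SLOTS];
static int nfree[TOY_MAX_ORDER];

/* If pfn is recorded as free at this order, remove it and report success. */
static bool take_if_free(unsigned long pfn, unsigned int order)
{
	for (int i = 0; i < nfree[order]; i++) {
		if (freelist[order][i] == pfn) {
			freelist[order][i] = freelist[order][--nfree[order]];
			return true;
		}
	}
	return false;
}

static void free_one(unsigned long pfn, unsigned int order)
{
	while (order < TOY_MAX_ORDER - 1) {
		unsigned long buddy = pfn ^ (1UL << order);

		if (!take_if_free(buddy, order))
			break;            /* buddy busy or split: stop merging */
		pfn &= buddy;             /* merged block starts at the lower pfn */
		order++;
	}
	freelist[order][nfree[order]++] = pfn;
}

int main(void)
{
	free_one(0, 0);   /* pfn 0, order 0 */
	free_one(1, 0);   /* buddy of pfn 0   -> merges to order 1 at pfn 0 */
	free_one(2, 1);   /* buddy of that    -> merges to order 2 at pfn 0 */

	for (int o = 0; o < TOY_MAX_ORDER; o++)
		printf("order %d: %d free block(s)\n", o, nfree[o]);
	return 0;
}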
1213 unsigned int order, bool check_free)
1219 trace_mm_page_free(page, order);
1221 if (unlikely(PageHWPoison(page)) && !order) {
1227 __memcg_kmem_uncharge_page(page, order);
1228 reset_page_owner(page, order);
1234 * avoid checking PageCompound for order-0 pages.
1236 if (unlikely(order)) {
1240 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
1244 for (i = 1; i < (1 << order); i++) {
1257 __memcg_kmem_uncharge_page(page, order);
1265 reset_page_owner(page, order);
1269 PAGE_SIZE << order);
1271 PAGE_SIZE << order);
1274 kernel_init_free_pages(page, 1 << order);
1276 kernel_poison_pages(page, 1 << order, 0);
1282 arch_free_page(page, order);
1285 kernel_map_pages(page, 1 << order, 0);
1287 kasan_free_nondeferred_pages(page, order);
1294 * With DEBUG_VM enabled, order-0 pages are checked immediately when being freed
1312 * With DEBUG_VM disabled, order-0 pages being freed are checked only when
1313 * moving from pcp lists to free list in order to reduce overhead. With
1342 * Assumes all pages on list are in same zone, and of same order.
1435 unsigned int order,
1443 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
1521 static void __free_pages_ok(struct page *page, unsigned int order,
1528 if (!free_pages_prepare(page, order, true))
1533 __count_vm_events(PGFREE, 1 << order);
1534 free_one_page(page_zone(page), page, pfn, order, migratetype,
1539 void __free_pages_core(struct page *page, unsigned int order)
1541 unsigned int nr_pages = 1 << order;
1565 __free_pages_ok(page, order, FPI_TO_TAIL);
1613 unsigned int order)
1617 __free_pages_core(page, order);
1839 * In order to try and keep some memory in the cache we have the loop
1840 * broken along max page order boundaries. This way we will not cause
1993 * deferred pages to satisfy the allocation specified by order, rounded up to
2007 deferred_grow_zone(struct zone *zone, unsigned int order)
2009 unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
2074 _deferred_grow_zone(struct zone *zone, unsigned int order)
2076 return deferred_grow_zone(zone, order);
2157 * The order of subdivision here is critical for the IO subsystem.
2158 * Please do not alter this order without good reasons and regression
2160 * the order in which smaller blocks are delivered depends on the order
2162 * influencing the order in which pages are delivered to the IO
2227 * With DEBUG_VM enabled, order-0 pages are checked for expected state when
2245 * With DEBUG_VM disabled, free order-0 pages are checked for expected state
2262 static bool check_new_pages(struct page *page, unsigned int order)
2265 for (i = 0; i < (1 << order); i++) {
2275 inline void post_alloc_hook(struct page *page, unsigned int order,
2281 arch_alloc_page(page, order);
2283 kernel_map_pages(page, 1 << order, 1);
2284 kasan_alloc_pages(page, order);
2285 kernel_poison_pages(page, 1 << order, 1);
2286 set_page_owner(page, order, gfp_flags);
2289 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
2292 post_alloc_hook(page, order, gfp_flags);
2295 kernel_init_free_pages(page, 1 << order);
2297 if (order && (gfp_flags & __GFP_COMP))
2298 prep_compound_page(page, order);
2317 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
2325 for (current_order = order; current_order < MAX_ORDER; ++current_order) {
2331 expand(zone, page, order, current_order, migratetype);
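
__rmqueue_smallest() scans upward from the requested order until it finds a non-empty free area, and expand() then hands the unused halves back one per order; that halving sequence is the "order of subdivision" the comment above calls critical for the IO subsystem. A sketch of the split arithmetic only; expand_demo() is an illustrative name, not the kernel function.

#include <stdio.h>

/* When an order-'high' block serves an order-'low' request, each iteration
 * returns the upper half of what remains to the next lower order, leaving
 * page[0 .. (1 << low) - 1] for the caller. Offsets only, no real lists. */
static void expand_demo(unsigned int low, unsigned int high)
{
	unsigned long size = 1UL << high;

	while (high > low) {
		high--;
		size >>= 1;
		printf("put back order-%u half at page offset %lu\n", high, size);
	}
	printf("caller keeps order-%u block at offset 0\n", low);
}

int main(void)
{
	expand_demo(0, 3);   /* order-3 block serving an order-0 request */
	return 0;
}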
2341 * This array describes the order lists are fallen back to when
2358 unsigned int order)
2360 return __rmqueue_smallest(zone, order, MIGRATE_CMA);
2364 unsigned int order) { return NULL; }
2378 unsigned int order;
2405 order = buddy_order(page);
2406 move_to_free_list(page, zone, order, migratetype);
2407 pfn += 1 << order;
2408 pages_moved += 1 << order;
2459 static bool can_steal_fallback(unsigned int order, int start_mt)
2462 * Leaving this order check is intended, although there is
2463 * relaxed order check in next check. The reason is that
2468 if (order >= pageblock_order)
2471 if (order >= pageblock_order / 2 ||
2518 * This function implements actual steal behaviour. If order is large enough,
2602 * Check whether there is a suitable fallback freepage with requested order.
2607 int find_suitable_fallback(struct free_area *area, unsigned int order,
2625 if (can_steal_fallback(order, migratetype))
2639 * Reserve a pageblock for exclusive use of high-order atomic allocations if
2640 * there are no empty page blocks that contain a page with a suitable order
2677 * potentially hurts the reliability of high-order allocations when under
2692 int order;
2706 for (order = 0; order < MAX_ORDER; order++) {
2707 struct free_area *area = &(zone->free_area[order]);
2762 * The use of signed ints for order and current_order is a deliberate
2767 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
2772 int min_order = order;
2807 && current_order > order)
2816 for (current_order = order; current_order < MAX_ORDER;
2837 trace_mm_page_alloc_extfrag(page, order, current_order,
2845 __rmqueue_with_cma_reuse(struct zone *zone, unsigned int order,
2850 page = __rmqueue_smallest(zone, order, migratetype);
2855 page = __rmqueue_smallest(zone, order, migratetype);
2859 __rmqueue_fallback(zone, order, migratetype, alloc_flags))
2870 __rmqueue(struct zone *zone, unsigned int order, int migratetype,
2876 page = __rmqueue_with_cma_reuse(zone, order, migratetype, alloc_flags);
2889 page = __rmqueue_cma_fallback(zone, order);
2895 page = __rmqueue_smallest(zone, order, migratetype);
2898 page = __rmqueue_cma_fallback(zone, order);
2900 if (!page && __rmqueue_fallback(zone, order, migratetype,
2906 trace_mm_page_alloc_zone_locked(page, order, migratetype);
2915 static int rmqueue_bulk(struct zone *zone, unsigned int order,
2923 struct page *page = __rmqueue(zone, order, migratetype,
2933 * physical page order. The page is added to the tail of
2937 * head, thus also in the physical page order. This is useful
2945 -(1 << order));
2954 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
3144 unsigned int order, t;
3169 for_each_migratetype_order(order, t) {
3171 &zone->free_area[order].free_list[t], lru) {
3175 for (i = 0; i < (1UL << order); i++) {
3235 * Free a 0-order page
3251 * Free a list of 0-order pages
3289 * split_page takes a non-compound higher-order page, and splits it into
3290 * n (1<<order) sub-pages: page[0..n]
3296 void split_page(struct page *page, unsigned int order)
3303 for (i = 1; i < (1 << order); i++)
3305 split_page_owner(page, 1 << order);
3306 split_page_memcg(page, 1 << order);
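
split_page() turns a non-compound order-n allocation into 1 << n independently freeable order-0 pages by giving every tail base page its own reference (alongside the page_owner and memcg splits above). A rough model with a plain refcount array; mock_split_page() and the array are illustrative, not kernel interfaces.

#include <stdio.h>

#define NPAGES 8

static int refcount[NPAGES];

/* Give each of the 1 << order base pages its own reference so every one
 * of them can be freed on its own afterwards. */
static void mock_split_page(unsigned int first, unsigned int order)
{
	refcount[first] = 1;                      /* head page: the caller's reference */
	for (unsigned int i = 1; i < (1u << order); i++)
		refcount[first + i] = 1;          /* set_page_refcounted() analogue */
}

int main(void)
{
	mock_split_page(0, 3);
	for (int i = 0; i < NPAGES; i++)
		printf("page %d refcount %d\n", i, refcount[i]);
	return 0;
}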
3310 int __isolate_free_page(struct page *page, unsigned int order)
3324 * emulate a high-order watermark check with a raised order-0
3325 * watermark, because we already know our high-order page
3328 watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
3332 __mod_zone_freepage_state(zone, -(1UL << order), mt);
3337 del_page_from_free_list(page, zone, order);
3343 if (order >= pageblock_order - 1) {
3344 struct page *endpage = page + (1 << order) - 1;
3355 return 1UL << order;
3361 * @order: Order of the isolated page
3367 void __putback_isolated_page(struct page *page, unsigned int order, int mt)
3375 __free_one_page(page, page_to_pfn(page), zone, order, mt,
3454 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
3458 struct zone *zone, unsigned int order,
3465 if (likely(order == 0)) {
3481 * allocate greater than order-1 page units with __GFP_NOFAIL.
3483 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
3489 * order-0 request can reach here when the pcplist is skipped
3491 * reserved for high-order atomic allocation, so order-0
3494 if (order > 0 && alloc_flags & ALLOC_HARDER) {
3495 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
3497 trace_mm_page_alloc_zone_locked(page, order, migratetype);
3500 page = __rmqueue(zone, order, migratetype, alloc_flags);
3501 } while (page && check_new_pages(page, order));
3505 __mod_zone_freepage_state(zone, -(1 << order),
3508 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
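
rmqueue() serves order-0 requests from the per-CPU pcplists and falls back to the buddy free lists under the zone lock for higher orders (including the MIGRATE_HIGHATOMIC reserve above), with rmqueue_bulk() refilling a pcplist one batch at a time. A toy cache with batch refill in that spirit; the names and BATCH size are made up for the demo and the real pcplists are per-CPU structures protected by interrupt disabling, not shown here.

#include <stdio.h>

#define BATCH 4

static long next_backing_pfn = 1000;   /* stands in for the buddy allocator */
static long pcp[BATCH];
static int  pcp_count;

static long alloc_order0(void)
{
	if (pcp_count == 0) {
		/* Batch refill from the slower backing pool. */
		for (int i = 0; i < BATCH; i++)
			pcp[pcp_count++] = next_backing_pfn++;
	}
	return pcp[--pcp_count];   /* fast path: no shared-lock round trip */
}

int main(void)
{
	for (int i = 0; i < 6; i++) {
		long pfn = alloc_order0();

		printf("got pfn %ld (cache now holds %d)\n", pfn, pcp_count);
	}
	return 0;
}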
3548 static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3550 if (order < fail_page_alloc.min_order)
3560 return should_fail(&fail_page_alloc.attr, 1 << order);
3577 debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order);
3588 static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3595 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3597 return __should_fail_alloc_page(gfp_mask, order);
3602 unsigned int order, unsigned int alloc_flags)
3605 long unusable_free = (1 << order) - 1;
3625 * Return true if free base pages are above 'mark'. For high-order checks it
3626 * will return true if the order-0 watermark is reached and there is at least
3630 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3639 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);
3658 * Check watermarks for an order-0 allocation request. If these
3659 * are not met, then a high-order request also cannot go ahead
3665 /* If this is an order-0 request then the watermark is fine */
3666 if (!order)
3669 /* For a high-order request, check at least one suitable page is free */
3670 for (o = order; o < MAX_ORDER; o++) {
3694 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3697 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
3701 static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
3710 * Fast check for order-0 only. If this fails then the reserves
3713 if (!order) {
3726 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
3730 * Ignore watermark boosting for GFP_ATOMIC order-0 allocations
3735 if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost
3738 return __zone_watermark_ok(z, order, mark, highest_zoneidx,
3745 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
3753 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0,
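
__zone_watermark_ok() first treats (1 << order) - 1 of the free pages as unusable for this request, tests the resulting count against the mark plus the lowmem reserve, and then, for order > 0, requires at least one free block of a suitable order. A simplified standalone version of that shape; it ignores migratetypes, the highatomic/CMA carve-outs, and ALLOC_* flag adjustments.

#include <stdio.h>
#include <stdbool.h>

#define DEMO_MAX_ORDER 11

static bool watermark_ok(unsigned int order, long mark, long lowmem_reserve,
			 long free_pages, const long nr_free[DEMO_MAX_ORDER])
{
	/* Discount free pages that cannot serve this request. */
	free_pages -= (1L << order) - 1;

	if (free_pages <= mark + lowmem_reserve)
		return false;                    /* order-0 watermark not met */
	if (order == 0)
		return true;

	/* For a high-order request, some block of at least this order must exist. */
	for (unsigned int o = order; o < DEMO_MAX_ORDER; o++)
		if (nr_free[o] > 0)
			return true;
	return false;
}

int main(void)
{
	long nr_free[DEMO_MAX_ORDER] = { [0] = 512, [3] = 2 };
	long free_pages = 512 + 2 * (1L << 3);

	printf("order 3: %s\n", watermark_ok(3, 128, 0, free_pages, nr_free) ? "ok" : "no");
	printf("order 5: %s\n", watermark_ok(5, 128, 0, free_pages, nr_free) ? "ok" : "no");
	return 0;
}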
3829 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
3899 if (!zone_watermark_fast(zone, order, mark,
3910 if (_deferred_grow_zone(zone, order))
3923 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
3933 if (zone_watermark_ok(zone, order, mark,
3942 page = rmqueue(ac->preferred_zoneref->zone, zone, order,
3945 prep_new_page(page, order, gfp_mask, alloc_flags);
3948 * If this is a high-order atomic allocation then check
3951 if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
3952 reserve_highatomic_pageblock(page, zone, order);
3959 if (_deferred_grow_zone(zone, order))
4023 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
4029 page = get_page_from_freelist(gfp_mask, order,
4036 page = get_page_from_freelist(gfp_mask, order,
4043 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
4051 .order = order,
4075 ~__GFP_DIRECT_RECLAIM, order,
4083 /* The OOM killer will not help higher order allocs */
4084 if (order > PAGE_ALLOC_COSTLY_ORDER)
4120 page = __alloc_pages_cpuset_fallback(gfp_mask, order,
4135 /* Try memory compaction for high-order allocations before reclaim */
4137 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4145 if (!order)
4151 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
4165 prep_new_page(page, order, gfp_mask, alloc_flags);
4169 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4175 compaction_defer_reset(zone, order, true);
4192 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
4203 if (!order)
4218 * compaction was skipped because there are not enough order-0 pages
4222 ret = compaction_zonelist_suitable(ac, order, alloc_flags);
4244 if (order > PAGE_ALLOC_COSTLY_ORDER)
4256 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
4265 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
4270 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4279 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
4287 if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
4293 * Let's give them a good hope and keep retrying while the order-0
4383 __perform_reclaim(gfp_t gfp_mask, unsigned int order,
4397 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
4411 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
4418 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
4423 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4446 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
4457 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
4557 should_reclaim_retry(gfp_t gfp_mask, unsigned order,
4567 * their order will become available due to high fragmentation so
4570 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
4604 wmark = __zone_watermark_ok(zone, order, min_wmark,
4606 trace_reclaim_retry_zone(z, order, reclaimable,
4681 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
4685 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
4734 wake_all_kswapds(order, gfp_mask, ac);
4740 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4747 * movable high-order allocations, do that as well, as compaction will
4755 (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
4757 page = __alloc_pages_direct_compact(gfp_mask, order,
4773 * order, fail immediately unless the allocator has
4779 * bursty high order allocations,
4802 wake_all_kswapds(order, gfp_mask, ac);
4820 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4836 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
4845 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
4855 * Do not retry costly high order allocations unless they are
4861 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
4866 * It doesn't make any sense to retry for the compaction if the order-0
4872 should_compact_retry(ac, order, alloc_flags,
4887 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
4937 WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
4945 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
4954 "page allocation failure: order:%u", order);
4959 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
4991 if (should_fail_alloc_page(gfp_mask, order))
5014 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
5023 * There are several places where we assume that the order value is sane
5026 if (unlikely(order >= MAX_ORDER)) {
5033 if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
5043 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
5062 page = __alloc_pages_slowpath(alloc_mask, order, &ac);
5066 unlikely(__memcg_kmem_charge_page(page, gfp_mask, order) != 0)) {
5067 __free_pages(page, order);
5071 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
5082 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
5086 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
5099 static inline void free_the_page(struct page *page, unsigned int order)
5101 if (order == 0) /* Via pcp? */
5104 __free_pages_ok(page, order, FPI_NONE);
5107 void __free_pages(struct page *page, unsigned int order)
5113 free_the_page(page, order);
5115 while (order-- > 0)
5116 free_the_page(page + (1 << order), order);
5120 void free_pages(unsigned long addr, unsigned int order)
5124 __free_pages(virt_to_page((void *)addr), order);
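
When __free_pages() drops a reference that was not the last one, it cannot return the whole order-N block, so the loop above walks the order downward and frees the upper sub-block at offset 1 << order each time, leaving base page 0 to whoever still holds a reference. A demo of the offsets that loop visits for an order-3 block.

#include <stdio.h>

int main(void)
{
	unsigned int order = 3;

	/* Same shape as "while (order-- > 0) free_the_page(page + (1 << order), order)". */
	while (order-- > 0)
		printf("free order-%u sub-block at page offset %u\n",
		       order, 1u << order);
	printf("page offset 0 stays with the remaining reference holder\n");
	return 0;
}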
5133 * within a 0 or higher order page. Multiple fragments within that page
5161 int order = get_order(nc->size);
5164 unsigned int deta = 1U << (unsigned int)order;
5166 for (i = 0; i < (1 << order); i++) {
5268 * Frees a page fragment allocated out of either a compound or order 0 page.
5287 static void *make_alloc_exact(unsigned long addr, unsigned int order,
5291 unsigned long alloc_end = addr + (PAGE_SIZE << order);
5294 split_page(virt_to_page((void *)addr), order);
5320 unsigned int order = get_order(size);
5326 addr = __get_free_pages(gfp_mask, order);
5327 return make_alloc_exact(addr, order, size);
5345 unsigned int order = get_order(size);
5351 p = alloc_pages_node(nid, gfp_mask, order);
5354 return make_alloc_exact((unsigned long)page_address(p), order, size);
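
alloc_pages_exact() rounds the request up to a whole order with get_order(), splits the block with split_page(), and make_alloc_exact() frees the base pages past the requested size. A sketch of that size arithmetic only, assuming 4 KiB pages; demo_get_order() mimics get_order() for the example and is not the kernel helper.

#include <stdio.h>

#define DEMO_PAGE_SHIFT 12
#define DEMO_PAGE_SIZE  (1UL << DEMO_PAGE_SHIFT)

/* Smallest order whose block covers 'size' bytes. */
static unsigned int demo_get_order(unsigned long size)
{
	unsigned int order = 0;

	while ((DEMO_PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long size = 5 * DEMO_PAGE_SIZE;       /* 20 KiB request */
	unsigned int order = demo_get_order(size);
	unsigned long used = (size + DEMO_PAGE_SIZE - 1) >> DEMO_PAGE_SHIFT;
	unsigned long total = 1UL << order;

	printf("order %u allocation = %lu pages, %lu used, %lu freed back\n",
	       order, total, used, total - used);
	return 0;
}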
5734 unsigned int order;
5744 for (order = 0; order < MAX_ORDER; order++) {
5745 struct free_area *area = &zone->free_area[order];
5748 nr[order] = area->nr_free;
5749 total += nr[order] << order;
5751 types[order] = 0;
5754 types[order] |= 1 << type;
5758 for (order = 0; order < MAX_ORDER; order++) {
5760 nr[order], K(1UL) << order);
5761 if (nr[order])
5762 show_migration_types(types[order]);
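
The free-area report above sums nr_free per order, each order-o block contributing 1 << o base pages. The same accounting as a standalone loop, printed in KiB under an assumed 4 KiB page size; the sample counts are arbitrary.

#include <stdio.h>

#define DEMO_MAX_ORDER 11

int main(void)
{
	unsigned long nr[DEMO_MAX_ORDER] = { [0] = 100, [2] = 10, [9] = 1 };
	unsigned long total_pages = 0;

	for (unsigned int order = 0; order < DEMO_MAX_ORDER; order++) {
		total_pages += nr[order] << order;      /* blocks -> base pages */
		if (nr[order])
			printf("order %2u: %4lu blocks = %6lu KiB\n",
			       order, nr[order], (nr[order] << order) * 4);
	}
	printf("total free: %lu pages (%lu KiB)\n", total_pages, total_pages * 4);
	return 0;
}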
6131 * needs the percpu allocator in order to allocate its pagesets
6341 unsigned int order, t;
6342 for_each_migratetype_order(order, t) {
6343 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
6344 zone->free_area[order].nr_free = 0;
6512 * fragmented and becoming unavailable for high-order allocations.
6538 /* Update high, then batch, in order */
6737 * zones within a node are in order of monotonically increasing memory addresses
6975 unsigned int order;
6982 order = HUGETLB_PAGE_ORDER;
6984 order = MAX_ORDER - 1;
6987 * Assume the largest contiguous order of interest is a huge page.
6991 pageblock_order = order;
7194 * aligned but the node_mem_map endpoints must be in order
7599 * such cases we allow max_zone_pfn to be sorted in descending order
8387 /* Make sure we've got at least a 0-order allocation.. */
8441 pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
8552 * in MEM_GOING_OFFLINE in order to indicate that these pages
8662 unsigned int order;
8667 .order = -1,
8679 * MIGRATE_ISOLATE. Because pageblock and max order pages may
8730 * page allocator holds, ie. they can be part of higher order
8740 order = 0;
8743 if (++order >= MAX_ORDER) {
8747 outer_start &= ~0UL << order;
8751 order = buddy_order(pfn_to_page(outer_start));
8754 * outer_start page could be small order buddy page and
8759 if (outer_start + (1UL << order) <= start)
8947 unsigned int order;
8976 order = buddy_order(page);
8977 del_page_from_free_list(page, zone, order);
8978 pfn += (1 << order);
8989 unsigned int order;
8992 for (order = 0; order < MAX_ORDER; order++) {
8993 struct page *page_head = page - (pfn & ((1 << order) - 1));
8995 if (PageBuddy(page_head) && buddy_order(page_head) >= order)
9000 return order < MAX_ORDER;
9005 * Break down a higher-order page into sub-pages, and keep our target out of
9046 unsigned int order;
9050 for (order = 0; order < MAX_ORDER; order++) {
9051 struct page *page_head = page - (pfn & ((1 << order) - 1));
9054 if (PageBuddy(page_head) && page_order >= order) {
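
Both walks above (the is_free_buddy_page()-style check and the break-down path) locate, for each order, the head of the aligned block containing a pfn via page - (pfn & ((1 << order) - 1)), and then test whether that head is a free buddy of at least that order. The alignment step in isolation, with a plain integer standing in for the pfn:

#include <stdio.h>

int main(void)
{
	unsigned long pfn = 0x12f7;          /* arbitrary pfn inside some block */

	for (unsigned int order = 0; order <= 4; order++) {
		/* Round the pfn down to a (1 << order) boundary: that is the
		 * candidate block head the walk inspects at this order. */
		unsigned long head = pfn - (pfn & ((1UL << order) - 1));

		printf("order %u candidate block head: 0x%lx\n", order, head);
	}
	return 0;
}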