Lines matching refs:order in mm/page_alloc.c (each entry gives the source line number followed by the matching line)
233 static void __free_pages_ok(struct page *page, unsigned int order,
310 static bool page_contains_unaccepted(struct page *page, unsigned int order);
311 static void accept_page(struct page *page, unsigned int order);
312 static bool try_to_accept_memory(struct zone *zone, unsigned int order);
338 _deferred_grow_zone(struct zone *zone, unsigned int order)
340 return deferred_grow_zone(zone, order);
527 static inline unsigned int order_to_pindex(int migratetype, int order)
530 if (order > PAGE_ALLOC_COSTLY_ORDER) {
531 VM_BUG_ON(order != pageblock_order);
535 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
538 return (MIGRATE_PCPTYPES * order) + migratetype;
543 int order = pindex / MIGRATE_PCPTYPES;
547 order = pageblock_order;
549 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
552 return order;
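
The two helpers above map an (order, migratetype) pair to a pcplist bucket index and back. A minimal user-space model of that mapping, leaving out the THP/pageblock_order special case guarded by the VM_BUG_ON lines; MIGRATE_PCPTYPES here is just an illustrative constant:

#include <stdio.h>

#define MIGRATE_PCPTYPES 3      /* illustrative: number of pcp migratetypes */

static int order_to_pindex(int migratetype, int order)
{
    return MIGRATE_PCPTYPES * order + migratetype;
}

static int pindex_to_order(int pindex)
{
    return pindex / MIGRATE_PCPTYPES;
}

int main(void)
{
    int pindex = order_to_pindex(2 /* migratetype */, 1 /* order */);

    printf("pindex %d -> order %d\n", pindex, pindex_to_order(pindex));
    return 0;
}
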
555 static inline bool pcp_allowed_order(unsigned int order)
557 if (order <= PAGE_ALLOC_COSTLY_ORDER)
560 if (order == pageblock_order)
566 static inline void free_the_page(struct page *page, unsigned int order)
568 if (pcp_allowed_order(order)) /* Via pcp? */
569 free_unref_page(page, order);
571 __free_pages_ok(page, order, FPI_NONE);
575 * Higher-order pages are called "compound pages". They are structured thusly:
582 * The first tail page's ->compound_order holds the order of allocation.
583 * This usage means that zero-order pages may not be compound.
586 void prep_compound_page(struct page *page, unsigned int order)
589 int nr_pages = 1 << order;
595 prep_compound_head(page, order);
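
As the comment block above says, a compound page remembers its allocation order once for the whole block, and an order-k allocation always covers 1 << k base pages. A tiny standalone illustration of that arithmetic, assuming 4 KiB base pages (the struct is mine, not a kernel type):

#include <stdio.h>

#define PAGE_SHIFT 12                   /* assume 4 KiB base pages */

struct compound_info {
    unsigned int order;                 /* order of the allocation, > 0 */
};

static unsigned long compound_nr_pages(const struct compound_info *ci)
{
    return 1UL << ci->order;
}

static unsigned long compound_bytes(const struct compound_info *ci)
{
    return compound_nr_pages(ci) << PAGE_SHIFT;
}

int main(void)
{
    struct compound_info ci = { .order = 9 };   /* a 2 MiB THP-sized block */

    printf("order %u -> %lu pages, %lu bytes\n",
           ci.order, compound_nr_pages(&ci), compound_bytes(&ci));
    return 0;
}
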
612 static inline void set_buddy_order(struct page *page, unsigned int order)
614 set_page_private(page, order);
631 int order, int migratetype)
633 if (!capc || order != capc->cc->order)
642 * Do not let lower order allocations pollute a movable pageblock.
645 * have trouble finding a high-order free page.
647 if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
662 int order, int migratetype)
670 unsigned int order, int migratetype)
672 struct free_area *area = &zone->free_area[order];
680 unsigned int order, int migratetype)
682 struct free_area *area = &zone->free_area[order];
694 unsigned int order, int migratetype)
696 struct free_area *area = &zone->free_area[order];
702 unsigned int order)
711 zone->free_area[order].nr_free--;
723 * of the next-highest order is free. If it is, it's possible
727 * as a higher order page
731 struct page *page, unsigned int order)
736 if (order >= MAX_ORDER - 1)
742 return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1,
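
buddy_merge_likely() and find_buddy_page_pfn() boil down to pfn bit arithmetic: the buddy of a (pfn, order) block is the pfn with bit `order` flipped, and the merged parent block starts at the pfn with that bit cleared. A standalone sketch (helper names are mine):

#include <stdio.h>

static unsigned long buddy_pfn(unsigned long pfn, unsigned int order)
{
    return pfn ^ (1UL << order);        /* sibling block at the same order */
}

static unsigned long parent_pfn(unsigned long pfn, unsigned int order)
{
    return pfn & ~(1UL << order);       /* start of the order+1 block */
}

int main(void)
{
    unsigned long pfn = 0x1234c0;
    unsigned int order = 3;

    printf("buddy of %#lx at order %u is %#lx, parent block starts at %#lx\n",
           pfn, order, buddy_pfn(pfn, order), parent_pfn(pfn, order));
    return 0;
}
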
759 * free pages of length of (1 << order) and marked with PageBuddy.
760 * Page's order is recorded in page_private(page) field.
772 struct zone *zone, unsigned int order,
786 __mod_zone_freepage_state(zone, 1 << order, migratetype);
788 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
791 while (order < MAX_ORDER) {
792 if (compaction_capture(capc, page, order, migratetype)) {
793 __mod_zone_freepage_state(zone, -(1 << order),
798 buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
802 if (unlikely(order >= pageblock_order)) {
819 * merge with it and move up one order.
822 clear_page_guard(zone, buddy, order, migratetype);
824 del_page_from_free_list(buddy, zone, order);
828 order++;
832 set_buddy_order(page, order);
836 else if (is_shuffle_order(order))
839 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);
842 add_to_free_list_tail(page, zone, order, migratetype);
844 add_to_free_list(page, zone, order, migratetype);
848 page_reporting_notify_free(order);
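
A self-contained toy model of the coalescing loop in __free_one_page() above: keep absorbing the buddy and moving up one order while the buddy is itself free at the same order. The flat table stands in for the per-zone free_area lists and is purely illustrative:

#include <stdbool.h>
#include <stdio.h>

#define MAX_ORDER  10
#define MAX_BLOCKS 64

struct free_block { unsigned long pfn; unsigned int order; bool used; };
static struct free_block table[MAX_BLOCKS];

static int find_block(unsigned long pfn, unsigned int order)
{
    for (int i = 0; i < MAX_BLOCKS; i++)
        if (table[i].used && table[i].pfn == pfn && table[i].order == order)
            return i;
    return -1;
}

static void insert_block(unsigned long pfn, unsigned int order)
{
    for (int i = 0; i < MAX_BLOCKS; i++)
        if (!table[i].used) {
            table[i] = (struct free_block){ pfn, order, true };
            return;
        }
}

static void free_one_block(unsigned long pfn, unsigned int order)
{
    while (order < MAX_ORDER) {
        unsigned long buddy = pfn ^ (1UL << order);
        int idx = find_block(buddy, order);

        if (idx < 0)
            break;                      /* buddy not free: stop merging */
        table[idx].used = false;        /* "del_page_from_free_list" */
        pfn &= buddy;                   /* merged block starts at the lower pfn */
        order++;                        /* and is one order larger */
    }
    insert_block(pfn, order);           /* "add_to_free_list" */
}

int main(void)
{
    insert_block(8, 3);                 /* pfn 8, order 3 is already free */
    free_one_block(0, 3);               /* freeing its buddy merges to order 4 */

    for (int i = 0; i < MAX_BLOCKS; i++)
        if (table[i].used)
            printf("free block: pfn %lu order %u\n",
                   table[i].pfn, table[i].order);
    return 0;
}
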
854 * @order: the order of the page
865 unsigned int order, unsigned long split_pfn_offset)
880 if (!PageBuddy(free_page) || buddy_order(free_page) != order) {
887 __mod_zone_freepage_state(zone, -(1UL << order), mt);
889 del_page_from_free_list(free_page, zone, order);
891 pfn < free_page_pfn + (1UL << order);) {
895 pfn ? __ffs(pfn) : order,
903 split_pfn_offset = (1UL << order) - (pfn - free_page_pfn);
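
The split_free_page() lines above choose, at every step, the largest power-of-two piece that is both naturally aligned to the current pfn (the __ffs term) and does not cross the requested split offset (the __fls term). A standalone model of that chunking with made-up pfn and offset values:

#include <stdio.h>

static unsigned int ffs_order(unsigned long pfn, unsigned int order)
{
    return pfn ? (unsigned int)__builtin_ctzl(pfn) : order;   /* __ffs() analogue */
}

static unsigned int fls_order(unsigned long n)
{
    return 63 - (unsigned int)__builtin_clzl(n);              /* __fls() analogue */
}

int main(void)
{
    unsigned int order = 6;                 /* 64-page free block */
    unsigned long base = 192;               /* starting pfn, order-aligned */
    unsigned long split = 40;               /* split offset inside the block */
    unsigned long pfn = base;

    while (pfn < base + (1UL << order)) {
        unsigned int piece = ffs_order(pfn, order);

        if (fls_order(split) < piece)
            piece = fls_order(split);       /* don't cross the split point */

        printf("free pfn %lu as order %u\n", pfn, piece);
        pfn += 1UL << piece;
        split -= 1UL << piece;
        if (!split)                         /* past the split: free the rest */
            split = (1UL << order) - (pfn - base);
    }
    return 0;
}
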
1083 unsigned int order, fpi_t fpi_flags)
1091 trace_mm_page_free(page, order);
1092 kmsan_free_page(page, order);
1094 if (unlikely(PageHWPoison(page)) && !order) {
1100 __memcg_kmem_uncharge_page(page, order);
1101 reset_page_owner(page, order);
1102 page_table_check_free(page, order);
1108 * avoid checking PageCompound for order-0 pages.
1110 if (unlikely(order)) {
1114 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
1118 for (i = 1; i < (1 << order); i++) {
1133 __memcg_kmem_uncharge_page(page, order);
1143 reset_page_owner(page, order);
1144 page_table_check_free(page, order);
1148 PAGE_SIZE << order);
1150 PAGE_SIZE << order);
1153 kernel_poison_pages(page, 1 << order);
1164 kasan_poison_pages(page, order, init);
1171 kernel_init_pages(page, 1 << order);
1178 arch_free_page(page, order);
1180 debug_pagealloc_unmap_pages(page, 1 << order);
1195 unsigned int order;
1222 order = pindex_to_order(pindex);
1223 nr_pages = 1 << order;
1241 __free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE);
1242 trace_mm_page_pcpu_drain(page, order, mt);
1251 unsigned int order,
1261 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
1265 static void __free_pages_ok(struct page *page, unsigned int order,
1273 if (!free_pages_prepare(page, order, fpi_flags))
1288 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
1291 __count_vm_events(PGFREE, 1 << order);
1294 void __free_pages_core(struct page *page, unsigned int order)
1296 unsigned int nr_pages = 1 << order;
1316 if (page_contains_unaccepted(page, order)) {
1317 if (order == MAX_ORDER && __free_unaccepted(page))
1320 accept_page(page, order);
1327 __free_pages_ok(page, order, FPI_TO_TAIL);
1347 * of the pfn range). For example, if the pageblock order is MAX_ORDER, which
1383 * The order of subdivision here is critical for the IO subsystem.
1384 * Please do not alter this order without good reasons and regression
1386 * the order in which smaller blocks are delivered depends on the order
1388 * influencing the order in which pages are delivered to the IO
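
The subdivision the comment above warns about is implemented by expand(): an order-`high` block taken for an order-`low` request is halved repeatedly, and the upper half of each split goes back on the free list at its reduced order. A minimal sketch in which printf stands in for the free-list insertion:

#include <stdio.h>

static void expand_block(unsigned long pfn, unsigned int low, unsigned int high)
{
    unsigned long size = 1UL << high;

    while (high > low) {
        high--;
        size >>= 1;
        printf("return pfn %lu as a free order-%u block\n", pfn + size, high);
    }
    printf("hand out pfn %lu as the order-%u allocation\n", pfn, low);
}

int main(void)
{
    expand_block(0, 2, 5);      /* satisfy an order-2 request from an order-5 block */
    return 0;
}
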
1445 static inline bool check_new_pages(struct page *page, unsigned int order)
1448 for (int i = 0; i < (1 << order); i++) {
1487 inline void post_alloc_hook(struct page *page, unsigned int order,
1498 arch_alloc_page(page, order);
1499 debug_pagealloc_map_pages(page, 1 << order);
1506 kernel_unpoison_pages(page, 1 << order);
1520 for (i = 0; i != 1 << order; ++i)
1527 kasan_unpoison_pages(page, order, init)) {
1536 for (i = 0; i != 1 << order; ++i)
1541 kernel_init_pages(page, 1 << order);
1543 set_page_owner(page, order, gfp_flags);
1544 page_table_check_alloc(page, order);
1547 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
1550 post_alloc_hook(page, order, gfp_flags);
1552 if (order && (gfp_flags & __GFP_COMP))
1553 prep_compound_page(page, order);
1572 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
1580 for (current_order = order; current_order <= MAX_ORDER; ++current_order) {
1586 expand(zone, page, order, current_order, migratetype);
1588 trace_mm_page_alloc_zone_locked(page, order, migratetype,
1589 pcp_allowed_order(order) &&
1599 * This array describes the order lists are fallen back to when
1612 unsigned int order)
1614 return __rmqueue_smallest(zone, order, MIGRATE_CMA);
1618 unsigned int order) { return NULL; }
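
The fallback array the comment above refers to lists, for each migratetype, which other free lists to raid when the requested one is empty. The enum and table values below are a simplified illustration of that shape, not the kernel's exact definition:

#include <stdio.h>

enum mt { MT_UNMOVABLE, MT_MOVABLE, MT_RECLAIMABLE, MT_TYPES };

static const enum mt fallbacks[MT_TYPES][MT_TYPES - 1] = {
    [MT_UNMOVABLE]   = { MT_RECLAIMABLE, MT_MOVABLE   },
    [MT_MOVABLE]     = { MT_RECLAIMABLE, MT_UNMOVABLE },
    [MT_RECLAIMABLE] = { MT_UNMOVABLE,   MT_MOVABLE   },
};

static const char * const mt_name[] = { "unmovable", "movable", "reclaimable" };

int main(void)
{
    enum mt want = MT_UNMOVABLE;

    printf("order of attempts for %s:", mt_name[want]);
    printf(" %s", mt_name[want]);                   /* the requested list first */
    for (int i = 0; i < MT_TYPES - 1; i++)
        printf(" -> %s", mt_name[fallbacks[want][i]]);
    printf("\n");
    return 0;
}
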
1632 unsigned int order;
1654 order = buddy_order(page);
1655 move_to_free_list(page, zone, order, migratetype);
1656 pfn += 1 << order;
1657 pages_moved += 1 << order;
1708 static bool can_steal_fallback(unsigned int order, int start_mt)
1711 * Leaving this order check is intended, although there is
1712 * relaxed order check in next check. The reason is that
1717 if (order >= pageblock_order)
1720 if (order >= pageblock_order / 2 ||
1767 * This function implements actual steal behaviour. If order is large enough,
1850 * Check whether there is a suitable fallback freepage with requested order.
1855 int find_suitable_fallback(struct free_area *area, unsigned int order,
1870 if (can_steal_fallback(order, migratetype))
1884 * Reserve a pageblock for exclusive use of high-order atomic allocations if
1885 * there are no empty page blocks that contain a page with a suitable order
1921 * potentially hurts the reliability of high-order allocations when under
1936 int order;
1950 for (order = 0; order <= MAX_ORDER; order++) {
1951 struct free_area *area = &(zone->free_area[order]);
2006 * The use of signed ints for order and current_order is a deliberate
2011 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
2016 int min_order = order;
2026 if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT)
2051 && current_order > order)
2060 for (current_order = order; current_order <= MAX_ORDER;
2081 trace_mm_page_alloc_extfrag(page, order, current_order,
2089 __rmqueue_with_cma_reuse(struct zone *zone, unsigned int order,
2094 page = __rmqueue_smallest(zone, order, migratetype);
2099 page = __rmqueue_smallest(zone, order, migratetype);
2103 __rmqueue_fallback(zone, order, migratetype, alloc_flags))
2114 __rmqueue(struct zone *zone, unsigned int order, int migratetype,
2120 page = __rmqueue_with_cma_reuse(zone, order, migratetype, alloc_flags);
2134 page = __rmqueue_cma_fallback(zone, order);
2140 page = __rmqueue_smallest(zone, order, migratetype);
2143 page = __rmqueue_cma_fallback(zone, order);
2145 if (!page && __rmqueue_fallback(zone, order, migratetype,
2157 static int rmqueue_bulk(struct zone *zone, unsigned int order,
2166 struct page *page = __rmqueue(zone, order, migratetype,
2173 * physical page order. The page is added to the tail of
2177 * head, thus also in the physical page order. This is useful
2184 -(1 << order));
2187 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
2342 unsigned int order)
2346 if (!free_pages_prepare(page, order, FPI_NONE))
2359 /* Free everything if batch freeing high-order pages. */
2403 unsigned int order)
2409 __count_vm_events(PGFREE, 1 << order);
2410 pindex = order_to_pindex(migratetype, order);
2412 pcp->count += 1 << order;
2415 * As high-order pages other than THP's stored on PCP can contribute
2420 free_high = (pcp->free_factor && order && order <= PAGE_ALLOC_COSTLY_ORDER);
2431 void free_unref_page(struct page *page, unsigned int order)
2439 if (!free_unref_page_prepare(page, pfn, order))
2452 free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE);
2462 free_unref_page_commit(zone, pcp, page, pcpmigratetype, order);
2465 free_one_page(zone, page, pfn, order, migratetype, FPI_NONE);
2471 * Free a list of 0-order pages
2556 * split_page takes a non-compound higher-order page, and splits it into
2557 * n (1<<order) sub-pages: page[0..n]
2563 void split_page(struct page *page, unsigned int order)
2570 for (i = 1; i < (1 << order); i++)
2572 split_page_owner(page, 1 << order);
2573 split_page_memcg(page, 1 << order);
2577 int __isolate_free_page(struct page *page, unsigned int order)
2586 * emulate a high-order watermark check with a raised order-0
2587 * watermark, because we already know our high-order page
2590 watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
2594 __mod_zone_freepage_state(zone, -(1UL << order), mt);
2597 del_page_from_free_list(page, zone, order);
2603 if (order >= pageblock_order - 1) {
2604 struct page *endpage = page + (1 << order) - 1;
2617 return 1UL << order;
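
The watermark emulation in __isolate_free_page() above works because the high-order page being isolated is already known to be free: it is enough to check that removing 1 << order pages still leaves the zone above its minimum order-0 watermark. A worked example with made-up numbers:

#include <stdbool.h>
#include <stdio.h>

static bool can_isolate(unsigned long free_pages, unsigned long wmark_min,
                        unsigned int order)
{
    unsigned long mark = wmark_min + (1UL << order);   /* raised order-0 mark */

    return free_pages >= mark;
}

int main(void)
{
    printf("%d\n", can_isolate(5000, 4096, 9));   /* 5000 >= 4096 + 512 -> 1 */
    printf("%d\n", can_isolate(4500, 4096, 9));   /* 4500 <  4608       -> 0 */
    return 0;
}
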
2623 * @order: Order of the isolated page
2629 void __putback_isolated_page(struct page *page, unsigned int order, int mt)
2637 __free_one_page(page, page_to_pfn(page), zone, order, mt,
2669 unsigned int order, unsigned int alloc_flags,
2679 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
2681 page = __rmqueue(zone, order, migratetype, alloc_flags);
2686 * failing a high-order atomic allocation in the
2690 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
2697 __mod_zone_freepage_state(zone, -(1 << order),
2700 } while (check_new_pages(page, order));
2702 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
2710 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
2724 * Scale batch relative to order if batch implies
2731 batch = max(batch >> order, 2);
2732 alloced = rmqueue_bulk(zone, order,
2736 pcp->count += alloced << order;
2743 pcp->count -= 1 << order;
2744 } while (check_new_pages(page, order));
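
The batch scaling above keeps a high-order pcplist refill from pulling in far more memory than an order-0 one: the order-0 batch is divided by 2^order, with a floor of two blocks. A quick worked example of that arithmetic:

#include <stdio.h>

static unsigned int scaled_batch(unsigned int batch, unsigned int order)
{
    unsigned int scaled = batch >> order;

    return scaled > 2 ? scaled : 2;     /* max(batch >> order, 2) */
}

int main(void)
{
    /* A batch of 63 order-0 pages becomes 15 order-2 blocks (60 pages),
     * and never fewer than 2 blocks however large the order gets. */
    printf("order 0: %u blocks\n", scaled_batch(63, 0));
    printf("order 2: %u blocks\n", scaled_batch(63, 2));
    printf("order 5: %u blocks\n", scaled_batch(63, 5));
    return 0;
}
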
2751 struct zone *zone, unsigned int order,
2773 list = &pcp->lists[order_to_pindex(migratetype, order)];
2774 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
2778 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
2786 * Use pcplists for THP or "cheap" high-order allocations.
2798 struct zone *zone, unsigned int order,
2806 * allocate greater than order-1 page units with __GFP_NOFAIL.
2808 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
2810 if (likely(pcp_allowed_order(order))) {
2811 page = rmqueue_pcplist(preferred_zone, zone, order,
2817 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
2832 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
2834 return __should_fail_alloc_page(gfp_mask, order);
2839 unsigned int order, unsigned int alloc_flags)
2841 long unusable_free = (1 << order) - 1;
2864 * Return true if free base pages are above 'mark'. For high-order checks it
2865 * will return true of the order-0 watermark is reached and there is at least
2869 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
2877 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);
2909 * Check watermarks for an order-0 allocation request. If these
2910 * are not met, then a high-order request also cannot go ahead
2916 /* If this is an order-0 request then the watermark is fine */
2917 if (!order)
2920 /* For a high-order request, check at least one suitable page is free */
2921 for (o = order; o <= MAX_ORDER; o++) {
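
A condensed standalone model of the __zone_watermark_ok() logic above: subtract the pages that cannot satisfy this request, compare against the mark plus the lowmem reserve, and for a high-order request additionally require at least one free block at the requested order or above. The free_area counts are toy numbers:

#include <stdbool.h>
#include <stdio.h>

#define MAX_ORDER 10

static bool watermark_ok(long free_pages, long mark, long lowmem_reserve,
                         unsigned int order,
                         const unsigned long nr_free[MAX_ORDER + 1])
{
    long unusable = (1L << order) - 1;          /* the block we want doesn't count */

    if (free_pages - unusable <= mark + lowmem_reserve)
        return false;                           /* order-0 watermark not met */

    if (!order)
        return true;                            /* order-0 request: done */

    for (unsigned int o = order; o <= MAX_ORDER; o++)
        if (nr_free[o])
            return true;                        /* a suitable block exists */

    return false;                               /* enough pages, but too fragmented */
}

int main(void)
{
    unsigned long nr_free[MAX_ORDER + 1] = { 4000, 200, 50, 0 };

    printf("order 0: %d\n", watermark_ok(8000, 4096, 1024, 0, nr_free));
    printf("order 2: %d\n", watermark_ok(8000, 4096, 1024, 2, nr_free));
    printf("order 4: %d\n", watermark_ok(8000, 4096, 1024, 4, nr_free));
    return 0;
}
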
2947 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
2950 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
2954 static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
2963 * Fast check for order-0 only. If this fails then the reserves
2966 if (!order) {
2979 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
2984 * Ignore watermark boosting for __GFP_HIGH order-0 allocations
2989 if (unlikely(!order && (alloc_flags & ALLOC_MIN_RESERVE) && z->watermark_boost
2992 return __zone_watermark_ok(z, order, mark, highest_zoneidx,
2999 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
3007 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0,
3082 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
3153 if (!zone_watermark_fast(zone, order, mark,
3159 if (try_to_accept_memory(zone, order))
3169 if (_deferred_grow_zone(zone, order))
3182 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
3192 if (zone_watermark_ok(zone, order, mark,
3201 page = rmqueue(ac->preferred_zoneref->zone, zone, order,
3204 prep_new_page(page, order, gfp_mask, alloc_flags);
3207 * If this is a high-order atomic allocation then check
3216 if (try_to_accept_memory(zone, order))
3223 if (_deferred_grow_zone(zone, order))
3287 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
3293 page = get_page_from_freelist(gfp_mask, order,
3300 page = get_page_from_freelist(gfp_mask, order,
3307 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
3315 .order = order,
3339 ~__GFP_DIRECT_RECLAIM, order,
3347 /* The OOM killer will not help higher order allocs */
3348 if (order > PAGE_ALLOC_COSTLY_ORDER)
3385 page = __alloc_pages_cpuset_fallback(gfp_mask, order,
3400 /* Try memory compaction for high-order allocations before reclaim */
3402 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3410 if (!order)
3417 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
3434 prep_new_page(page, order, gfp_mask, alloc_flags);
3438 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3444 compaction_defer_reset(zone, order, true);
3461 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
3472 if (!order)
3479 * Compaction was skipped due to a lack of free order-0
3483 ret = compaction_zonelist_suitable(ac, order, alloc_flags);
3501 if (order > PAGE_ALLOC_COSTLY_ORDER)
3513 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
3522 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
3527 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3536 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
3544 if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
3550 * Let's give them a good hope and keep retrying while the order-0
3648 __perform_reclaim(gfp_t gfp_mask, unsigned int order,
3661 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
3674 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
3683 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
3688 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3707 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
3720 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
3727 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
3756 if (order > 0)
3828 should_reclaim_retry(gfp_t gfp_mask, unsigned order,
3838 * their order will become available due to high fragmentation so
3841 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
3870 wmark = __zone_watermark_ok(zone, order, min_wmark,
3872 trace_reclaim_retry_zone(z, order, reclaimable,
3933 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
3937 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
3961 alloc_flags = gfp_to_alloc_flags(gfp_mask, order);
3988 wake_all_kswapds(order, gfp_mask, ac);
3994 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4001 * movable high-order allocations, do that as well, as compaction will
4009 (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
4011 page = __alloc_pages_direct_compact(gfp_mask, order,
4027 * order, fail immediately unless the allocator has
4033 * bursty high order allocations,
4056 wake_all_kswapds(order, gfp_mask, ac);
4075 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4088 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
4094 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
4104 * Do not retry costly high order allocations unless they are
4110 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
4115 * It doesn't make any sense to retry for the compaction if the order-0
4121 should_compact_retry(ac, order, alloc_flags,
4136 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
4195 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac);
4204 "page allocation failure: order:%u", order);
4209 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
4238 if (should_fail_alloc_page(gfp_mask, order))
4258 * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array
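
A user-space sketch of the bulk-allocation contract suggested by the line above: the caller supplies an array and asks for up to nr_pages order-0 pages, already-populated slots are left alone, and the return value is the number of populated slots. The helper names and the handling of pre-filled slots are assumptions for illustration, not the kernel API:

#include <stdio.h>
#include <stdlib.h>

static void *alloc_one(void)
{
    return malloc(4096);            /* stand-in for a single order-0 page */
}

static unsigned long alloc_bulk(unsigned long nr_pages, void **array)
{
    unsigned long filled = 0;

    for (unsigned long i = 0; i < nr_pages; i++) {
        if (array[i]) {             /* already populated: leave it alone */
            filled++;
            continue;
        }
        array[i] = alloc_one();
        if (!array[i])
            break;                  /* stop on failure, return what we have */
        filled++;
    }
    return filled;
}

int main(void)
{
    void *pages[8] = { NULL };

    pages[2] = alloc_one();         /* pretend one slot was filled earlier */
    printf("populated %lu of 8 slots\n", alloc_bulk(8, pages));
    return 0;
}
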
4429 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
4438 * There are several places where we assume that the order value is sane
4441 if (WARN_ON_ONCE_GFP(order > MAX_ORDER, gfp))
4454 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
4465 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
4478 page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
4482 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
4483 __free_pages(page, order);
4487 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
4488 kmsan_alloc_page(page, order, alloc_gfp);
4494 struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
4497 struct page *page = __alloc_pages(gfp | __GFP_COMP, order,
4501 if (folio && order > 1)
4512 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
4516 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
4532 * @order: The order of the allocation.
4535 * pages. It does not check that the @order passed in matches that of
4549 void __free_pages(struct page *page, unsigned int order)
4555 free_the_page(page, order);
4557 while (order-- > 0)
4558 free_the_page(page + (1 << order), order);
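
The loop in __free_pages() above handles the case where another holder still has a speculative reference to page 0 of a non-compound high-order allocation: only that first base page must be kept, and the rest of the block is returned as progressively smaller sub-blocks. This prints which pieces the loop releases for an order-3 allocation:

#include <stdio.h>

int main(void)
{
    unsigned int order = 3;

    while (order-- > 0)
        printf("free pages [%u..%u) as an order-%u block\n",
               1U << order, 2U << order, order);
    /* pages 4-7 at order 2, 2-3 at order 1, 1 at order 0; page 0 stays
     * pinned until the remaining reference is dropped. */
    return 0;
}
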
4562 void free_pages(unsigned long addr, unsigned int order)
4566 __free_pages(virt_to_page((void *)addr), order);
4575 * within a 0 or higher order page. Multiple fragments within that page
4687 * Frees a page fragment allocated out of either a compound or order 0 page.
4698 static void *make_alloc_exact(unsigned long addr, unsigned int order,
4706 split_page_owner(page, 1 << order);
4707 split_page_memcg(page, 1 << order);
4711 last = page + (1UL << order);
4735 unsigned int order = get_order(size);
4741 addr = __get_free_pages(gfp_mask, order);
4742 return make_alloc_exact(addr, order, size);
4760 unsigned int order = get_order(size);
4766 p = alloc_pages_node(nid, gfp_mask, order);
4769 return make_alloc_exact((unsigned long)page_address(p), order, size);
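
make_alloc_exact()/alloc_pages_exact() round the request up to a whole order, split the block into order-0 pages, and give back the pages beyond the page-aligned size. A quick standalone calculation of that rounding, assuming 4 KiB pages (get_order_model() is my stand-in for get_order()):

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned int get_order_model(unsigned long size)
{
    unsigned int order = 0;

    while ((PAGE_SIZE << order) < size)
        order++;
    return order;
}

int main(void)
{
    unsigned long size = 10 * 1024;                     /* 10 KiB request */
    unsigned int order = get_order_model(size);         /* -> order 2 (16 KiB) */
    unsigned long used = (size + PAGE_SIZE - 1) / PAGE_SIZE;
    unsigned long total = 1UL << order;

    printf("order %u block: %lu pages, %lu used, %lu freed back\n",
           order, total, used, total - used);
    return 0;
}
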
5034 pr_info("Fallback order for Node %d: ", local_node);
5199 * needs the percpu allocator in order to allocate its pagesets
5290 * fragmented and becoming unavailable for high-order allocations.
6152 int order;
6157 .order = -1,
6169 * MIGRATE_ISOLATE. Because pageblock and max order pages may
6218 * page allocator holds, ie. they can be part of higher order
6226 order = 0;
6229 if (++order > MAX_ORDER) {
6233 outer_start &= ~0UL << order;
6237 order = buddy_order(pfn_to_page(outer_start));
6240 * outer_start page could be small order buddy page and
6245 if (outer_start + (1UL << order) <= start)
6437 unsigned int order;
6466 order = buddy_order(page);
6467 del_page_from_free_list(page, zone, order);
6468 pfn += (1 << order);
6480 unsigned int order;
6482 for (order = 0; order <= MAX_ORDER; order++) {
6483 struct page *page_head = page - (pfn & ((1 << order) - 1));
6486 buddy_order_unsafe(page_head) >= order)
6490 return order <= MAX_ORDER;
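
The loop above asks, for every order, whether the block head that would contain this pfn is a free buddy of at least that order; the candidate head pfn is simply the pfn with its low `order` bits cleared. A trivial standalone illustration:

#include <stdio.h>

#define MAX_ORDER 10

int main(void)
{
    unsigned long pfn = 0x12345;

    for (unsigned int order = 0; order <= MAX_ORDER; order++)
        printf("order %2u: candidate head pfn %#lx\n",
               order, pfn & ~((1UL << order) - 1));
    return 0;
}
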
6496 * Break down a higher-order page in sub-pages, and keep our target out of
6537 unsigned int order;
6541 for (order = 0; order <= MAX_ORDER; order++) {
6542 struct page *page_head = page - (pfn & ((1 << order) - 1));
6545 if (PageBuddy(page_head) && page_order >= order) {
6627 static bool page_contains_unaccepted(struct page *page, unsigned int order)
6630 phys_addr_t end = start + (PAGE_SIZE << order);
6635 static void accept_page(struct page *page, unsigned int order)
6639 accept_memory(start, start + (PAGE_SIZE << order));
6676 static bool try_to_accept_memory(struct zone *zone, unsigned int order)
6684 __zone_watermark_unusable_free(zone, order, 0));
6726 static bool page_contains_unaccepted(struct page *page, unsigned int order)
6731 static void accept_page(struct page *page, unsigned int order)
6735 static bool try_to_accept_memory(struct zone *zone, unsigned int order)