// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/kmsan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmstat.h>
#include <linux/fault-inject.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <trace/events/oom.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/sched/mm.h>
#include <linux/page_owner.h>
#include <linux/page_table_check.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/lockdep.h>
#include <linux/psi.h>
#include <linux/khugepaged.h>
#include <linux/zswapd.h>
#ifdef CONFIG_RECLAIM_ACCT
#include <linux/reclaim_acct.h>
#endif
#include <linux/delayacct.h>
#include <asm/div64.h>
#include "internal.h"
#include "shuffle.h"
#include "page_reporting.h"

/* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
typedef int __bitwise fpi_t;

/* No special request */
#define FPI_NONE		((__force fpi_t)0)

/*
 * Skip free page reporting notification for the (possibly merged) page.
 * This does not hinder free page reporting from grabbing the page,
 * reporting it and marking it "reported" - it only skips notifying
 * the free page reporting infrastructure about a newly freed page. For
 * example, used when temporarily pulling a page from a freelist and
 * putting it back unmodified.
 */
#define FPI_SKIP_REPORT_NOTIFY	((__force fpi_t)BIT(0))

/*
 * Place the (possibly merged) page to the tail of the freelist. Will ignore
 * page shuffling (relevant code - e.g., memory onlining - is expected to
 * shuffle the whole zone).
 *
 * Note: No code should rely on this flag for correctness - it's purely
 *       to allow for optimizations when handing back either fresh pages
 *       (memory onlining) or untouched pages (page isolation, free page
 *       reporting).
 */
#define FPI_TO_TAIL		((__force fpi_t)BIT(1))

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/*
 * On SMP, spin_trylock is sufficient protection.
 * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP.
 */
#define pcp_trylock_prepare(flags)	do { } while (0)
#define pcp_trylock_finish(flag)	do { } while (0)
#else

/* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */
#define pcp_trylock_prepare(flags)	local_irq_save(flags)
#define pcp_trylock_finish(flags)	local_irq_restore(flags)
#endif

/*
 * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid
 * a migration causing the wrong PCP to be locked and remote memory being
 * potentially allocated, pin the task to the CPU for the lookup+lock.
 * preempt_disable is used on !RT because it is faster than migrate_disable.
 * migrate_disable is used on RT because otherwise RT spinlock usage is
 * interfered with and a high priority task cannot preempt the allocator.
 */
#ifndef CONFIG_PREEMPT_RT
#define pcpu_task_pin()		preempt_disable()
#define pcpu_task_unpin()	preempt_enable()
#else
#define pcpu_task_pin()		migrate_disable()
#define pcpu_task_unpin()	migrate_enable()
#endif

/*
 * Generic helper to look up a per-cpu variable with an embedded spinlock.
 * The return value should be used with the equivalent unlock helper.
 */
#define pcpu_spin_lock(type, member, ptr)				\
({									\
	type *_ret;							\
	pcpu_task_pin();						\
	_ret = this_cpu_ptr(ptr);					\
	spin_lock(&_ret->member);					\
	_ret;								\
})

#define pcpu_spin_trylock(type, member, ptr)				\
({									\
	type *_ret;							\
	pcpu_task_pin();						\
	_ret = this_cpu_ptr(ptr);					\
	if (!spin_trylock(&_ret->member)) {				\
		pcpu_task_unpin();					\
		_ret = NULL;						\
	}								\
	_ret;								\
})

#define pcpu_spin_unlock(member, ptr)					\
({									\
	spin_unlock(&ptr->member);					\
	pcpu_task_unpin();						\
})

/* struct per_cpu_pages specific helpers. */
#define pcp_spin_lock(ptr)						\
	pcpu_spin_lock(struct per_cpu_pages, lock, ptr)

#define pcp_spin_trylock(ptr)						\
	pcpu_spin_trylock(struct per_cpu_pages, lock, ptr)

#define pcp_spin_unlock(ptr)						\
	pcpu_spin_unlock(lock, ptr)
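
/*
 * Illustrative usage sketch (not part of this file): callers pair the
 * trylock helper with the matching unlock, and fall back to the slower
 * buddy path when the pcp lock is contended. Roughly:
 *
 *	struct per_cpu_pages *pcp;
 *
 *	pcp = pcp_spin_trylock(zone->per_cpu_pageset);
 *	if (pcp) {
 *		// operate on pcp->lists; the task stays pinned to this CPU
 *		pcp_spin_unlock(pcp);
 *	} else {
 *		// bypass the pcplists, e.g. free via __free_pages_ok()
 *	}
 */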

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);	/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

static DEFINE_MUTEX(pcpu_drain_mutex);

#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_MEMORY] = { { [0] = 1UL } },
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

/*
 * A cached value of the page's pageblock's migratetype, used when the page is
 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
 * Also the migratetype set in the page does not necessarily match the pcplist
 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
 * other index - this ensures that it will be put on the correct CMA freelist.
 */
static inline int get_pcppage_migratetype(struct page *page)
{
	return page->index;
}

static inline void set_pcppage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
unsigned int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order,
			    fpi_t fpi_flags);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
static int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	[ZONE_DMA] = 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	[ZONE_DMA32] = 256,
#endif
	[ZONE_NORMAL] = 32,
#ifdef CONFIG_HIGHMEM
	[ZONE_HIGHMEM] = 0,
#endif
	[ZONE_MOVABLE] = 0,
};
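
/*
 * Worked example of the ratios above (illustrative, numbers taken from the
 * comment): on the 1G machine (16M dma, 784M normal, 224M high), a request
 * that could have used ZONE_HIGHMEM but falls back to ZONE_NORMAL must
 * leave 224M/32 = 7M of ZONE_NORMAL free, and one that falls back all the
 * way to ZONE_DMA must leave (224M+784M)/256 ~= 4M of ZONE_DMA free.
 * Lower ratios therefore mean larger reservations.
 */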

char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	"DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	"DMA32",
#endif
	"Normal",
#ifdef CONFIG_HIGHMEM
	"HighMem",
#endif
	"Movable",
#ifdef CONFIG_ZONE_DEVICE
	"Device",
#endif
};

const char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Movable",
	"Reclaimable",
#ifdef CONFIG_CMA_REUSE
	"CMA",
#endif
	"HighAtomic",
#if defined(CONFIG_CMA) && !defined(CONFIG_CMA_REUSE)
	"CMA",
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	"Isolate",
#endif
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
static int watermark_boost_factor __read_mostly = 15000;
static int watermark_scale_factor = 10;

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);

#if MAX_NUMNODES > 1
unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
unsigned int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

static bool page_contains_unaccepted(struct page *page, unsigned int order);
static void accept_page(struct page *page, unsigned int order);
static bool try_to_accept_memory(struct zone *zone, unsigned int order);
static inline bool has_unaccepted_memory(void);
static bool __free_unaccepted(struct page *page);

int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
 * During boot we initialize deferred pages on-demand, as needed, but once
 * page_alloc_init_late() has finished, the deferred pages are all initialized,
 * and we can permanently disable that path.
 */
DEFINE_STATIC_KEY_TRUE(deferred_pages);

static inline bool deferred_pages_enabled(void)
{
	return static_branch_unlikely(&deferred_pages);
}

/*
 * deferred_grow_zone() is __init, but it is called from
 * get_page_from_freelist() during early boot until deferred_pages permanently
 * disables this call. This is why we have the __ref wrapper: it avoids the
 * section-mismatch warning and ensures that the function body can be unloaded.
 */
static bool __ref
_deferred_grow_zone(struct zone *zone, unsigned int order)
{
	return deferred_grow_zone(zone, order);
}
#else
static inline bool deferred_pages_enabled(void)
{
	return false;
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(const struct page *page,
							unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return section_to_usemap(__pfn_to_section(pfn));
#else
	return page_zone(page)->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
#else
	pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn);
#endif /* CONFIG_SPARSEMEM */
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
}

/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
unsigned long get_pfnblock_flags_mask(const struct page *page,
					unsigned long pfn, unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);
	/*
	 * This races, without locks, with set_pfnblock_flags_mask(). Ensure
	 * a consistent read of the memory array, so that results, even though
	 * racy, are not corrupted.
	 */
	word = READ_ONCE(bitmap[word_bitidx]);
	return (word >> bitidx) & mask;
}
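
/*
 * Worked example (illustrative, assumes pageblock_order == 9 and
 * NR_PAGEBLOCK_BITS == 4): pfn 0x1234 lies in pageblock 0x1234 >> 9 = 9,
 * so pfn_to_bitidx() yields 9 * 4 = 36. Reading the migratetype then
 * extracts bits 36..38 of the containing bitmap word:
 * (word >> (36 % BITS_PER_LONG)) & MIGRATETYPE_MASK.
 */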

static __always_inline int get_pfnblock_migratetype(const struct page *page,
					unsigned long pfn)
{
	return get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
}

/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 */
void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
					unsigned long pfn,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
	BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);

	mask <<= bitidx;
	flags <<= bitidx;

	word = READ_ONCE(bitmap[word_bitidx]);
	do {
	} while (!try_cmpxchg(&bitmap[word_bitidx], &word, (word & ~mask) | flags));
}
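
/*
 * The empty-bodied loop above is the usual lock-free read-modify-write
 * idiom: on failure, try_cmpxchg() refreshes `word` with the current
 * bitmap contents, so each retry recomputes (word & ~mask) | flags
 * against a fresh value. A minimal standalone sketch (illustrative):
 *
 *	old = READ_ONCE(*p);
 *	do {
 *		new = (old & ~mask) | flags;
 *	} while (!try_cmpxchg(p, &old, new));
 */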

void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pfnblock_flags_mask(page, (unsigned long)migratetype,
				page_to_pfn(page), MIGRATETYPE_MASK);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		ret = !zone_spans_pfn(zone, pfn);
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
			pfn, zone_to_nid(zone), zone->name,
			start_pfn, start_pfn + sp);

	return ret;
}

/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (zone != page_zone(page))
		return 1;

	return 0;
}
#else
static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page, const char *reason)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			pr_alert(
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	pr_alert("BUG: Bad page state in process %s  pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	dump_page(page, reason);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	page_mapcount_reset(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

static inline unsigned int order_to_pindex(int migratetype, int order)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order > PAGE_ALLOC_COSTLY_ORDER) {
		VM_BUG_ON(order != pageblock_order);
		return NR_LOWORDER_PCP_LISTS;
	}
#else
	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif

	return (MIGRATE_PCPTYPES * order) + migratetype;
}

static inline int pindex_to_order(unsigned int pindex)
{
	int order = pindex / MIGRATE_PCPTYPES;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pindex == NR_LOWORDER_PCP_LISTS)
		order = pageblock_order;
#else
	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif

	return order;
}
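
/*
 * Worked example (illustrative, assumes MIGRATE_PCPTYPES == 3): an
 * order-2 MIGRATE_MOVABLE page maps to pindex 3 * 2 + 1 = 7, and
 * pindex_to_order(7) recovers order 7 / 3 = 2. The single THP-sized
 * list (NR_LOWORDER_PCP_LISTS) is the only pindex that falls outside
 * this linear encoding.
 */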

static inline bool pcp_allowed_order(unsigned int order)
{
	if (order <= PAGE_ALLOC_COSTLY_ORDER)
		return true;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order == pageblock_order)
		return true;
#endif
	return false;
}

static inline void free_the_page(struct page *page, unsigned int order)
{
	if (pcp_allowed_order(order))		/* Via pcp? */
		free_unref_page(page, order);
	else
		__free_pages_ok(page, order, FPI_NONE);
}

/*
 * Higher-order pages are called "compound pages". They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head. The rest of the bits are a pointer to the
 * head page.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

void prep_compound_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;

	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++)
		prep_compound_tail(page, i);

	prep_compound_head(page, order);
}

void destroy_large_folio(struct folio *folio)
{
	if (folio_test_hugetlb(folio)) {
		free_huge_folio(folio);
		return;
	}

	if (folio_test_large_rmappable(folio))
		folio_undo_large_rmappable(folio);

	mem_cgroup_uncharge(folio);
	free_the_page(&folio->page, folio_order(folio));
}

static inline void set_buddy_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

#ifdef CONFIG_COMPACTION
static inline struct capture_control *task_capc(struct zone *zone)
{
	struct capture_control *capc = current->capture_control;

	return unlikely(capc) &&
		!(current->flags & PF_KTHREAD) &&
		!capc->page &&
		capc->cc->zone == zone ? capc : NULL;
}

static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	if (!capc || order != capc->cc->order)
		return false;

	/* Do not accidentally pollute CMA or isolated regions */
	if (is_migrate_cma(migratetype) ||
	    is_migrate_isolate(migratetype))
		return false;

	/*
	 * Do not let lower order allocations pollute a movable pageblock.
	 * This might let an unmovable request use a reclaimable pageblock
	 * and vice-versa but no more than normal fallback logic which can
	 * have trouble finding a high-order free page.
	 */
	if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
		return false;

	capc->page = page;
	return true;
}

#else
static inline struct capture_control *task_capc(struct zone *zone)
{
	return NULL;
}

static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	return false;
}
#endif /* CONFIG_COMPACTION */

/* Used for pages not on another list */
static inline void add_to_free_list(struct page *page, struct zone *zone,
				    unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_add(&page->buddy_list, &area->free_list[migratetype]);
	area->nr_free++;
}

/* Used for pages not on another list */
static inline void add_to_free_list_tail(struct page *page, struct zone *zone,
					 unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_add_tail(&page->buddy_list, &area->free_list[migratetype]);
	area->nr_free++;
}

/*
 * Used for pages which are on another list. Move the pages to the tail
 * of the list - so the moved pages won't immediately be considered for
 * allocation again (e.g., optimization for memory onlining).
 */
static inline void move_to_free_list(struct page *page, struct zone *zone,
				     unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_move_tail(&page->buddy_list, &area->free_list[migratetype]);
}

static inline void del_page_from_free_list(struct page *page, struct zone *zone,
					   unsigned int order)
{
	/* clear reported state and update reported page count */
	if (page_reported(page))
		__ClearPageReported(page);

	list_del(&page->buddy_list);
	__ClearPageBuddy(page);
	set_page_private(page, 0);
	zone->free_area[order].nr_free--;
}

static inline struct page *get_page_from_free_area(struct free_area *area,
						   int migratetype)
{
	return list_first_entry_or_null(&area->free_list[migratetype],
					struct page, buddy_list);
}

/*
 * If this is not the largest possible page, check if the buddy
 * of the next-highest order is free. If it is, it's possible
 * that pages are being freed that will coalesce soon. In case
 * that is happening, add the free page to the tail of the list
 * so it's less likely to be used soon and more likely to be merged
 * as a higher order page
 */
static inline bool
buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
		   struct page *page, unsigned int order)
{
	unsigned long higher_page_pfn;
	struct page *higher_page;

	if (order >= MAX_ORDER - 1)
		return false;

	higher_page_pfn = buddy_pfn & pfn;
	higher_page = page + (higher_page_pfn - pfn);

	return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1,
			NULL) != NULL;
}
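
/*
 * Worked example (illustrative): a buddy pfn is the pfn with bit `order`
 * flipped, i.e. buddy_pfn = pfn ^ (1 << order). For pfn 8 at order 1 the
 * buddy is pfn 10, and buddy_pfn & pfn = 8 is the start of the order-2
 * page that a merge would produce - exactly the `higher_page` whose own
 * order-2 buddy (pfn 12) is probed above.
 */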

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * free pages of length of (1 << order) and marked with PageBuddy.
 * Page's order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other. That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */

static inline void __free_one_page(struct page *page,
		unsigned long pfn,
		struct zone *zone, unsigned int order,
		int migratetype, fpi_t fpi_flags)
{
	struct capture_control *capc = task_capc(zone);
	unsigned long buddy_pfn = 0;
	unsigned long combined_pfn;
	struct page *buddy;
	bool to_tail;

	VM_BUG_ON(!zone_is_initialized(zone));
	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);

	VM_BUG_ON(migratetype == -1);
	if (likely(!is_migrate_isolate(migratetype)))
		__mod_zone_freepage_state(zone, 1 << order, migratetype);

	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

	while (order < MAX_ORDER) {
		if (compaction_capture(capc, page, order, migratetype)) {
			__mod_zone_freepage_state(zone, -(1 << order),
								migratetype);
			return;
		}

		buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
		if (!buddy)
			goto done_merging;

		if (unlikely(order >= pageblock_order)) {
			/*
			 * We want to prevent merge between freepages on pageblock
			 * without fallbacks and normal pageblock. Without this,
			 * pageblock isolation could cause incorrect freepage or CMA
			 * accounting or HIGHATOMIC accounting.
			 */
			int buddy_mt = get_pfnblock_migratetype(buddy, buddy_pfn);

			if (migratetype != buddy_mt
					&& (!migratetype_is_mergeable(migratetype) ||
						!migratetype_is_mergeable(buddy_mt)))
				goto done_merging;
		}

		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
		if (page_is_guard(buddy))
			clear_page_guard(zone, buddy, order, migratetype);
		else
			del_page_from_free_list(buddy, zone, order);
		combined_pfn = buddy_pfn & pfn;
		page = page + (combined_pfn - pfn);
		pfn = combined_pfn;
		order++;
	}

done_merging:
	set_buddy_order(page, order);

	if (fpi_flags & FPI_TO_TAIL)
		to_tail = true;
	else if (is_shuffle_order(order))
		to_tail = shuffle_pick_tail();
	else
		to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);

	if (to_tail)
		add_to_free_list_tail(page, zone, order, migratetype);
	else
		add_to_free_list(page, zone, order, migratetype);

	/* Notify page reporting subsystem of freed page */
	if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
		page_reporting_notify_free(order);
}

/**
 * split_free_page() -- split a free page at split_pfn_offset
 * @free_page:		the original free page
 * @order:		the order of the page
 * @split_pfn_offset:	split offset within the page
 *
 * Return -ENOENT if the free page is changed, otherwise 0
 *
 * It is used when the free page crosses two pageblocks with different
 * migratetypes at split_pfn_offset within the page. The split free page will
 * be put into separate migratetype lists afterwards. Otherwise, the function
 * achieves nothing.
 */
int split_free_page(struct page *free_page,
			unsigned int order, unsigned long split_pfn_offset)
{
	struct zone *zone = page_zone(free_page);
	unsigned long free_page_pfn = page_to_pfn(free_page);
	unsigned long pfn;
	unsigned long flags;
	int free_page_order;
	int mt;
	int ret = 0;

	if (split_pfn_offset == 0)
		return ret;

	spin_lock_irqsave(&zone->lock, flags);

	if (!PageBuddy(free_page) || buddy_order(free_page) != order) {
		ret = -ENOENT;
		goto out;
	}

	mt = get_pfnblock_migratetype(free_page, free_page_pfn);
	if (likely(!is_migrate_isolate(mt)))
		__mod_zone_freepage_state(zone, -(1UL << order), mt);

	del_page_from_free_list(free_page, zone, order);
	for (pfn = free_page_pfn;
	     pfn < free_page_pfn + (1UL << order);) {
		int mt = get_pfnblock_migratetype(pfn_to_page(pfn), pfn);

		free_page_order = min_t(unsigned int,
					pfn ? __ffs(pfn) : order,
					__fls(split_pfn_offset));
		__free_one_page(pfn_to_page(pfn), pfn, zone, free_page_order,
				mt, FPI_NONE);
		pfn += 1UL << free_page_order;
		split_pfn_offset -= (1UL << free_page_order);
		/* we have done the first part, now switch to second part */
		if (split_pfn_offset == 0)
			split_pfn_offset = (1UL << order) - (pfn - free_page_pfn);
	}
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret;
}
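
/*
 * Worked example of the split loop above (illustrative numbers): for a
 * free page at pfn 1024 with order = 10 and split_pfn_offset = 256, the
 * first pass frees an order-8 page at pfn 1024 (min of __ffs(1024) = 10
 * and __fls(256) = 8), the offset then resets to the 768-pfn remainder,
 * and subsequent passes free an order-8 page at pfn 1280 and an order-9
 * page at pfn 1536, each naturally aligned as __free_one_page() requires.
 */
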
/*
 * A bad page could be due to a number of fields. Instead of multiple branches,
 * try and check multiple fields with one check. The caller must do a detailed
 * check if necessary.
 */
static inline bool page_expected_state(struct page *page,
					unsigned long check_flags)
{
	if (unlikely(atomic_read(&page->_mapcount) != -1))
		return false;

	if (unlikely((unsigned long)page->mapping |
			page_ref_count(page) |
#ifdef CONFIG_MEMCG
			page->memcg_data |
#endif
			(page->flags & check_flags)))
		return false;

	return true;
}

static const char *page_bad_reason(struct page *page, unsigned long flags)
{
	const char *bad_reason = NULL;

	if (unlikely(atomic_read(&page->_mapcount) != -1))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(page_ref_count(page) != 0))
		bad_reason = "nonzero _refcount";
	if (unlikely(page->flags & flags)) {
		if (flags == PAGE_FLAGS_CHECK_AT_PREP)
			bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
		else
			bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->memcg_data))
		bad_reason = "page still charged to cgroup";
#endif
	return bad_reason;
}

static void free_page_is_bad_report(struct page *page)
{
	bad_page(page,
		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
}

static inline bool free_page_is_bad(struct page *page)
{
	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
		return false;

	/* Something has gone sideways, find it */
	free_page_is_bad_report(page);
	return true;
}

static inline bool is_check_pages_enabled(void)
{
	return static_branch_unlikely(&check_pages_enabled);
}

static int free_tail_page_prepare(struct page *head_page, struct page *page)
{
	struct folio *folio = (struct folio *)head_page;
	int ret = 1;

	/*
	 * We rely on page->lru.next never having bit 0 set, unless the page
	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
	 */
	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);

	if (!is_check_pages_enabled()) {
		ret = 0;
		goto out;
	}
	switch (page - head_page) {
	case 1:
		/* the first tail page: these may be in place of ->mapping */
		if (unlikely(folio_entire_mapcount(folio))) {
			bad_page(page, "nonzero entire_mapcount");
			goto out;
		}
		if (unlikely(atomic_read(&folio->_nr_pages_mapped))) {
			bad_page(page, "nonzero nr_pages_mapped");
			goto out;
		}
		if (unlikely(atomic_read(&folio->_pincount))) {
			bad_page(page, "nonzero pincount");
			goto out;
		}
		break;
	case 2:
		/*
		 * the second tail page: ->mapping is
		 * deferred_list.next -- ignore value.
		 */
		break;
	default:
		if (page->mapping != TAIL_MAPPING) {
			bad_page(page, "corrupted mapping in tail page");
			goto out;
		}
		break;
	}
	if (unlikely(!PageTail(page))) {
		bad_page(page, "PageTail not set");
		goto out;
	}
	if (unlikely(compound_head(page) != head_page)) {
		bad_page(page, "compound_head not consistent");
		goto out;
	}
	ret = 0;
out:
	page->mapping = NULL;
	clear_compound_head(page);
	return ret;
}

/*
 * Skip KASAN memory poisoning when either:
 *
 * 1. For generic KASAN: deferred memory initialization has not yet completed.
 *    Tag-based KASAN modes skip pages freed via deferred memory initialization
 *    using page tags instead (see below).
 * 2. For tag-based KASAN modes: the page has a match-all KASAN tag, indicating
 *    that error detection is disabled for accesses via the page address.
 *
 * Pages will have match-all tags in the following circumstances:
 *
 * 1. Pages are being initialized for the first time, including during deferred
 *    memory init; see the call to page_kasan_tag_reset in __init_single_page.
 * 2. The allocation was not unpoisoned due to __GFP_SKIP_KASAN, with the
 *    exception of pages unpoisoned by kasan_unpoison_vmalloc.
 * 3. The allocation was excluded from being checked due to sampling,
 *    see the call to kasan_unpoison_pages.
 *
 * Poisoning pages during deferred memory init will greatly lengthen the
 * process and cause problems in large memory systems as the deferred pages
 * initialization is done with interrupts disabled.
 *
 * Assuming that there will be no reference to those newly initialized
 * pages before they are ever allocated, this should have no effect on
 * KASAN memory tracking as the poison will be properly inserted at page
 * allocation time. The only corner case is when pages are allocated by
 * on-demand allocation and then freed again before the deferred pages
 * initialization is done, but this is not likely to happen.
 */
static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return deferred_pages_enabled();

	return page_kasan_tag(page) == 0xff;
}

static void kernel_init_pages(struct page *page, int numpages)
{
	int i;

	/* s390's use of memset() could override KASAN redzones. */
	kasan_disable_current();
	for (i = 0; i < numpages; i++)
		clear_highpage_kasan_tagged(page + i);
	kasan_enable_current();
}

static __always_inline bool free_pages_prepare(struct page *page,
			unsigned int order, fpi_t fpi_flags)
{
	int bad = 0;
	bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags);
	bool init = want_init_on_free();

	VM_BUG_ON_PAGE(PageTail(page), page);

	trace_mm_page_free(page, order);
	kmsan_free_page(page, order);

	if (unlikely(PageHWPoison(page)) && !order) {
		/*
		 * Do not let hwpoison pages hit pcplists/buddy
		 * Untie memcg state and reset page's owner
		 */
		if (memcg_kmem_online() && PageMemcgKmem(page))
			__memcg_kmem_uncharge_page(page, order);
		reset_page_owner(page, order);
		page_table_check_free(page, order);
		return false;
	}

	/*
	 * Check tail pages before head page information is cleared to
	 * avoid checking PageCompound for order-0 pages.
	 */
	if (unlikely(order)) {
		bool compound = PageCompound(page);
		int i;

		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);

		if (compound)
			page[1].flags &= ~PAGE_FLAGS_SECOND;
		for (i = 1; i < (1 << order); i++) {
			if (compound)
				bad += free_tail_page_prepare(page, page + i);
			if (is_check_pages_enabled()) {
				if (free_page_is_bad(page + i)) {
					bad++;
					continue;
				}
			}
			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
		}
	}
	if (PageMappingFlags(page))
		page->mapping = NULL;
	if (memcg_kmem_online() && PageMemcgKmem(page))
		__memcg_kmem_uncharge_page(page, order);
	if (is_check_pages_enabled()) {
		if (free_page_is_bad(page))
			bad++;
		if (bad)
			return false;
	}

	page_cpupid_reset_last(page);
	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	reset_page_owner(page, order);
	page_table_check_free(page, order);

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}

	kernel_poison_pages(page, 1 << order);

	/*
	 * As memory initialization might be integrated into KASAN,
	 * KASAN poisoning and memory initialization code must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * With hardware tag-based KASAN, memory tags must be set before the
	 * page becomes unavailable via debug_pagealloc or arch_free_page.
	 */
	if (!skip_kasan_poison) {
		kasan_poison_pages(page, order, init);

		/* Memory is already initialized if KASAN did it internally. */
		if (kasan_has_integrated_init())
			init = false;
	}
	if (init)
		kernel_init_pages(page, 1 << order);

	/*
	 * arch_free_page() can make the page's contents inaccessible. s390
	 * does this. So nothing which can access the page's contents should
	 * happen after this.
	 */
	arch_free_page(page, order);

	debug_pagealloc_unmap_pages(page, 1 << order);

	return true;
}

/*
 * Frees a number of pages from the PCP lists.
 * Assumes all pages on list are in same zone.
 * count is the number of pages to free.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp,
					int pindex)
{
	unsigned long flags;
	unsigned int order;
	bool isolated_pageblocks;
	struct page *page;

	/*
	 * Ensure a proper count is passed; otherwise the
	 * while (list_empty(list)) loop below would spin forever.
	 */
	count = min(pcp->count, count);

	/* Ensure requested pindex is drained first. */
	pindex = pindex - 1;

	spin_lock_irqsave(&zone->lock, flags);
	isolated_pageblocks = has_isolate_pageblock(zone);

	while (count > 0) {
		struct list_head *list;
		int nr_pages;

		/* Remove pages from lists in a round-robin fashion. */
		do {
			if (++pindex > NR_PCP_LISTS - 1)
				pindex = 0;
			list = &pcp->lists[pindex];
		} while (list_empty(list));

		order = pindex_to_order(pindex);
		nr_pages = 1 << order;
		do {
			int mt;

			page = list_last_entry(list, struct page, pcp_list);
			mt = get_pcppage_migratetype(page);

			/* must delete to avoid corrupting pcp list */
			list_del(&page->pcp_list);
			count -= nr_pages;
			pcp->count -= nr_pages;

			/* MIGRATE_ISOLATE page should not go to pcplists */
			VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
			/* Pageblock could have been isolated meanwhile */
			if (unlikely(isolated_pageblocks))
				mt = get_pageblock_migratetype(page);

			__free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE);
			trace_mm_page_pcpu_drain(page, order, mt);
		} while (count > 0 && !list_empty(list));
	}

	spin_unlock_irqrestore(&zone->lock, flags);
}
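
/*
 * Illustrative drain order: with pindex = 2 passed in, the round-robin
 * above scans lists 2, 3, ..., NR_PCP_LISTS - 1, 0, 1, skipping any that
 * are empty, so the caller's requested pindex is always drained first.
 */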

static void free_one_page(struct zone *zone,
				struct page *page, unsigned long pfn,
				unsigned int order,
				int migratetype, fpi_t fpi_flags)
{
	unsigned long flags;

	spin_lock_irqsave(&zone->lock, flags);
	if (unlikely(has_isolate_pageblock(zone) ||
		is_migrate_isolate(migratetype))) {
		migratetype = get_pfnblock_migratetype(page, pfn);
	}
	__free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
	spin_unlock_irqrestore(&zone->lock, flags);
}

static void __free_pages_ok(struct page *page, unsigned int order,
			    fpi_t fpi_flags)
{
	unsigned long flags;
	int migratetype;
	unsigned long pfn = page_to_pfn(page);
	struct zone *zone = page_zone(page);

	if (!free_pages_prepare(page, order, fpi_flags))
		return;

	/*
	 * get_pfnblock_migratetype() is called here without the zone lock to
	 * keep the lock hold time short; the isolation cases below re-read
	 * it under the lock, where it cannot change.
	 */
	migratetype = get_pfnblock_migratetype(page, pfn);

	spin_lock_irqsave(&zone->lock, flags);
	if (unlikely(has_isolate_pageblock(zone) ||
		is_migrate_isolate(migratetype))) {
		migratetype = get_pfnblock_migratetype(page, pfn);
	}
	__free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
	spin_unlock_irqrestore(&zone->lock, flags);

	__count_vm_events(PGFREE, 1 << order);
}

void __free_pages_core(struct page *page, unsigned int order)
{
	unsigned int nr_pages = 1 << order;
	struct page *p = page;
	unsigned int loop;

	/*
	 * When initializing the memmap, __init_single_page() sets the refcount
	 * of all pages to 1 ("allocated"/"not free"). We have to set the
	 * refcount of all involved pages to 0.
	 */
	prefetchw(p);
	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
		prefetchw(p + 1);
		__ClearPageReserved(p);
		set_page_count(p, 0);
	}
	__ClearPageReserved(p);
	set_page_count(p, 0);

	atomic_long_add(nr_pages, &page_zone(page)->managed_pages);

	if (page_contains_unaccepted(page, order)) {
		if (order == MAX_ORDER && __free_unaccepted(page))
			return;

		accept_page(page, order);
	}

	/*
	 * Bypass PCP and place fresh pages right to the tail, primarily
	 * relevant for memory onlining.
	 */
	__free_pages_ok(page, order, FPI_TO_TAIL);
}

/*
 * Check that the whole (or subset of) a pageblock given by the interval of
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 * with the migration of free compaction scanner.
 *
 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
 *
 * It's possible on some configurations to have a setup like node0 node1 node0
 * i.e. it's possible that all pages within a zone's range of pages do not
 * belong to a single zone. We assume that a border between node0 and node1
 * can occur within a single pageblock, but not a node0 node1 node0
 * interleaving within a single pageblock. It is therefore sufficient to check
 * the first and last page of a pageblock and avoid checking each individual
 * page in a pageblock.
 *
 * Note: the function may return non-NULL struct page even for a page block
 * which contains a memory hole (i.e. there is no physical memory for a subset
 * of the pfn range). For example, if the pageblock order is MAX_ORDER, which
 * will fall into 2 sub-sections, and the end pfn of the pageblock may be hole
 * even though the start pfn is online and valid. This should be safe most of
 * the time because struct pages are still initialized via init_unavailable_range()
 * and pfn walkers shouldn't touch any physical memory range for which they do
 * not recognize any specific metadata in struct pages.
 */
struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				     unsigned long end_pfn, struct zone *zone)
{
	struct page *start_page;
	struct page *end_page;

	/* end_pfn is one past the range we are checking */
	end_pfn--;

	if (!pfn_valid(end_pfn))
		return NULL;

	start_page = pfn_to_online_page(start_pfn);
	if (!start_page)
		return NULL;

	if (page_zone(start_page) != zone)
		return NULL;

	end_page = pfn_to_page(end_pfn);

	/* This gives a shorter code than deriving page_zone(end_page) */
	if (page_zone_id(start_page) != page_zone_id(end_page))
		return NULL;

	return start_page;
}

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- nyc
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		high--;
		size >>= 1;
		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);

		/*
		 * Mark as guard pages (or page): these can be merged back
		 * into the allocator when their buddy is freed. The
		 * corresponding page table entries will not be touched;
		 * the pages stay not-present in the virtual address space.
		 */
		if (set_page_guard(zone, &page[size], high, migratetype))
			continue;

		add_to_free_list(&page[size], zone, high, migratetype);
		set_buddy_order(&page[size], high);
	}
}
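
/*
 * Worked example (illustrative): satisfying an order-0 request (low = 0)
 * from an order-3 free page (high = 3) walks high = 2, 1, 0, handing the
 * upper half of the block back at each step, so buddies of order 2, 1
 * and 0 land on the free lists and the first page is returned.
 */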

static void check_new_page_bad(struct page *page)
{
	if (unlikely(page->flags & __PG_HWPOISON)) {
		/* Don't complain about hwpoisoned pages */
		page_mapcount_reset(page); /* remove PageBuddy */
		return;
	}

	bad_page(page,
		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
}

/*
 * This page is about to be returned from the page allocator
 */
static int check_new_page(struct page *page)
{
	if (likely(page_expected_state(page,
				PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
		return 0;

	check_new_page_bad(page);
	return 1;
}

static inline bool check_new_pages(struct page *page, unsigned int order)
{
	if (is_check_pages_enabled()) {
		for (int i = 0; i < (1 << order); i++) {
			struct page *p = page + i;

			if (check_new_page(p))
				return true;
		}
	}

	return false;
}

static inline bool should_skip_kasan_unpoison(gfp_t flags)
{
	/* Don't skip if a software KASAN mode is enabled. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
	    IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		return false;

	/* Skip, if hardware tag-based KASAN is not enabled. */
	if (!kasan_hw_tags_enabled())
		return true;

	/*
	 * With hardware tag-based KASAN enabled, skip if this has been
	 * requested via __GFP_SKIP_KASAN.
	 */
	return flags & __GFP_SKIP_KASAN;
}

static inline bool should_skip_init(gfp_t flags)
{
	/* Don't skip, if hardware tag-based KASAN is not enabled. */
	if (!kasan_hw_tags_enabled())
		return false;

	/* For hardware tag-based KASAN, skip if requested. */
	return (flags & __GFP_SKIP_ZERO);
}

inline void post_alloc_hook(struct page *page, unsigned int order,
				gfp_t gfp_flags)
{
	bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) &&
			!should_skip_init(gfp_flags);
	bool zero_tags = init && (gfp_flags & __GFP_ZEROTAGS);
	int i;

	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	debug_pagealloc_map_pages(page, 1 << order);

	/*
	 * Page unpoisoning must happen before memory initialization.
	 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO
	 * allocations and the page unpoisoning code will complain.
	 */
	kernel_unpoison_pages(page, 1 << order);

	/*
	 * As memory initialization might be integrated into KASAN,
	 * KASAN unpoisoning and memory initialization code must be
	 * kept together to avoid discrepancies in behavior.
	 */

	/*
	 * If memory tags should be zeroed
	 * (which happens only when memory should be initialized as well).
	 */
	if (zero_tags) {
		/* Initialize both memory and memory tags. */
		for (i = 0; i != 1 << order; ++i)
			tag_clear_highpage(page + i);

		/* Take note that memory was initialized by the loop above. */
		init = false;
	}
	if (!should_skip_kasan_unpoison(gfp_flags) &&
	    kasan_unpoison_pages(page, order, init)) {
		/* Take note that memory was initialized by KASAN. */
		if (kasan_has_integrated_init())
			init = false;
	} else {
		/*
		 * If memory tags have not been set by KASAN, reset the page
		 * tags to ensure page_address() dereferencing does not fault.
		 */
		for (i = 0; i != 1 << order; ++i)
			page_kasan_tag_reset(page + i);
	}
	/* If memory is still not initialized, initialize it now. */
	if (init)
		kernel_init_pages(page, 1 << order);

	set_page_owner(page, order, gfp_flags);
	page_table_check_alloc(page, order);
}

static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
							unsigned int alloc_flags)
{
	post_alloc_hook(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	/*
	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
	 * allocate the page. The expectation is that the caller is taking
	 * steps that will free more memory. The caller should avoid the page
	 * being used for !PFMEMALLOC purposes.
	 */
	if (alloc_flags & ALLOC_NO_WATERMARKS)
		set_page_pfmemalloc(page);
	else
		clear_page_pfmemalloc(page);
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static __always_inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area *area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order <= MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		page = get_page_from_free_area(area, migratetype);
		if (!page)
			continue;
		del_page_from_free_list(page, zone, current_order);
		expand(zone, page, order, current_order, migratetype);
		set_pcppage_migratetype(page, migratetype);
		trace_mm_page_alloc_zone_locked(page, order, migratetype,
				pcp_allowed_order(order) &&
				migratetype < MIGRATE_PCPTYPES);
		return page;
	}

	return NULL;
}

/*
 * This array describes the order in which free lists are fallen back to
 * when the free lists for the desired migratetype are depleted.
 *
 * The other migratetypes do not have fallbacks.
 */
static int fallbacks[MIGRATE_TYPES][MIGRATE_PCPTYPES - 1] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE   },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE   },
};
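
/*
 * Illustrative walk of the table above: when an unmovable request finds
 * the MIGRATE_UNMOVABLE free lists empty, the fallback path tries the
 * row's entries left to right - MIGRATE_RECLAIMABLE, then
 * MIGRATE_MOVABLE - before the allocation fails over to other zones.
 */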
1609
1610 #ifdef CONFIG_CMA
__rmqueue_cma_fallback(struct zone *zone, unsigned int order)1611 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1612 unsigned int order)
1613 {
1614 return __rmqueue_smallest(zone, order, MIGRATE_CMA);
1615 }
1616 #else
__rmqueue_cma_fallback(struct zone *zone, unsigned int order)1617 static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1618 unsigned int order) { return NULL; }
1619 #endif
1620
1621 /*
1622 * Move the free pages in a range to the freelist tail of the requested type.
1623 * Note that start_page and end_pages are not aligned on a pageblock
1624 * boundary. If alignment is required, use move_freepages_block()
1625 */
move_freepages(struct zone *zone, unsigned long start_pfn, unsigned long end_pfn, int migratetype, int *num_movable)1626 static int move_freepages(struct zone *zone,
1627 unsigned long start_pfn, unsigned long end_pfn,
1628 int migratetype, int *num_movable)
1629 {
1630 struct page *page;
1631 unsigned long pfn;
1632 unsigned int order;
1633 int pages_moved = 0;
1634
1635 for (pfn = start_pfn; pfn <= end_pfn;) {
1636 page = pfn_to_page(pfn);
1637 if (!PageBuddy(page)) {
1638 /*
1639 * We assume that pages that could be isolated for
1640 * migration are movable. But we don't actually try
1641 * isolating, as that would be expensive.
1642 */
1643 if (num_movable &&
1644 (PageLRU(page) || __PageMovable(page)))
1645 (*num_movable)++;
1646 pfn++;
1647 continue;
1648 }
1649
1650 /* Make sure we are not inadvertently changing nodes */
1651 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
1652 VM_BUG_ON_PAGE(page_zone(page) != zone, page);
1653
1654 order = buddy_order(page);
1655 move_to_free_list(page, zone, order, migratetype);
1656 pfn += 1 << order;
1657 pages_moved += 1 << order;
1658 }
1659
1660 return pages_moved;
1661 }
1662
move_freepages_block(struct zone *zone, struct page *page, int migratetype, int *num_movable)1663 int move_freepages_block(struct zone *zone, struct page *page,
1664 int migratetype, int *num_movable)
1665 {
1666 unsigned long start_pfn, end_pfn, pfn;
1667
1668 if (num_movable)
1669 *num_movable = 0;
1670
1671 pfn = page_to_pfn(page);
1672 start_pfn = pageblock_start_pfn(pfn);
1673 end_pfn = pageblock_end_pfn(pfn) - 1;
1674
1675 /* Do not cross zone boundaries */
1676 if (!zone_spans_pfn(zone, start_pfn))
1677 start_pfn = pfn;
1678 if (!zone_spans_pfn(zone, end_pfn))
1679 return 0;
1680
1681 return move_freepages(zone, start_pfn, end_pfn, migratetype,
1682 num_movable);
1683 }
1684
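/*
 * Worked example (editor's note, not kernel code), assuming the common
 * definitions pageblock_start_pfn(pfn) == ALIGN_DOWN(pfn, pageblock_nr_pages)
 * and pageblock_end_pfn(pfn) == ALIGN(pfn + 1, pageblock_nr_pages):
 *
 *	pageblock_nr_pages = 512, pfn = 1000
 *	-> start_pfn = 512, end_pfn = 1023 (inclusive after the -1 above)
 *
 * If the zone only starts at pfn 600, start_pfn is clipped to the page's
 * own pfn (1000); if the zone ends before pfn 1023, the move is abandoned
 * and 0 is returned.
 */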
1685 static void change_pageblock_range(struct page *pageblock_page,
1686 int start_order, int migratetype)
1687 {
1688 int nr_pageblocks = 1 << (start_order - pageblock_order);
1689
1690 while (nr_pageblocks--) {
1691 set_pageblock_migratetype(pageblock_page, migratetype);
1692 pageblock_page += pageblock_nr_pages;
1693 }
1694 }
1695
1696 /*
1697 * When we are falling back to another migratetype during allocation, try to
1698 * steal extra free pages from the same pageblocks to satisfy further
1699 * allocations, instead of polluting multiple pageblocks.
1700 *
1701 * If we are stealing a relatively large buddy page, it is likely there will
1702 * be more free pages in the pageblock, so try to steal them all. For
1703 * reclaimable and unmovable allocations, we steal regardless of page size,
1704 * as fragmentation caused by those allocations polluting movable pageblocks
1705 * is worse than movable allocations stealing from unmovable and reclaimable
1706 * pageblocks.
1707 */
1708 static bool can_steal_fallback(unsigned int order, int start_mt)
1709 {
1710 /*
1711 * This order check is intentionally kept, even though the next
1712 * check uses a more relaxed order. The reason is that we can
1713 * steal a whole pageblock if this condition is met, whereas the
1714 * check below does not guarantee that and is merely a heuristic
1715 * that could be changed at any time.
1716 */
1717 if (order >= pageblock_order)
1718 return true;
1719
1720 if (order >= pageblock_order / 2 ||
1721 start_mt == MIGRATE_RECLAIMABLE ||
1722 start_mt == MIGRATE_UNMOVABLE ||
1723 page_group_by_mobility_disabled)
1724 return true;
1725
1726 return false;
1727 }
1728
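/*
 * Worked example (editor's note): with pageblock_order = 9, a
 * MIGRATE_MOVABLE request may steal only at order >= 9 / 2 = 4, while
 * MIGRATE_RECLAIMABLE and MIGRATE_UNMOVABLE requests may steal at any
 * order, because their fallbacks pollute movable pageblocks and cause
 * the more damaging form of fragmentation.
 */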
1729 static inline bool boost_watermark(struct zone *zone)
1730 {
1731 unsigned long max_boost;
1732
1733 if (!watermark_boost_factor)
1734 return false;
1735 /*
1736 * Don't bother in zones that are unlikely to produce results.
1737 * On small machines, including kdump capture kernels running
1738 * in a small area, boosting the watermark can cause an out of
1739 * memory situation immediately.
1740 */
1741 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
1742 return false;
1743
1744 max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
1745 watermark_boost_factor, 10000);
1746
1747 /*
1748 * high watermark may be uninitialised if fragmentation occurs
1749 * very early in boot so do not boost. We do not fall
1750 * through and boost by pageblock_nr_pages as failing
1751 * allocations that early means that reclaim is not going
1752 * to help and it may even be impossible to reclaim the
1753 * boosted watermark resulting in a hang.
1754 */
1755 if (!max_boost)
1756 return false;
1757
1758 max_boost = max(pageblock_nr_pages, max_boost);
1759
1760 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
1761 max_boost);
1762
1763 return true;
1764 }
1765
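/*
 * Worked example (editor's note): with the default
 * watermark_boost_factor of 15000 and a high watermark of 1024 pages,
 * max_boost = mult_frac(1024, 15000, 10000) = 1536 pages. Each fallback
 * event then raises watermark_boost by pageblock_nr_pages (e.g. 512)
 * until that cap is reached.
 */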
1766 /*
1767 * This function implements the actual steal behaviour. If the order is large
1768 * enough, we can steal a whole pageblock. If not, we first move the free pages
1769 * in this pageblock to our migratetype and determine how many already-allocated
1770 * pages in the pageblock have a compatible migratetype. If at least half of the
1771 * pages are free or compatible, we can change the migratetype of the pageblock
1772 * itself, so pages freed in the future will be put on the correct free list.
1773 */
1774 static void steal_suitable_fallback(struct zone *zone, struct page *page,
1775 unsigned int alloc_flags, int start_type, bool whole_block)
1776 {
1777 unsigned int current_order = buddy_order(page);
1778 int free_pages, movable_pages, alike_pages;
1779 int old_block_type;
1780
1781 old_block_type = get_pageblock_migratetype(page);
1782
1783 /*
1784 * This can happen due to races and we want to prevent broken
1785 * highatomic accounting.
1786 */
1787 if (is_migrate_highatomic(old_block_type))
1788 goto single_page;
1789
1790 /* Take ownership for orders >= pageblock_order */
1791 if (current_order >= pageblock_order) {
1792 change_pageblock_range(page, current_order, start_type);
1793 goto single_page;
1794 }
1795
1796 /*
1797 * Boost watermarks to increase reclaim pressure to reduce the
1798 * likelihood of future fallbacks. Wake kswapd now as the node
1799 * may be balanced overall and kswapd will not wake naturally.
1800 */
1801 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
1802 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
1803
1804 /* We are not allowed to try stealing from the whole block */
1805 if (!whole_block)
1806 goto single_page;
1807
1808 free_pages = move_freepages_block(zone, page, start_type,
1809 &movable_pages);
1810 /* moving whole block can fail due to zone boundary conditions */
1811 if (!free_pages)
1812 goto single_page;
1813
1814 /*
1815 * Determine how many pages are compatible with our allocation.
1816 * For movable allocation, it's the number of movable pages which
1817 * we just obtained. For other types it's a bit more tricky.
1818 */
1819 if (start_type == MIGRATE_MOVABLE) {
1820 alike_pages = movable_pages;
1821 } else {
1822 /*
1823 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
1824 * to MOVABLE pageblock, consider all non-movable pages as
1825 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
1826 * vice versa, be conservative since we can't distinguish the
1827 * exact migratetype of non-movable pages.
1828 */
1829 if (old_block_type == MIGRATE_MOVABLE)
1830 alike_pages = pageblock_nr_pages
1831 - (free_pages + movable_pages);
1832 else
1833 alike_pages = 0;
1834 }
1835 /*
1836 * If a sufficient number of pages in the block are either free or have
1837 * a migratetype compatible with our allocation, claim the whole block.
1838 */
1839 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
1840 page_group_by_mobility_disabled)
1841 set_pageblock_migratetype(page, start_type);
1842
1843 return;
1844
1845 single_page:
1846 move_to_free_list(page, zone, current_order, start_type);
1847 }
1848
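/*
 * Worked example (editor's note): with pageblock_order = 9 the claim
 * threshold above is 1 << 8 = 256 pages. If moving the block found
 * free_pages = 200 and alike_pages = 60, then 260 >= 256 and the whole
 * pageblock is re-typed to start_type; with free_pages = 100 and
 * alike_pages = 60 the block keeps its old migratetype and only the
 * moved free pages change lists.
 */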
1849 /*
1850 * Check whether there is a suitable fallback freepage with requested order.
1851 * If only_stealable is true, this function returns fallback_mt only if
1852 * we can steal all of the other free pages together. This helps to reduce
1853 * fragmentation due to mixed migratetype pages in one pageblock.
1854 */
1855 int find_suitable_fallback(struct free_area *area, unsigned int order,
1856 int migratetype, bool only_stealable, bool *can_steal)
1857 {
1858 int i;
1859 int fallback_mt;
1860
1861 if (area->nr_free == 0)
1862 return -1;
1863
1864 *can_steal = false;
1865 for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) {
1866 fallback_mt = fallbacks[migratetype][i];
1867 if (free_area_empty(area, fallback_mt))
1868 continue;
1869
1870 if (can_steal_fallback(order, migratetype))
1871 *can_steal = true;
1872
1873 if (!only_stealable)
1874 return fallback_mt;
1875
1876 if (*can_steal)
1877 return fallback_mt;
1878 }
1879
1880 return -1;
1881 }
1882
1883 /*
1884 * Reserve a pageblock for exclusive use of high-order atomic allocations if
1885 * there are no empty page blocks that contain a page with a suitable order
1886 */
1887 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone)
1888 {
1889 int mt;
1890 unsigned long max_managed, flags;
1891
1892 /*
1893 * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
1894 * Check is race-prone but harmless.
1895 */
1896 max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages;
1897 if (zone->nr_reserved_highatomic >= max_managed)
1898 return;
1899
1900 spin_lock_irqsave(&zone->lock, flags);
1901
1902 /* Recheck the nr_reserved_highatomic limit under the lock */
1903 if (zone->nr_reserved_highatomic >= max_managed)
1904 goto out_unlock;
1905
1906 /* Yoink! */
1907 mt = get_pageblock_migratetype(page);
1908 /* Only reserve normal pageblocks (i.e., they can merge with others) */
1909 if (migratetype_is_mergeable(mt)) {
1910 zone->nr_reserved_highatomic += pageblock_nr_pages;
1911 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
1912 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
1913 }
1914
1915 out_unlock:
1916 spin_unlock_irqrestore(&zone->lock, flags);
1917 }
1918
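/*
 * Worked example (editor's note): for a zone managing 1048576 pages
 * (4GB with 4K pages) and pageblock_nr_pages = 512, max_managed =
 * 1048576 / 100 + 512 = 10997 pages. Reservations of one pageblock each
 * proceed while nr_reserved_highatomic < 10997, so up to 22 pageblocks
 * (11264 pages) can be set aside as MIGRATE_HIGHATOMIC.
 */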
1919 /*
1920 * Used when an allocation is about to fail under memory pressure. This
1921 * potentially hurts the reliability of high-order allocations when under
1922 * intense memory pressure but failed atomic allocations should be easier
1923 * to recover from than an OOM.
1924 *
1925 * If @force is true, try to unreserve a pageblock even though highatomic
1926 * pageblock is exhausted.
1927 */
1928 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
1929 bool force)
1930 {
1931 struct zonelist *zonelist = ac->zonelist;
1932 unsigned long flags;
1933 struct zoneref *z;
1934 struct zone *zone;
1935 struct page *page;
1936 int order;
1937 bool ret;
1938
1939 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
1940 ac->nodemask) {
1941 /*
1942 * Preserve at least one pageblock unless memory pressure
1943 * is really high.
1944 */
1945 if (!force && zone->nr_reserved_highatomic <=
1946 pageblock_nr_pages)
1947 continue;
1948
1949 spin_lock_irqsave(&zone->lock, flags);
1950 for (order = 0; order <= MAX_ORDER; order++) {
1951 struct free_area *area = &(zone->free_area[order]);
1952
1953 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
1954 if (!page)
1955 continue;
1956
1957 /*
1958 * In the page freeing path, the migratetype change is racy,
1959 * so we may encounter several free pages in a pageblock in
1960 * this loop even though we changed the pageblock type from
1961 * highatomic to ac->migratetype. So we should adjust the
1962 * count only once.
1963 */
1964 if (is_migrate_highatomic_page(page)) {
1965 /*
1966 * It should never happen but changes to
1967 * locking could inadvertently allow a per-cpu
1968 * drain to add pages to MIGRATE_HIGHATOMIC
1969 * while unreserving so be safe and watch for
1970 * underflows.
1971 */
1972 zone->nr_reserved_highatomic -= min(
1973 pageblock_nr_pages,
1974 zone->nr_reserved_highatomic);
1975 }
1976
1977 /*
1978 * Convert to ac->migratetype and avoid the normal
1979 * pageblock stealing heuristics. Minimally, the caller
1980 * is doing the work and needs the pages. More
1981 * importantly, if the block was always converted to
1982 * MIGRATE_UNMOVABLE or another type then the number
1983 * of pageblocks that cannot be completely freed
1984 * may increase.
1985 */
1986 set_pageblock_migratetype(page, ac->migratetype);
1987 ret = move_freepages_block(zone, page, ac->migratetype,
1988 NULL);
1989 if (ret) {
1990 spin_unlock_irqrestore(&zone->lock, flags);
1991 return ret;
1992 }
1993 }
1994 spin_unlock_irqrestore(&zone->lock, flags);
1995 }
1996
1997 return false;
1998 }
1999
2000 /*
2001 * Try finding a free buddy page on the fallback list and put it on the free
2002 * list of requested migratetype, possibly along with other pages from the same
2003 * block, depending on fragmentation avoidance heuristics. Returns true if
2004 * fallback was found so that __rmqueue_smallest() can grab it.
2005 *
2006 * The use of signed ints for order and current_order is a deliberate
2007 * deviation from the rest of this file, to make the for loop
2008 * condition simpler.
2009 */
2010 static __always_inline bool
2011 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
2012 unsigned int alloc_flags)
2013 {
2014 struct free_area *area;
2015 int current_order;
2016 int min_order = order;
2017 struct page *page;
2018 int fallback_mt;
2019 bool can_steal;
2020
2021 /*
2022 * Do not steal pages from freelists belonging to other pageblocks
2023 * i.e. orders < pageblock_order. If there are no local zones free,
2024 * the zonelists will be reiterated without ALLOC_NOFRAGMENT.
2025 */
2026 if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT)
2027 min_order = pageblock_order;
2028
2029 /*
2030 * Find the largest available free page in the other list. This roughly
2031 * approximates finding the pageblock with the most free pages, which
2032 * would be too costly to do exactly.
2033 */
2034 for (current_order = MAX_ORDER; current_order >= min_order;
2035 --current_order) {
2036 area = &(zone->free_area[current_order]);
2037 fallback_mt = find_suitable_fallback(area, current_order,
2038 start_migratetype, false, &can_steal);
2039 if (fallback_mt == -1)
2040 continue;
2041
2042 /*
2043 * If we cannot steal all free pages from the pageblock and the
2044 * requested migratetype is movable, it's better to steal and
2045 * split the smallest available page instead of the largest
2046 * available page, because even if the next movable allocation
2047 * falls back into a different pageblock than this one, it won't
2048 * cause permanent fragmentation.
2049 */
2050 if (!can_steal && start_migratetype == MIGRATE_MOVABLE
2051 && current_order > order)
2052 goto find_smallest;
2053
2054 goto do_steal;
2055 }
2056
2057 return false;
2058
2059 find_smallest:
2060 for (current_order = order; current_order <= MAX_ORDER;
2061 current_order++) {
2062 area = &(zone->free_area[current_order]);
2063 fallback_mt = find_suitable_fallback(area, current_order,
2064 start_migratetype, false, &can_steal);
2065 if (fallback_mt != -1)
2066 break;
2067 }
2068
2069 /*
2070 * This should not happen - we already found a suitable fallback
2071 * when looking for the largest page.
2072 */
2073 VM_BUG_ON(current_order > MAX_ORDER);
2074
2075 do_steal:
2076 page = get_page_from_free_area(area, fallback_mt);
2077
2078 steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
2079 can_steal);
2080
2081 trace_mm_page_alloc_extfrag(page, order, current_order,
2082 start_migratetype, fallback_mt);
2083
2084 return true;
2085
2086 }
2087
2088 static __always_inline struct page *
2089 __rmqueue_with_cma_reuse(struct zone *zone, unsigned int order,
2090 int migratetype, unsigned int alloc_flags)
2091 {
2092 struct page *page = NULL;
2093 retry:
2094 page = __rmqueue_smallest(zone, order, migratetype);
2095
2096 if (unlikely(!page) && is_migrate_cma(migratetype)) {
2097 migratetype = MIGRATE_MOVABLE;
2098 alloc_flags &= ~ALLOC_CMA;
2099 page = __rmqueue_smallest(zone, order, migratetype);
2100 }
2101
2102 if (unlikely(!page) &&
2103 __rmqueue_fallback(zone, order, migratetype, alloc_flags))
2104 goto retry;
2105
2106 return page;
2107 }
2108
2109 /*
2110 * Do the hard work of removing an element from the buddy allocator.
2111 * Call me with the zone->lock already held.
2112 */
2113 static __always_inline struct page *
2114 __rmqueue(struct zone *zone, unsigned int order, int migratetype,
2115 unsigned int alloc_flags)
2116 {
2117 struct page *page;
2118
2119 #ifdef CONFIG_CMA_REUSE
2120 page = __rmqueue_with_cma_reuse(zone, order, migratetype, alloc_flags);
2121 if (page)
2122 return page;
2123 #endif
2124
2125 if (IS_ENABLED(CONFIG_CMA)) {
2126 /*
2127 * Balance movable allocations between regular and CMA areas by
2128 * allocating from CMA when over half of the zone's free memory
2129 * is in the CMA area.
2130 */
2131 if (alloc_flags & ALLOC_CMA &&
2132 zone_page_state(zone, NR_FREE_CMA_PAGES) >
2133 zone_page_state(zone, NR_FREE_PAGES) / 2) {
2134 page = __rmqueue_cma_fallback(zone, order);
2135 if (page)
2136 return page;
2137 }
2138 }
2139 retry:
2140 page = __rmqueue_smallest(zone, order, migratetype);
2141 if (unlikely(!page)) {
2142 if (alloc_flags & ALLOC_CMA)
2143 page = __rmqueue_cma_fallback(zone, order);
2144
2145 if (!page && __rmqueue_fallback(zone, order, migratetype,
2146 alloc_flags))
2147 goto retry;
2148 }
2149 return page;
2150 }
2151
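/*
 * Worked example (editor's note) of the CMA balancing rule above: with
 * 1000 free pages in the zone, 600 of them CMA, an ALLOC_CMA movable
 * request tries __rmqueue_cma_fallback() first (600 > 1000 / 2); once
 * free CMA drops to 450 the regular free lists are preferred again and
 * CMA is used only after __rmqueue_smallest() fails.
 */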
2152 /*
2153 * Obtain a specified number of elements from the buddy allocator, all under
2154 * a single hold of the lock, for efficiency. Add them to the supplied list.
2155 * Returns the number of new pages which were placed at *list.
2156 */
2157 static int rmqueue_bulk(struct zone *zone, unsigned int order,
2158 unsigned long count, struct list_head *list,
2159 int migratetype, unsigned int alloc_flags)
2160 {
2161 unsigned long flags;
2162 int i;
2163
2164 spin_lock_irqsave(&zone->lock, flags);
2165 for (i = 0; i < count; ++i) {
2166 struct page *page = __rmqueue(zone, order, migratetype,
2167 alloc_flags);
2168 if (unlikely(page == NULL))
2169 break;
2170
2171 /*
2172 * Split buddy pages returned by expand() are received here in
2173 * physical page order. The page is added to the tail of the
2174 * caller's list, so from the caller's perspective the linked
2175 * list is ordered by page number under some conditions. This
2176 * is useful for IO devices that can only walk forward from
2177 * the head of the list, and thus see the pages in physical
2178 * order. It is also useful for IO devices that can merge IO
2179 * requests if the physical pages are ordered properly.
2180 */
2181 list_add_tail(&page->pcp_list, list);
2182 if (is_migrate_cma(get_pcppage_migratetype(page)))
2183 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
2184 -(1 << order));
2185 }
2186
2187 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
2188 spin_unlock_irqrestore(&zone->lock, flags);
2189
2190 return i;
2191 }
2192
2193 #ifdef CONFIG_NUMA
2194 /*
2195 * Called from the vmstat counter updater to drain the pagesets on the
2196 * currently executing processor that belong to zones on remote nodes,
2197 * after they have expired.
2198 */
2199 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
2200 {
2201 int to_drain, batch;
2202
2203 batch = READ_ONCE(pcp->batch);
2204 to_drain = min(pcp->count, batch);
2205 if (to_drain > 0) {
2206 spin_lock(&pcp->lock);
2207 free_pcppages_bulk(zone, to_drain, pcp, 0);
2208 spin_unlock(&pcp->lock);
2209 }
2210 }
2211 #endif
2212
2213 /*
2214 * Drain pcplists of the indicated processor and zone.
2215 */
2216 static void drain_pages_zone(unsigned int cpu, struct zone *zone)
2217 {
2218 struct per_cpu_pages *pcp;
2219
2220 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
2221 if (pcp->count) {
2222 spin_lock(&pcp->lock);
2223 free_pcppages_bulk(zone, pcp->count, pcp, 0);
2224 spin_unlock(&pcp->lock);
2225 }
2226 }
2227
2228 /*
2229 * Drain pcplists of all zones on the indicated processor.
2230 */
2231 static void drain_pages(unsigned int cpu)
2232 {
2233 struct zone *zone;
2234
2235 for_each_populated_zone(zone) {
2236 drain_pages_zone(cpu, zone);
2237 }
2238 }
2239
2240 /*
2241 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
2242 */
2243 void drain_local_pages(struct zone *zone)
2244 {
2245 int cpu = smp_processor_id();
2246
2247 if (zone)
2248 drain_pages_zone(cpu, zone);
2249 else
2250 drain_pages(cpu);
2251 }
2252
2253 /*
2254 * The implementation of drain_all_pages(), exposing an extra parameter to
2255 * drain on all cpus.
2256 *
2257 * drain_all_pages() is optimized to only execute on cpus where pcplists are
2258 * not empty. The check for non-emptiness can however race with a free to
2259 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers
2260 * that need the guarantee that every CPU has drained can disable the
2261 * optimizing racy check.
2262 */
2263 static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
2264 {
2265 int cpu;
2266
2267 /*
2268 * Allocate in the BSS so we won't require allocation in
2269 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
2270 */
2271 static cpumask_t cpus_with_pcps;
2272
2273 /*
2274 * Do not drain if one is already in progress unless it's specific to
2275 * a zone. Such callers are primarily CMA and memory hotplug and need
2276 * the drain to be complete when the call returns.
2277 */
2278 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
2279 if (!zone)
2280 return;
2281 mutex_lock(&pcpu_drain_mutex);
2282 }
2283
2284 /*
2285 * We don't care about racing with a CPU hotplug event
2286 * as the offline notification will cause the notified
2287 * cpu to drain that CPU's pcps, and on_each_cpu_mask
2288 * disables preemption as part of its processing.
2289 */
2290 for_each_online_cpu(cpu) {
2291 struct per_cpu_pages *pcp;
2292 struct zone *z;
2293 bool has_pcps = false;
2294
2295 if (force_all_cpus) {
2296 /*
2297 * The pcp.count check is racy, some callers need a
2298 * guarantee that no cpu is missed.
2299 */
2300 has_pcps = true;
2301 } else if (zone) {
2302 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
2303 if (pcp->count)
2304 has_pcps = true;
2305 } else {
2306 for_each_populated_zone(z) {
2307 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu);
2308 if (pcp->count) {
2309 has_pcps = true;
2310 break;
2311 }
2312 }
2313 }
2314
2315 if (has_pcps)
2316 cpumask_set_cpu(cpu, &cpus_with_pcps);
2317 else
2318 cpumask_clear_cpu(cpu, &cpus_with_pcps);
2319 }
2320
2321 for_each_cpu(cpu, &cpus_with_pcps) {
2322 if (zone)
2323 drain_pages_zone(cpu, zone);
2324 else
2325 drain_pages(cpu);
2326 }
2327
2328 mutex_unlock(&pcpu_drain_mutex);
2329 }
2330
2331 /*
2332 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
2333 *
2334 * When zone parameter is non-NULL, spill just the single zone's pages.
2335 */
2336 void drain_all_pages(struct zone *zone)
2337 {
2338 __drain_all_pages(zone, false);
2339 }
2340
2341 static bool free_unref_page_prepare(struct page *page, unsigned long pfn,
2342 unsigned int order)
2343 {
2344 int migratetype;
2345
2346 if (!free_pages_prepare(page, order, FPI_NONE))
2347 return false;
2348
2349 migratetype = get_pfnblock_migratetype(page, pfn);
2350 set_pcppage_migratetype(page, migratetype);
2351 return true;
2352 }
2353
2354 static int nr_pcp_free(struct per_cpu_pages *pcp, int high, bool free_high)
2355 {
2356 int min_nr_free, max_nr_free;
2357 int batch = READ_ONCE(pcp->batch);
2358
2359 /* Free everything if batch freeing high-order pages. */
2360 if (unlikely(free_high))
2361 return pcp->count;
2362
2363 /* Check for PCP disabled or boot pageset */
2364 if (unlikely(high < batch))
2365 return 1;
2366
2367 /* Leave at least pcp->batch pages on the list */
2368 min_nr_free = batch;
2369 max_nr_free = high - batch;
2370
2371 /*
2372 * Double the number of pages freed each time there is subsequent
2373 * freeing of pages without any allocation.
2374 */
2375 batch <<= pcp->free_factor;
2376 if (batch < max_nr_free)
2377 pcp->free_factor++;
2378 batch = clamp(batch, min_nr_free, max_nr_free);
2379
2380 return batch;
2381 }
2382
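/*
 * Worked example (editor's note): with batch = 32, high = 512 and
 * free_factor starting at 0, a sustained freeing streak frees 32, 64,
 * 128 and 256 pages on successive calls, then clamps at max_nr_free =
 * 480, which still leaves at least batch pages on the pcplist once
 * pcp->count has reached high.
 */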
2383 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone,
2384 bool free_high)
2385 {
2386 int high = READ_ONCE(pcp->high);
2387
2388 if (unlikely(!high || free_high))
2389 return 0;
2390
2391 if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags))
2392 return high;
2393
2394 /*
2395 * If reclaim is active, limit the number of pages that can be
2396 * stored on pcp lists
2397 */
2398 return min(READ_ONCE(pcp->batch) << 2, high);
2399 }
2400
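/*
 * Example (editor's note): with pcp->high = 512 and pcp->batch = 63,
 * active reclaim on the zone caps the pcplist at min(63 << 2, 512) =
 * 252 pages, returning memory to the buddy allocator sooner than the
 * configured high mark would.
 */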
2401 static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp,
2402 struct page *page, int migratetype,
2403 unsigned int order)
2404 {
2405 int high;
2406 int pindex;
2407 bool free_high;
2408
2409 __count_vm_events(PGFREE, 1 << order);
2410 pindex = order_to_pindex(migratetype, order);
2411 list_add(&page->pcp_list, &pcp->lists[pindex]);
2412 pcp->count += 1 << order;
2413
2414 /*
2415 * As high-order pages other than THPs stored on PCP can contribute
2416 * to fragmentation, limit the number stored when PCP is heavily
2417 * freeing without allocation. The remainder after bulk freeing
2418 * stops will be drained from vmstat refresh context.
2419 */
2420 free_high = (pcp->free_factor && order && order <= PAGE_ALLOC_COSTLY_ORDER);
2421
2422 high = nr_pcp_high(pcp, zone, free_high);
2423 if (pcp->count >= high) {
2424 free_pcppages_bulk(zone, nr_pcp_free(pcp, high, free_high), pcp, pindex);
2425 }
2426 }
2427
2428 /*
2429 * Free a pcp page
2430 */
2431 void free_unref_page(struct page *page, unsigned int order)
2432 {
2433 unsigned long __maybe_unused UP_flags;
2434 struct per_cpu_pages *pcp;
2435 struct zone *zone;
2436 unsigned long pfn = page_to_pfn(page);
2437 int migratetype, pcpmigratetype;
2438
2439 if (!free_unref_page_prepare(page, pfn, order))
2440 return;
2441
2442 /*
2443 * We only track unmovable, reclaimable and movable on pcp lists.
2444 * Place ISOLATE pages on the isolated list because they are being
2445 * offlined but treat HIGHATOMIC and CMA as movable pages so we can
2446 * get those areas back if necessary. Otherwise, we may have to free
2447 * excessively into the page allocator
2448 */
2449 migratetype = pcpmigratetype = get_pcppage_migratetype(page);
2450 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
2451 if (unlikely(is_migrate_isolate(migratetype))) {
2452 free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE);
2453 return;
2454 }
2455 pcpmigratetype = MIGRATE_MOVABLE;
2456 }
2457
2458 zone = page_zone(page);
2459 pcp_trylock_prepare(UP_flags);
2460 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
2461 if (pcp) {
2462 free_unref_page_commit(zone, pcp, page, pcpmigratetype, order);
2463 pcp_spin_unlock(pcp);
2464 } else {
2465 free_one_page(zone, page, pfn, order, migratetype, FPI_NONE);
2466 }
2467 pcp_trylock_finish(UP_flags);
2468 }
2469
2470 /*
2471 * Free a list of 0-order pages
2472 */
2473 void free_unref_page_list(struct list_head *list)
2474 {
2475 unsigned long __maybe_unused UP_flags;
2476 struct page *page, *next;
2477 struct per_cpu_pages *pcp = NULL;
2478 struct zone *locked_zone = NULL;
2479 int batch_count = 0;
2480 int migratetype;
2481
2482 /* Prepare pages for freeing */
2483 list_for_each_entry_safe(page, next, list, lru) {
2484 unsigned long pfn = page_to_pfn(page);
2485 if (!free_unref_page_prepare(page, pfn, 0)) {
2486 list_del(&page->lru);
2487 continue;
2488 }
2489
2490 /*
2491 * Free isolated pages directly to the allocator, see
2492 * comment in free_unref_page.
2493 */
2494 migratetype = get_pcppage_migratetype(page);
2495 if (unlikely(is_migrate_isolate(migratetype))) {
2496 list_del(&page->lru);
2497 free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE);
2498 continue;
2499 }
2500 }
2501
2502 list_for_each_entry_safe(page, next, list, lru) {
2503 struct zone *zone = page_zone(page);
2504
2505 list_del(&page->lru);
2506 migratetype = get_pcppage_migratetype(page);
2507
2508 /*
2509 * Either different zone requiring a different pcp lock or
2510 * excessive lock hold times when freeing a large list of
2511 * pages.
2512 */
2513 if (zone != locked_zone || batch_count == SWAP_CLUSTER_MAX) {
2514 if (pcp) {
2515 pcp_spin_unlock(pcp);
2516 pcp_trylock_finish(UP_flags);
2517 }
2518
2519 batch_count = 0;
2520
2521 /*
2522 * trylock is necessary as pages may be getting freed
2523 * from IRQ or SoftIRQ context after an IO completion.
2524 */
2525 pcp_trylock_prepare(UP_flags);
2526 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
2527 if (unlikely(!pcp)) {
2528 pcp_trylock_finish(UP_flags);
2529 free_one_page(zone, page, page_to_pfn(page),
2530 0, migratetype, FPI_NONE);
2531 locked_zone = NULL;
2532 continue;
2533 }
2534 locked_zone = zone;
2535 }
2536
2537 /*
2538 * Non-isolated types over MIGRATE_PCPTYPES get added
2539 * to the MIGRATE_MOVABLE pcp list.
2540 */
2541 if (unlikely(migratetype >= MIGRATE_PCPTYPES))
2542 migratetype = MIGRATE_MOVABLE;
2543
2544 trace_mm_page_free_batched(page);
2545 free_unref_page_commit(zone, pcp, page, migratetype, 0);
2546 batch_count++;
2547 }
2548
2549 if (pcp) {
2550 pcp_spin_unlock(pcp);
2551 pcp_trylock_finish(UP_flags);
2552 }
2553 }
2554
2555 /*
2556 * split_page takes a non-compound higher-order page, and splits it into
2557 * n (1<<order) sub-pages: page[0..n-1]
2558 * Each sub-page must be freed individually.
2559 *
2560 * Note: this is probably too low level an operation for use in drivers.
2561 * Please consult with lkml before using this in your driver.
2562 */
2563 void split_page(struct page *page, unsigned int order)
2564 {
2565 int i;
2566
2567 VM_BUG_ON_PAGE(PageCompound(page), page);
2568 VM_BUG_ON_PAGE(!page_count(page), page);
2569
2570 for (i = 1; i < (1 << order); i++)
2571 set_page_refcounted(page + i);
2572 split_page_owner(page, 1 << order);
2573 split_page_memcg(page, 1 << order);
2574 }
2575 EXPORT_SYMBOL_GPL(split_page);
2576
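/*
 * Usage sketch (editor's note, hypothetical caller): turn one order-2
 * allocation into four independently freeable order-0 pages.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (page) {
 *		split_page(page, 2);
 *		__free_page(page);
 *		__free_page(page + 1);
 *		__free_page(page + 2);
 *		__free_page(page + 3);
 *	}
 *
 * After split_page() each sub-page carries its own reference count and
 * must be freed on its own, as above.
 */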
2577 int __isolate_free_page(struct page *page, unsigned int order)
2578 {
2579 struct zone *zone = page_zone(page);
2580 int mt = get_pageblock_migratetype(page);
2581
2582 if (!is_migrate_isolate(mt)) {
2583 unsigned long watermark;
2584 /*
2585 * Obey watermarks as if the page was being allocated. We can
2586 * emulate a high-order watermark check with a raised order-0
2587 * watermark, because we already know our high-order page
2588 * exists.
2589 */
2590 watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
2591 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
2592 return 0;
2593
2594 __mod_zone_freepage_state(zone, -(1UL << order), mt);
2595 }
2596
2597 del_page_from_free_list(page, zone, order);
2598
2599 /*
2600 * Set the pageblock's migratetype if the isolated page covers at least
2601 * half of a pageblock.
2602 */
2603 if (order >= pageblock_order - 1) {
2604 struct page *endpage = page + (1 << order) - 1;
2605 for (; page < endpage; page += pageblock_nr_pages) {
2606 int mt = get_pageblock_migratetype(page);
2607 /*
2608 * Only change normal pageblocks (i.e., they can merge
2609 * with others)
2610 */
2611 if (migratetype_is_mergeable(mt))
2612 set_pageblock_migratetype(page,
2613 MIGRATE_MOVABLE);
2614 }
2615 }
2616
2617 return 1UL << order;
2618 }
2619
2620 /**
2621 * __putback_isolated_page - Return a now-isolated page back where we got it
2622 * @page: Page that was isolated
2623 * @order: Order of the isolated page
2624 * @mt: The page's pageblock's migratetype
2625 *
2626 * This function is meant to return a page pulled from the free lists via
2627 * __isolate_free_page back to the free list it was pulled from.
2628 */
2629 void __putback_isolated_page(struct page *page, unsigned int order, int mt)
2630 {
2631 struct zone *zone = page_zone(page);
2632
2633 /* zone lock should be held when this function is called */
2634 lockdep_assert_held(&zone->lock);
2635
2636 /* Return isolated page to tail of freelist. */
2637 __free_one_page(page, page_to_pfn(page), zone, order, mt,
2638 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL);
2639 }
2640
2641 /*
2642 * Update NUMA hit/miss statistics
2643 */
2644 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
2645 long nr_account)
2646 {
2647 #ifdef CONFIG_NUMA
2648 enum numa_stat_item local_stat = NUMA_LOCAL;
2649
2650 /* skip numa counters update if numa stats is disabled */
2651 if (!static_branch_likely(&vm_numa_stat_key))
2652 return;
2653
2654 if (zone_to_nid(z) != numa_node_id())
2655 local_stat = NUMA_OTHER;
2656
2657 if (zone_to_nid(z) == zone_to_nid(preferred_zone))
2658 __count_numa_events(z, NUMA_HIT, nr_account);
2659 else {
2660 __count_numa_events(z, NUMA_MISS, nr_account);
2661 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account);
2662 }
2663 __count_numa_events(z, local_stat, nr_account);
2664 #endif
2665 }
2666
2667 static __always_inline
2668 struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
2669 unsigned int order, unsigned int alloc_flags,
2670 int migratetype)
2671 {
2672 struct page *page;
2673 unsigned long flags;
2674
2675 do {
2676 page = NULL;
2677 spin_lock_irqsave(&zone->lock, flags);
2678 if (alloc_flags & ALLOC_HIGHATOMIC)
2679 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
2680 if (!page) {
2681 page = __rmqueue(zone, order, migratetype, alloc_flags);
2682
2683 /*
2684 * If the allocation fails, allow OOM handling access
2685 * to HIGHATOMIC reserves as failing now is worse than
2686 * failing a high-order atomic allocation in the
2687 * future.
2688 */
2689 if (!page && (alloc_flags & ALLOC_OOM))
2690 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
2691
2692 if (!page) {
2693 spin_unlock_irqrestore(&zone->lock, flags);
2694 return NULL;
2695 }
2696 }
2697 __mod_zone_freepage_state(zone, -(1 << order),
2698 get_pcppage_migratetype(page));
2699 spin_unlock_irqrestore(&zone->lock, flags);
2700 } while (check_new_pages(page, order));
2701
2702 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
2703 zone_statistics(preferred_zone, zone, 1);
2704
2705 return page;
2706 }
2707
2708 /* Remove page from the per-cpu list, caller must protect the list */
2709 static inline
2710 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
2711 int migratetype,
2712 unsigned int alloc_flags,
2713 struct per_cpu_pages *pcp,
2714 struct list_head *list)
2715 {
2716 struct page *page;
2717
2718 do {
2719 if (list_empty(list)) {
2720 int batch = READ_ONCE(pcp->batch);
2721 int alloced;
2722
2723 /*
2724 * Scale batch relative to order if batch implies
2725 * free pages can be stored on the PCP. Batch can
2726 * be 1 for small zones or for boot pagesets which
2727 * should never store free pages as the pages may
2728 * belong to arbitrary zones.
2729 */
2730 if (batch > 1)
2731 batch = max(batch >> order, 2);
2732 alloced = rmqueue_bulk(zone, order,
2733 batch, list,
2734 migratetype, alloc_flags);
2735
2736 pcp->count += alloced << order;
2737 if (unlikely(list_empty(list)))
2738 return NULL;
2739 }
2740
2741 page = list_first_entry(list, struct page, pcp_list);
2742 list_del(&page->pcp_list);
2743 pcp->count -= 1 << order;
2744 } while (check_new_pages(page, order));
2745
2746 return page;
2747 }
2748
2749 /* Lock and remove page from the per-cpu list */
2750 static struct page *rmqueue_pcplist(struct zone *preferred_zone,
2751 struct zone *zone, unsigned int order,
2752 int migratetype, unsigned int alloc_flags)
2753 {
2754 struct per_cpu_pages *pcp;
2755 struct list_head *list;
2756 struct page *page;
2757 unsigned long __maybe_unused UP_flags;
2758
2759 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */
2760 pcp_trylock_prepare(UP_flags);
2761 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
2762 if (!pcp) {
2763 pcp_trylock_finish(UP_flags);
2764 return NULL;
2765 }
2766
2767 /*
2768 * On allocation, reduce the number of pages that are batch freed.
2769 * See nr_pcp_free() where free_factor is increased for subsequent
2770 * frees.
2771 */
2772 pcp->free_factor >>= 1;
2773 list = &pcp->lists[order_to_pindex(migratetype, order)];
2774 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
2775 pcp_spin_unlock(pcp);
2776 pcp_trylock_finish(UP_flags);
2777 if (page) {
2778 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
2779 zone_statistics(preferred_zone, zone, 1);
2780 }
2781 return page;
2782 }
2783
2784 /*
2785 * Allocate a page from the given zone.
2786 * Use pcplists for THP or "cheap" high-order allocations.
2787 */
2788
2789 /*
2790 * Do not instrument rmqueue() with KMSAN. This function may call
2791 * __msan_poison_alloca() through a call to set_pfnblock_flags_mask().
2792 * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it
2793 * may call rmqueue() again, which will result in a deadlock.
2794 */
2795 __no_sanitize_memory
2796 static inline
2797 struct page *rmqueue(struct zone *preferred_zone,
2798 struct zone *zone, unsigned int order,
2799 gfp_t gfp_flags, unsigned int alloc_flags,
2800 int migratetype)
2801 {
2802 struct page *page;
2803
2804 /*
2805 * We most definitely don't want callers attempting to
2806 * allocate greater than order-1 page units with __GFP_NOFAIL.
2807 */
2808 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
2809
2810 if (likely(pcp_allowed_order(order))) {
2811 page = rmqueue_pcplist(preferred_zone, zone, order,
2812 migratetype, alloc_flags);
2813 if (likely(page))
2814 goto out;
2815 }
2816
2817 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
2818 migratetype);
2819
2820 out:
2821 /* Separate test+clear to avoid unnecessary atomics */
2822 if ((alloc_flags & ALLOC_KSWAPD) &&
2823 unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) {
2824 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
2825 wakeup_kswapd(zone, 0, 0, zone_idx(zone));
2826 }
2827
2828 VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
2829 return page;
2830 }
2831
2832 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
2833 {
2834 return __should_fail_alloc_page(gfp_mask, order);
2835 }
2836 ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
2837
2838 static inline long __zone_watermark_unusable_free(struct zone *z,
2839 unsigned int order, unsigned int alloc_flags)
2840 {
2841 long unusable_free = (1 << order) - 1;
2842
2843 /*
2844 * If the caller does not have rights to reserves below the min
2845 * watermark then subtract the high-atomic reserves. This will
2846 * over-estimate the size of the atomic reserve but it avoids a search.
2847 */
2848 if (likely(!(alloc_flags & ALLOC_RESERVES)))
2849 unusable_free += z->nr_reserved_highatomic;
2850
2851 #ifdef CONFIG_CMA
2852 /* If allocation can't use CMA areas don't use free CMA pages */
2853 if (!(alloc_flags & ALLOC_CMA))
2854 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
2855 #endif
2856 #ifdef CONFIG_UNACCEPTED_MEMORY
2857 unusable_free += zone_page_state(z, NR_UNACCEPTED);
2858 #endif
2859
2860 return unusable_free;
2861 }
2862
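/*
 * Example (editor's note): an order-3 request without ALLOC_RESERVES or
 * ALLOC_CMA counts (1 << 3) - 1 = 7 pages as unusable, because the
 * allocation needs one contiguous order-3 block rather than 8 loose
 * pages, plus the high-atomic reserve and any free CMA pages, none of
 * which may back this allocation.
 */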
2863 /*
2864 * Return true if free base pages are above 'mark'. For high-order checks it
2865 * will return true if the order-0 watermark is reached and there is at least
2866 * one free page of a suitable size. Checking now avoids taking the zone lock
2867 * to check in the allocation paths if no pages are free.
2868 */
2869 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
2870 int highest_zoneidx, unsigned int alloc_flags,
2871 long free_pages)
2872 {
2873 long min = mark;
2874 int o;
2875
2876 /* free_pages may go negative - that's OK */
2877 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);
2878
2879 if (unlikely(alloc_flags & ALLOC_RESERVES)) {
2880 /*
2881 * __GFP_HIGH allows access to 50% of the min reserve as well
2882 * as OOM.
2883 */
2884 if (alloc_flags & ALLOC_MIN_RESERVE) {
2885 min -= min / 2;
2886
2887 /*
2888 * Non-blocking allocations (e.g. GFP_ATOMIC) can
2889 * access more reserves than just __GFP_HIGH. Other
2890 * non-blocking allocations requests such as GFP_NOWAIT
2891 * or (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) do not get
2892 * access to the min reserve.
2893 */
2894 if (alloc_flags & ALLOC_NON_BLOCK)
2895 min -= min / 4;
2896 }
2897
2898 /*
2899 * OOM victims can try even harder than the normal reserve
2900 * users on the grounds that it's definitely going to be in
2901 * the exit path shortly and free memory. Any allocation it
2902 * makes during the free path will be small and short-lived.
2903 */
2904 if (alloc_flags & ALLOC_OOM)
2905 min -= min / 2;
2906 }
2907
2908 /*
2909 * Check watermarks for an order-0 allocation request. If these
2910 * are not met, then a high-order request also cannot go ahead
2911 * even if a suitable page happened to be free.
2912 */
2913 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx])
2914 return false;
2915
2916 /* If this is an order-0 request then the watermark is fine */
2917 if (!order)
2918 return true;
2919
2920 /* For a high-order request, check at least one suitable page is free */
2921 for (o = order; o <= MAX_ORDER; o++) {
2922 struct free_area *area = &z->free_area[o];
2923 int mt;
2924
2925 if (!area->nr_free)
2926 continue;
2927
2928 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
2929 if (!free_area_empty(area, mt))
2930 return true;
2931 }
2932
2933 #ifdef CONFIG_CMA
2934 if ((alloc_flags & ALLOC_CMA) &&
2935 !free_area_empty(area, MIGRATE_CMA)) {
2936 return true;
2937 }
2938 #endif
2939 if ((alloc_flags & (ALLOC_HIGHATOMIC|ALLOC_OOM)) &&
2940 !free_area_empty(area, MIGRATE_HIGHATOMIC)) {
2941 return true;
2942 }
2943 }
2944 return false;
2945 }
2946
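/*
 * Worked example (editor's note) of the reserve scaling above: for
 * min = 1024 pages, ALLOC_MIN_RESERVE lowers it to 512 and
 * ALLOC_NON_BLOCK lowers it further by 512 / 4 to 384, so a GFP_ATOMIC
 * allocation may deplete free pages down to 384 plus the zone's
 * lowmem_reserve before the order-0 check fails.
 */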
2947 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
2948 int highest_zoneidx, unsigned int alloc_flags)
2949 {
2950 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
2951 zone_page_state(z, NR_FREE_PAGES));
2952 }
2953
2954 static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
2955 unsigned long mark, int highest_zoneidx,
2956 unsigned int alloc_flags, gfp_t gfp_mask)
2957 {
2958 long free_pages;
2959
2960 free_pages = zone_page_state(z, NR_FREE_PAGES);
2961
2962 /*
2963 * Fast check for order-0 only. If this fails then the reserves
2964 * need to be calculated.
2965 */
2966 if (!order) {
2967 long usable_free;
2968 long reserved;
2969
2970 usable_free = free_pages;
2971 reserved = __zone_watermark_unusable_free(z, 0, alloc_flags);
2972
2973 /* reserved may over-estimate the high-atomic reserves. */
2974 usable_free -= min(usable_free, reserved);
2975 if (usable_free > mark + z->lowmem_reserve[highest_zoneidx])
2976 return true;
2977 }
2978
2979 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
2980 free_pages))
2981 return true;
2982
2983 /*
2984 * Ignore watermark boosting for __GFP_HIGH order-0 allocations
2985 * when checking the min watermark. The min watermark is the
2986 * point where boosting is ignored so that kswapd is woken up
2987 * when below the low watermark.
2988 */
2989 if (unlikely(!order && (alloc_flags & ALLOC_MIN_RESERVE) && z->watermark_boost
2990 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) {
2991 mark = z->_watermark[WMARK_MIN];
2992 return __zone_watermark_ok(z, order, mark, highest_zoneidx,
2993 alloc_flags, free_pages);
2994 }
2995
2996 return false;
2997 }
2998
2999 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
3000 unsigned long mark, int highest_zoneidx)
3001 {
3002 long free_pages = zone_page_state(z, NR_FREE_PAGES);
3003
3004 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
3005 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
3006
3007 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0,
3008 free_pages);
3009 }
3010
3011 #ifdef CONFIG_NUMA
3012 int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE;
3013
3014 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3015 {
3016 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
3017 node_reclaim_distance;
3018 }
3019 #else /* CONFIG_NUMA */
3020 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3021 {
3022 return true;
3023 }
3024 #endif /* CONFIG_NUMA */
3025
3026 /*
3027 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
3028 * fragmentation is subtle. If the preferred zone was HIGHMEM then
3029 * premature use of a lower zone may cause lowmem pressure problems that
3030 * are worse than fragmentation. If the next zone is ZONE_DMA then it is
3031 * probably too small. It only makes sense to spread allocations to avoid
3032 * fragmentation between the Normal and DMA32 zones.
3033 */
3034 static inline unsigned int
3035 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
3036 {
3037 unsigned int alloc_flags;
3038
3039 /*
3040 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
3041 * to save a branch.
3042 */
3043 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
3044
3045 #ifdef CONFIG_ZONE_DMA32
3046 if (!zone)
3047 return alloc_flags;
3048
3049 if (zone_idx(zone) != ZONE_NORMAL)
3050 return alloc_flags;
3051
3052 /*
3053 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
3054 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume
3055 * on UMA that if Normal is populated then so is DMA32.
3056 */
3057 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
3058 if (nr_online_nodes > 1 && !populated_zone(--zone))
3059 return alloc_flags;
3060
3061 alloc_flags |= ALLOC_NOFRAGMENT;
3062 #endif /* CONFIG_ZONE_DMA32 */
3063 return alloc_flags;
3064 }
3065
3066 /* Must be called after current_gfp_context() which can change gfp_mask */
3067 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
3068 unsigned int alloc_flags)
3069 {
3070 #ifdef CONFIG_CMA
3071 if (gfp_migratetype(gfp_mask) == get_cma_migratetype())
3072 alloc_flags |= ALLOC_CMA;
3073 #endif
3074 return alloc_flags;
3075 }
3076
3077 /*
3078 * get_page_from_freelist goes through the zonelist trying to allocate
3079 * a page.
3080 */
3081 static struct page *
3082 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
3083 const struct alloc_context *ac)
3084 {
3085 struct zoneref *z;
3086 struct zone *zone;
3087 struct pglist_data *last_pgdat = NULL;
3088 bool last_pgdat_dirty_ok = false;
3089 bool no_fallback;
3090
3091 retry:
3092 /*
3093 * Scan zonelist, looking for a zone with enough free.
3094 * See also cpuset_node_allowed() comment in kernel/cgroup/cpuset.c.
3095 */
3096 no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
3097 z = ac->preferred_zoneref;
3098 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
3099 ac->nodemask) {
3100 struct page *page;
3101 unsigned long mark;
3102
3103 if (cpusets_enabled() &&
3104 (alloc_flags & ALLOC_CPUSET) &&
3105 !__cpuset_zone_allowed(zone, gfp_mask))
3106 continue;
3107 /*
3108 * When allocating a page cache page for writing, we
3109 * want to get it from a node that is within its dirty
3110 * limit, such that no single node holds more than its
3111 * proportional share of globally allowed dirty pages.
3112 * The dirty limits take into account the node's
3113 * lowmem reserves and high watermark so that kswapd
3114 * should be able to balance it without having to
3115 * write pages from its LRU list.
3116 *
3117 * XXX: For now, allow allocations to potentially
3118 * exceed the per-node dirty limit in the slowpath
3119 * (spread_dirty_pages unset) before going into reclaim,
3120 * which is important when on a NUMA setup the allowed
3121 * nodes are together not big enough to reach the
3122 * global limit. The proper fix for these situations
3123 * will require awareness of nodes in the
3124 * dirty-throttling and the flusher threads.
3125 */
3126 if (ac->spread_dirty_pages) {
3127 if (last_pgdat != zone->zone_pgdat) {
3128 last_pgdat = zone->zone_pgdat;
3129 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat);
3130 }
3131
3132 if (!last_pgdat_dirty_ok)
3133 continue;
3134 }
3135
3136 if (no_fallback && nr_online_nodes > 1 &&
3137 zone != ac->preferred_zoneref->zone) {
3138 int local_nid;
3139
3140 /*
3141 * If moving to a remote node, retry but allow
3142 * fragmenting fallbacks. Locality is more important
3143 * than fragmentation avoidance.
3144 */
3145 local_nid = zone_to_nid(ac->preferred_zoneref->zone);
3146 if (zone_to_nid(zone) != local_nid) {
3147 alloc_flags &= ~ALLOC_NOFRAGMENT;
3148 goto retry;
3149 }
3150 }
3151
3152 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
3153 if (!zone_watermark_fast(zone, order, mark,
3154 ac->highest_zoneidx, alloc_flags,
3155 gfp_mask)) {
3156 int ret;
3157
3158 if (has_unaccepted_memory()) {
3159 if (try_to_accept_memory(zone, order))
3160 goto try_this_zone;
3161 }
3162
3163 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
3164 /*
3165 * Watermark failed for this zone, but see if we can
3166 * grow this zone if it contains deferred pages.
3167 */
3168 if (deferred_pages_enabled()) {
3169 if (_deferred_grow_zone(zone, order))
3170 goto try_this_zone;
3171 }
3172 #endif
3173 /* Checked here to keep the fast path fast */
3174 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
3175 if (alloc_flags & ALLOC_NO_WATERMARKS)
3176 goto try_this_zone;
3177
3178 if (!node_reclaim_enabled() ||
3179 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
3180 continue;
3181
3182 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
3183 switch (ret) {
3184 case NODE_RECLAIM_NOSCAN:
3185 /* did not scan */
3186 continue;
3187 case NODE_RECLAIM_FULL:
3188 /* scanned but unreclaimable */
3189 continue;
3190 default:
3191 /* did we reclaim enough */
3192 if (zone_watermark_ok(zone, order, mark,
3193 ac->highest_zoneidx, alloc_flags))
3194 goto try_this_zone;
3195
3196 continue;
3197 }
3198 }
3199
3200 try_this_zone:
3201 page = rmqueue(ac->preferred_zoneref->zone, zone, order,
3202 gfp_mask, alloc_flags, ac->migratetype);
3203 if (page) {
3204 prep_new_page(page, order, gfp_mask, alloc_flags);
3205
3206 /*
3207 * If this is a high-order atomic allocation then check
3208 * if the pageblock should be reserved for the future
3209 */
3210 if (unlikely(alloc_flags & ALLOC_HIGHATOMIC))
3211 reserve_highatomic_pageblock(page, zone);
3212
3213 return page;
3214 } else {
3215 if (has_unaccepted_memory()) {
3216 if (try_to_accept_memory(zone, order))
3217 goto try_this_zone;
3218 }
3219
3220 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
3221 /* Try again if zone has deferred pages */
3222 if (deferred_pages_enabled()) {
3223 if (_deferred_grow_zone(zone, order))
3224 goto try_this_zone;
3225 }
3226 #endif
3227 }
3228 }
3229
3230 /*
3231 * It's possible on a UMA machine to get through all zones that are
3232 * fragmented. If avoiding fragmentation, reset and try again.
3233 */
3234 if (no_fallback) {
3235 alloc_flags &= ~ALLOC_NOFRAGMENT;
3236 goto retry;
3237 }
3238
3239 return NULL;
3240 }
3241
3242 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
3243 {
3244 unsigned int filter = SHOW_MEM_FILTER_NODES;
3245
3246 /*
3247 * This documents exceptions given to allocations in certain
3248 * contexts that are allowed to allocate outside current's set
3249 * of allowed nodes.
3250 */
3251 if (!(gfp_mask & __GFP_NOMEMALLOC))
3252 if (tsk_is_oom_victim(current) ||
3253 (current->flags & (PF_MEMALLOC | PF_EXITING)))
3254 filter &= ~SHOW_MEM_FILTER_NODES;
3255 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
3256 filter &= ~SHOW_MEM_FILTER_NODES;
3257
3258 __show_mem(filter, nodemask, gfp_zone(gfp_mask));
3259 }
3260
3261 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
3262 {
3263 struct va_format vaf;
3264 va_list args;
3265 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);
3266
3267 if ((gfp_mask & __GFP_NOWARN) ||
3268 !__ratelimit(&nopage_rs) ||
3269 ((gfp_mask & __GFP_DMA) && !has_managed_dma()))
3270 return;
3271
3272 va_start(args, fmt);
3273 vaf.fmt = fmt;
3274 vaf.va = &args;
3275 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
3276 current->comm, &vaf, gfp_mask, &gfp_mask,
3277 nodemask_pr_args(nodemask));
3278 va_end(args);
3279
3280 cpuset_print_current_mems_allowed();
3281 pr_cont("\n");
3282 dump_stack();
3283 warn_alloc_show_mem(gfp_mask, nodemask);
3284 }
3285
3286 static inline struct page *
3287 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
3288 unsigned int alloc_flags,
3289 const struct alloc_context *ac)
3290 {
3291 struct page *page;
3292
3293 page = get_page_from_freelist(gfp_mask, order,
3294 alloc_flags|ALLOC_CPUSET, ac);
3295 /*
3296 * fallback to ignore cpuset restriction if our nodes
3297 * are depleted
3298 */
3299 if (!page)
3300 page = get_page_from_freelist(gfp_mask, order,
3301 alloc_flags, ac);
3302
3303 return page;
3304 }
3305
3306 static inline struct page *
3307 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
3308 const struct alloc_context *ac, unsigned long *did_some_progress)
3309 {
3310 struct oom_control oc = {
3311 .zonelist = ac->zonelist,
3312 .nodemask = ac->nodemask,
3313 .memcg = NULL,
3314 .gfp_mask = gfp_mask,
3315 .order = order,
3316 };
3317 struct page *page;
3318
3319 *did_some_progress = 0;
3320
3321 /*
3322 * Acquire the oom lock. If that fails, somebody else is
3323 * making progress for us.
3324 */
3325 if (!mutex_trylock(&oom_lock)) {
3326 *did_some_progress = 1;
3327 schedule_timeout_uninterruptible(1);
3328 return NULL;
3329 }
3330
3331 /*
3332 * Go through the zonelist yet one more time, keeping a very high
3333 * watermark here; this is only to catch a parallel oom killing, and we
3334 * must fail if we're still under heavy pressure. But make sure that
3335 * this reclaim attempt does not depend on a __GFP_DIRECT_RECLAIM &&
3336 * !__GFP_NORETRY allocation, which will never fail while oom_lock is held.
3337 */
3338 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
3339 ~__GFP_DIRECT_RECLAIM, order,
3340 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
3341 if (page)
3342 goto out;
3343
3344 /* Coredumps can quickly deplete all memory reserves */
3345 if (current->flags & PF_DUMPCORE)
3346 goto out;
3347 /* The OOM killer will not help higher order allocs */
3348 if (order > PAGE_ALLOC_COSTLY_ORDER)
3349 goto out;
3350 /*
3351 * We have already exhausted all our reclaim opportunities without any
3352 * success so it is time to admit defeat. We will skip the OOM killer
3353 * because it is very likely that the caller has a more reasonable
3354 * fallback than shooting a random task.
3355 *
3356 * The OOM killer may not free memory on a specific node.
3357 */
3358 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
3359 goto out;
3360 /* The OOM killer does not needlessly kill tasks for lowmem */
3361 if (ac->highest_zoneidx < ZONE_NORMAL)
3362 goto out;
3363 if (pm_suspended_storage())
3364 goto out;
3365 /*
3366 * XXX: GFP_NOFS allocations should rather fail than rely on
3367 * other requests to make forward progress.
3368 * We are in an unfortunate situation where out_of_memory cannot
3369 * do much for this context, but let's try it to at least get
3370 * access to memory reserves if the current task is killed (see
3371 * out_of_memory). Once filesystems are ready to handle allocation
3372 * failures more gracefully we should just bail out here.
3373 */
3374
3375 /* Exhausted what can be done so it's blame time */
3376 if (out_of_memory(&oc) ||
3377 WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) {
3378 *did_some_progress = 1;
3379
3380 /*
3381 * Help non-failing allocations by giving them access to memory
3382 * reserves
3383 */
3384 if (gfp_mask & __GFP_NOFAIL)
3385 page = __alloc_pages_cpuset_fallback(gfp_mask, order,
3386 ALLOC_NO_WATERMARKS, ac);
3387 }
3388 out:
3389 mutex_unlock(&oom_lock);
3390 return page;
3391 }
3392
3393 /*
3394 * Maximum number of compaction retries with progress before the OOM
3395 * killer is considered the only way to move forward.
3396 */
3397 #define MAX_COMPACT_RETRIES 16
3398
3399 #ifdef CONFIG_COMPACTION
3400 /* Try memory compaction for high-order allocations before reclaim */
3401 static struct page *
3402 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3403 unsigned int alloc_flags, const struct alloc_context *ac,
3404 enum compact_priority prio, enum compact_result *compact_result)
3405 {
3406 struct page *page = NULL;
3407 unsigned long pflags;
3408 unsigned int noreclaim_flag;
3409
3410 if (!order)
3411 return NULL;
3412
3413 psi_memstall_enter(&pflags);
3414 delayacct_compact_start();
3415 noreclaim_flag = memalloc_noreclaim_save();
3416
3417 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
3418 prio, &page);
3419
3420 memalloc_noreclaim_restore(noreclaim_flag);
3421 psi_memstall_leave(&pflags);
3422 delayacct_compact_end();
3423
3424 if (*compact_result == COMPACT_SKIPPED)
3425 return NULL;
3426 /*
3427 * At least in one zone compaction wasn't deferred or skipped, so let's
3428 * count a compaction stall
3429 */
3430 count_vm_event(COMPACTSTALL);
3431
3432 /* Prep a captured page if available */
3433 if (page)
3434 prep_new_page(page, order, gfp_mask, alloc_flags);
3435
3436 /* Try get a page from the freelist if available */
3437 if (!page)
3438 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3439
3440 if (page) {
3441 struct zone *zone = page_zone(page);
3442
3443 zone->compact_blockskip_flush = false;
3444 compaction_defer_reset(zone, order, true);
3445 count_vm_event(COMPACTSUCCESS);
3446 return page;
3447 }
3448
3449 /*
3450 * It's bad if a compaction run occurs and fails. The most likely reason
3451 * is that pages exist, but not enough to satisfy watermarks.
3452 */
3453 count_vm_event(COMPACTFAIL);
3454
3455 cond_resched();
3456
3457 return NULL;
3458 }
3459
3460 static inline bool
3461 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
3462 enum compact_result compact_result,
3463 enum compact_priority *compact_priority,
3464 int *compaction_retries)
3465 {
3466 int max_retries = MAX_COMPACT_RETRIES;
3467 int min_priority;
3468 bool ret = false;
3469 int retries = *compaction_retries;
3470 enum compact_priority priority = *compact_priority;
3471
3472 if (!order)
3473 return false;
3474
3475 if (fatal_signal_pending(current))
3476 return false;
3477
3478 /*
3479 * Compaction was skipped due to a lack of free order-0
3480 * migration targets. Continue if reclaim can help.
3481 */
3482 if (compact_result == COMPACT_SKIPPED) {
3483 ret = compaction_zonelist_suitable(ac, order, alloc_flags);
3484 goto out;
3485 }
3486
3487 /*
3488 * Compaction managed to coalesce some page blocks, but the
3489 * allocation failed, presumably due to a race. Retry a few times.
3490 */
3491 if (compact_result == COMPACT_SUCCESS) {
3492 /*
3493 * !costly requests are much more important than
3494 * __GFP_RETRY_MAYFAIL costly ones because they are de
3495 * facto nofail and invoke OOM killer to move on while
3496 * costly can fail and users are ready to cope with
3497 * that. 1/4 retries is rather arbitrary but we would
3498 * need much more detailed feedback from compaction to
3499 * make a better decision.
3500 */
3501 if (order > PAGE_ALLOC_COSTLY_ORDER)
3502 max_retries /= 4;
3503
3504 if (++(*compaction_retries) <= max_retries) {
3505 ret = true;
3506 goto out;
3507 }
3508 }
3509
3510 /*
3511 * Compaction failed. Retry with increasing priority.
3512 */
3513 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
3514 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
3515
3516 if (*compact_priority > min_priority) {
3517 (*compact_priority)--;
3518 *compaction_retries = 0;
3519 ret = true;
3520 }
3521 out:
3522 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
3523 return ret;
3524 }
3525 #else
3526 static inline struct page *
3527 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3528 unsigned int alloc_flags, const struct alloc_context *ac,
3529 enum compact_priority prio, enum compact_result *compact_result)
3530 {
3531 *compact_result = COMPACT_SKIPPED;
3532 return NULL;
3533 }
3534
3535 static inline bool
3536 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
3537 enum compact_result compact_result,
3538 enum compact_priority *compact_priority,
3539 int *compaction_retries)
3540 {
3541 struct zone *zone;
3542 struct zoneref *z;
3543
3544 if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
3545 return false;
3546
3547 /*
3548 * There are setups with compaction disabled which would prefer to loop
3549 * inside the allocator rather than hit the oom killer prematurely.
3550 * Let's give them a good hope and keep retrying while the order-0
3551 * watermarks are OK.
3552 */
3553 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
3554 ac->highest_zoneidx, ac->nodemask) {
3555 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
3556 ac->highest_zoneidx, alloc_flags))
3557 return true;
3558 }
3559 return false;
3560 }
3561 #endif /* CONFIG_COMPACTION */
3562
3563 #ifdef CONFIG_LOCKDEP
3564 static struct lockdep_map __fs_reclaim_map =
3565 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
3566
3567 static bool __need_reclaim(gfp_t gfp_mask)
3568 {
3569 /* no reclaim without waiting on it */
3570 if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
3571 return false;
3572
3573 /* this guy won't enter reclaim */
3574 if (current->flags & PF_MEMALLOC)
3575 return false;
3576
3577 if (gfp_mask & __GFP_NOLOCKDEP)
3578 return false;
3579
3580 return true;
3581 }
3582
3583 void __fs_reclaim_acquire(unsigned long ip)
3584 {
3585 lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip);
3586 }
3587
3588 void __fs_reclaim_release(unsigned long ip)
3589 {
3590 lock_release(&__fs_reclaim_map, ip);
3591 }
3592
3593 void fs_reclaim_acquire(gfp_t gfp_mask)
3594 {
3595 gfp_mask = current_gfp_context(gfp_mask);
3596
3597 if (__need_reclaim(gfp_mask)) {
3598 if (gfp_mask & __GFP_FS)
3599 __fs_reclaim_acquire(_RET_IP_);
3600
3601 #ifdef CONFIG_MMU_NOTIFIER
3602 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
3603 lock_map_release(&__mmu_notifier_invalidate_range_start_map);
3604 #endif
3605
3606 }
3607 }
3608 EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
3609
3610 void fs_reclaim_release(gfp_t gfp_mask)
3611 {
3612 gfp_mask = current_gfp_context(gfp_mask);
3613
3614 if (__need_reclaim(gfp_mask)) {
3615 if (gfp_mask & __GFP_FS)
3616 __fs_reclaim_release(_RET_IP_);
3617 }
3618 }
3619 EXPORT_SYMBOL_GPL(fs_reclaim_release);
3620 #endif
3621
3622 /*
3623 * Zonelists may change due to hotplug during allocation. Detect when
3624 * zonelists have been rebuilt so the allocation can be retried. The
3625 * reader side does not lock and simply retries the allocation if the
3626 * zonelist changed. The writer side is protected by the embedded spin_lock.
3627 */
3628 static DEFINE_SEQLOCK(zonelist_update_seq);
3629
3630 static unsigned int zonelist_iter_begin(void)
3631 {
3632 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
3633 return read_seqbegin(&zonelist_update_seq);
3634
3635 return 0;
3636 }
3637
3638 static unsigned int check_retry_zonelist(unsigned int seq)
3639 {
3640 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
3641 return read_seqretry(&zonelist_update_seq, seq);
3642
3643 return seq;
3644 }
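/*
 * A minimal sketch of the reader-side retry pattern these two helpers
 * support, mirroring what __alloc_pages_slowpath() below does (variable
 * names are illustrative only):
 *
 *	unsigned int zonelist_iter_cookie;
 * restart:
 *	zonelist_iter_cookie = zonelist_iter_begin();
 *	... walk the zonelist and attempt the allocation ...
 *	if (!page && check_retry_zonelist(zonelist_iter_cookie))
 *		goto restart;
 */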
3645
3646 /* Perform direct synchronous page reclaim */
3647 static unsigned long
3648 __perform_reclaim(gfp_t gfp_mask, unsigned int order,
3649 const struct alloc_context *ac)
3650 {
3651 unsigned int noreclaim_flag;
3652 unsigned long progress;
3653
3654 cond_resched();
3655
3656 /* We now go into synchronous reclaim */
3657 cpuset_memory_pressure_bump();
3658 fs_reclaim_acquire(gfp_mask);
3659 noreclaim_flag = memalloc_noreclaim_save();
3660
3661 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
3662 ac->nodemask);
3663
3664 memalloc_noreclaim_restore(noreclaim_flag);
3665 fs_reclaim_release(gfp_mask);
3666
3667 cond_resched();
3668
3669 return progress;
3670 }
3671
3672 /* The really slow allocator path where we enter direct reclaim */
3673 static inline struct page *
3674 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
3675 unsigned int alloc_flags, const struct alloc_context *ac,
3676 unsigned long *did_some_progress)
3677 {
3678 struct page *page = NULL;
3679 unsigned long pflags;
3680 bool drained = false;
3681
3682 psi_memstall_enter(&pflags);
3683 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
3684 if (unlikely(!(*did_some_progress)))
3685 goto out;
3686
3687 retry:
3688 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3689
3690 /*
3691 * If an allocation failed after direct reclaim, it could be because
3692 * pages are pinned on the per-cpu lists or in high alloc reserves.
3693 * Shrink them and try again
3694 */
3695 if (!page && !drained) {
3696 unreserve_highatomic_pageblock(ac, false);
3697 drain_all_pages(NULL);
3698 drained = true;
3699 goto retry;
3700 }
3701 out:
3702 psi_memstall_leave(&pflags);
3703
3704 return page;
3705 }
3706
3707 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
3708 const struct alloc_context *ac)
3709 {
3710 struct zoneref *z;
3711 struct zone *zone;
3712 pg_data_t *last_pgdat = NULL;
3713 enum zone_type highest_zoneidx = ac->highest_zoneidx;
3714
3715 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,
3716 ac->nodemask) {
3717 if (!managed_zone(zone))
3718 continue;
3719 if (last_pgdat != zone->zone_pgdat) {
3720 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
3721 last_pgdat = zone->zone_pgdat;
3722 }
3723 }
3724 }
3725
3726 static inline unsigned int
3727 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
3728 {
3729 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
3730
3731 /*
3732 * __GFP_HIGH is assumed to be the same as ALLOC_MIN_RESERVE
3733 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
3734 * to save two branches.
3735 */
3736 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_MIN_RESERVE);
3737 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);
3738
3739 /*
3740 * The caller may dip into page reserves a bit more if the caller
3741 * cannot run direct reclaim, or if the caller has realtime scheduling
3742 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
3743 * set both ALLOC_NON_BLOCK and ALLOC_MIN_RESERVE(__GFP_HIGH).
3744 */
3745 alloc_flags |= (__force int)
3746 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
3747
3748 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
3749 /*
3750 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
3751 * if it can't schedule.
3752 */
3753 if (!(gfp_mask & __GFP_NOMEMALLOC)) {
3754 alloc_flags |= ALLOC_NON_BLOCK;
3755
3756 if (order > 0)
3757 alloc_flags |= ALLOC_HIGHATOMIC;
3758 }
3759
3760 /*
3761 * Ignore cpuset mems for non-blocking __GFP_HIGH (probably
3762 * GFP_ATOMIC) rather than fail, see the comment for
3763 * cpuset_node_allowed().
3764 */
3765 if (alloc_flags & ALLOC_MIN_RESERVE)
3766 alloc_flags &= ~ALLOC_CPUSET;
3767 } else if (unlikely(rt_task(current)) && in_task())
3768 alloc_flags |= ALLOC_MIN_RESERVE;
3769
3770 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
3771
3772 return alloc_flags;
3773 }
3774
3775 static bool oom_reserves_allowed(struct task_struct *tsk)
3776 {
3777 if (!tsk_is_oom_victim(tsk))
3778 return false;
3779
3780 /*
3781 * !MMU doesn't have oom reaper so give access to memory reserves
3782 * only to the thread with TIF_MEMDIE set
3783 */
3784 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
3785 return false;
3786
3787 return true;
3788 }
3789
3790 /*
3791 * Distinguish requests which really need access to full memory
3792 * reserves from oom victims which can live with a portion of it
3793 */
3794 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
3795 {
3796 if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
3797 return 0;
3798 if (gfp_mask & __GFP_MEMALLOC)
3799 return ALLOC_NO_WATERMARKS;
3800 if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
3801 return ALLOC_NO_WATERMARKS;
3802 if (!in_interrupt()) {
3803 if (current->flags & PF_MEMALLOC)
3804 return ALLOC_NO_WATERMARKS;
3805 else if (oom_reserves_allowed(current))
3806 return ALLOC_OOM;
3807 }
3808
3809 return 0;
3810 }
3811
3812 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
3813 {
3814 return !!__gfp_pfmemalloc_flags(gfp_mask);
3815 }
3816
3817 /*
3818 * Checks whether it makes sense to retry the reclaim to make forward progress
3819 * for the given allocation request.
3820 *
3821 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
3822 * without success, or when we couldn't even meet the watermark if we
3823 * reclaimed all remaining pages on the LRU lists.
3824 *
3825 * Returns true if a retry is viable or false to enter the oom path.
3826 */
3827 static inline bool
3828 should_reclaim_retry(gfp_t gfp_mask, unsigned order,
3829 struct alloc_context *ac, int alloc_flags,
3830 bool did_some_progress, int *no_progress_loops)
3831 {
3832 struct zone *zone;
3833 struct zoneref *z;
3834 bool ret = false;
3835
3836 /*
3837 * Costly allocations might have made some progress, but this doesn't
3838 * mean their order will become available due to high fragmentation, so
3839 * always increment the no-progress counter for them
3840 */
3841 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
3842 *no_progress_loops = 0;
3843 else
3844 (*no_progress_loops)++;
3845
3846 if (*no_progress_loops > MAX_RECLAIM_RETRIES)
3847 goto out;
3848
3850 /*
3851 * Keep reclaiming pages while there is a chance this will lead
3852 * somewhere. If none of the target zones can satisfy our allocation
3853 * request even if all reclaimable pages are considered then we are
3854 * screwed and have to go OOM.
3855 */
3856 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
3857 ac->highest_zoneidx, ac->nodemask) {
3858 unsigned long available;
3859 unsigned long reclaimable;
3860 unsigned long min_wmark = min_wmark_pages(zone);
3861 bool wmark;
3862
3863 available = reclaimable = zone_reclaimable_pages(zone);
3864 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
3865
3866 /*
3867 * Would the allocation succeed if we reclaimed all
3868 * reclaimable pages?
3869 */
3870 wmark = __zone_watermark_ok(zone, order, min_wmark,
3871 ac->highest_zoneidx, alloc_flags, available);
3872 trace_reclaim_retry_zone(z, order, reclaimable,
3873 available, min_wmark, *no_progress_loops, wmark);
3874 if (wmark) {
3875 ret = true;
3876 break;
3877 }
3878 }
3879
3880 /*
3881 * Memory allocation/reclaim might be called from a WQ context and the
3882 * current implementation of the WQ concurrency control doesn't
3883 * recognize that a particular WQ is congested if the worker thread is
3884 * looping without ever sleeping. Therefore we have to do a short sleep
3885 * here rather than calling cond_resched().
3886 */
3887 if (current->flags & PF_WQ_WORKER)
3888 schedule_timeout_uninterruptible(1);
3889 else
3890 cond_resched();
3891 out:
3892 /* Before OOM, exhaust highatomic_reserve */
3893 if (!ret)
3894 return unreserve_highatomic_pageblock(ac, true);
3895
3896 return ret;
3897 }
3898
3899 static inline bool
3900 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
3901 {
3902 /*
3903 * It's possible that cpuset's mems_allowed and the nodemask from
3904 * mempolicy don't intersect. This should be normally dealt with by
3905 * policy_nodemask(), but it's possible to race with a cpuset update in
3906 * such a way that the check therein was true, and then it became false
3907 * before we got our cpuset_mems_cookie here.
3908 * This assumes that for all allocations, ac->nodemask can come only
3909 * from MPOL_BIND mempolicy (whose documented semantics are to be ignored
3910 * when it does not intersect with the cpuset restrictions) or the
3911 * caller can deal with a violated nodemask.
3912 */
3913 if (cpusets_enabled() && ac->nodemask &&
3914 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
3915 ac->nodemask = NULL;
3916 return true;
3917 }
3918
3919 /*
3920 * When updating a task's mems_allowed or mempolicy nodemask, it is
3921 * possible to race with parallel threads in such a way that our
3922 * allocation can fail while the mask is being updated. If we are about
3923 * to fail, check if the cpuset changed during allocation and if so,
3924 * retry.
3925 */
3926 if (read_mems_allowed_retry(cpuset_mems_cookie))
3927 return true;
3928
3929 return false;
3930 }
3931
3932 static inline struct page *
3933 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
3934 struct alloc_context *ac)
3935 {
3936 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
3937 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
3938 struct page *page = NULL;
3939 unsigned int alloc_flags;
3940 unsigned long did_some_progress;
3941 enum compact_priority compact_priority;
3942 enum compact_result compact_result;
3943 int compaction_retries;
3944 int no_progress_loops;
3945 unsigned int cpuset_mems_cookie;
3946 unsigned int zonelist_iter_cookie;
3947 int reserve_flags;
3948
3949 restart:
3950 compaction_retries = 0;
3951 no_progress_loops = 0;
3952 compact_priority = DEF_COMPACT_PRIORITY;
3953 cpuset_mems_cookie = read_mems_allowed_begin();
3954 zonelist_iter_cookie = zonelist_iter_begin();
3955
3956 /*
3957 * The fast path uses conservative alloc_flags to succeed only until
3958 * kswapd needs to be woken up, and to avoid the cost of setting up
3959 * alloc_flags precisely. So we do that now.
3960 */
3961 alloc_flags = gfp_to_alloc_flags(gfp_mask, order);
3962
3963 /*
3964 * We need to recalculate the starting point for the zonelist iterator
3965 * because we might have used a different nodemask in the fast path, or
3966 * there was a cpuset modification and we are retrying - otherwise we
3967 * could end up iterating over non-eligible zones endlessly.
3968 */
3969 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
3970 ac->highest_zoneidx, ac->nodemask);
3971 if (!ac->preferred_zoneref->zone)
3972 goto nopage;
3973
3974 /*
3975 * Check for insane configurations where the cpuset doesn't contain
3976 * any suitable zone to satisfy the request - e.g. non-movable
3977 * GFP_HIGHUSER allocations from MOVABLE nodes only.
3978 */
3979 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
3980 struct zoneref *z = first_zones_zonelist(ac->zonelist,
3981 ac->highest_zoneidx,
3982 &cpuset_current_mems_allowed);
3983 if (!z->zone)
3984 goto nopage;
3985 }
3986
3987 if (alloc_flags & ALLOC_KSWAPD)
3988 wake_all_kswapds(order, gfp_mask, ac);
3989
3990 /*
3991 * The adjusted alloc_flags might result in immediate success, so try
3992 * that first
3993 */
3994 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3995 if (page)
3996 goto got_pg;
3997
3998 /*
3999 * For costly allocations, try direct compaction first, as it's likely
4000 * that we have enough base pages and don't need to reclaim. For non-
4001 * movable high-order allocations, do that as well, as compaction will
4002 * try to prevent permanent fragmentation by migrating from blocks of the
4003 * same migratetype.
4004 * Don't try this for allocations that are allowed to ignore
4005 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
4006 */
4007 if (can_direct_reclaim &&
4008 (costly_order ||
4009 (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
4010 && !gfp_pfmemalloc_allowed(gfp_mask)) {
4011 page = __alloc_pages_direct_compact(gfp_mask, order,
4012 alloc_flags, ac,
4013 INIT_COMPACT_PRIORITY,
4014 &compact_result);
4015 if (page)
4016 goto got_pg;
4017
4018 /*
4019 * Checks for costly allocations with __GFP_NORETRY, which
4020 * includes some THP page fault allocations
4021 */
4022 if (costly_order && (gfp_mask & __GFP_NORETRY)) {
4023 /*
4024 * If allocating entire pageblock(s) and compaction
4025 * failed because all zones are below low watermarks
4026 * or is prohibited because it recently failed at this
4027 * order, fail immediately unless the allocator has
4028 * requested compaction and reclaim retry.
4029 *
4030 * Reclaim is
4031 * - potentially very expensive because zones are far
4032 * below their low watermarks or this is part of very
4033 * bursty high order allocations,
4034 * - not guaranteed to help because isolate_freepages()
4035 * may not iterate over freed pages as part of its
4036 * linear scan, and
4037 * - unlikely to make entire pageblocks free on its
4038 * own.
4039 */
4040 if (compact_result == COMPACT_SKIPPED ||
4041 compact_result == COMPACT_DEFERRED)
4042 goto nopage;
4043
4044 /*
4045 * Looks like reclaim/compaction is worth trying, but
4046 * sync compaction could be very expensive, so keep
4047 * using async compaction.
4048 */
4049 compact_priority = INIT_COMPACT_PRIORITY;
4050 }
4051 }
4052
4053 retry:
4054 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
4055 if (alloc_flags & ALLOC_KSWAPD)
4056 wake_all_kswapds(order, gfp_mask, ac);
4057
4058 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
4059 if (reserve_flags)
4060 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) |
4061 (alloc_flags & ALLOC_KSWAPD);
4062
4063 /*
4064 * Reset the nodemask and zonelist iterators if memory policies can be
4065 * ignored. These allocations are high priority and system rather than
4066 * user oriented.
4067 */
4068 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
4069 ac->nodemask = NULL;
4070 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4071 ac->highest_zoneidx, ac->nodemask);
4072 }
4073
4074 /* Attempt with potentially adjusted zonelist and alloc_flags */
4075 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4076 if (page)
4077 goto got_pg;
4078
4079 /* Caller is not willing to reclaim, we can't balance anything */
4080 if (!can_direct_reclaim)
4081 goto nopage;
4082
4083 /* Avoid recursion of direct reclaim */
4084 if (current->flags & PF_MEMALLOC)
4085 goto nopage;
4086
4087 /* Try direct reclaim and then allocating */
4088 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
4089 &did_some_progress);
4090 if (page)
4091 goto got_pg;
4092
4093 /* Try direct compaction and then allocating */
4094 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
4095 compact_priority, &compact_result);
4096 if (page)
4097 goto got_pg;
4098
4099 /* Do not loop if specifically requested */
4100 if (gfp_mask & __GFP_NORETRY)
4101 goto nopage;
4102
4103 /*
4104 * Do not retry costly high order allocations unless they are
4105 * __GFP_RETRY_MAYFAIL
4106 */
4107 if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
4108 goto nopage;
4109
4110 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
4111 did_some_progress > 0, &no_progress_loops))
4112 goto retry;
4113
4114 /*
4115 * It doesn't make any sense to retry compaction if the order-0
4116 * reclaim is not able to make any progress, because the current
4117 * implementation of compaction depends on a sufficient amount
4118 * of free memory (see __compaction_suitable)
4119 */
4120 if (did_some_progress > 0 &&
4121 should_compact_retry(ac, order, alloc_flags,
4122 compact_result, &compact_priority,
4123 &compaction_retries))
4124 goto retry;
4125
4127 /*
4128 * Deal with possible cpuset update races or zonelist updates to avoid
4129 * an unnecessary OOM kill.
4130 */
4131 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4132 check_retry_zonelist(zonelist_iter_cookie))
4133 goto restart;
4134
4135 /* Reclaim has failed us, start killing things */
4136 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
4137 if (page)
4138 goto got_pg;
4139
4140 /* Avoid allocations with no watermarks from looping endlessly */
4141 if (tsk_is_oom_victim(current) &&
4142 (alloc_flags & ALLOC_OOM ||
4143 (gfp_mask & __GFP_NOMEMALLOC)))
4144 goto nopage;
4145
4146 /* Retry as long as the OOM killer is making progress */
4147 if (did_some_progress) {
4148 no_progress_loops = 0;
4149 goto retry;
4150 }
4151
4152 nopage:
4153 /*
4154 * Deal with possible cpuset update races or zonelist updates to avoid
4155 * an unnecessary OOM kill.
4156 */
4157 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4158 check_retry_zonelist(zonelist_iter_cookie))
4159 goto restart;
4160
4161 /*
4162 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
4163 * we always retry
4164 */
4165 if (gfp_mask & __GFP_NOFAIL) {
4166 /*
4167 * All existing users of the __GFP_NOFAIL are blockable, so warn
4168 * of any new users that actually require GFP_NOWAIT
4169 */
4170 if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
4171 goto fail;
4172
4173 /*
4174 * PF_MEMALLOC request from this context is rather bizarre
4175 * because we cannot reclaim anything and only can loop waiting
4176 * for somebody to do a work for us
4177 */
4178 WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask);
4179
4180 /*
4181 * non-failing costly orders are a hard requirement which we
4182 * are not well prepared for, so let's warn about these users
4183 * so that we can identify them and convert them to something
4184 * else.
4185 */
4186 WARN_ON_ONCE_GFP(costly_order, gfp_mask);
4187
4188 /*
4189 * Help non-failing allocations by giving some access to memory
4190 * reserves normally used for high priority non-blocking
4191 * allocations but do not use ALLOC_NO_WATERMARKS because this
4192 * could deplete whole memory reserves which would just make
4193 * the situation worse.
4194 */
4195 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac);
4196 if (page)
4197 goto got_pg;
4198
4199 cond_resched();
4200 goto retry;
4201 }
4202 fail:
4203 warn_alloc(gfp_mask, ac->nodemask,
4204 "page allocation failure: order:%u", order);
4205 got_pg:
4206 return page;
4207 }
4208
4209 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
4210 int preferred_nid, nodemask_t *nodemask,
4211 struct alloc_context *ac, gfp_t *alloc_gfp,
4212 unsigned int *alloc_flags)
4213 {
4214 ac->highest_zoneidx = gfp_zone(gfp_mask);
4215 ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
4216 ac->nodemask = nodemask;
4217 ac->migratetype = gfp_migratetype(gfp_mask);
4218
4219 if (cpusets_enabled()) {
4220 *alloc_gfp |= __GFP_HARDWALL;
4221 /*
4222 * In interrupt context, the allocation is unrelated to the
4223 * current task's context, which means any node is OK.
4224 */
4225 if (in_task() && !ac->nodemask)
4226 ac->nodemask = &cpuset_current_mems_allowed;
4227 else
4228 *alloc_flags |= ALLOC_CPUSET;
4229 }
4230
4231 might_alloc(gfp_mask);
4232
4233 #ifdef CONFIG_HYPERHOLD_ZSWAPD
4234 if (gfp_mask & __GFP_KSWAPD_RECLAIM)
4235 wake_all_zswapd();
4236 #endif
4237
4238 if (should_fail_alloc_page(gfp_mask, order))
4239 return false;
4240
4241 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
4242
4243 /* Dirty zone balancing only done in the fast path */
4244 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
4245
4246 /*
4247 * The preferred zone is used for statistics but crucially it is
4248 * also used as the starting point for the zonelist iterator. It
4249 * may get reset for allocations that ignore memory policies.
4250 */
4251 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4252 ac->highest_zoneidx, ac->nodemask);
4253
4254 return true;
4255 }
4256
4257 /*
4258 * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array
4259 * @gfp: GFP flags for the allocation
4260 * @preferred_nid: The preferred NUMA node ID to allocate from
4261 * @nodemask: Set of nodes to allocate from, may be NULL
4262 * @nr_pages: The number of pages desired on the list or array
4263 * @page_list: Optional list to store the allocated pages
4264 * @page_array: Optional array to store the pages
4265 *
4266 * This is a batched version of the page allocator that attempts to
4267 * allocate nr_pages quickly. Pages are added to page_list if page_list
4268 * is not NULL, otherwise it is assumed that the page_array is valid.
4269 *
4270 * For lists, nr_pages is the number of pages that should be allocated.
4271 *
4272 * For arrays, only NULL elements are populated with pages and nr_pages
4273 * is the maximum number of pages that will be stored in the array.
4274 *
4275 * Returns the number of pages on the list or array.
4276 */
4277 unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
4278 nodemask_t *nodemask, int nr_pages,
4279 struct list_head *page_list,
4280 struct page **page_array)
4281 {
4282 struct page *page;
4283 unsigned long __maybe_unused UP_flags;
4284 struct zone *zone;
4285 struct zoneref *z;
4286 struct per_cpu_pages *pcp;
4287 struct list_head *pcp_list;
4288 struct alloc_context ac;
4289 gfp_t alloc_gfp;
4290 unsigned int alloc_flags = ALLOC_WMARK_LOW;
4291 int nr_populated = 0, nr_account = 0;
4292
4293 /*
4294 * Skip populated array elements to determine if any pages need
4295 * to be allocated before disabling IRQs.
4296 */
4297 while (page_array && nr_populated < nr_pages && page_array[nr_populated])
4298 nr_populated++;
4299
4300 /* No pages requested? */
4301 if (unlikely(nr_pages <= 0))
4302 goto out;
4303
4304 /* Already populated array? */
4305 if (unlikely(page_array && nr_pages - nr_populated == 0))
4306 goto out;
4307
4308 /* Bulk allocator does not support memcg accounting. */
4309 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT))
4310 goto failed;
4311
4312 /* Use the single page allocator for one page. */
4313 if (nr_pages - nr_populated == 1)
4314 goto failed;
4315
4316 #ifdef CONFIG_PAGE_OWNER
4317 /*
4318 * PAGE_OWNER may recurse into the allocator to allocate space to
4319 * save the stack with pagesets.lock held. Releasing/reacquiring
4320 * removes much of the performance benefit of bulk allocation so
4321 * force the caller to allocate one page at a time, as that has
4322 * similar performance without adding complexity to the bulk allocator.
4323 */
4324 if (static_branch_unlikely(&page_owner_inited))
4325 goto failed;
4326 #endif
4327
4328 /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
4329 gfp &= gfp_allowed_mask;
4330 alloc_gfp = gfp;
4331 if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
4332 goto out;
4333 gfp = alloc_gfp;
4334
4335 /* Find an allowed local zone that meets the low watermark. */
4336 for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) {
4337 unsigned long mark;
4338
4339 if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
4340 !__cpuset_zone_allowed(zone, gfp)) {
4341 continue;
4342 }
4343
4344 if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
4345 zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
4346 goto failed;
4347 }
4348
4349 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
4350 if (zone_watermark_fast(zone, 0, mark,
4351 zonelist_zone_idx(ac.preferred_zoneref),
4352 alloc_flags, gfp)) {
4353 break;
4354 }
4355 }
4356
4357 /*
4358 * If there are no allowed local zones that meet the watermarks then
4359 * try to allocate a single page and reclaim if necessary.
4360 */
4361 if (unlikely(!zone))
4362 goto failed;
4363
4364 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */
4365 pcp_trylock_prepare(UP_flags);
4366 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
4367 if (!pcp)
4368 goto failed_irq;
4369
4370 /* Attempt the batch allocation */
4371 pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)];
4372 while (nr_populated < nr_pages) {
4373
4374 /* Skip existing pages */
4375 if (page_array && page_array[nr_populated]) {
4376 nr_populated++;
4377 continue;
4378 }
4379
4380 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
4381 pcp, pcp_list);
4382 if (unlikely(!page)) {
4383 /* Try and allocate at least one page */
4384 if (!nr_account) {
4385 pcp_spin_unlock(pcp);
4386 goto failed_irq;
4387 }
4388 break;
4389 }
4390 nr_account++;
4391
4392 prep_new_page(page, 0, gfp, 0);
4393 if (page_list)
4394 list_add(&page->lru, page_list);
4395 else
4396 page_array[nr_populated] = page;
4397 nr_populated++;
4398 }
4399
4400 pcp_spin_unlock(pcp);
4401 pcp_trylock_finish(UP_flags);
4402
4403 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
4404 zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
4405
4406 out:
4407 return nr_populated;
4408
4409 failed_irq:
4410 pcp_trylock_finish(UP_flags);
4411
4412 failed:
4413 page = __alloc_pages(gfp, 0, preferred_nid, nodemask);
4414 if (page) {
4415 if (page_list)
4416 list_add(&page->lru, page_list);
4417 else
4418 page_array[nr_populated] = page;
4419 nr_populated++;
4420 }
4421
4422 goto out;
4423 }
4424 EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
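/*
 * A minimal usage sketch for the array variant (names and sizes here are
 * illustrative only). Callers must check the return value, since fewer
 * than the requested number of pages may have been provided:
 *
 *	struct page *pages[16] = { NULL };
 *	unsigned long filled;
 *
 *	filled = __alloc_pages_bulk(GFP_KERNEL, numa_node_id(), NULL,
 *				    ARRAY_SIZE(pages), NULL, pages);
 *	// pages[0..filled-1] now hold order-0 pages
 */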
4425
4426 /*
4427 * This is the 'heart' of the zoned buddy allocator.
4428 */
4429 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
4430 nodemask_t *nodemask)
4431 {
4432 struct page *page;
4433 unsigned int alloc_flags = ALLOC_WMARK_LOW;
4434 gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */
4435 struct alloc_context ac = { };
4436
4437 /*
4438 * There are several places where we assume that the order value is sane
4439 * so bail out early if the request is out of bounds.
4440 */
4441 if (WARN_ON_ONCE_GFP(order > MAX_ORDER, gfp))
4442 return NULL;
4443
4444 gfp &= gfp_allowed_mask;
4445 /*
4446 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
4447 * resp. GFP_NOIO which has to be inherited for all allocation requests
4448 * from a particular context which has been marked by
4449 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures
4450 * movable zones are not used during allocation.
4451 */
4452 gfp = current_gfp_context(gfp);
4453 alloc_gfp = gfp;
4454 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
4455 &alloc_gfp, &alloc_flags))
4456 return NULL;
4457
4458 /*
4459 * Forbid the first pass from falling back to types that fragment
4460 * memory until all local zones are considered.
4461 */
4462 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
4463
4464 /* First allocation attempt */
4465 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
4466 if (likely(page))
4467 goto out;
4468
4469 alloc_gfp = gfp;
4470 ac.spread_dirty_pages = false;
4471
4472 /*
4473 * Restore the original nodemask if it was potentially replaced with
4474 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
4475 */
4476 ac.nodemask = nodemask;
4477
4478 page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
4479
4480 out:
4481 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page &&
4482 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
4483 __free_pages(page, order);
4484 page = NULL;
4485 }
4486
4487 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
4488 kmsan_alloc_page(page, order, alloc_gfp);
4489
4490 return page;
4491 }
4492 EXPORT_SYMBOL(__alloc_pages);
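/*
 * A minimal usage sketch (illustrative only): most callers reach this
 * entry point through the alloc_pages()/alloc_pages_node() wrappers:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);	// 4 contiguous pages
 *	if (page) {
 *		void *buf = page_address(page);
 *		...
 *		__free_pages(page, 2);	// order must match the allocation
 *	}
 */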
4493
4494 struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
4495 nodemask_t *nodemask)
4496 {
4497 struct page *page = __alloc_pages(gfp | __GFP_COMP, order,
4498 preferred_nid, nodemask);
4499 struct folio *folio = (struct folio *)page;
4500
4501 if (folio && order > 1)
4502 folio_prep_large_rmappable(folio);
4503 return folio;
4504 }
4505 EXPORT_SYMBOL(__folio_alloc);
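/*
 * A minimal usage sketch (illustrative only), assuming the common
 * folio_alloc() wrapper is used rather than this entry point directly:
 *
 *	struct folio *folio = folio_alloc(GFP_KERNEL, 0);
 *	if (folio) {
 *		...
 *		folio_put(folio);	// drops the last reference and frees
 *	}
 */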
4506
4507 /*
4508 * Common helper functions. Never use with __GFP_HIGHMEM because the returned
4509 * address cannot represent highmem pages. Use alloc_pages and then kmap if
4510 * you need to access high mem.
4511 */
4512 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
4513 {
4514 struct page *page;
4515
4516 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
4517 if (!page)
4518 return 0;
4519 return (unsigned long) page_address(page);
4520 }
4521 EXPORT_SYMBOL(__get_free_pages);
4522
4523 unsigned long get_zeroed_page(gfp_t gfp_mask)
4524 {
4525 return __get_free_page(gfp_mask | __GFP_ZERO);
4526 }
4527 EXPORT_SYMBOL(get_zeroed_page);
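/*
 * A minimal usage sketch for these helpers (illustrative only); note the
 * return value is a kernel virtual address rather than a struct page:
 *
 *	unsigned long addr = get_zeroed_page(GFP_KERNEL);
 *	if (addr) {
 *		...
 *		free_page(addr);	// or free_pages(addr, order)
 *	}
 */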
4528
4529 /**
4530 * __free_pages - Free pages allocated with alloc_pages().
4531 * @page: The page pointer returned from alloc_pages().
4532 * @order: The order of the allocation.
4533 *
4534 * This function can free multi-page allocations that are not compound
4535 * pages. It does not check that the @order passed in matches that of
4536 * the allocation, so it is easy to leak memory. Freeing more memory
4537 * than was allocated will probably emit a warning.
4538 *
4539 * If the last reference to this page is speculative, it will be released
4540 * by put_page() which only frees the first page of a non-compound
4541 * allocation. To prevent the remaining pages from being leaked, we free
4542 * the subsequent pages here. If you want to use the page's reference
4543 * count to decide when to free the allocation, you should allocate a
4544 * compound page, and use put_page() instead of __free_pages().
4545 *
4546 * Context: May be called in interrupt context or while holding a normal
4547 * spinlock, but not in NMI context or while holding a raw spinlock.
4548 */
4549 void __free_pages(struct page *page, unsigned int order)
4550 {
4551 /* get PageHead before we drop reference */
4552 int head = PageHead(page);
4553
4554 if (put_page_testzero(page))
4555 free_the_page(page, order);
4556 else if (!head)
4557 while (order-- > 0)
4558 free_the_page(page + (1 << order), order);
4559 }
4560 EXPORT_SYMBOL(__free_pages);
4561
4562 void free_pages(unsigned long addr, unsigned int order)
4563 {
4564 if (addr != 0) {
4565 VM_BUG_ON(!virt_addr_valid((void *)addr));
4566 __free_pages(virt_to_page((void *)addr), order);
4567 }
4568 }
4570 EXPORT_SYMBOL(free_pages);
4571
4572 /*
4573 * Page Fragment:
4574 * An arbitrary-length arbitrary-offset area of memory which resides
4575 * within a 0 or higher order page. Multiple fragments within that page
4576 * are individually refcounted, in the page's reference counter.
4577 *
4578 * The page_frag functions below provide a simple allocation framework for
4579 * page fragments. This is used by the network stack and network device
4580 * drivers to provide a backing region of memory for use as either an
4581 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
4582 */
4583 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
4584 gfp_t gfp_mask)
4585 {
4586 struct page *page = NULL;
4587 gfp_t gfp = gfp_mask;
4588
4589 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4590 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
4591 __GFP_NOMEMALLOC;
4592 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
4593 PAGE_FRAG_CACHE_MAX_ORDER);
4594 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
4595 #endif
4596 if (unlikely(!page))
4597 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
4598
4599 nc->va = page ? page_address(page) : NULL;
4600
4601 return page;
4602 }
4603
4604 void __page_frag_cache_drain(struct page *page, unsigned int count)
4605 {
4606 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
4607
4608 if (page_ref_sub_and_test(page, count))
4609 free_the_page(page, compound_order(page));
4610 }
4611 EXPORT_SYMBOL(__page_frag_cache_drain);
4612
4613 void *page_frag_alloc_align(struct page_frag_cache *nc,
4614 unsigned int fragsz, gfp_t gfp_mask,
4615 unsigned int align_mask)
4616 {
4617 unsigned int size = PAGE_SIZE;
4618 struct page *page;
4619 int offset;
4620
4621 if (unlikely(!nc->va)) {
4622 refill:
4623 page = __page_frag_cache_refill(nc, gfp_mask);
4624 if (!page)
4625 return NULL;
4626
4627 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4628 /* if size can vary use size else just use PAGE_SIZE */
4629 size = nc->size;
4630 #endif
4631 /* Even if we own the page, we do not use atomic_set().
4632 * This would break get_page_unless_zero() users.
4633 */
4634 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
4635
4636 /* reset page count bias and offset to start of new frag */
4637 nc->pfmemalloc = page_is_pfmemalloc(page);
4638 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
4639 nc->offset = size;
4640 }
4641
4642 offset = nc->offset - fragsz;
4643 if (unlikely(offset < 0)) {
4644 page = virt_to_page(nc->va);
4645
4646 if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
4647 goto refill;
4648
4649 if (unlikely(nc->pfmemalloc)) {
4650 free_the_page(page, compound_order(page));
4651 goto refill;
4652 }
4653
4654 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4655 /* if size can vary use size else just use PAGE_SIZE */
4656 size = nc->size;
4657 #endif
4658 /* OK, page count is 0, we can safely set it */
4659 set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
4660
4661 /* reset page count bias and offset to start of new frag */
4662 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
4663 offset = size - fragsz;
4664 if (unlikely(offset < 0)) {
4665 /*
4666 * The caller is trying to allocate a fragment
4667 * with fragsz > PAGE_SIZE but the cache isn't big
4668 * enough to satisfy the request; this may happen
4669 * in low memory conditions.
4670 * We don't release the cache page because
4671 * it could make memory pressure worse,
4672 * so we simply return NULL here.
4673 */
4674 return NULL;
4675 }
4676 }
4677
4678 nc->pagecnt_bias--;
4679 offset &= align_mask;
4680 nc->offset = offset;
4681
4682 return nc->va + offset;
4683 }
4684 EXPORT_SYMBOL(page_frag_alloc_align);
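/*
 * A minimal usage sketch (illustrative only): a driver-private
 * page_frag_cache handing out small buffers, later freed with
 * page_frag_free(). page_frag_alloc() is the wrapper that requests no
 * extra alignment; the cache itself needs external serialization:
 *
 *	static struct page_frag_cache frag_cache;	// assumed zero-initialized
 *
 *	void *buf = page_frag_alloc(&frag_cache, 256, GFP_ATOMIC);
 *	if (buf) {
 *		...
 *		page_frag_free(buf);
 *	}
 */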
4685
4686 /*
4687 * Frees a page fragment allocated out of either a compound or order 0 page.
4688 */
4689 void page_frag_free(void *addr)
4690 {
4691 struct page *page = virt_to_head_page(addr);
4692
4693 if (unlikely(put_page_testzero(page)))
4694 free_the_page(page, compound_order(page));
4695 }
4696 EXPORT_SYMBOL(page_frag_free);
4697
4698 static void *make_alloc_exact(unsigned long addr, unsigned int order,
4699 size_t size)
4700 {
4701 if (addr) {
4702 unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE);
4703 struct page *page = virt_to_page((void *)addr);
4704 struct page *last = page + nr;
4705
4706 split_page_owner(page, 1 << order);
4707 split_page_memcg(page, 1 << order);
4708 while (page < --last)
4709 set_page_refcounted(last);
4710
4711 last = page + (1UL << order);
4712 for (page += nr; page < last; page++)
4713 __free_pages_ok(page, 0, FPI_TO_TAIL);
4714 }
4715 return (void *)addr;
4716 }
4717
4718 /**
4719 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
4720 * @size: the number of bytes to allocate
4721 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
4722 *
4723 * This function is similar to alloc_pages(), except that it allocates the
4724 * minimum number of pages to satisfy the request. alloc_pages() can only
4725 * allocate memory in power-of-two pages.
4726 *
4727 * This function is also limited by MAX_ORDER.
4728 *
4729 * Memory allocated by this function must be released by free_pages_exact().
4730 *
4731 * Return: pointer to the allocated area or %NULL in case of error.
4732 */
4733 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
4734 {
4735 unsigned int order = get_order(size);
4736 unsigned long addr;
4737
4738 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
4739 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
4740
4741 addr = __get_free_pages(gfp_mask, order);
4742 return make_alloc_exact(addr, order, size);
4743 }
4744 EXPORT_SYMBOL(alloc_pages_exact);
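/*
 * A minimal usage sketch (illustrative only): with 4K pages, a 10 KB
 * buffer consumes three pages here instead of the four that an order-2
 * alloc_pages() call would round up to:
 *
 *	void *buf = alloc_pages_exact(10 * 1024, GFP_KERNEL);
 *	if (buf) {
 *		...
 *		free_pages_exact(buf, 10 * 1024);	// same size as allocated
 *	}
 */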
4745
4746 /**
4747 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
4748 * pages on a node.
4749 * @nid: the preferred node ID where memory should be allocated
4750 * @size: the number of bytes to allocate
4751 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
4752 *
4753 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
4754 * back.
4755 *
4756 * Return: pointer to the allocated area or %NULL in case of error.
4757 */
4758 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
4759 {
4760 unsigned int order = get_order(size);
4761 struct page *p;
4762
4763 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
4764 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
4765
4766 p = alloc_pages_node(nid, gfp_mask, order);
4767 if (!p)
4768 return NULL;
4769 return make_alloc_exact((unsigned long)page_address(p), order, size);
4770 }
4771
4772 /**
4773 * free_pages_exact - release memory allocated via alloc_pages_exact()
4774 * @virt: the value returned by alloc_pages_exact.
4775 * @size: size of allocation, same value as passed to alloc_pages_exact().
4776 *
4777 * Release the memory allocated by a previous call to alloc_pages_exact.
4778 */
4779 void free_pages_exact(void *virt, size_t size)
4780 {
4781 unsigned long addr = (unsigned long)virt;
4782 unsigned long end = addr + PAGE_ALIGN(size);
4783
4784 while (addr < end) {
4785 free_page(addr);
4786 addr += PAGE_SIZE;
4787 }
4788 }
4789 EXPORT_SYMBOL(free_pages_exact);
4790
4791 /**
4792 * nr_free_zone_pages - count number of pages beyond high watermark
4793 * @offset: The zone index of the highest zone
4794 *
4795 * nr_free_zone_pages() counts the number of pages which are beyond the
4796 * high watermark within all zones at or below a given zone index. For each
4797 * zone, the number of pages is calculated as:
4798 *
4799 * nr_free_zone_pages = managed_pages - high_pages
4800 *
4801 * Return: number of pages beyond high watermark.
4802 */
4803 static unsigned long nr_free_zone_pages(int offset)
4804 {
4805 struct zoneref *z;
4806 struct zone *zone;
4807
4808 /* Just pick one node, since fallback list is circular */
4809 unsigned long sum = 0;
4810
4811 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
4812
4813 for_each_zone_zonelist(zone, z, zonelist, offset) {
4814 unsigned long size = zone_managed_pages(zone);
4815 unsigned long high = high_wmark_pages(zone);
4816 if (size > high)
4817 sum += size - high;
4818 }
4819
4820 return sum;
4821 }
4822
4823 /**
4824 * nr_free_buffer_pages - count number of pages beyond high watermark
4825 *
4826 * nr_free_buffer_pages() counts the number of pages which are beyond the high
4827 * watermark within ZONE_DMA and ZONE_NORMAL.
4828 *
4829 * Return: number of pages beyond high watermark within ZONE_DMA and
4830 * ZONE_NORMAL.
4831 */
4832 unsigned long nr_free_buffer_pages(void)
4833 {
4834 return nr_free_zone_pages(gfp_zone(GFP_USER));
4835 }
4836 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
4837
4838 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
4839 {
4840 zoneref->zone = zone;
4841 zoneref->zone_idx = zone_idx(zone);
4842 }
4843
4844 /*
4845 * Builds allocation fallback zone lists.
4846 *
4847 * Add all populated zones of a node to the zonelist.
4848 */
4849 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
4850 {
4851 struct zone *zone;
4852 enum zone_type zone_type = MAX_NR_ZONES;
4853 int nr_zones = 0;
4854
4855 do {
4856 zone_type--;
4857 zone = pgdat->node_zones + zone_type;
4858 if (populated_zone(zone)) {
4859 zoneref_set_zone(zone, &zonerefs[nr_zones++]);
4860 check_highest_zone(zone_type);
4861 }
4862 } while (zone_type);
4863
4864 return nr_zones;
4865 }
4866
4867 #ifdef CONFIG_NUMA
4868
4869 static int __parse_numa_zonelist_order(char *s)
4870 {
4871 /*
4872 * We used to support different zonelist modes but they turned
4873 * out to be just not useful. Let's keep the warning in place
4874 * if somebody still uses the cmd line parameter so that we do
4875 * not fail it silently.
4876 */
4877 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
4878 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s);
4879 return -EINVAL;
4880 }
4881 return 0;
4882 }
4883
4884 static char numa_zonelist_order[] = "Node";
4885 #define NUMA_ZONELIST_ORDER_LEN 16
4886 /*
4887 * sysctl handler for numa_zonelist_order
4888 */
4889 static int numa_zonelist_order_handler(struct ctl_table *table, int write,
4890 void *buffer, size_t *length, loff_t *ppos)
4891 {
4892 if (write)
4893 return __parse_numa_zonelist_order(buffer);
4894 return proc_dostring(table, write, buffer, length, ppos);
4895 }
4896
4897 static int node_load[MAX_NUMNODES];
4898
4899 /**
4900 * find_next_best_node - find the next node that should appear in a given node's fallback list
4901 * @node: node whose fallback list we're appending
4902 * @used_node_mask: nodemask_t of already used nodes
4903 *
4904 * We use a number of factors to determine which is the next node that should
4905 * appear on a given node's fallback list. The node should not have appeared
4906 * already in @node's fallback list, and it should be the next closest node
4907 * according to the distance array (which contains arbitrary distance values
4908 * from each node to each node in the system), and should also prefer nodes
4909 * with no CPUs, since presumably they'll have very little allocation pressure
4910 * on them otherwise.
4911 *
4912 * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
4913 */
4914 int find_next_best_node(int node, nodemask_t *used_node_mask)
4915 {
4916 int n, val;
4917 int min_val = INT_MAX;
4918 int best_node = NUMA_NO_NODE;
4919
4920 /* Use the local node if we haven't already */
4921 if (!node_isset(node, *used_node_mask)) {
4922 node_set(node, *used_node_mask);
4923 return node;
4924 }
4925
4926 for_each_node_state(n, N_MEMORY) {
4927
4928 /* Don't want a node to appear more than once */
4929 if (node_isset(n, *used_node_mask))
4930 continue;
4931
4932 /* Use the distance array to find the distance */
4933 val = node_distance(node, n);
4934
4935 /* Penalize nodes under us ("prefer the next node") */
4936 val += (n < node);
4937
4938 /* Give preference to headless and unused nodes */
4939 if (!cpumask_empty(cpumask_of_node(n)))
4940 val += PENALTY_FOR_NODE_WITH_CPUS;
4941
4942 /* Slight preference for less loaded node */
4943 val *= MAX_NUMNODES;
4944 val += node_load[n];
4945
4946 if (val < min_val) {
4947 min_val = val;
4948 best_node = n;
4949 }
4950 }
4951
4952 if (best_node >= 0)
4953 node_set(best_node, *used_node_mask);
4954
4955 return best_node;
4956 }
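/*
 * A small worked example of the scoring above (values illustrative):
 * let node 0 pick between node 1 (distance 20, has CPUs) and node 2
 * (distance 20, headless), with node_load[] all zero. Node 1 scores
 * (20 + PENALTY_FOR_NODE_WITH_CPUS) * MAX_NUMNODES while node 2 scores
 * 20 * MAX_NUMNODES, so the headless node 2 is picked first, matching
 * the preference for nodes with no CPUs documented above.
 */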
4957
4959 /*
4960 * Build zonelists ordered by node and zones within node.
4961 * This results in maximum locality--normal zone overflows into local
4962 * DMA zone, if any--but risks exhausting DMA zone.
4963 */
4964 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
4965 unsigned nr_nodes)
4966 {
4967 struct zoneref *zonerefs;
4968 int i;
4969
4970 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
4971
4972 for (i = 0; i < nr_nodes; i++) {
4973 int nr_zones;
4974
4975 pg_data_t *node = NODE_DATA(node_order[i]);
4976
4977 nr_zones = build_zonerefs_node(node, zonerefs);
4978 zonerefs += nr_zones;
4979 }
4980 zonerefs->zone = NULL;
4981 zonerefs->zone_idx = 0;
4982 }
4983
4984 /*
4985 * Build gfp_thisnode zonelists
4986 */
4987 static void build_thisnode_zonelists(pg_data_t *pgdat)
4988 {
4989 struct zoneref *zonerefs;
4990 int nr_zones;
4991
4992 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
4993 nr_zones = build_zonerefs_node(pgdat, zonerefs);
4994 zonerefs += nr_zones;
4995 zonerefs->zone = NULL;
4996 zonerefs->zone_idx = 0;
4997 }
4998
4999 /*
5000 * Build zonelists ordered by zone and nodes within zones.
5001 * This results in conserving DMA zone[s] until all Normal memory is
5002 * exhausted, but results in overflowing to remote node while memory
5003 * may still exist in local DMA zone.
5004 */
5005
5006 static void build_zonelists(pg_data_t *pgdat)
5007 {
5008 static int node_order[MAX_NUMNODES];
5009 int node, nr_nodes = 0;
5010 nodemask_t used_mask = NODE_MASK_NONE;
5011 int local_node, prev_node;
5012
5013 /* NUMA-aware ordering of nodes */
5014 local_node = pgdat->node_id;
5015 prev_node = local_node;
5016
5017 memset(node_order, 0, sizeof(node_order));
5018 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
5019 /*
5020 * We don't want to pressure a particular node.
5021 * So we add a penalty to the first node in the same
5022 * distance group, making the ordering round-robin.
5023 */
5024 if (node_distance(local_node, node) !=
5025 node_distance(local_node, prev_node))
5026 node_load[node] += 1;
5027
5028 node_order[nr_nodes++] = node;
5029 prev_node = node;
5030 }
5031
5032 build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
5033 build_thisnode_zonelists(pgdat);
5034 pr_info("Fallback order for Node %d: ", local_node);
5035 for (node = 0; node < nr_nodes; node++)
5036 pr_cont("%d ", node_order[node]);
5037 pr_cont("\n");
5038 }
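
/*
 * Example of the resulting boot output on a hypothetical two-node
 * machine (illustration only):
 *
 *	Fallback order for Node 0: 0 1
 *	Fallback order for Node 1: 1 0
 */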
5039
5040 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
5041 /*
5042 * Return node id of node used for "local" allocations.
5043 * I.e., first node id of first zone in arg node's generic zonelist.
5044 * Used for initializing percpu 'numa_mem', which is used primarily
5045 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
5046 */
5047 int local_memory_node(int node)
5048 {
5049 struct zoneref *z;
5050
5051 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
5052 gfp_zone(GFP_KERNEL),
5053 NULL);
5054 return zone_to_nid(z->zone);
5055 }
5056 #endif
5057
5058 static void setup_min_unmapped_ratio(void);
5059 static void setup_min_slab_ratio(void);
5060 #else /* CONFIG_NUMA */
5061
5062 static void build_zonelists(pg_data_t *pgdat)
5063 {
5064 int node, local_node;
5065 struct zoneref *zonerefs;
5066 int nr_zones;
5067
5068 local_node = pgdat->node_id;
5069
5070 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5071 nr_zones = build_zonerefs_node(pgdat, zonerefs);
5072 zonerefs += nr_zones;
5073
5074 /*
5075 * Now we build the zonelist so that it contains the zones
5076 * of all the other nodes.
5077 * We don't want to pressure a particular node, so when
5078 * building the zones for node N, we make sure that the
5079 * zones coming right after the local ones are those from
5080 * node N+1, wrapping back to node 0 after the last node.
5081 */
5082 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
5083 if (!node_online(node))
5084 continue;
5085 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
5086 zonerefs += nr_zones;
5087 }
5088 for (node = 0; node < local_node; node++) {
5089 if (!node_online(node))
5090 continue;
5091 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
5092 zonerefs += nr_zones;
5093 }
5094
5095 zonerefs->zone = NULL;
5096 zonerefs->zone_idx = 0;
5097 }
5098
5099 #endif /* CONFIG_NUMA */
5100
5101 /*
5102 * Boot pageset table. One per cpu which is going to be used for all
5103 * zones and all nodes. The parameters will be set in such a way
5104 * that an item put on a list will immediately be handed over to
5105 * the buddy list. This is safe since pageset manipulation is done
5106 * with interrupts disabled.
5107 *
5108 * The boot_pagesets must be kept even after bootup is complete for
5109 * unused processors and/or zones. They do play a role for bootstrapping
5110 * hotplugged processors.
5111 *
5112 * zoneinfo_show() and maybe other functions do
5113 * not check if the processor is online before following the pageset pointer.
5114 * Other parts of the kernel may not check if the zone is available.
5115 */
5116 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats);
5117 /* These effectively disable the pcplists in the boot pageset completely */
5118 #define BOOT_PAGESET_HIGH 0
5119 #define BOOT_PAGESET_BATCH 1
5120 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset);
5121 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats);
5122
5123 static void __build_all_zonelists(void *data)
5124 {
5125 int nid;
5126 int __maybe_unused cpu;
5127 pg_data_t *self = data;
5128 unsigned long flags;
5129
5130 /*
5131 * The zonelist_update_seq must be acquired with irqsave because the
5132 * reader can be invoked from IRQ with GFP_ATOMIC.
5133 */
5134 write_seqlock_irqsave(&zonelist_update_seq, flags);
5135 /*
5136 * Also disable synchronous printk() to prevent any printk() from
5137 * trying to hold port->lock, because
5138 * tty_insert_flip_string_and_push_buffer() on another CPU might be
5139 * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held.
5140 */
5141 printk_deferred_enter();
5142
5143 #ifdef CONFIG_NUMA
5144 memset(node_load, 0, sizeof(node_load));
5145 #endif
5146
5147 /*
5148 * This node is hotadded and no memory is yet present. So just
5149 * building zonelists is fine - no need to touch other nodes.
5150 */
5151 if (self && !node_online(self->node_id)) {
5152 build_zonelists(self);
5153 } else {
5154 /*
5155 * All possible nodes have pgdat preallocated
5156 * in free_area_init
5157 */
5158 for_each_node(nid) {
5159 pg_data_t *pgdat = NODE_DATA(nid);
5160
5161 build_zonelists(pgdat);
5162 }
5163
5164 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
5165 /*
5166 * We now know the "local memory node" for each node--
5167 * i.e., the node of the first zone in the generic zonelist.
5168 * Set up numa_mem percpu variable for on-line cpus. During
5169 * boot, only the boot cpu should be on-line; we'll init the
5170 * secondary cpus' numa_mem as they come on-line. During
5171 * node/memory hotplug, we'll fixup all on-line cpus.
5172 */
5173 for_each_online_cpu(cpu)
5174 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
5175 #endif
5176 }
5177
5178 printk_deferred_exit();
5179 write_sequnlock_irqrestore(&zonelist_update_seq, flags);
5180 }
5181
5182 static noinline void __init
5183 build_all_zonelists_init(void)
5184 {
5185 int cpu;
5186
5187 __build_all_zonelists(NULL);
5188
5189 /*
5190 * Initialize the boot_pagesets that are going to be used
5191 * for bootstrapping processors. The real pagesets for
5192 * each zone will be allocated later when the per cpu
5193 * allocator is available.
5194 *
5195 * The boot_pagesets are also used for bootstrapping offline
5196 * cpus once the system is booted, because the pagesets
5197 * are needed to initialize allocators on a specific cpu too.
5198 * E.g. the percpu allocator needs the page allocator, which
5199 * needs the percpu allocator in order to allocate its pagesets
5200 * (a chicken-egg dilemma).
5201 */
5202 for_each_possible_cpu(cpu)
5203 per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu));
5204
5205 mminit_verify_zonelist();
5206 cpuset_init_current_mems_allowed();
5207 }
5208
5209 /*
5210 * Build all zonelists; the boot-time __init path is taken only while system_state == SYSTEM_BOOTING.
5211 *
5212 * __ref due to call of __init annotated helper build_all_zonelists_init
5213 * [protected by SYSTEM_BOOTING].
5214 */
5215 void __ref build_all_zonelists(pg_data_t *pgdat)
5216 {
5217 unsigned long vm_total_pages;
5218
5219 if (system_state == SYSTEM_BOOTING) {
5220 build_all_zonelists_init();
5221 } else {
5222 __build_all_zonelists(pgdat);
5223 /* cpuset refresh routine should be here */
5224 }
5225 /* Get the number of free pages beyond high watermark in all zones. */
5226 vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
5227 /*
5228 * Disable grouping by mobility if the number of pages in the
5229 * system is too low to allow the mechanism to work. It would be
5230 * more accurate, but expensive to check per-zone. This check is
5231 * made on memory-hotadd so a system can start with mobility
5232 * disabled and enable it later.
5233 */
5234 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
5235 page_group_by_mobility_disabled = 1;
5236 else
5237 page_group_by_mobility_disabled = 0;
5238
5239 pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n",
5240 nr_online_nodes,
5241 page_group_by_mobility_disabled ? "off" : "on",
5242 vm_total_pages);
5243 #ifdef CONFIG_NUMA
5244 pr_info("Policy zone: %s\n", zone_names[policy_zone]);
5245 #endif
5246 }
5247
5248 static int zone_batchsize(struct zone *zone)
5249 {
5250 #ifdef CONFIG_MMU
5251 int batch;
5252
5253 /*
5254 * The number of pages to batch allocate is either ~0.1%
5255 * of the zone or 1MB, whichever is smaller. The batch
5256 * size strikes a balance between allocation latency
5257 * and zone lock contention.
5258 */
5259 batch = min(zone_managed_pages(zone) >> 10, SZ_1M / PAGE_SIZE);
5260 batch /= 4; /* We effectively *= 4 below */
5261 if (batch < 1)
5262 batch = 1;
5263
5264 /*
5265 * Clamp the batch to a 2^n - 1 value. Having a power
5266 * of 2 value was found to be more likely to have
5267 * suboptimal cache aliasing properties in some cases.
5268 *
5269 * For example if 2 tasks are alternately allocating
5270 * batches of pages, one task can end up with a lot
5271 * of pages of one half of the possible page colors
5272 * and the other with pages of the other colors.
5273 */
5274 batch = rounddown_pow_of_two(batch + batch/2) - 1;
5275
5276 return batch;
5277
5278 #else
5279 /* The deferral and batching of frees should be suppressed under NOMMU
5280 * conditions.
5281 *
5282 * The problem is that NOMMU needs to be able to allocate large chunks
5283 * of contiguous memory as there's no hardware page translation to
5284 * assemble apparent contiguous memory from discontiguous pages.
5285 *
5286 * Queueing large contiguous runs of pages for batching, however,
5287 * causes the pages to actually be freed in smaller chunks. As there
5288 * can be a significant delay between the individual batches being
5289 * recycled, this leads to the once large chunks of space being
5290 * fragmented and becoming unavailable for high-order allocations.
5291 */
5292 return 0;
5293 #endif
5294 }
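
/*
 * Worked example (illustration only): a zone with 4GiB managed and
 * 4KiB pages has zone_managed_pages() == 1048576, so
 * batch == min(1048576 >> 10, 256) == 256, then 256 / 4 == 64, and
 * rounddown_pow_of_two(64 + 32) - 1 == 63. Callers clamp the result
 * to at least 1, see zone_set_pageset_high_and_batch().
 */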
5295
5296 static int percpu_pagelist_high_fraction;
5297 static int zone_highsize(struct zone *zone, int batch, int cpu_online)
5298 {
5299 #ifdef CONFIG_MMU
5300 int high;
5301 int nr_split_cpus;
5302 unsigned long total_pages;
5303
5304 if (!percpu_pagelist_high_fraction) {
5305 /*
5306 * By default, the high value of the pcp is based on the zone
5307 * low watermark so that if they are full then background
5308 * reclaim will not be started prematurely.
5309 */
5310 total_pages = low_wmark_pages(zone);
5311 } else {
5312 /*
5313 * If percpu_pagelist_high_fraction is configured, the high
5314 * value is based on a fraction of the managed pages in the
5315 * zone.
5316 */
5317 total_pages = zone_managed_pages(zone) / percpu_pagelist_high_fraction;
5318 }
5319
5320 /*
5321 * Split the high value across all online CPUs local to the zone. Note
5322 * that early in boot CPUs may not be online yet, and that during
5323 * CPU hotplug the cpumask is not yet updated when a CPU is being
5324 * onlined. For memory nodes that have no CPUs, split pcp->high across
5325 * all online CPUs to mitigate the risk that reclaim is triggered
5326 * prematurely due to pages stored on pcp lists.
5327 */
5328 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online;
5329 if (!nr_split_cpus)
5330 nr_split_cpus = num_online_cpus();
5331 high = total_pages / nr_split_cpus;
5332
5333 /*
5334 * Ensure high is at least batch*4. The multiple is based on the
5335 * historical relationship between high and batch.
5336 */
5337 high = max(high, batch << 2);
5338
5339 return high;
5340 #else
5341 return 0;
5342 #endif
5343 }
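
/*
 * Worked example (hypothetical numbers, illustration only): with the
 * default percpu_pagelist_high_fraction == 0, a zone with a low
 * watermark of 16384 pages on a node with 8 online CPUs gets
 * high == 16384 / 8 == 2048 pages per CPU, unless batch << 2 is
 * larger. With percpu_pagelist_high_fraction == 8 and
 * zone_managed_pages() == 1048576, total_pages would be 131072 instead.
 */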
5344
5345 /*
5346 * pcp->high and pcp->batch values are related and generally batch is lower
5347 * than high. They are also related to pcp->count such that count is lower
5348 * than high, and as soon as it reaches high, the pcplist is flushed.
5349 *
5350 * However, guaranteeing these relations at all times would require e.g. write
5351 * barriers here but also careful usage of read barriers at the read side, and
5352 * thus be prone to error and bad for performance. Thus the update only prevents
5353 * store tearing. Any new users of pcp->batch and pcp->high should ensure they
5354 * can cope with those fields changing asynchronously, and fully trust only the
5355 * pcp->count field on the local CPU with interrupts disabled.
5356 *
5357 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
5358 * outside of boot time (or some other assurance that no concurrent updaters
5359 * exist).
5360 */
5361 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
5362 unsigned long batch)
5363 {
5364 WRITE_ONCE(pcp->batch, batch);
5365 WRITE_ONCE(pcp->high, high);
5366 }
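
/*
 * Readers pair with the WRITE_ONCE() stores above by snapshotting the
 * fields, e.g. (sketch only):
 *
 *	unsigned long high = READ_ONCE(pcp->high);
 *	unsigned long batch = READ_ONCE(pcp->batch);
 *
 * and must tolerate either value changing asynchronously, as described
 * in the comment above.
 */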
5367
5368 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats)
5369 {
5370 int pindex;
5371
5372 memset(pcp, 0, sizeof(*pcp));
5373 memset(pzstats, 0, sizeof(*pzstats));
5374
5375 spin_lock_init(&pcp->lock);
5376 for (pindex = 0; pindex < NR_PCP_LISTS; pindex++)
5377 INIT_LIST_HEAD(&pcp->lists[pindex]);
5378
5379 /*
5380 * Set batch and high values safe for a boot pageset. A true percpu
5381 * pageset's initialization will update them subsequently. Here we don't
5382 * need to be as careful as pageset_update() as nobody can access the
5383 * pageset yet.
5384 */
5385 pcp->high = BOOT_PAGESET_HIGH;
5386 pcp->batch = BOOT_PAGESET_BATCH;
5387 pcp->free_factor = 0;
5388 }
5389
5390 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high,
5391 unsigned long batch)
5392 {
5393 struct per_cpu_pages *pcp;
5394 int cpu;
5395
5396 for_each_possible_cpu(cpu) {
5397 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
5398 pageset_update(pcp, high, batch);
5399 }
5400 }
5401
5402 /*
5403 * Calculate and set new high and batch values for all per-cpu pagesets of a
5404 * zone based on the zone's size.
5405 */
5406 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online)
5407 {
5408 int new_high, new_batch;
5409
5410 new_batch = max(1, zone_batchsize(zone));
5411 new_high = zone_highsize(zone, new_batch, cpu_online);
5412
5413 if (zone->pageset_high == new_high &&
5414 zone->pageset_batch == new_batch)
5415 return;
5416
5417 zone->pageset_high = new_high;
5418 zone->pageset_batch = new_batch;
5419
5420 __zone_set_pageset_high_and_batch(zone, new_high, new_batch);
5421 }
5422
5423 void __meminit setup_zone_pageset(struct zone *zone)
5424 {
5425 int cpu;
5426
5427 /* Size may be 0 on !SMP && !NUMA */
5428 if (sizeof(struct per_cpu_zonestat) > 0)
5429 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat);
5430
5431 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages);
5432 for_each_possible_cpu(cpu) {
5433 struct per_cpu_pages *pcp;
5434 struct per_cpu_zonestat *pzstats;
5435
5436 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
5437 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
5438 per_cpu_pages_init(pcp, pzstats);
5439 }
5440
5441 zone_set_pageset_high_and_batch(zone, 0);
5442 }
5443
5444 /*
5445 * The zone indicated has a new number of managed_pages; batch sizes and percpu
5446 * page high values need to be recalculated.
5447 */
5448 static void zone_pcp_update(struct zone *zone, int cpu_online)
5449 {
5450 mutex_lock(&pcp_batch_high_lock);
5451 zone_set_pageset_high_and_batch(zone, cpu_online);
5452 mutex_unlock(&pcp_batch_high_lock);
5453 }
5454
5455 /*
5456 * Allocate per cpu pagesets and initialize them.
5457 * Before this call only boot pagesets were available.
5458 */
5459 void __init setup_per_cpu_pageset(void)
5460 {
5461 struct pglist_data *pgdat;
5462 struct zone *zone;
5463 int __maybe_unused cpu;
5464
5465 for_each_populated_zone(zone)
5466 setup_zone_pageset(zone);
5467
5468 #ifdef CONFIG_NUMA
5469 /*
5470 * Unpopulated zones continue using the boot pagesets.
5471 * The numa stats for these pagesets need to be reset.
5472 * Otherwise, they will end up skewing the stats of
5473 * the nodes these zones are associated with.
5474 */
5475 for_each_possible_cpu(cpu) {
5476 struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu);
5477 memset(pzstats->vm_numa_event, 0,
5478 sizeof(pzstats->vm_numa_event));
5479 }
5480 #endif
5481
5482 for_each_online_pgdat(pgdat)
5483 pgdat->per_cpu_nodestats =
5484 alloc_percpu(struct per_cpu_nodestat);
5485 }
5486
5487 __meminit void zone_pcp_init(struct zone *zone)
5488 {
5489 /*
5490 * per cpu subsystem is not up at this point. The following code
5491 * relies on the ability of the linker to provide the
5492 * offset of a (static) per cpu variable into the per cpu area.
5493 */
5494 zone->per_cpu_pageset = &boot_pageset;
5495 zone->per_cpu_zonestats = &boot_zonestats;
5496 zone->pageset_high = BOOT_PAGESET_HIGH;
5497 zone->pageset_batch = BOOT_PAGESET_BATCH;
5498
5499 if (populated_zone(zone))
5500 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name,
5501 zone->present_pages, zone_batchsize(zone));
5502 }
5503
5504 void adjust_managed_page_count(struct page *page, long count)
5505 {
5506 atomic_long_add(count, &page_zone(page)->managed_pages);
5507 totalram_pages_add(count);
5508 #ifdef CONFIG_HIGHMEM
5509 if (PageHighMem(page))
5510 totalhigh_pages_add(count);
5511 #endif
5512 }
5513 EXPORT_SYMBOL(adjust_managed_page_count);
5514
5515 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
5516 {
5517 void *pos;
5518 unsigned long pages = 0;
5519
5520 start = (void *)PAGE_ALIGN((unsigned long)start);
5521 end = (void *)((unsigned long)end & PAGE_MASK);
5522 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
5523 struct page *page = virt_to_page(pos);
5524 void *direct_map_addr;
5525
5526 /*
5527 * 'direct_map_addr' might be different from 'pos'
5528 * because virt_to_page() on some architectures
5529 * works with aliases. Getting the direct map
5530 * address ensures that we get a _writeable_
5531 * alias for the memset().
5532 */
5533 direct_map_addr = page_address(page);
5534 /*
5535 * Perform a kasan-unchecked memset() since this memory
5536 * has not been initialized.
5537 */
5538 direct_map_addr = kasan_reset_tag(direct_map_addr);
5539 if ((unsigned int)poison <= 0xFF)
5540 memset(direct_map_addr, poison, PAGE_SIZE);
5541
5542 free_reserved_page(page);
5543 }
5544
5545 if (pages && s)
5546 pr_info("Freeing %s memory: %ldK\n", s, K(pages));
5547
5548 return pages;
5549 }
5550
5551 static int page_alloc_cpu_dead(unsigned int cpu)
5552 {
5553 struct zone *zone;
5554
5555 lru_add_drain_cpu(cpu);
5556 mlock_drain_remote(cpu);
5557 drain_pages(cpu);
5558
5559 /*
5560 * Spill the event counters of the dead processor
5561 * into the current processors event counters.
5562 * This artificially elevates the count of the current
5563 * processor.
5564 */
5565 vm_events_fold_cpu(cpu);
5566
5567 /*
5568 * Zero the differential counters of the dead processor
5569 * so that the vm statistics are consistent.
5570 *
5571 * This is only okay since the processor is dead and cannot
5572 * race with what we are doing.
5573 */
5574 cpu_vm_stats_fold(cpu);
5575
5576 for_each_populated_zone(zone)
5577 zone_pcp_update(zone, 0);
5578
5579 return 0;
5580 }
5581
5582 static int page_alloc_cpu_online(unsigned int cpu)
5583 {
5584 struct zone *zone;
5585
5586 for_each_populated_zone(zone)
5587 zone_pcp_update(zone, 1);
5588 return 0;
5589 }
5590
5591 void __init page_alloc_init_cpuhp(void)
5592 {
5593 int ret;
5594
5595 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC,
5596 "mm/page_alloc:pcp",
5597 page_alloc_cpu_online,
5598 page_alloc_cpu_dead);
5599 WARN_ON(ret < 0);
5600 }
5601
5602 /*
5603 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
5604 * or min_free_kbytes changes.
5605 */
5606 static void calculate_totalreserve_pages(void)
5607 {
5608 struct pglist_data *pgdat;
5609 unsigned long reserve_pages = 0;
5610 enum zone_type i, j;
5611
5612 for_each_online_pgdat(pgdat) {
5613
5614 pgdat->totalreserve_pages = 0;
5615
5616 for (i = 0; i < MAX_NR_ZONES; i++) {
5617 struct zone *zone = pgdat->node_zones + i;
5618 long max = 0;
5619 unsigned long managed_pages = zone_managed_pages(zone);
5620
5621 /* Find valid and maximum lowmem_reserve in the zone */
5622 for (j = i; j < MAX_NR_ZONES; j++) {
5623 if (zone->lowmem_reserve[j] > max)
5624 max = zone->lowmem_reserve[j];
5625 }
5626
5627 /* we treat the high watermark as reserved pages. */
5628 max += high_wmark_pages(zone);
5629
5630 if (max > managed_pages)
5631 max = managed_pages;
5632
5633 pgdat->totalreserve_pages += max;
5634
5635 reserve_pages += max;
5636 }
5637 }
5638 totalreserve_pages = reserve_pages;
5639 }
5640
5641 /*
5642 * setup_per_zone_lowmem_reserve - called whenever
5643 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
5644 * has correct lowmem reserve values, so an adequate number of
5645 * pages are left in the zone after a successful __alloc_pages().
5646 */
5647 static void setup_per_zone_lowmem_reserve(void)
5648 {
5649 struct pglist_data *pgdat;
5650 enum zone_type i, j;
5651
5652 for_each_online_pgdat(pgdat) {
5653 for (i = 0; i < MAX_NR_ZONES - 1; i++) {
5654 struct zone *zone = &pgdat->node_zones[i];
5655 int ratio = sysctl_lowmem_reserve_ratio[i];
5656 bool clear = !ratio || !zone_managed_pages(zone);
5657 unsigned long managed_pages = 0;
5658
5659 for (j = i + 1; j < MAX_NR_ZONES; j++) {
5660 struct zone *upper_zone = &pgdat->node_zones[j];
5661
5662 managed_pages += zone_managed_pages(upper_zone);
5663
5664 if (clear)
5665 zone->lowmem_reserve[j] = 0;
5666 else
5667 zone->lowmem_reserve[j] = managed_pages / ratio;
5668 }
5669 }
5670 }
5671
5672 /* update totalreserve_pages */
5673 calculate_totalreserve_pages();
5674 }
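
/*
 * Worked example (hypothetical sizes, illustration only): with
 * sysctl_lowmem_reserve_ratio[ZONE_DMA] == 256 and a cumulative
 * 4194304 managed pages in the zones above ZONE_DMA, the DMA zone's
 * highest lowmem_reserve[] entry becomes 4194304 / 256 == 16384 pages
 * withheld from allocations that could have been satisfied from the
 * higher zones.
 */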
5675
5676 static void __setup_per_zone_wmarks(void)
5677 {
5678 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
5679 unsigned long lowmem_pages = 0;
5680 struct zone *zone;
5681 unsigned long flags;
5682
5683 /* Calculate total number of !ZONE_HIGHMEM and !ZONE_MOVABLE pages */
5684 for_each_zone(zone) {
5685 if (!is_highmem(zone) && zone_idx(zone) != ZONE_MOVABLE)
5686 lowmem_pages += zone_managed_pages(zone);
5687 }
5688
5689 for_each_zone(zone) {
5690 u64 tmp;
5691
5692 spin_lock_irqsave(&zone->lock, flags);
5693 tmp = (u64)pages_min * zone_managed_pages(zone);
5694 do_div(tmp, lowmem_pages);
5695 if (is_highmem(zone) || zone_idx(zone) == ZONE_MOVABLE) {
5696 /*
5697 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
5698 * need highmem and movable zones pages, so cap pages_min
5699 * to a small value here.
5700 *
5701 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
5702 * deltas control async page reclaim, and so should
5703 * not be capped for highmem and movable zones.
5704 */
5705 unsigned long min_pages;
5706
5707 min_pages = zone_managed_pages(zone) / 1024;
5708 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
5709 zone->_watermark[WMARK_MIN] = min_pages;
5710 } else {
5711 /*
5712 * If it's a lowmem zone, reserve a number of pages
5713 * proportionate to the zone's size.
5714 */
5715 zone->_watermark[WMARK_MIN] = tmp;
5716 }
5717
5718 /*
5719 * Set the kswapd watermarks distance according to the
5720 * scale factor in proportion to available memory, but
5721 * ensure a minimum size on small systems.
5722 */
5723 tmp = max_t(u64, tmp >> 2,
5724 mult_frac(zone_managed_pages(zone),
5725 watermark_scale_factor, 10000));
5726
5727 zone->watermark_boost = 0;
5728 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
5729 zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp;
5730 zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp;
5731
5732 spin_unlock_irqrestore(&zone->lock, flags);
5733 }
5734
5735 /* update totalreserve_pages */
5736 calculate_totalreserve_pages();
5737 }
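
/*
 * Worked example (hypothetical values, illustration only): with
 * min_free_kbytes == 4096 and 4KiB pages, pages_min == 1024. For a
 * single lowmem zone of 1048576 managed pages, WMARK_MIN == 1024;
 * with watermark_scale_factor == 10 the kswapd delta is
 * max(1024 >> 2, 1048576 * 10 / 10000) == 1048, giving
 * WMARK_LOW == 2072 and WMARK_HIGH == 3120.
 */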
5738
5739 /**
5740 * setup_per_zone_wmarks - called when min_free_kbytes changes
5741 * or when memory is hot-{added|removed}
5742 *
5743 * Ensures that the watermark[min,low,high] values for each zone are set
5744 * correctly with respect to min_free_kbytes.
5745 */
5746 void setup_per_zone_wmarks(void)
5747 {
5748 struct zone *zone;
5749 static DEFINE_SPINLOCK(lock);
5750
5751 spin_lock(&lock);
5752 __setup_per_zone_wmarks();
5753 spin_unlock(&lock);
5754
5755 /*
5756 * The watermark sizes have changed, so update the pcp batch
5757 * and high limits, or the limits may be inappropriate.
5758 */
5759 for_each_zone(zone)
5760 zone_pcp_update(zone, 0);
5761 }
5762
5763 /*
5764 * Initialise min_free_kbytes.
5765 *
5766 * For small machines we want it small (128k min). For large machines
5767 * we want it large (256MB max). But it is not linear, because network
5768 * bandwidth does not increase linearly with machine size. We use
5769 *
5770 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
5771 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
5772 *
5773 * which yields
5774 *
5775 * 16MB: 512k
5776 * 32MB: 724k
5777 * 64MB: 1024k
5778 * 128MB: 1448k
5779 * 256MB: 2048k
5780 * 512MB: 2896k
5781 * 1024MB: 4096k
5782 * 2048MB: 5792k
5783 * 4096MB: 8192k
5784 * 8192MB: 11584k
5785 * 16384MB: 16384k
5786 */
5787 void calculate_min_free_kbytes(void)
5788 {
5789 unsigned long lowmem_kbytes;
5790 int new_min_free_kbytes;
5791
5792 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
5793 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
5794
5795 if (new_min_free_kbytes > user_min_free_kbytes)
5796 min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144);
5797 else
5798 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
5799 new_min_free_kbytes, user_min_free_kbytes);
5800
5801 }
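
/*
 * Worked example (illustration only): 16GiB of lowmem gives
 * lowmem_kbytes == 16777216, so new_min_free_kbytes ==
 * int_sqrt(16777216 * 16) == 16384, matching the 16384MB row of the
 * table above.
 */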
5802
5803 int __meminit init_per_zone_wmark_min(void)
5804 {
5805 calculate_min_free_kbytes();
5806 setup_per_zone_wmarks();
5807 refresh_zone_stat_thresholds();
5808 setup_per_zone_lowmem_reserve();
5809
5810 #ifdef CONFIG_NUMA
5811 setup_min_unmapped_ratio();
5812 setup_min_slab_ratio();
5813 #endif
5814
5815 khugepaged_min_free_kbytes_update();
5816
5817 return 0;
5818 }
5819 postcore_initcall(init_per_zone_wmark_min)
5820
5821 /*
5822 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
5823 * that we can call two helper functions whenever min_free_kbytes
5824 * changes.
5825 */
5826 static int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
5827 void *buffer, size_t *length, loff_t *ppos)
5828 {
5829 int rc;
5830
5831 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5832 if (rc)
5833 return rc;
5834
5835 if (write) {
5836 user_min_free_kbytes = min_free_kbytes;
5837 setup_per_zone_wmarks();
5838 }
5839 return 0;
5840 }
5841
5842 static int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
5843 void *buffer, size_t *length, loff_t *ppos)
5844 {
5845 int rc;
5846
5847 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5848 if (rc)
5849 return rc;
5850
5851 if (write)
5852 setup_per_zone_wmarks();
5853
5854 return 0;
5855 }
5856
5857 #ifdef CONFIG_NUMA
5858 static void setup_min_unmapped_ratio(void)
5859 {
5860 pg_data_t *pgdat;
5861 struct zone *zone;
5862
5863 for_each_online_pgdat(pgdat)
5864 pgdat->min_unmapped_pages = 0;
5865
5866 for_each_zone(zone)
5867 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
5868 sysctl_min_unmapped_ratio) / 100;
5869 }
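
/*
 * Worked example (illustration only): with sysctl_min_unmapped_ratio
 * == 1, a node whose only zone has 1048576 managed pages ends up with
 * min_unmapped_pages == 1048576 * 1 / 100 == 10485.
 */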
5870
5871
5872 static int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
5873 void *buffer, size_t *length, loff_t *ppos)
5874 {
5875 int rc;
5876
5877 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5878 if (rc)
5879 return rc;
5880
5881 setup_min_unmapped_ratio();
5882
5883 return 0;
5884 }
5885
5886 static void setup_min_slab_ratio(void)
5887 {
5888 pg_data_t *pgdat;
5889 struct zone *zone;
5890
5891 for_each_online_pgdat(pgdat)
5892 pgdat->min_slab_pages = 0;
5893
5894 for_each_zone(zone)
5895 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
5896 sysctl_min_slab_ratio) / 100;
5897 }
5898
5899 static int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
5900 void *buffer, size_t *length, loff_t *ppos)
5901 {
5902 int rc;
5903
5904 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5905 if (rc)
5906 return rc;
5907
5908 setup_min_slab_ratio();
5909
5910 return 0;
5911 }
5912 #endif
5913
5914 /*
5915 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
5916 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
5917 * whenever sysctl_lowmem_reserve_ratio changes.
5918 *
5919 * The reserve ratio obviously has absolutely no relation with the
5920 * minimum watermarks. The lowmem reserve ratio can only be
5921 * interpreted as a function of the boot-time zone sizes.
5922 */
5923 static int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table,
5924 int write, void *buffer, size_t *length, loff_t *ppos)
5925 {
5926 int i;
5927
5928 proc_dointvec_minmax(table, write, buffer, length, ppos);
5929
5930 for (i = 0; i < MAX_NR_ZONES; i++) {
5931 if (sysctl_lowmem_reserve_ratio[i] < 1)
5932 sysctl_lowmem_reserve_ratio[i] = 0;
5933 }
5934
5935 setup_per_zone_lowmem_reserve();
5936 return 0;
5937 }
5938
5939 /*
5940 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each
5941 * cpu. It is the fraction of total pages in each zone that a hot per cpu
5942 * pagelist can have before it gets flushed back to the buddy allocator.
5943 */
5944 static int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table,
5945 int write, void *buffer, size_t *length, loff_t *ppos)
5946 {
5947 struct zone *zone;
5948 int old_percpu_pagelist_high_fraction;
5949 int ret;
5950
5951 mutex_lock(&pcp_batch_high_lock);
5952 old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction;
5953
5954 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
5955 if (!write || ret < 0)
5956 goto out;
5957
5958 /* Sanity checking to avoid pcp imbalance */
5959 if (percpu_pagelist_high_fraction &&
5960 percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) {
5961 percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction;
5962 ret = -EINVAL;
5963 goto out;
5964 }
5965
5966 /* No change? */
5967 if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction)
5968 goto out;
5969
5970 for_each_populated_zone(zone)
5971 zone_set_pageset_high_and_batch(zone, 0);
5972 out:
5973 mutex_unlock(&pcp_batch_high_lock);
5974 return ret;
5975 }
5976
5977 static struct ctl_table page_alloc_sysctl_table[] = {
5978 {
5979 .procname = "min_free_kbytes",
5980 .data = &min_free_kbytes,
5981 .maxlen = sizeof(min_free_kbytes),
5982 .mode = 0644,
5983 .proc_handler = min_free_kbytes_sysctl_handler,
5984 .extra1 = SYSCTL_ZERO,
5985 },
5986 {
5987 .procname = "watermark_boost_factor",
5988 .data = &watermark_boost_factor,
5989 .maxlen = sizeof(watermark_boost_factor),
5990 .mode = 0644,
5991 .proc_handler = proc_dointvec_minmax,
5992 .extra1 = SYSCTL_ZERO,
5993 },
5994 {
5995 .procname = "watermark_scale_factor",
5996 .data = &watermark_scale_factor,
5997 .maxlen = sizeof(watermark_scale_factor),
5998 .mode = 0644,
5999 .proc_handler = watermark_scale_factor_sysctl_handler,
6000 .extra1 = SYSCTL_ONE,
6001 .extra2 = SYSCTL_THREE_THOUSAND,
6002 },
6003 {
6004 .procname = "percpu_pagelist_high_fraction",
6005 .data = &percpu_pagelist_high_fraction,
6006 .maxlen = sizeof(percpu_pagelist_high_fraction),
6007 .mode = 0644,
6008 .proc_handler = percpu_pagelist_high_fraction_sysctl_handler,
6009 .extra1 = SYSCTL_ZERO,
6010 },
6011 {
6012 .procname = "lowmem_reserve_ratio",
6013 .data = &sysctl_lowmem_reserve_ratio,
6014 .maxlen = sizeof(sysctl_lowmem_reserve_ratio),
6015 .mode = 0644,
6016 .proc_handler = lowmem_reserve_ratio_sysctl_handler,
6017 },
6018 #ifdef CONFIG_NUMA
6019 {
6020 .procname = "numa_zonelist_order",
6021 .data = &numa_zonelist_order,
6022 .maxlen = NUMA_ZONELIST_ORDER_LEN,
6023 .mode = 0644,
6024 .proc_handler = numa_zonelist_order_handler,
6025 },
6026 {
6027 .procname = "min_unmapped_ratio",
6028 .data = &sysctl_min_unmapped_ratio,
6029 .maxlen = sizeof(sysctl_min_unmapped_ratio),
6030 .mode = 0644,
6031 .proc_handler = sysctl_min_unmapped_ratio_sysctl_handler,
6032 .extra1 = SYSCTL_ZERO,
6033 .extra2 = SYSCTL_ONE_HUNDRED,
6034 },
6035 {
6036 .procname = "min_slab_ratio",
6037 .data = &sysctl_min_slab_ratio,
6038 .maxlen = sizeof(sysctl_min_slab_ratio),
6039 .mode = 0644,
6040 .proc_handler = sysctl_min_slab_ratio_sysctl_handler,
6041 .extra1 = SYSCTL_ZERO,
6042 .extra2 = SYSCTL_ONE_HUNDRED,
6043 },
6044 #endif
6045 {}
6046 };
6047
6048 void __init page_alloc_sysctl_init(void)
6049 {
6050 register_sysctl_init("vm", page_alloc_sysctl_table);
6051 }
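
/*
 * The table above is registered under the "vm" namespace, so the knobs
 * appear as e.g. /proc/sys/vm/min_free_kbytes and
 * /proc/sys/vm/percpu_pagelist_high_fraction; writing to an entry
 * invokes the proc_handler listed for it.
 */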
6052
6053 #ifdef CONFIG_CONTIG_ALLOC
6054 /* Usage: See admin-guide/dynamic-debug-howto.rst */
6055 static void alloc_contig_dump_pages(struct list_head *page_list)
6056 {
6057 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure");
6058
6059 if (DYNAMIC_DEBUG_BRANCH(descriptor)) {
6060 struct page *page;
6061
6062 dump_stack();
6063 list_for_each_entry(page, page_list, lru)
6064 dump_page(page, "migration failure");
6065 }
6066 }
6067
6068 /* [start, end) must belong to a single zone. */
6069 int __alloc_contig_migrate_range(struct compact_control *cc,
6070 unsigned long start, unsigned long end)
6071 {
6072 /* This function is based on compact_zone() from compaction.c. */
6073 unsigned int nr_reclaimed;
6074 unsigned long pfn = start;
6075 unsigned int tries = 0;
6076 int ret = 0;
6077 struct migration_target_control mtc = {
6078 .nid = zone_to_nid(cc->zone),
6079 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
6080 };
6081
6082 lru_cache_disable();
6083
6084 while (pfn < end || !list_empty(&cc->migratepages)) {
6085 if (fatal_signal_pending(current)) {
6086 ret = -EINTR;
6087 break;
6088 }
6089
6090 if (list_empty(&cc->migratepages)) {
6091 cc->nr_migratepages = 0;
6092 ret = isolate_migratepages_range(cc, pfn, end);
6093 if (ret && ret != -EAGAIN)
6094 break;
6095 pfn = cc->migrate_pfn;
6096 tries = 0;
6097 } else if (++tries == 5) {
6098 ret = -EBUSY;
6099 break;
6100 }
6101
6102 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
6103 &cc->migratepages);
6104 cc->nr_migratepages -= nr_reclaimed;
6105
6106 ret = migrate_pages(&cc->migratepages, alloc_migration_target,
6107 NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL);
6108
6109 /*
6110 * On -ENOMEM, migrate_pages() bails out right away. It is pointless
6111 * to retry on this error, so do the same here.
6112 */
6113 if (ret == -ENOMEM)
6114 break;
6115 }
6116
6117 lru_cache_enable();
6118 if (ret < 0) {
6119 if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY)
6120 alloc_contig_dump_pages(&cc->migratepages);
6121 putback_movable_pages(&cc->migratepages);
6122 return ret;
6123 }
6124 return 0;
6125 }
6126
6127 /**
6128 * alloc_contig_range() -- tries to allocate given range of pages
6129 * @start: start PFN to allocate
6130 * @end: one-past-the-last PFN to allocate
6131 * @migratetype: migratetype of the underlying pageblocks (either
6132 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
6133 * in range must have the same migratetype and it must
6134 * be either of the two.
6135 * @gfp_mask: GFP mask to use during compaction
6136 *
6137 * The PFN range does not have to be pageblock aligned. The PFN range must
6138 * belong to a single zone.
6139 *
6140 * The first thing this routine does is attempt to MIGRATE_ISOLATE all
6141 * pageblocks in the range. Once isolated, the pageblocks should not
6142 * be modified by others.
6143 *
6144 * Return: zero on success or negative error code. On success all
6145 * pages which PFN is in [start, end) are allocated for the caller and
6146 * need to be freed with free_contig_range().
6147 */
6148 int alloc_contig_range(unsigned long start, unsigned long end,
6149 unsigned migratetype, gfp_t gfp_mask)
6150 {
6151 unsigned long outer_start, outer_end;
6152 int order;
6153 int ret = 0;
6154
6155 struct compact_control cc = {
6156 .nr_migratepages = 0,
6157 .order = -1,
6158 .zone = page_zone(pfn_to_page(start)),
6159 .mode = MIGRATE_SYNC,
6160 .ignore_skip_hint = true,
6161 .no_set_skip_hint = true,
6162 .gfp_mask = current_gfp_context(gfp_mask),
6163 .alloc_contig = true,
6164 };
6165 INIT_LIST_HEAD(&cc.migratepages);
6166
6167 /*
6168 * What we do here is we mark all pageblocks in range as
6169 * MIGRATE_ISOLATE. Because pageblock and max order pages may
6170 * have different sizes, and due to the way the page allocator
6171 * works, start_isolate_page_range() has special handling for this.
6172 *
6173 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
6174 * migrate the pages from an unaligned range (ie. pages that
6175 * we are interested in). This will put all the pages in
6176 * range back to page allocator as MIGRATE_ISOLATE.
6177 *
6178 * When this is done, we take the pages in range from page
6179 * allocator removing them from the buddy system. This way
6180 * page allocator will never consider using them.
6181 *
6182 * This lets us mark the pageblocks back as
6183 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
6184 * aligned range but not in the unaligned, original range are
6185 * put back to the page allocator so that buddy can use them.
6186 */
6187
6188 ret = start_isolate_page_range(start, end, migratetype, 0, gfp_mask);
6189 if (ret)
6190 goto done;
6191
6192 drain_all_pages(cc.zone);
6193
6194 /*
6195 * In case of -EBUSY, we'd like to know which page caused the problem.
6196 * So, just fall through. test_pages_isolated() has a tracepoint
6197 * which will report the busy page.
6198 *
6199 * It is possible that busy pages could become available before
6200 * the call to test_pages_isolated, and the range will actually be
6201 * allocated. So, if we fall through be sure to clear ret so that
6202 * -EBUSY is not accidentally used or returned to caller.
6203 */
6204 ret = __alloc_contig_migrate_range(&cc, start, end);
6205 if (ret && ret != -EBUSY)
6206 goto done;
6207 ret = 0;
6208
6209 /*
6210 * Pages from [start, end) are within a pageblock_nr_pages
6211 * aligned blocks that are marked as MIGRATE_ISOLATE. What's
6212 * more, all pages in [start, end) are free in page allocator.
6213 * What we are going to do is to allocate all pages from
6214 * [start, end) (that is remove them from page allocator).
6215 *
6216 * The only problem is that pages at the beginning and at the
6217 * end of the range of interest may not be aligned with pages that
6218 * the page allocator holds, i.e. they can be part of higher order
6219 * pages. Because of this, we reserve the bigger range and
6220 * once this is done free the pages we are not interested in.
6221 *
6222 * We don't have to hold zone->lock here because the pages are
6223 * isolated thus they won't get removed from buddy.
6224 */
6225
6226 order = 0;
6227 outer_start = start;
6228 while (!PageBuddy(pfn_to_page(outer_start))) {
6229 if (++order > MAX_ORDER) {
6230 outer_start = start;
6231 break;
6232 }
6233 outer_start &= ~0UL << order;
6234 }
6235
6236 if (outer_start != start) {
6237 order = buddy_order(pfn_to_page(outer_start));
6238
6239 /*
6240 * The outer_start page could be a small order buddy page that
6241 * doesn't include the start page. Adjust outer_start
6242 * in this case so the failed page is reported properly
6243 * by the tracepoint in test_pages_isolated().
6244 */
6245 if (outer_start + (1UL << order) <= start)
6246 outer_start = start;
6247 }
6248
6249 /* Make sure the range is really isolated. */
6250 if (test_pages_isolated(outer_start, end, 0)) {
6251 ret = -EBUSY;
6252 goto done;
6253 }
6254
6255 /* Grab isolated pages from freelists. */
6256 outer_end = isolate_freepages_range(&cc, outer_start, end);
6257 if (!outer_end) {
6258 ret = -EBUSY;
6259 goto done;
6260 }
6261
6262 /* Free head and tail (if any) */
6263 if (start != outer_start)
6264 free_contig_range(outer_start, start - outer_start);
6265 if (end != outer_end)
6266 free_contig_range(end, outer_end - end);
6267
6268 done:
6269 undo_isolate_page_range(start, end, migratetype);
6270 return ret;
6271 }
6272 EXPORT_SYMBOL(alloc_contig_range);
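
/*
 * Minimal caller sketch (illustration only, error handling elided):
 *
 *	if (!alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE,
 *				GFP_KERNEL))
 *		free_contig_range(pfn, nr_pages);
 *
 * CMA (mm/cma.c) follows this pattern, but with MIGRATE_CMA pageblocks.
 */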
6273
6274 static int __alloc_contig_pages(unsigned long start_pfn,
6275 unsigned long nr_pages, gfp_t gfp_mask)
6276 {
6277 unsigned long end_pfn = start_pfn + nr_pages;
6278
6279 return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
6280 gfp_mask);
6281 }
6282
6283 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
6284 unsigned long nr_pages)
6285 {
6286 unsigned long i, end_pfn = start_pfn + nr_pages;
6287 struct page *page;
6288
6289 for (i = start_pfn; i < end_pfn; i++) {
6290 page = pfn_to_online_page(i);
6291 if (!page)
6292 return false;
6293
6294 if (page_zone(page) != z)
6295 return false;
6296
6297 if (PageReserved(page))
6298 return false;
6299
6300 if (PageHuge(page))
6301 return false;
6302 }
6303 return true;
6304 }
6305
6306 static bool zone_spans_last_pfn(const struct zone *zone,
6307 unsigned long start_pfn, unsigned long nr_pages)
6308 {
6309 unsigned long last_pfn = start_pfn + nr_pages - 1;
6310
6311 return zone_spans_pfn(zone, last_pfn);
6312 }
6313
6314 /**
6315 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
6316 * @nr_pages: Number of contiguous pages to allocate
6317 * @gfp_mask: GFP mask to limit search and used during compaction
6318 * @nid: Target node
6319 * @nodemask: Mask for other possible nodes
6320 *
6321 * This routine is a wrapper around alloc_contig_range(). It scans over zones
6322 * on an applicable zonelist to find a contiguous pfn range which can then be
6323 * tried for allocation with alloc_contig_range(). This routine is intended
6324 * for allocation requests which cannot be fulfilled with the buddy allocator.
6325 *
6326 * The allocated memory is always aligned to a page boundary. If nr_pages is a
6327 * power of two, then the allocated range is also guaranteed to be aligned to the same
6328 * nr_pages (e.g. 1GB request would be aligned to 1GB).
6329 *
6330 * Allocated pages can be freed with free_contig_range() or by manually calling
6331 * __free_page() on each allocated page.
6332 *
6333 * Return: pointer to contiguous pages on success, or NULL if not successful.
6334 */
6335 struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
6336 int nid, nodemask_t *nodemask)
6337 {
6338 unsigned long ret, pfn, flags;
6339 struct zonelist *zonelist;
6340 struct zone *zone;
6341 struct zoneref *z;
6342
6343 zonelist = node_zonelist(nid, gfp_mask);
6344 for_each_zone_zonelist_nodemask(zone, z, zonelist,
6345 gfp_zone(gfp_mask), nodemask) {
6346 spin_lock_irqsave(&zone->lock, flags);
6347
6348 pfn = ALIGN(zone->zone_start_pfn, nr_pages);
6349 while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
6350 if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
6351 /*
6352 * We release the zone lock here because
6353 * alloc_contig_range() will also lock the zone
6354 * at some point. If there's an allocation
6355 * spinning on this lock, it may win the race
6356 * and cause alloc_contig_range() to fail...
6357 */
6358 spin_unlock_irqrestore(&zone->lock, flags);
6359 ret = __alloc_contig_pages(pfn, nr_pages,
6360 gfp_mask);
6361 if (!ret)
6362 return pfn_to_page(pfn);
6363 spin_lock_irqsave(&zone->lock, flags);
6364 }
6365 pfn += nr_pages;
6366 }
6367 spin_unlock_irqrestore(&zone->lock, flags);
6368 }
6369 return NULL;
6370 }
6371 #endif /* CONFIG_CONTIG_ALLOC */
6372
6373 void free_contig_range(unsigned long pfn, unsigned long nr_pages)
6374 {
6375 unsigned long count = 0;
6376
6377 for (; nr_pages--; pfn++) {
6378 struct page *page = pfn_to_page(pfn);
6379
6380 count += page_count(page) != 1;
6381 __free_page(page);
6382 }
6383 WARN(count != 0, "%lu pages are still in use!\n", count);
6384 }
6385 EXPORT_SYMBOL(free_contig_range);
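
/*
 * Typical pairing of alloc_contig_pages() with free_contig_range()
 * (sketch, illustration only):
 *
 *	struct page *page = alloc_contig_pages(nr, GFP_KERNEL, nid, NULL);
 *
 *	if (page)
 *		free_contig_range(page_to_pfn(page), nr);
 *
 * This is essentially how gigantic hugetlb pages are allocated.
 */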
6386
6387 /*
6388 * Effectively disable pcplists for the zone by setting the high limit to 0
6389 * and draining all cpus. A concurrent page freeing on another CPU that's about
6390 * to put the page on the pcplist will either finish before the drain, so the
6391 * page will be drained, or observe the new high limit and skip the pcplist.
6392 *
6393 * Must be paired with a call to zone_pcp_enable().
6394 */
6395 void zone_pcp_disable(struct zone *zone)
6396 {
6397 mutex_lock(&pcp_batch_high_lock);
6398 __zone_set_pageset_high_and_batch(zone, 0, 1);
6399 __drain_all_pages(zone, true);
6400 }
6401
6402 void zone_pcp_enable(struct zone *zone)
6403 {
6404 __zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch);
6405 mutex_unlock(&pcp_batch_high_lock);
6406 }
6407
6408 void zone_pcp_reset(struct zone *zone)
6409 {
6410 int cpu;
6411 struct per_cpu_zonestat *pzstats;
6412
6413 if (zone->per_cpu_pageset != &boot_pageset) {
6414 for_each_online_cpu(cpu) {
6415 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
6416 drain_zonestat(zone, pzstats);
6417 }
6418 free_percpu(zone->per_cpu_pageset);
6419 zone->per_cpu_pageset = &boot_pageset;
6420 if (zone->per_cpu_zonestats != &boot_zonestats) {
6421 free_percpu(zone->per_cpu_zonestats);
6422 zone->per_cpu_zonestats = &boot_zonestats;
6423 }
6424 }
6425 }
6426
6427 #ifdef CONFIG_MEMORY_HOTREMOVE
6428 /*
6429 * All pages in the range must be in a single zone, must not contain holes,
6430 * must span full sections, and must be isolated before calling this function.
6431 */
6432 void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
6433 {
6434 unsigned long pfn = start_pfn;
6435 struct page *page;
6436 struct zone *zone;
6437 unsigned int order;
6438 unsigned long flags;
6439
6440 offline_mem_sections(pfn, end_pfn);
6441 zone = page_zone(pfn_to_page(pfn));
6442 spin_lock_irqsave(&zone->lock, flags);
6443 while (pfn < end_pfn) {
6444 page = pfn_to_page(pfn);
6445 /*
6446 * An HWPoisoned page may not be in the buddy system, and
6447 * its page_count() is not 0.
6448 */
6449 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
6450 pfn++;
6451 continue;
6452 }
6453 /*
6454 * At this point all remaining PageOffline() pages have a
6455 * reference count of 0 and can simply be skipped.
6456 */
6457 if (PageOffline(page)) {
6458 BUG_ON(page_count(page));
6459 BUG_ON(PageBuddy(page));
6460 pfn++;
6461 continue;
6462 }
6463
6464 BUG_ON(page_count(page));
6465 BUG_ON(!PageBuddy(page));
6466 order = buddy_order(page);
6467 del_page_from_free_list(page, zone, order);
6468 pfn += (1 << order);
6469 }
6470 spin_unlock_irqrestore(&zone->lock, flags);
6471 }
6472 #endif
6473
6474 /*
6475 * This function returns a stable result only if called under zone lock.
6476 */
6477 bool is_free_buddy_page(struct page *page)
6478 {
6479 unsigned long pfn = page_to_pfn(page);
6480 unsigned int order;
6481
6482 for (order = 0; order <= MAX_ORDER; order++) {
6483 struct page *page_head = page - (pfn & ((1 << order) - 1));
6484
6485 if (PageBuddy(page_head) &&
6486 buddy_order_unsafe(page_head) >= order)
6487 break;
6488 }
6489
6490 return order <= MAX_ORDER;
6491 }
6492 EXPORT_SYMBOL(is_free_buddy_page);
6493
6494 #ifdef CONFIG_MEMORY_FAILURE
6495 /*
6496 * Break a higher-order page down into sub-pages, and keep our target out of
6497 * the buddy allocator.
6498 */
6499 static void break_down_buddy_pages(struct zone *zone, struct page *page,
6500 struct page *target, int low, int high,
6501 int migratetype)
6502 {
6503 unsigned long size = 1 << high;
6504 struct page *current_buddy, *next_page;
6505
6506 while (high > low) {
6507 high--;
6508 size >>= 1;
6509
6510 if (target >= &page[size]) {
6511 next_page = page + size;
6512 current_buddy = page;
6513 } else {
6514 next_page = page;
6515 current_buddy = page + size;
6516 }
6517 page = next_page;
6518
6519 if (set_page_guard(zone, current_buddy, high, migratetype))
6520 continue;
6521
6522 if (current_buddy != target) {
6523 add_to_free_list(current_buddy, zone, high, migratetype);
6524 set_buddy_order(current_buddy, high);
6525 }
6526 }
6527 }
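
/*
 * Worked example (illustration only): for an order-3 buddy page with
 * the target at offset 5, the loop frees pages 0-3 as an order-2
 * block, pages 6-7 as an order-1 block and page 4 as an order-0
 * block, leaving only target page 5 off the free lists.
 */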
6528
6529 /*
6530 * Take a page that will be marked as poisoned off the buddy allocator.
6531 */
6532 bool take_page_off_buddy(struct page *page)
6533 {
6534 struct zone *zone = page_zone(page);
6535 unsigned long pfn = page_to_pfn(page);
6536 unsigned long flags;
6537 unsigned int order;
6538 bool ret = false;
6539
6540 spin_lock_irqsave(&zone->lock, flags);
6541 for (order = 0; order <= MAX_ORDER; order++) {
6542 struct page *page_head = page - (pfn & ((1 << order) - 1));
6543 int page_order = buddy_order(page_head);
6544
6545 if (PageBuddy(page_head) && page_order >= order) {
6546 unsigned long pfn_head = page_to_pfn(page_head);
6547 int migratetype = get_pfnblock_migratetype(page_head,
6548 pfn_head);
6549
6550 del_page_from_free_list(page_head, zone, page_order);
6551 break_down_buddy_pages(zone, page_head, page, 0,
6552 page_order, migratetype);
6553 SetPageHWPoisonTakenOff(page);
6554 if (!is_migrate_isolate(migratetype))
6555 __mod_zone_freepage_state(zone, -1, migratetype);
6556 ret = true;
6557 break;
6558 }
6559 if (page_count(page_head) > 0)
6560 break;
6561 }
6562 spin_unlock_irqrestore(&zone->lock, flags);
6563 return ret;
6564 }
6565
6566 /*
6567 * Cancel takeoff done by take_page_off_buddy().
6568 */
6569 bool put_page_back_buddy(struct page *page)
6570 {
6571 struct zone *zone = page_zone(page);
6572 unsigned long pfn = page_to_pfn(page);
6573 unsigned long flags;
6574 int migratetype = get_pfnblock_migratetype(page, pfn);
6575 bool ret = false;
6576
6577 spin_lock_irqsave(&zone->lock, flags);
6578 if (put_page_testzero(page)) {
6579 ClearPageHWPoisonTakenOff(page);
6580 __free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE);
6581 if (TestClearPageHWPoison(page)) {
6582 ret = true;
6583 }
6584 }
6585 spin_unlock_irqrestore(&zone->lock, flags);
6586
6587 return ret;
6588 }
6589 #endif
6590
6591 #ifdef CONFIG_ZONE_DMA
6592 bool has_managed_dma(void)
6593 {
6594 struct pglist_data *pgdat;
6595
6596 for_each_online_pgdat(pgdat) {
6597 struct zone *zone = &pgdat->node_zones[ZONE_DMA];
6598
6599 if (managed_zone(zone))
6600 return true;
6601 }
6602 return false;
6603 }
6604 #endif /* CONFIG_ZONE_DMA */
6605
6606 #ifdef CONFIG_UNACCEPTED_MEMORY
6607
6608 /* Counts number of zones with unaccepted pages. */
6609 static DEFINE_STATIC_KEY_FALSE(zones_with_unaccepted_pages);
6610
6611 static bool lazy_accept = true;
6612
6613 static int __init accept_memory_parse(char *p)
6614 {
6615 if (!strcmp(p, "lazy")) {
6616 lazy_accept = true;
6617 return 0;
6618 } else if (!strcmp(p, "eager")) {
6619 lazy_accept = false;
6620 return 0;
6621 } else {
6622 return -EINVAL;
6623 }
6624 }
6625 early_param("accept_memory", accept_memory_parse);
6626
6627 static bool page_contains_unaccepted(struct page *page, unsigned int order)
6628 {
6629 phys_addr_t start = page_to_phys(page);
6630 phys_addr_t end = start + (PAGE_SIZE << order);
6631
6632 return range_contains_unaccepted_memory(start, end);
6633 }
6634
6635 static void accept_page(struct page *page, unsigned int order)
6636 {
6637 phys_addr_t start = page_to_phys(page);
6638
6639 accept_memory(start, start + (PAGE_SIZE << order));
6640 }
6641
6642 static bool try_to_accept_memory_one(struct zone *zone)
6643 {
6644 unsigned long flags;
6645 struct page *page;
6646 bool last;
6647
6648 if (list_empty(&zone->unaccepted_pages))
6649 return false;
6650
6651 spin_lock_irqsave(&zone->lock, flags);
6652 page = list_first_entry_or_null(&zone->unaccepted_pages,
6653 struct page, lru);
6654 if (!page) {
6655 spin_unlock_irqrestore(&zone->lock, flags);
6656 return false;
6657 }
6658
6659 list_del(&page->lru);
6660 last = list_empty(&zone->unaccepted_pages);
6661
6662 __mod_zone_freepage_state(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
6663 __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
6664 spin_unlock_irqrestore(&zone->lock, flags);
6665
6666 accept_page(page, MAX_ORDER);
6667
6668 __free_pages_ok(page, MAX_ORDER, FPI_TO_TAIL);
6669
6670 if (last)
6671 static_branch_dec(&zones_with_unaccepted_pages);
6672
6673 return true;
6674 }
6675
6676 static bool try_to_accept_memory(struct zone *zone, unsigned int order)
6677 {
6678 long to_accept;
6679 int ret = false;
6680
6681 /* How much to accept to get to high watermark? */
6682 to_accept = high_wmark_pages(zone) -
6683 (zone_page_state(zone, NR_FREE_PAGES) -
6684 __zone_watermark_unusable_free(zone, order, 0));
6685
6686 /* Accept at least one page */
6687 do {
6688 if (!try_to_accept_memory_one(zone))
6689 break;
6690 ret = true;
6691 to_accept -= MAX_ORDER_NR_PAGES;
6692 } while (to_accept > 0);
6693
6694 return ret;
6695 }
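
/*
 * Worked example (hypothetical numbers, illustration only): with
 * high_wmark_pages() == 8192, 4096 usable free pages and
 * MAX_ORDER_NR_PAGES == 1024, to_accept starts at 4096, so up to four
 * MAX_ORDER chunks are accepted before the loop terminates.
 */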
6696
6697 static inline bool has_unaccepted_memory(void)
6698 {
6699 return static_branch_unlikely(&zones_with_unaccepted_pages);
6700 }
6701
6702 static bool __free_unaccepted(struct page *page)
6703 {
6704 struct zone *zone = page_zone(page);
6705 unsigned long flags;
6706 bool first = false;
6707
6708 if (!lazy_accept)
6709 return false;
6710
6711 spin_lock_irqsave(&zone->lock, flags);
6712 first = list_empty(&zone->unaccepted_pages);
6713 list_add_tail(&page->lru, &zone->unaccepted_pages);
6714 __mod_zone_freepage_state(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
6715 __mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES);
6716 spin_unlock_irqrestore(&zone->lock, flags);
6717
6718 if (first)
6719 static_branch_inc(&zones_with_unaccepted_pages);
6720
6721 return true;
6722 }
6723
6724 #else
6725
6726 static bool page_contains_unaccepted(struct page *page, unsigned int order)
6727 {
6728 return false;
6729 }
6730
6731 static void accept_page(struct page *page, unsigned int order)
6732 {
6733 }
6734
6735 static bool try_to_accept_memory(struct zone *zone, unsigned int order)
6736 {
6737 return false;
6738 }
6739
6740 static inline bool has_unaccepted_memory(void)
6741 {
6742 return false;
6743 }
6744
6745 static bool __free_unaccepted(struct page *page)
6746 {
6747 BUILD_BUG();
6748 return false;
6749 }
6750
6751 #endif /* CONFIG_UNACCEPTED_MEMORY */
6752