xref: /kernel/linux/linux-6.6/mm/util.c (revision 62306a36)
1// SPDX-License-Identifier: GPL-2.0-only
2#include <linux/mm.h>
3#include <linux/slab.h>
4#include <linux/string.h>
5#include <linux/compiler.h>
6#include <linux/export.h>
7#include <linux/err.h>
8#include <linux/sched.h>
9#include <linux/sched/mm.h>
10#include <linux/sched/signal.h>
11#include <linux/sched/task_stack.h>
12#include <linux/security.h>
13#include <linux/swap.h>
14#include <linux/swapops.h>
15#include <linux/mman.h>
16#include <linux/hugetlb.h>
17#include <linux/vmalloc.h>
18#include <linux/userfaultfd_k.h>
19#include <linux/elf.h>
20#include <linux/elf-randomize.h>
21#include <linux/personality.h>
22#include <linux/random.h>
23#include <linux/processor.h>
24#include <linux/sizes.h>
25#include <linux/compat.h>
26
27#include <linux/uaccess.h>
28
29#include "internal.h"
30#include "swap.h"
31
32/**
33 * kfree_const - conditionally free memory
34 * @x: pointer to the memory
35 *
36 * This function calls kfree() only if @x is not in the .rodata section.
37 */
38void kfree_const(const void *x)
39{
40	if (!is_kernel_rodata((unsigned long)x))
41		kfree(x);
42}
43EXPORT_SYMBOL(kfree_const);
44
45/**
46 * kstrdup - allocate space for and copy an existing string
47 * @s: the string to duplicate
48 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
49 *
50 * Return: newly allocated copy of @s or %NULL in case of error
51 */
52noinline
53char *kstrdup(const char *s, gfp_t gfp)
54{
55	size_t len;
56	char *buf;
57
58	if (!s)
59		return NULL;
60
61	len = strlen(s) + 1;
62	buf = kmalloc_track_caller(len, gfp);
63	if (buf)
64		memcpy(buf, s, len);
65	return buf;
66}
67EXPORT_SYMBOL(kstrdup);
68
69/**
70 * kstrdup_const - conditionally duplicate an existing const string
71 * @s: the string to duplicate
72 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
73 *
74 * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
75 * must not be passed to krealloc().
76 *
77 * Return: source string if it is in the .rodata section, otherwise
78 * fall back to kstrdup().
79 */
80const char *kstrdup_const(const char *s, gfp_t gfp)
81{
82	if (is_kernel_rodata((unsigned long)s))
83		return s;
84
85	return kstrdup(s, gfp);
86}
87EXPORT_SYMBOL(kstrdup_const);
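
/*
 * Editorial sketch, not part of the original file: one plausible way a
 * caller might pair kstrdup_const() with kfree_const(), so that strings
 * living in .rodata are never actually duplicated or freed. The function
 * name is hypothetical.
 */
static __maybe_unused int kstrdup_const_example_set_name(const char **namep,
							 const char *name)
{
	const char *copy = kstrdup_const(name, GFP_KERNEL);

	if (!copy)
		return -ENOMEM;

	/* kfree_const() is a no-op for .rodata strings, kfree() otherwise */
	kfree_const(*namep);
	*namep = copy;
	return 0;
}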
88
89/**
90 * kstrndup - allocate space for and copy an existing string
91 * @s: the string to duplicate
92 * @max: read at most @max chars from @s
93 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
94 *
95 * Note: Use kmemdup_nul() instead if the size is known exactly.
96 *
97 * Return: newly allocated copy of @s or %NULL in case of error
98 */
99char *kstrndup(const char *s, size_t max, gfp_t gfp)
100{
101	size_t len;
102	char *buf;
103
104	if (!s)
105		return NULL;
106
107	len = strnlen(s, max);
108	buf = kmalloc_track_caller(len + 1, gfp);
109	if (buf) {
110		memcpy(buf, s, len);
111		buf[len] = '\0';
112	}
113	return buf;
114}
115EXPORT_SYMBOL(kstrndup);
116
117/**
118 * kmemdup - duplicate region of memory
119 *
120 * @src: memory region to duplicate
121 * @len: memory region length
122 * @gfp: GFP mask to use
123 *
124 * Return: newly allocated copy of @src or %NULL in case of error,
125 * result is physically contiguous. Use kfree() to free.
126 */
127void *kmemdup(const void *src, size_t len, gfp_t gfp)
128{
129	void *p;
130
131	p = kmalloc_track_caller(len, gfp);
132	if (p)
133		memcpy(p, src, len);
134	return p;
135}
136EXPORT_SYMBOL(kmemdup);
137
138/**
139 * kvmemdup - duplicate region of memory
140 *
141 * @src: memory region to duplicate
142 * @len: memory region length
143 * @gfp: GFP mask to use
144 *
145 * Return: newly allocated copy of @src or %NULL in case of error,
146 * result may not be physically contiguous. Use kvfree() to free.
147 */
148void *kvmemdup(const void *src, size_t len, gfp_t gfp)
149{
150	void *p;
151
152	p = kvmalloc(len, gfp);
153	if (p)
154		memcpy(p, src, len);
155	return p;
156}
157EXPORT_SYMBOL(kvmemdup);
158
159/**
160 * kmemdup_nul - Create a NUL-terminated string from unterminated data
161 * @s: The data to stringify
162 * @len: The size of the data
163 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
164 *
165 * Return: newly allocated copy of @s with NUL-termination or %NULL in
166 * case of error
167 */
168char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
169{
170	char *buf;
171
172	if (!s)
173		return NULL;
174
175	buf = kmalloc_track_caller(len + 1, gfp);
176	if (buf) {
177		memcpy(buf, s, len);
178		buf[len] = '\0';
179	}
180	return buf;
181}
182EXPORT_SYMBOL(kmemdup_nul);
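
/*
 * Editorial sketch, not part of the original file: kmemdup_nul() is the
 * natural way to turn a length-delimited, possibly unterminated buffer
 * into a proper C string. The helper below, whose name is hypothetical,
 * copies the first space-separated token out of a buffer.
 */
static __maybe_unused char *kmemdup_nul_example_first_token(const char *buf,
							    size_t buflen)
{
	const char *space = memchr(buf, ' ', buflen);
	size_t len = space ? space - buf : buflen;

	/* copies @len bytes and appends '\0'; returns NULL on failure */
	return kmemdup_nul(buf, len, GFP_KERNEL);
}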
183
184/**
185 * memdup_user - duplicate memory region from user space
186 *
187 * @src: source address in user space
188 * @len: number of bytes to copy
189 *
190 * Return: an ERR_PTR() on failure.  Result is physically
191 * contiguous, to be freed by kfree().
192 */
193void *memdup_user(const void __user *src, size_t len)
194{
195	void *p;
196
197	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
198	if (!p)
199		return ERR_PTR(-ENOMEM);
200
201	if (copy_from_user(p, src, len)) {
202		kfree(p);
203		return ERR_PTR(-EFAULT);
204	}
205
206	return p;
207}
208EXPORT_SYMBOL(memdup_user);
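
/*
 * Editorial sketch, not part of the original file: memdup_user() returns
 * ERR_PTR(-ENOMEM) or ERR_PTR(-EFAULT) rather than NULL, so callers must
 * check with IS_ERR()/PTR_ERR(). A typical ioctl-style helper might look
 * like this; the function name is hypothetical.
 */
static __maybe_unused int memdup_user_example(const void __user *ubuf,
					      size_t len)
{
	void *kbuf = memdup_user(ubuf, len);

	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	/* ... operate on the kernel-space copy here ... */

	kfree(kbuf);
	return 0;
}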
209
210/**
211 * vmemdup_user - duplicate memory region from user space
212 *
213 * @src: source address in user space
214 * @len: number of bytes to copy
215 *
216 * Return: an ERR_PTR() on failure.  Result may not be
217 * physically contiguous.  Use kvfree() to free.
218 */
219void *vmemdup_user(const void __user *src, size_t len)
220{
221	void *p;
222
223	p = kvmalloc(len, GFP_USER);
224	if (!p)
225		return ERR_PTR(-ENOMEM);
226
227	if (copy_from_user(p, src, len)) {
228		kvfree(p);
229		return ERR_PTR(-EFAULT);
230	}
231
232	return p;
233}
234EXPORT_SYMBOL(vmemdup_user);
235
236/**
237 * strndup_user - duplicate an existing string from user space
238 * @s: The string to duplicate
239 * @n: Maximum number of bytes to copy, including the trailing NUL.
240 *
241 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
242 */
243char *strndup_user(const char __user *s, long n)
244{
245	char *p;
246	long length;
247
248	length = strnlen_user(s, n);
249
250	if (!length)
251		return ERR_PTR(-EFAULT);
252
253	if (length > n)
254		return ERR_PTR(-EINVAL);
255
256	p = memdup_user(s, length);
257
258	if (IS_ERR(p))
259		return p;
260
261	p[length - 1] = '\0';
262
263	return p;
264}
265EXPORT_SYMBOL(strndup_user);
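
/*
 * Editorial sketch, not part of the original file: the bound passed to
 * strndup_user() includes the trailing NUL, so it also caps the size of
 * the resulting allocation. The function name and the SZ_4K bound are
 * hypothetical.
 */
static __maybe_unused int strndup_user_example(const char __user *uname)
{
	char *name = strndup_user(uname, SZ_4K);

	if (IS_ERR(name))
		return PTR_ERR(name);	/* -EFAULT, -EINVAL or -ENOMEM */

	pr_debug("user supplied name: %s\n", name);
	kfree(name);
	return 0;
}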
266
267/**
268 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
269 *
270 * @src: source address in user space
271 * @len: number of bytes to copy
272 *
273 * Return: an ERR_PTR() on failure.
274 */
275void *memdup_user_nul(const void __user *src, size_t len)
276{
277	char *p;
278
279	/*
280	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
281	 * cause a page fault, which makes it pointless to use GFP_NOFS
282	 * or GFP_ATOMIC.
283	 */
284	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
285	if (!p)
286		return ERR_PTR(-ENOMEM);
287
288	if (copy_from_user(p, src, len)) {
289		kfree(p);
290		return ERR_PTR(-EFAULT);
291	}
292	p[len] = '\0';
293
294	return p;
295}
296EXPORT_SYMBOL(memdup_user_nul);
297
298/* Check if the vma is being used as a stack by this task */
299int vma_is_stack_for_current(struct vm_area_struct *vma)
300{
301	struct task_struct * __maybe_unused t = current;
302
303	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
304}
305
306/*
307 * Change the backing file; only valid to use during initial VMA setup.
308 */
309void vma_set_file(struct vm_area_struct *vma, struct file *file)
310{
311	/* Changing an anonymous vma with this is illegal */
312	get_file(file);
313	swap(vma->vm_file, file);
314	fput(file);
315}
316EXPORT_SYMBOL(vma_set_file);
317
318#ifndef STACK_RND_MASK
319#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))     /* 8MB of VA */
320#endif
321
322unsigned long randomize_stack_top(unsigned long stack_top)
323{
324	unsigned long random_variable = 0;
325
326	if (current->flags & PF_RANDOMIZE) {
327		random_variable = get_random_long();
328		random_variable &= STACK_RND_MASK;
329		random_variable <<= PAGE_SHIFT;
330	}
331#ifdef CONFIG_STACK_GROWSUP
332	return PAGE_ALIGN(stack_top) + random_variable;
333#else
334	return PAGE_ALIGN(stack_top) - random_variable;
335#endif
336}
337
338/**
339 * randomize_page - Generate a random, page aligned address
340 * @start:	The smallest acceptable address the caller will take.
341 * @range:	The size of the area, starting at @start, within which the
342 *		random address must fall.
343 *
344 * If @start + @range would overflow, @range is capped.
345 *
346 * NOTE: Historical use of randomize_range, which this replaces, presumed that
347 * @start was already page aligned.  We now align it regardless.
348 *
349 * Return: A page aligned address within [start, start + range).  On error,
350 * @start is returned.
351 */
352unsigned long randomize_page(unsigned long start, unsigned long range)
353{
354	if (!PAGE_ALIGNED(start)) {
355		range -= PAGE_ALIGN(start) - start;
356		start = PAGE_ALIGN(start);
357	}
358
359	if (start > ULONG_MAX - range)
360		range = ULONG_MAX - start;
361
362	range >>= PAGE_SHIFT;
363
364	if (range == 0)
365		return start;
366
367	return start + (get_random_long() % range << PAGE_SHIFT);
368}
369
370#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
371unsigned long __weak arch_randomize_brk(struct mm_struct *mm)
372{
373	/* Is the current task 32-bit? */
374	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
375		return randomize_page(mm->brk, SZ_32M);
376
377	return randomize_page(mm->brk, SZ_1G);
378}
379
380unsigned long arch_mmap_rnd(void)
381{
382	unsigned long rnd;
383
384#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
385	if (is_compat_task())
386		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
387	else
388#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
389		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
390
391	return rnd << PAGE_SHIFT;
392}
393
394static int mmap_is_legacy(struct rlimit *rlim_stack)
395{
396	if (current->personality & ADDR_COMPAT_LAYOUT)
397		return 1;
398
399	/* On parisc the stack always grows up - so an unlimited stack should
400	 * not be an indicator to use the legacy memory layout. */
401	if (rlim_stack->rlim_cur == RLIM_INFINITY &&
402		!IS_ENABLED(CONFIG_STACK_GROWSUP))
403		return 1;
404
405	return sysctl_legacy_va_layout;
406}
407
408/*
409 * Leave enough space between the mmap area and the stack to honour ulimit in
410 * the face of randomisation.
411 */
412#define MIN_GAP		(SZ_128M)
413#define MAX_GAP		(STACK_TOP / 6 * 5)
414
415static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
416{
417#ifdef CONFIG_STACK_GROWSUP
418	/*
419	 * For an upwards growing stack the calculation is much simpler.
420	 * Memory for the maximum stack size is reserved at the top of the
421	 * task. mmap_base starts directly below the stack and grows
422	 * downwards.
423	 */
424	return PAGE_ALIGN_DOWN(mmap_upper_limit(rlim_stack) - rnd);
425#else
426	unsigned long gap = rlim_stack->rlim_cur;
427	unsigned long pad = stack_guard_gap;
428
429	/* Account for stack randomization if necessary */
430	if (current->flags & PF_RANDOMIZE)
431		pad += (STACK_RND_MASK << PAGE_SHIFT);
432
433	/* Values close to RLIM_INFINITY can overflow. */
434	if (gap + pad > gap)
435		gap += pad;
436
437	if (gap < MIN_GAP)
438		gap = MIN_GAP;
439	else if (gap > MAX_GAP)
440		gap = MAX_GAP;
441
442	return PAGE_ALIGN(STACK_TOP - gap - rnd);
443#endif
444}
445
446void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
447{
448	unsigned long random_factor = 0UL;
449
450	if (current->flags & PF_RANDOMIZE)
451		random_factor = arch_mmap_rnd();
452
453	if (mmap_is_legacy(rlim_stack)) {
454		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
455		mm->get_unmapped_area = arch_get_unmapped_area;
456	} else {
457		mm->mmap_base = mmap_base(random_factor, rlim_stack);
458		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
459	}
460}
461#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
462void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
463{
464	mm->mmap_base = TASK_UNMAPPED_BASE;
465	mm->get_unmapped_area = arch_get_unmapped_area;
466}
467#endif
468
469/**
470 * __account_locked_vm - account locked pages to an mm's locked_vm
471 * @mm:          mm to account against
472 * @pages:       number of pages to account
473 * @inc:         %true if @pages should be considered positive, %false if not
474 * @task:        task used to check RLIMIT_MEMLOCK
475 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
476 *
477 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
478 * that mmap_lock is held as writer.
479 *
480 * Return:
481 * * 0       on success
482 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
483 */
484int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
485			struct task_struct *task, bool bypass_rlim)
486{
487	unsigned long locked_vm, limit;
488	int ret = 0;
489
490	mmap_assert_write_locked(mm);
491
492	locked_vm = mm->locked_vm;
493	if (inc) {
494		if (!bypass_rlim) {
495			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
496			if (locked_vm + pages > limit)
497				ret = -ENOMEM;
498		}
499		if (!ret)
500			mm->locked_vm = locked_vm + pages;
501	} else {
502		WARN_ON_ONCE(pages > locked_vm);
503		mm->locked_vm = locked_vm - pages;
504	}
505
506	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
507		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
508		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
509		 ret ? " - exceeded" : "");
510
511	return ret;
512}
513EXPORT_SYMBOL_GPL(__account_locked_vm);
514
515/**
516 * account_locked_vm - account locked pages to an mm's locked_vm
517 * @mm:          mm to account against, may be NULL
518 * @pages:       number of pages to account
519 * @inc:         %true if @pages should be considered positive, %false if not
520 *
521 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
522 *
523 * Return:
524 * * 0       on success, or if mm is NULL
525 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
526 */
527int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
528{
529	int ret;
530
531	if (pages == 0 || !mm)
532		return 0;
533
534	mmap_write_lock(mm);
535	ret = __account_locked_vm(mm, pages, inc, current,
536				  capable(CAP_IPC_LOCK));
537	mmap_write_unlock(mm);
538
539	return ret;
540}
541EXPORT_SYMBOL_GPL(account_locked_vm);
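
/*
 * Editorial sketch, not part of the original file: a driver that pins
 * user pages typically charges them against RLIMIT_MEMLOCK before the
 * pin and undoes the charge when the pages are released. The function
 * name is hypothetical.
 */
static __maybe_unused int account_locked_vm_example_pin(struct mm_struct *mm,
							unsigned long npages)
{
	int ret = account_locked_vm(mm, npages, true);

	if (ret)
		return ret;	/* -ENOMEM: RLIMIT_MEMLOCK would be exceeded */

	/*
	 * ... pin the pages here; if pinning fails, undo the charge with
	 * account_locked_vm(mm, npages, false) ...
	 */
	return 0;
}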
542
543unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
544	unsigned long len, unsigned long prot,
545	unsigned long flag, unsigned long pgoff)
546{
547	unsigned long ret;
548	struct mm_struct *mm = current->mm;
549	unsigned long populate;
550	LIST_HEAD(uf);
551
552	ret = security_mmap_file(file, prot, flag);
553	if (!ret) {
554		if (mmap_write_lock_killable(mm))
555			return -EINTR;
556		ret = do_mmap(file, addr, len, prot, flag, 0, pgoff, &populate,
557			      &uf);
558		mmap_write_unlock(mm);
559		userfaultfd_unmap_complete(mm, &uf);
560		if (populate)
561			mm_populate(ret, populate);
562	}
563	return ret;
564}
565
566unsigned long vm_mmap(struct file *file, unsigned long addr,
567	unsigned long len, unsigned long prot,
568	unsigned long flag, unsigned long offset)
569{
570	if (unlikely(offset + PAGE_ALIGN(len) < offset))
571		return -EINVAL;
572	if (unlikely(offset_in_page(offset)))
573		return -EINVAL;
574
575	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
576}
577EXPORT_SYMBOL(vm_mmap);
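
/*
 * Editorial sketch, not part of the original file: in-kernel users map a
 * file into the current task with vm_mmap(); the result is either the
 * mapped address or a negative errno encoded in an unsigned long, hence
 * the IS_ERR_VALUE() check. The function name is hypothetical.
 */
static __maybe_unused int vm_mmap_example(struct file *file,
					  unsigned long size,
					  unsigned long *addrp)
{
	unsigned long addr = vm_mmap(file, 0, size, PROT_READ | PROT_WRITE,
				     MAP_SHARED, 0);

	if (IS_ERR_VALUE(addr))
		return (int)addr;	/* negative errno */

	*addrp = addr;
	return 0;
}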
578
579/**
580 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
581 * failure, fall back to non-contiguous (vmalloc) allocation.
582 * @size: size of the request.
583 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
584 * @node: numa node to allocate from
585 *
586 * Uses kmalloc to get the memory but if the allocation fails then falls back
587 * to the vmalloc allocator. Use kvfree for freeing the memory.
588 *
589 * GFP_NOWAIT and GFP_ATOMIC are not supported, and neither is the __GFP_NORETRY modifier.
590 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
591 * preferable to the vmalloc fallback, due to visible performance drawbacks.
592 *
593 * Return: pointer to the allocated memory or %NULL in case of failure
594 */
595void *kvmalloc_node(size_t size, gfp_t flags, int node)
596{
597	gfp_t kmalloc_flags = flags;
598	void *ret;
599
600	/*
601	 * We want to attempt a large physically contiguous block first because
602	 * it is less likely to fragment multiple larger blocks, and therefore
603	 * contributes less to long-term fragmentation than the vmalloc fallback.
604	 * However make sure that larger requests are not too disruptive - no
605	 * OOM killer and no allocation failure warnings as we have a fallback.
606	 */
607	if (size > PAGE_SIZE) {
608		kmalloc_flags |= __GFP_NOWARN;
609
610		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
611			kmalloc_flags |= __GFP_NORETRY;
612
613		/* nofail semantic is implemented by the vmalloc fallback */
614		kmalloc_flags &= ~__GFP_NOFAIL;
615	}
616
617	ret = kmalloc_node(size, kmalloc_flags, node);
618
619	/*
620	 * It doesn't really make sense to fall back to vmalloc for sub-page
621	 * requests.
622	 */
623	if (ret || size <= PAGE_SIZE)
624		return ret;
625
626	/* non-sleeping allocations are not supported by vmalloc */
627	if (!gfpflags_allow_blocking(flags))
628		return NULL;
629
630	/* Don't even allow crazy sizes */
631	if (unlikely(size > INT_MAX)) {
632		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
633		return NULL;
634	}
635
636	/*
637	 * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
638	 * since the callers already cannot assume anything
639	 * about the resulting pointer, and cannot play
640	 * protection games.
641	 */
642	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
643			flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
644			node, __builtin_return_address(0));
645}
646EXPORT_SYMBOL(kvmalloc_node);
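
/*
 * Editorial sketch, not part of the original file: kvmalloc() and
 * friends (all built on kvmalloc_node()) suit allocations that may be
 * too large for kmalloc() to satisfy reliably but need not be
 * physically contiguous. The function name is hypothetical.
 */
static __maybe_unused int kvmalloc_example(size_t nr_entries)
{
	u64 *table = kvcalloc(nr_entries, sizeof(*table), GFP_KERNEL);

	if (!table)
		return -ENOMEM;

	/* ... fill and use the table ... */

	kvfree(table);	/* correct for both the kmalloc and vmalloc cases */
	return 0;
}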
647
648/**
649 * kvfree() - Free memory.
650 * @addr: Pointer to allocated memory.
651 *
652 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
653 * It is slightly more efficient to use kfree() or vfree() if you are certain
654 * that you know which one to use.
655 *
656 * Context: Either preemptible task context or not-NMI interrupt.
657 */
658void kvfree(const void *addr)
659{
660	if (is_vmalloc_addr(addr))
661		vfree(addr);
662	else
663		kfree(addr);
664}
665EXPORT_SYMBOL(kvfree);
666
667/**
668 * kvfree_sensitive - Free a data object containing sensitive information.
669 * @addr: address of the data object to be freed.
670 * @len: length of the data object.
671 *
672 * Use the special memzero_explicit() function to clear the content of a
673 * kvmalloc'ed object containing sensitive data to make sure that the
674 * compiler won't optimize out the data clearing.
675 */
676void kvfree_sensitive(const void *addr, size_t len)
677{
678	if (likely(!ZERO_OR_NULL_PTR(addr))) {
679		memzero_explicit((void *)addr, len);
680		kvfree(addr);
681	}
682}
683EXPORT_SYMBOL(kvfree_sensitive);
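
/*
 * Editorial sketch, not part of the original file: kvfree_sensitive() is
 * intended for buffers holding key material or other secrets, so the
 * contents are wiped with memzero_explicit() before the memory is
 * released. The function name is hypothetical.
 */
static __maybe_unused int kvfree_sensitive_example(const void __user *ukey,
						   size_t keylen)
{
	void *key = memdup_user(ukey, keylen);

	if (IS_ERR(key))
		return PTR_ERR(key);

	/* ... hand the key to a crypto transform or device ... */

	kvfree_sensitive(key, keylen);	/* zeroes @keylen bytes, then frees */
	return 0;
}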
684
685void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
686{
687	void *newp;
688
689	if (oldsize >= newsize)
690		return (void *)p;
691	newp = kvmalloc(newsize, flags);
692	if (!newp)
693		return NULL;
694	memcpy(newp, p, oldsize);
695	kvfree(p);
696	return newp;
697}
698EXPORT_SYMBOL(kvrealloc);
699
700/**
701 * __vmalloc_array - allocate memory for a virtually contiguous array.
702 * @n: number of elements.
703 * @size: element size.
704 * @flags: the type of memory to allocate (see kmalloc).
705 */
706void *__vmalloc_array(size_t n, size_t size, gfp_t flags)
707{
708	size_t bytes;
709
710	if (unlikely(check_mul_overflow(n, size, &bytes)))
711		return NULL;
712	return __vmalloc(bytes, flags);
713}
714EXPORT_SYMBOL(__vmalloc_array);
715
716/**
717 * vmalloc_array - allocate memory for a virtually contiguous array.
718 * @n: number of elements.
719 * @size: element size.
720 */
721void *vmalloc_array(size_t n, size_t size)
722{
723	return __vmalloc_array(n, size, GFP_KERNEL);
724}
725EXPORT_SYMBOL(vmalloc_array);
726
727/**
728 * __vcalloc - allocate and zero memory for a virtually contiguous array.
729 * @n: number of elements.
730 * @size: element size.
731 * @flags: the type of memory to allocate (see kmalloc).
732 */
733void *__vcalloc(size_t n, size_t size, gfp_t flags)
734{
735	return __vmalloc_array(n, size, flags | __GFP_ZERO);
736}
737EXPORT_SYMBOL(__vcalloc);
738
739/**
740 * vcalloc - allocate and zero memory for a virtually contiguous array.
741 * @n: number of elements.
742 * @size: element size.
743 */
744void *vcalloc(size_t n, size_t size)
745{
746	return __vmalloc_array(n, size, GFP_KERNEL | __GFP_ZERO);
747}
748EXPORT_SYMBOL(vcalloc);
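
/*
 * Editorial sketch, not part of the original file: vcalloc() and
 * vmalloc_array() exist so that the n * size overflow check does not
 * have to be open-coded at every call site. The function name is
 * hypothetical.
 */
static __maybe_unused int *vcalloc_example(size_t nr_items)
{
	/* returns NULL if nr_items * sizeof(int) would overflow */
	int *items = vcalloc(nr_items, sizeof(*items));

	return items;	/* the caller frees this with vfree() */
}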
749
750struct anon_vma *folio_anon_vma(struct folio *folio)
751{
752	unsigned long mapping = (unsigned long)folio->mapping;
753
754	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
755		return NULL;
756	return (void *)(mapping - PAGE_MAPPING_ANON);
757}
758
759/**
760 * folio_mapping - Find the mapping where this folio is stored.
761 * @folio: The folio.
762 *
763 * For folios which are in the page cache, return the mapping that this
764 * page belongs to.  Folios in the swap cache return the swap mapping
765 * this page is stored in (which is different from the mapping for the
766 * swap file or swap device where the data is stored).
767 *
768 * You can call this for folios which aren't in the swap cache or page
769 * cache and it will return NULL.
770 */
771struct address_space *folio_mapping(struct folio *folio)
772{
773	struct address_space *mapping;
774
775	/* This happens if someone calls flush_dcache_page on a slab page */
776	if (unlikely(folio_test_slab(folio)))
777		return NULL;
778
779	if (unlikely(folio_test_swapcache(folio)))
780		return swap_address_space(folio->swap);
781
782	mapping = folio->mapping;
783	if ((unsigned long)mapping & PAGE_MAPPING_FLAGS)
784		return NULL;
785
786	return mapping;
787}
788EXPORT_SYMBOL(folio_mapping);
789
790/**
791 * folio_copy - Copy the contents of one folio to another.
792 * @dst: Folio to copy to.
793 * @src: Folio to copy from.
794 *
795 * The bytes in the folio represented by @src are copied to @dst.
796 * Assumes the caller has validated that @dst is at least as large as @src.
797 * Can be called in atomic context for order-0 folios, but if the folio is
798 * larger, it may sleep.
799 */
800void folio_copy(struct folio *dst, struct folio *src)
801{
802	long i = 0;
803	long nr = folio_nr_pages(src);
804
805	for (;;) {
806		copy_highpage(folio_page(dst, i), folio_page(src, i));
807		if (++i == nr)
808			break;
809		cond_resched();
810	}
811}
812
813int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
814int sysctl_overcommit_ratio __read_mostly = 50;
815unsigned long sysctl_overcommit_kbytes __read_mostly;
816int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
817unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
818unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
819
820int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
821		size_t *lenp, loff_t *ppos)
822{
823	int ret;
824
825	ret = proc_dointvec(table, write, buffer, lenp, ppos);
826	if (ret == 0 && write)
827		sysctl_overcommit_kbytes = 0;
828	return ret;
829}
830
831static void sync_overcommit_as(struct work_struct *dummy)
832{
833	percpu_counter_sync(&vm_committed_as);
834}
835
836int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
837		size_t *lenp, loff_t *ppos)
838{
839	struct ctl_table t;
840	int new_policy = -1;
841	int ret;
842
843	/*
844	 * The deviation of sync_overcommit_as could be big with loose policy
845	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing policy to
846	 * strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply
847	 * with the strict "NEVER", and to avoid a possible race condition (even
848	 * though users usually won't switch to OVERCOMMIT_NEVER very often),
849	 * the switch is done in the following order:
850	 *	1. changing the batch
851	 *	2. sync percpu count on each CPU
852	 *	3. switch the policy
853	 */
854	if (write) {
855		t = *table;
856		t.data = &new_policy;
857		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
858		if (ret || new_policy == -1)
859			return ret;
860
861		mm_compute_batch(new_policy);
862		if (new_policy == OVERCOMMIT_NEVER)
863			schedule_on_each_cpu(sync_overcommit_as);
864		sysctl_overcommit_memory = new_policy;
865	} else {
866		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
867	}
868
869	return ret;
870}
871
872int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
873		size_t *lenp, loff_t *ppos)
874{
875	int ret;
876
877	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
878	if (ret == 0 && write)
879		sysctl_overcommit_ratio = 0;
880	return ret;
881}
882
883/*
884 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
885 */
886unsigned long vm_commit_limit(void)
887{
888	unsigned long allowed;
889
890	if (sysctl_overcommit_kbytes)
891		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
892	else
893		allowed = ((totalram_pages() - hugetlb_total_pages())
894			   * sysctl_overcommit_ratio / 100);
895	allowed += total_swap_pages;
896
897	return allowed;
898}
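
/*
 * Editorial note, not part of the original file: as a worked example of
 * the calculation above, with overcommit_ratio = 50, 16 GiB of RAM, no
 * hugetlb pages and 8 GiB of swap, OVERCOMMIT_NEVER permits roughly
 * 16 GiB * 50% + 8 GiB = 16 GiB of committed address space; setting
 * overcommit_kbytes instead replaces the ratio-based term entirely.
 */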
899
900/*
901 * Make sure vm_committed_as sits in one cacheline and does not share a
902 * cacheline with other variables. It can be updated frequently by several CPUs.
903 */
904struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;
905
906/*
907 * The global memory commitment made in the system can be used as a metric
908 * to drive ballooning decisions when Linux is hosted
909 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
910 * balancing memory across competing virtual machines that are hosted.
911 * Several metrics drive this policy engine including the guest reported
912 * memory commitment.
913 *
914 * The time cost of this is very low for small platforms; for a big
915 * platform like a 2S/36C/72T Skylake server, in the worst case where
916 * vm_committed_as's spinlock is under severe contention, the time cost
917 * could be about 30~40 microseconds.
918 */
919unsigned long vm_memory_committed(void)
920{
921	return percpu_counter_sum_positive(&vm_committed_as);
922}
923EXPORT_SYMBOL_GPL(vm_memory_committed);
924
925/*
926 * Check that a process has enough memory to allocate a new virtual
927 * mapping. 0 means there is enough memory for the allocation to
928 * succeed and -ENOMEM implies there is not.
929 *
930 * We currently support three overcommit policies, which are set via the
931 * vm.overcommit_memory sysctl.  See Documentation/mm/overcommit-accounting.rst
932 *
933 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
934 * Additional code 2002 Jul 20 by Robert Love.
935 *
936 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
937 *
938 * Note this is a helper function intended to be used by LSMs which
939 * wish to use this logic.
940 */
941int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
942{
943	long allowed;
944
945	vm_acct_memory(pages);
946
947	/*
948	 * Sometimes we want to use more memory than we have
949	 */
950	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
951		return 0;
952
953	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
954		if (pages > totalram_pages() + total_swap_pages)
955			goto error;
956		return 0;
957	}
958
959	allowed = vm_commit_limit();
960	/*
961	 * Reserve some for root
962	 */
963	if (!cap_sys_admin)
964		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
965
966	/*
967	 * Don't let a single process grow so big a user can't recover
968	 */
969	if (mm) {
970		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
971
972		allowed -= min_t(long, mm->total_vm / 32, reserve);
973	}
974
975	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
976		return 0;
977error:
978	pr_warn_ratelimited("%s: pid: %d, comm: %s, not enough memory for the allocation\n",
979			    __func__, current->pid, current->comm);
980	vm_unacct_memory(pages);
981
982	return -ENOMEM;
983}
984
985/**
986 * get_cmdline() - copy the cmdline value to a buffer.
987 * @task:     the task whose cmdline value to copy.
988 * @buffer:   the buffer to copy to.
989 * @buflen:   the length of the buffer. Larger cmdline values are truncated
990 *            to this length.
991 *
992 * Return: the size of the cmdline field copied. Note that the copy does
993 * not guarantee a trailing NUL byte.
994 */
995int get_cmdline(struct task_struct *task, char *buffer, int buflen)
996{
997	int res = 0;
998	unsigned int len;
999	struct mm_struct *mm = get_task_mm(task);
1000	unsigned long arg_start, arg_end, env_start, env_end;
1001	if (!mm)
1002		goto out;
1003	if (!mm->arg_end)
1004		goto out_mm;	/* Shh! No looking before we're done */
1005
1006	spin_lock(&mm->arg_lock);
1007	arg_start = mm->arg_start;
1008	arg_end = mm->arg_end;
1009	env_start = mm->env_start;
1010	env_end = mm->env_end;
1011	spin_unlock(&mm->arg_lock);
1012
1013	len = arg_end - arg_start;
1014
1015	if (len > buflen)
1016		len = buflen;
1017
1018	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);
1019
1020	/*
1021	 * If the NUL at the end of args has been overwritten, then
1022	 * assume the application is using setproctitle(3).
1023	 */
1024	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
1025		len = strnlen(buffer, res);
1026		if (len < res) {
1027			res = len;
1028		} else {
1029			len = env_end - env_start;
1030			if (len > buflen - res)
1031				len = buflen - res;
1032			res += access_process_vm(task, env_start,
1033						 buffer+res, len,
1034						 FOLL_FORCE);
1035			res = strnlen(buffer, res);
1036		}
1037	}
1038out_mm:
1039	mmput(mm);
1040out:
1041	return res;
1042}
1043
1044int __weak memcmp_pages(struct page *page1, struct page *page2)
1045{
1046	char *addr1, *addr2;
1047	int ret;
1048
1049	addr1 = kmap_atomic(page1);
1050	addr2 = kmap_atomic(page2);
1051	ret = memcmp(addr1, addr2, PAGE_SIZE);
1052	kunmap_atomic(addr2);
1053	kunmap_atomic(addr1);
1054	return ret;
1055}
1056
1057#ifdef CONFIG_PRINTK
1058/**
1059 * mem_dump_obj - Print available provenance information
1060 * @object: object for which to find provenance information.
1061 *
1062 * This function uses pr_cont(), so the caller is expected to have
1063 * printed out whatever preamble is appropriate.  The provenance information
1064 * depends on the type of object and on how much debugging is enabled.
1065 * For example, for a slab-cache object, the slab name is printed, and,
1066 * if available, the return address and stack trace from the allocation
1067 * and last free path of that object.
1068 */
1069void mem_dump_obj(void *object)
1070{
1071	const char *type;
1072
1073	if (kmem_valid_obj(object)) {
1074		kmem_dump_obj(object);
1075		return;
1076	}
1077
1078	if (vmalloc_dump_obj(object))
1079		return;
1080
1081	if (is_vmalloc_addr(object))
1082		type = "vmalloc memory";
1083	else if (virt_addr_valid(object))
1084		type = "non-slab/vmalloc memory";
1085	else if (object == NULL)
1086		type = "NULL pointer";
1087	else if (object == ZERO_SIZE_PTR)
1088		type = "zero-size pointer";
1089	else
1090		type = "non-paged memory";
1091
1092	pr_cont(" %s\n", type);
1093}
1094EXPORT_SYMBOL_GPL(mem_dump_obj);
1095#endif
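
/*
 * Editorial sketch, not part of the original file: because mem_dump_obj()
 * prints with pr_cont(), the caller emits its own preamble and lets the
 * provenance information continue the same line. The function name is
 * hypothetical.
 */
static __maybe_unused void mem_dump_obj_example(void *object)
{
	pr_err("suspicious object %px:", object);
	mem_dump_obj(object);	/* continues the line started above */
}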
1096
1097/*
1098 * A driver might set a page logically offline -- PageOffline() -- and
1099 * make the page inaccessible in the hypervisor; after that, access to page
1100 * content can be fatal.
1101 *
1102 * Some special PFN walkers -- i.e., /proc/kcore -- read content of random
1103 * pages after checking PageOffline(); however, these PFN walkers can race
1104 * with drivers that set PageOffline().
1105 *
1106 * page_offline_freeze()/page_offline_thaw() allow a subsystem to
1107 * synchronize with such drivers, ensuring that a page cannot be set
1108 * PageOffline() while frozen.
1109 *
1110 * page_offline_begin()/page_offline_end() are used by drivers that care about
1111 * such races when setting a page PageOffline().
1112 */
1113static DECLARE_RWSEM(page_offline_rwsem);
1114
1115void page_offline_freeze(void)
1116{
1117	down_read(&page_offline_rwsem);
1118}
1119
1120void page_offline_thaw(void)
1121{
1122	up_read(&page_offline_rwsem);
1123}
1124
1125void page_offline_begin(void)
1126{
1127	down_write(&page_offline_rwsem);
1128}
1129EXPORT_SYMBOL(page_offline_begin);
1130
1131void page_offline_end(void)
1132{
1133	up_write(&page_offline_rwsem);
1134}
1135EXPORT_SYMBOL(page_offline_end);
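
/*
 * Editorial sketch, not part of the original file: a PFN walker of the
 * kind described above would bracket its PageOffline() check and the
 * page access with page_offline_freeze()/page_offline_thaw(), so a page
 * cannot become PageOffline() in between. The function name is
 * hypothetical, and the sketch assumes a directly mapped (lowmem) page.
 */
static __maybe_unused bool page_offline_example_read(struct page *page,
						     void *buf)
{
	bool copied = false;

	page_offline_freeze();
	if (!PageOffline(page)) {
		memcpy(buf, page_to_virt(page), PAGE_SIZE);
		copied = true;
	}
	page_offline_thaw();

	return copied;
}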
1136
1137#ifndef flush_dcache_folio
1138void flush_dcache_folio(struct folio *folio)
1139{
1140	long i, nr = folio_nr_pages(folio);
1141
1142	for (i = 0; i < nr; i++)
1143		flush_dcache_page(folio_page(folio, i));
1144}
1145EXPORT_SYMBOL(flush_dcache_folio);
1146#endif
1147