xref: /kernel/linux/linux-6.6/arch/sh/mm/mmap.c (revision 62306a36)
/*
 * arch/sh/mm/mmap.c
 *
 * Copyright (C) 2008 - 2009  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <asm/page.h>
#include <asm/processor.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);
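/*
 * Note that PAGE_SIZE - 1 is only the default: CPUs whose data caches
 * can alias are expected to widen this mask at boot to cover the full
 * cache colouring span, so everything below treats it as a runtime
 * value rather than a constant.
 */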

#ifdef CONFIG_MMU
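/*
 * Map each VM_READ/VM_WRITE/VM_EXEC/VM_SHARED combination to its page
 * protection.  Private writable mappings get the copy-on-write
 * protections; only shared writable mappings are given real write
 * permission.  DECLARE_VM_GET_PAGE_PROT below emits the generic
 * vm_get_page_prot() that indexes this table.
 */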
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READONLY,
	[VM_WRITE]					= PAGE_COPY,
	[VM_WRITE | VM_READ]				= PAGE_COPY,
	[VM_EXEC]					= PAGE_EXECREAD,
	[VM_EXEC | VM_READ]				= PAGE_EXECREAD,
	[VM_EXEC | VM_WRITE]				= PAGE_COPY,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READONLY,
	[VM_SHARED | VM_WRITE]				= PAGE_WRITEONLY,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
	[VM_SHARED | VM_EXEC]				= PAGE_EXECREAD,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_EXECREAD,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_RWX,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_RWX
};
DECLARE_VM_GET_PAGE_PROT

/*
 * To avoid cache aliases, we map the shared page with the same colour.
 */
static inline unsigned long COLOUR_ALIGN(unsigned long addr,
					 unsigned long pgoff)
{
	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	return base + off;
}
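
/*
 * Worked example, assuming (purely for illustration) a 16 KiB aliasing
 * span (shm_align_mask == 0x3fff) and PAGE_SHIFT == 12:
 *
 *	COLOUR_ALIGN(0x29571000, 3)
 *		base = (0x29571000 + 0x3fff) & ~0x3fff = 0x29574000
 *		off  = (3 << 12) & 0x3fff              = 0x3000
 *		returns 0x29574000 + 0x3000            = 0x29577000
 *
 * The low bits of the result match the low bits of the file offset, so
 * this mapping shares a cache colour with every other mapping of the
 * same offset.
 */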

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_colour_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

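	/*
	 * No usable hint: have vm_unmapped_area() find a bottom-up slot.
	 * When colouring matters, align_mask/align_offset constrain the
	 * search so the returned address carries the same colour bits as
	 * the file offset, mirroring COLOUR_ALIGN() above.
	 */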
	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}

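/*
 * Top-down variant, used when the mm layout places new mappings just
 * below mm->mmap_base (the default, non-legacy layout).
 */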
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_colour_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

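	/*
	 * Search top-down below mmap_base, with the same colour
	 * constraints as the bottom-up case.
	 */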
	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
#endif /* CONFIG_MMU */

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	if (addr < __MEMORY_START)
		return 0;
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

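/*
 * Unlike read()/write(), mmap() of physical memory is not restricted
 * by the architecture here; every pfn is allowed.
 */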
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}