// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cachetype.h>

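/*
 * COLOUR_ALIGN() rounds addr up to the next SHMLBA boundary, then adds
 * the cache-colour offset implied by pgoff, so that every mapping of a
 * given file page shares one colour.  E.g. with SHMLBA = 16K and
 * pgoff = 1, addr 0x5000 rounds up to 0x8000 and becomes 0x9000.
 */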
#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))

/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)

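/*
 * Use the legacy (bottom-up) layout if the task asked for it via
 * ADDR_COMPAT_LAYOUT, if the stack rlimit is unlimited (so no sane gap
 * can be reserved below TASK_SIZE), or if it was requested globally
 * through the legacy_va_layout sysctl.
 */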
static int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

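/*
 * The top-down base sits TASK_SIZE minus the stack gap minus the ASLR
 * offset.  The gap tracks RLIMIT_STACK but is clamped: e.g. an 8MB
 * stack limit is raised to MIN_GAP (128MB), so the base lands 128MB
 * plus rnd below TASK_SIZE.
 */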
static unsigned long mmap_base(unsigned long rnd)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}

/*
 * Shared mappings must be suitably aligned to avoid aliasing issues
 * with VIPT caches: a given page of an object must always be mapped
 * at a multiple of SHMLBA bytes.
 *
 * We provide this function unconditionally; in the VIVT case the
 * alignment rules are optimised out.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/*
	 * Honour MAP_FIXED, but reject shared mappings whose fixed
	 * address would break the SHMLBA colour relationship between
	 * address and file offset.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

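	/*
	 * Search bottom-up from mmap_base.  align_mask keeps only the
	 * colour bits above page granularity, so vm_unmapped_area()
	 * hands back an address whose colour matches the one implied
	 * by pgoff.
	 */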
	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			const unsigned long len, const unsigned long pgoff,
			const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	/* requesting a specific address */
	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

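	/*
	 * Search top-down from just below mmap_base, with the same
	 * colour constraint as the bottom-up case.
	 */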
	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = FIRST_USER_ADDRESS;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.  The unaligned-address test below works because
	 * vm_unmapped_area() returns either a page-aligned address or
	 * a negative errno.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = mm->mmap_base;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

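/*
 * The ASLR offset: mmap_rnd_bits random bits, shifted into page units.
 * E.g. with mmap_rnd_bits = 8 this gives up to (2^8 - 1) pages of
 * offset, i.e. just under 1MB with 4K pages.
 */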
unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

	rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}

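/*
 * Pick between the two layouts: legacy grows the mmap area upwards
 * from TASK_UNMAPPED_BASE, while the default grows it downwards from
 * a base just below the stack gap.
 */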
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.  Accesses are limited to the directly
 * mapped RAM, i.e. [PHYS_OFFSET, __pa(high_memory)).
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	if (addr < PHYS_OFFSET)
		return 0;
	if (addr + size > __pa(high_memory - 1) + 1)
		return 0;

	return 1;
}

/*
 * Do not allow /dev/mem mappings beyond the supported physical range:
 * the last page of the mapping must still fall within PHYS_MASK, the
 * highest physical address the page tables can express.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}

#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks whether /dev/mem access to a certain
 * address is valid.  The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions.  This effectively disables read()
 * and write() on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}

#endif