From: Nick Piggin <npiggin@suse.de>
Subject: avoid silent stack overflow over the heap
Patch-mainline: no
References: bnc#44807 bnc#211997

This is a rewrite of Andrea Arcangeli's patch, which implements a stack
guard feature. That is, it prevents the stack from growing right next
to another vma, and prevents other vmas from being allocated right next
to the stack. If the application overflows its stack, this causes a
segfault rather than the stack silently overwriting other memory areas
(e.g. the heap).

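For illustration only, the check that every get_unmapped_area variant gains
amounts to the following helper. Note that fits_below() does not exist in
the patch; each hunk open-codes the equivalent test, clamping against
mm->task_size or the local mapping limit instead of TASK_SIZE where that is
the right bound:

    /* would [addr, addr+len) still fit below @vma with the gap included? */
    static int fits_below(struct vm_area_struct *vma,
                          unsigned long addr, unsigned long len)
    {
            unsigned long guard = 0;

            if (vma->vm_flags & VM_GROWSDOWN)
                    /* keep heap_stack_gap pages free below the stack vma */
                    guard = min(TASK_SIZE - (addr + len),
                                (unsigned long)heap_stack_gap << PAGE_SHIFT);

            return addr + len + guard <= vma->vm_start;
    }
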
I have rewritten it so that it does not require changes to the expand_stack
prototype, and added support for growsup stacks and for powerpc and ia64.

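The width of the gap is controlled by the new vm.heap-stack-gap sysctl added
below. It is measured in pages and defaults to one page; for example,
"echo 4 > /proc/sys/vm/heap-stack-gap" widens the gap to four pages, while
setting it to 0 effectively disables the extra gap.
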
Signed-off-by: Nick Piggin <npiggin@suse.de>
---
 arch/ia64/kernel/sys_ia64.c  |   11 +++++
 arch/powerpc/mm/slice.c      |   82 +++++++++++++++++++++++++-----------------
 arch/x86/kernel/sys_x86_64.c |   52 ++++++++++++++++++++------
 include/linux/mm.h           |    1 
 kernel/sysctl.c              |    8 ++++
 mm/mmap.c                    |   83 ++++++++++++++++++++++++++++++++++-------
 6 files changed, 178 insertions(+), 59 deletions(-)

--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -59,6 +59,8 @@ arch_get_unmapped_area (struct file *fil
 start_addr = addr = (addr + align_mask) & ~align_mask;

 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+ unsigned long guard;
+
 /* At this point: (!vma || addr < vma->vm_end). */
 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
 if (start_addr != TASK_UNMAPPED_BASE) {
@@ -68,7 +70,14 @@ arch_get_unmapped_area (struct file *fil
 }
 return -ENOMEM;
 }
- if (!vma || addr + len <= vma->vm_start) {
+ if (!vma)
+ goto got_it;
+ guard = 0;
+ if (vma->vm_flags & VM_GROWSDOWN)
+ guard = min(TASK_SIZE - (addr + len),
+ (unsigned long)heap_stack_gap << PAGE_SHIFT);
+ if (addr + len + guard <= vma->vm_start) {
+got_it:
 /* Remember the address where we stopped this search: */
 mm->free_area_cache = addr + len;
 return addr;
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -94,11 +94,21 @@ static int slice_area_is_free(struct mm_
 unsigned long len)
 {
 struct vm_area_struct *vma;
+ unsigned long guard;

 if ((mm->task_size - len) < addr)
 return 0;
 vma = find_vma(mm, addr);
- return (!vma || (addr + len) <= vma->vm_start);
+ if (!vma)
+ return 1;
+
+ guard = 0;
+ if (vma->vm_flags & VM_GROWSDOWN)
+ guard = min(mm->task_size - (addr + len),
+ (unsigned long)heap_stack_gap << PAGE_SHIFT);
+ if (addr + len + guard <= vma->vm_start)
+ return 1;
+ return 0;
 }

 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
@@ -242,8 +252,10 @@ static unsigned long slice_find_area_bot

 full_search:
 for (;;) {
+ unsigned long guard;
+
 addr = _ALIGN_UP(addr, 1ul << pshift);
- if ((TASK_SIZE - len) < addr)
+ if ((mm->task_size - len) < addr)
 break;
 vma = find_vma(mm, addr);
 BUG_ON(vma && (addr >= vma->vm_end));
@@ -256,7 +268,14 @@ full_search:
 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
 continue;
 }
- if (!vma || addr + len <= vma->vm_start) {
+ if (!vma)
+ goto got_it;
+ guard = 0;
+ if (vma->vm_flags & VM_GROWSDOWN)
+ guard = min(mm->task_size - (addr + len),
+ (unsigned long)heap_stack_gap << PAGE_SHIFT);
+ if (addr + len + guard <= vma->vm_start) {
+got_it:
 /*
 * Remember the place where we stopped the search:
 */
@@ -264,8 +283,8 @@ full_search:
 mm->free_area_cache = addr + len;
 return addr;
 }
- if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
+ if (use_cache && (addr + guard + mm->cached_hole_size) < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - (addr + guard);
 addr = vma->vm_end;
 }

@@ -284,37 +303,23 @@ static unsigned long slice_find_area_top
 int psize, int use_cache)
 {
 struct vm_area_struct *vma;
- unsigned long addr;
+ unsigned long start_addr, addr;
 struct slice_mask mask;
 int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);

- /* check if free_area_cache is useful for us */
 if (use_cache) {
 if (len <= mm->cached_hole_size) {
+ start_addr = addr = mm->mmap_base;
 mm->cached_hole_size = 0;
- mm->free_area_cache = mm->mmap_base;
- }
-
- /* either no address requested or can't fit in requested
- * address hole
- */
- addr = mm->free_area_cache;
-
- /* make sure it can fit in the remaining address space */
- if (addr > len) {
- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
- mask = slice_range_to_mask(addr, len);
- if (slice_check_fit(mask, available) &&
- slice_area_is_free(mm, addr, len))
- /* remember the address as a hint for
- * next time
- */
- return (mm->free_area_cache = addr);
- }
- }
+ } else
+ start_addr = addr = mm->free_area_cache;
+ } else
+ start_addr = addr = mm->mmap_base;

- addr = mm->mmap_base;
+full_search:
 while (addr > len) {
+ unsigned long guard;
+
 /* Go down by chunk size */
 addr = _ALIGN_DOWN(addr - len, 1ul << pshift);

@@ -336,7 +341,15 @@ static unsigned long slice_find_area_top
 * return with success:
 */
 vma = find_vma(mm, addr);
- if (!vma || (addr + len) <= vma->vm_start) {
+
+ if (!vma)
+ goto got_it;
+ guard = 0;
+ if (vma->vm_flags & VM_GROWSDOWN)
+ guard = min(mm->task_size - (addr + len),
+ (unsigned long)heap_stack_gap << PAGE_SHIFT);
+ if (addr + len + guard <= vma->vm_start) {
+got_it:
 /* remember the address as a hint for next time */
 if (use_cache)
 mm->free_area_cache = addr;
@@ -344,11 +357,16 @@ static unsigned long slice_find_area_top
 }

 /* remember the largest hole we saw so far */
- if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
+ if (use_cache && (addr + guard + mm->cached_hole_size) < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - (addr + guard);

 /* try just below the current vma->vm_start */
- addr = vma->vm_start;
+ addr = vma->vm_start - guard;
+ }
+ if (start_addr != mm->mmap_base) {
+ start_addr = addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ goto full_search;
 }

 /*
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -106,6 +106,8 @@ arch_get_unmapped_area(struct file *filp

 full_search:
 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+ unsigned long guard;
+
 /* At this point: (!vma || addr < vma->vm_end). */
 if (end - len < addr) {
 /*
@@ -119,15 +121,22 @@ full_search:
 }
 return -ENOMEM;
 }
- if (!vma || addr + len <= vma->vm_start) {
+ if (!vma)
+ goto got_it;
+ guard = 0;
+ if (vma->vm_flags & VM_GROWSDOWN)
+ guard = min(end - (addr + len),
+ (unsigned long)heap_stack_gap << PAGE_SHIFT);
+ if (addr + len + guard <= vma->vm_start) {
+got_it:
 /*
 * Remember the place where we stopped the search:
 */
 mm->free_area_cache = addr + len;
 return addr;
 }
- if (addr + mm->cached_hole_size < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
+ if (addr + guard + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - (addr + guard);

 addr = vma->vm_end;
 }
@@ -174,34 +183,51 @@ arch_get_unmapped_area_topdown(struct fi

 /* make sure it can fit in the remaining address space */
 if (addr > len) {
- vma = find_vma(mm, addr-len);
- if (!vma || addr <= vma->vm_start)
- /* remember the address as a hint for next time */
- return (mm->free_area_cache = addr-len);
+ unsigned long guard;
+
+ addr -= len;
+ vma = find_vma(mm, addr);
+ if (!vma)
+ goto got_it;
+ guard = 0;
+ if (vma->vm_flags & VM_GROWSDOWN)
+ guard = min(TASK_SIZE - (addr + len),
+ (unsigned long)heap_stack_gap << PAGE_SHIFT);
+ if (addr + len + guard <= vma->vm_start)
+ goto got_it;
 }

 if (mm->mmap_base < len)
 goto bottomup;

 addr = mm->mmap_base-len;
-
 do {
+ unsigned long guard;
 /*
 * Lookup failure means no vma is above this address,
 * else if new region fits below vma->vm_start,
 * return with success:
 */
 vma = find_vma(mm, addr);
- if (!vma || addr+len <= vma->vm_start)
+ if (!vma)
+ goto got_it;
+ guard = 0;
+ if (vma->vm_flags & VM_GROWSDOWN)
+ guard = min(TASK_SIZE - (addr + len),
+ (unsigned long)heap_stack_gap << PAGE_SHIFT);
+ if (addr + len + guard <= vma->vm_start) {
+got_it:
 /* remember the address as a hint for next time */
- return (mm->free_area_cache = addr);
+ mm->free_area_cache = addr;
+ return addr;
+ }

 /* remember the largest hole we saw so far */
- if (addr + mm->cached_hole_size < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
+ if (addr + guard + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - (addr + guard);

 /* try just below the current vma->vm_start */
- addr = vma->vm_start-len;
+ addr = vma->vm_start - (len + guard);
 } while (len < vma->vm_start);

 bottomup:
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1194,6 +1194,7 @@ void page_cache_async_readahead(struct a
 unsigned long max_sane_readahead(unsigned long nr);

 /* Do stack extension */
+extern int heap_stack_gap;
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
 #ifdef CONFIG_IA64
 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1207,6 +1207,14 @@ static struct ctl_table vm_table[] = {
 .extra2 = &one,
 },
 #endif
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "heap-stack-gap",
+ .data = &heap_stack_gap,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
 /*
 * NOTE: do not add new entries to this table unless you have read
 * Documentation/sysctl/ctl_unnumbered.txt
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -85,6 +85,7 @@ int sysctl_overcommit_memory = OVERCOMMI
 int sysctl_overcommit_ratio = 50; /* default is 50% */
 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
 atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);
+int heap_stack_gap __read_mostly = 1;

 /* amount of vm to protect from userspace access */
 unsigned long mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
@@ -1293,6 +1294,8 @@ arch_get_unmapped_area(struct file *filp

 full_search:
 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+ unsigned long guard;
+
 /* At this point: (!vma || addr < vma->vm_end). */
 if (TASK_SIZE - len < addr) {
 /*
@@ -1307,15 +1310,23 @@ full_search:
 }
 return -ENOMEM;
 }
- if (!vma || addr + len <= vma->vm_start) {
+ if (!vma)
+ goto got_it;
+ guard = 0;
+ if (vma->vm_flags & VM_GROWSDOWN)
+ guard = min(TASK_SIZE - (addr + len),
+ (unsigned long)heap_stack_gap << PAGE_SHIFT);
+ if (addr + len + guard <= vma->vm_start) {
+got_it:
 /*
 * Remember the place where we stopped the search:
 */
 mm->free_area_cache = addr + len;
 return addr;
 }
- if (addr + mm->cached_hole_size < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
+ if (addr + guard + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - (addr + guard);
+
 addr = vma->vm_end;
 }
 }
@@ -1373,34 +1384,51 @@ arch_get_unmapped_area_topdown(struct fi

 /* make sure it can fit in the remaining address space */
 if (addr > len) {
- vma = find_vma(mm, addr-len);
- if (!vma || addr <= vma->vm_start)
- /* remember the address as a hint for next time */
- return (mm->free_area_cache = addr-len);
+ unsigned long guard;
+
+ addr -= len;
+ vma = find_vma(mm, addr);
+ if (!vma)
+ goto got_it;
+ guard = 0;
+ if (vma->vm_flags & VM_GROWSDOWN)
+ guard = min(TASK_SIZE - (addr + len),
+ (unsigned long)heap_stack_gap << PAGE_SHIFT);
+ if (addr + len + guard <= vma->vm_start)
+ goto got_it;
 }

 if (mm->mmap_base < len)
 goto bottomup;

 addr = mm->mmap_base-len;
-
 do {
+ unsigned long guard;
 /*
 * Lookup failure means no vma is above this address,
 * else if new region fits below vma->vm_start,
 * return with success:
 */
 vma = find_vma(mm, addr);
- if (!vma || addr+len <= vma->vm_start)
+ if (!vma)
+ goto got_it;
+ guard = 0;
+ if (vma->vm_flags & VM_GROWSDOWN)
+ guard = min(TASK_SIZE - (addr + len),
+ (unsigned long)heap_stack_gap << PAGE_SHIFT);
+ if (addr + len + guard <= vma->vm_start) {
+got_it:
 /* remember the address as a hint for next time */
- return (mm->free_area_cache = addr);
+ mm->free_area_cache = addr;
+ return addr;
+ }

 /* remember the largest hole we saw so far */
- if (addr + mm->cached_hole_size < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
+ if (addr + guard + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - (addr + guard);

 /* try just below the current vma->vm_start */
- addr = vma->vm_start-len;
+ addr = vma->vm_start - (len + guard);
 } while (len < vma->vm_start);

 bottomup:
@@ -1623,6 +1651,19 @@ int expand_upwards(struct vm_area_struct
 /* Somebody else might have raced and expanded it already */
 if (address > vma->vm_end) {
 unsigned long size, grow;
+#ifdef CONFIG_STACK_GROWSUP
+ unsigned long guard;
+ struct vm_area_struct *vm_next;
+
+ error = -ENOMEM;
+ guard = min(TASK_SIZE - address,
+ (unsigned long)heap_stack_gap << PAGE_SHIFT);
+ vm_next = find_vma(vma->vm_mm, address + guard);
+ if (unlikely(vm_next && vm_next != vma)) {
+ /* stack collision with another vma */
+ goto out_unlock;
+ }
+#endif

 size = address - vma->vm_start;
 grow = (address - vma->vm_end) >> PAGE_SHIFT;
@@ -1631,6 +1672,7 @@ int expand_upwards(struct vm_area_struct
 if (!error)
 vma->vm_end = address;
 }
+out_unlock: __maybe_unused
 anon_vma_unlock(vma);
 return error;
 }
@@ -1667,7 +1709,21 @@ static inline int expand_downwards(struc
 /* Somebody else might have raced and expanded it already */
 if (address < vma->vm_start) {
 unsigned long size, grow;
+ struct vm_area_struct *prev_vma;
+
+ find_vma_prev(vma->vm_mm, address, &prev_vma);

+ error = -ENOMEM;
+ if (prev_vma) {
+ unsigned long guard;
+
+ guard = min(TASK_SIZE - prev_vma->vm_end,
+ (unsigned long)heap_stack_gap << PAGE_SHIFT);
+ if (unlikely(prev_vma->vm_end + guard > address)) {
+ /* stack collision with another vma */
+ goto out_unlock;
+ }
+ }
 size = vma->vm_end - address;
 grow = (vma->vm_start - address) >> PAGE_SHIFT;

@@ -1677,6 +1733,7 @@ static inline int expand_downwards(struc
 vma->vm_pgoff -= grow;
 }
 }
+ out_unlock:
 anon_vma_unlock(vma);
 return error;
 }