/* Library support for -fsplit-stack.  */
/* Copyright (C) 2009, 2010, 2011 Free Software Foundation, Inc.
   Contributed by Ian Lance Taylor <iant@google.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

#include "tconfig.h"
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
#include "libgcc_tm.h"
/* If inhibit_libc is defined, we cannot compile this file.  The
   effect is that people will not be able to use -fsplit-stack.  That
   is much better than failing the build, particularly since people
   will want to define inhibit_libc while building a compiler which
   can build glibc.  */
#ifndef inhibit_libc

#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include "generic-morestack.h"

/* This file contains subroutines that are used by code compiled with
   -fsplit-stack.  */

/* Declare functions to avoid warnings--there is no header file for
   these internal functions.  We give most of these functions the
   flatten attribute in order to minimize their stack usage--here we
   must minimize stack usage even at the cost of code size, and in
   general inlining everything will do that.  */

extern void
__generic_morestack_set_initial_sp (void *sp, size_t len)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void *
__generic_morestack (size_t *frame_size, void *old_stack, size_t param_size)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void *
__generic_releasestack (size_t *pavailable)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void
__morestack_block_signals (void)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void
__morestack_unblock_signals (void)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern size_t
__generic_findstack (void *stack)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void
__morestack_load_mmap (void)
  __attribute__ ((no_split_stack, visibility ("hidden")));

extern void *
__morestack_allocate_stack_space (size_t size)
  __attribute__ ((visibility ("hidden")));
/* This is a function which -fsplit-stack code can call to get a list
   of the stacks.  It is not hidden, since it is not called only by
   compiler-generated code.  */
extern void *
__splitstack_find (void *, void *, size_t *, void **, void **, void **)
  __attribute__ ((visibility ("default")));

/* When we allocate a stack segment we put this header at the
   start.  */

struct stack_segment
{
  /* The previous stack segment--when a function running on this stack
     segment returns, it will run on the previous one.  */
  struct stack_segment *prev;
  /* The next stack segment, if it has been allocated--when a function
     is running on this stack segment, the next one is not being
     used.  */
  struct stack_segment *next;
  /* The total size of this stack segment.  */
  size_t size;
  /* The stack address when this stack was created.  This is used when
     popping the stack.  */
  void *old_stack;
  /* A list of memory blocks allocated by dynamic stack
     allocation.  */
  struct dynamic_allocation_blocks *dynamic_allocation;
  /* A list of dynamic memory blocks no longer needed.  */
  struct dynamic_allocation_blocks *free_dynamic_allocation;
  /* An extra pointer in case we need some more information some
     day.  */
  void *extra;
};
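/* For illustration, a segment as laid out by allocate_segment below,
   on a target where the stack grows downward:

     low address  [ struct stack_segment | SIZE usable bytes ]  high address

   Code running on the segment starts with its stack pointer at the
   high end of the usable bytes and grows down toward this header.  */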
/* This structure holds the (approximate) initial stack pointer and
   size for the system supplied stack for a thread.  This is set when
   the thread is created.  We also store a sigset_t here to hold the
   signal mask while splitting the stack, since we don't want to store
   that on the stack.  */

struct initial_sp
{
  /* The initial stack pointer.  */
  void *sp;
  /* The stack length.  */
  size_t len;
  /* A signal mask, put here so that the thread can use it without
     needing stack space.  */
  sigset_t mask;
  /* Some extra space for later extensibility.  */
  void *extra[5];
};

/* A list of memory blocks allocated by dynamic stack allocation.
   This is used for code that calls alloca or uses variably sized
   arrays.  */

struct dynamic_allocation_blocks
{
  /* The next block in the list.  */
  struct dynamic_allocation_blocks *next;
  /* The size of the allocated memory.  */
  size_t size;
  /* The allocated memory.  */
  void *block;
};

/* These thread local global variables must be shared by all split
   stack code across shared library boundaries.  Therefore, they have
   default visibility.  They have extensibility fields if needed for
   new versions.  If more radical changes are needed, new code can be
   written using new variable names, while still using the existing
   variables in a backward compatible manner.  Symbol versioning is
   also used, although, since these variables are only referenced by
   code in this file and generic-morestack-thread.c, it is likely that
   simply using new names will suffice.  */

/* The first stack segment allocated for this thread.  */

__thread struct stack_segment *__morestack_segments
  __attribute__ ((visibility ("default")));

/* The stack segment that we think we are currently using.  This will
   be correct in normal usage, but will be incorrect if an exception
   unwinds into a different stack segment or if longjmp jumps to a
   different stack segment.  */

__thread struct stack_segment *__morestack_current_segment
  __attribute__ ((visibility ("default")));

/* The initial stack pointer and size for this thread.  */

__thread struct initial_sp __morestack_initial_sp
  __attribute__ ((visibility ("default")));

/* A static signal mask, to avoid taking up stack space.  */

static sigset_t __morestack_fullmask;
/* Convert an integer to a decimal string without using much stack
   space.  Return a pointer to the part of the buffer to use.  We use
   this instead of sprintf because sprintf would require too much
   stack space.  */
static char *
print_int (int val, char *buf, int buflen, size_t *print_len)
{
  int is_negative;
  int i;
  unsigned int uval;

  uval = (unsigned int) val;
  if (val >= 0)
    is_negative = 0;
  else
    {
      is_negative = 1;
      uval = - uval;
    }

  i = buflen;
  do
    {
      --i;
      buf[i] = '0' + (uval % 10);
      uval /= 10;
    }
  while (uval != 0 && i > 0);

  if (is_negative)
    {
      if (i > 0)
        --i;
      buf[i] = '-';
    }

  *print_len = buflen - i;
  return buf + i;
}
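/* For example, print_int (-503, buf, sizeof buf, &len) with a 24 byte
   buffer writes "-503" into buf[20..23], sets len to 4, and returns
   buf + 20.  */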
/* Print the string MSG/LEN, the errno number ERR, and a newline on
   stderr.  Then crash.  */

void
__morestack_fail (const char *, size_t, int) __attribute__ ((noreturn));

void
__morestack_fail (const char *msg, size_t len, int err)
{
  char buf[24];
  static const char nl[] = "\n";
  struct iovec iov[3];
  union { char *p; const char *cp; } const_cast;

  const_cast.cp = msg;
  iov[0].iov_base = const_cast.p;
  iov[0].iov_len = len;
  /* We can't call strerror, because it may try to translate the error
     message, and that would use too much stack space.  */
  iov[1].iov_base = print_int (err, buf, sizeof buf, &iov[1].iov_len);
  const_cast.cp = &nl[0];
  iov[2].iov_base = const_cast.p;
  iov[2].iov_len = sizeof nl - 1;
  /* FIXME: On systems without writev we need to issue three write
     calls, or punt on printing errno.  For now this is irrelevant
     since stack splitting only works on GNU/Linux anyhow.  */
  writev (2, iov, 3);
  abort ();
}
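/* For example, if mmap fails with ENOMEM (12 on GNU/Linux), the
   single writev above writes

     unable to allocate additional stack space: errno 12

   followed by a newline to stderr before the abort.  */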
/* Allocate a new stack segment.  FRAME_SIZE is the required frame
   size.  */

static struct stack_segment *
allocate_segment (size_t frame_size)
{
  static unsigned int static_pagesize;
  static int use_guard_page;
  unsigned int pagesize;
  unsigned int overhead;
  unsigned int allocate;
  void *space;
  struct stack_segment *pss;

  pagesize = static_pagesize;
  if (pagesize == 0)
    {
      unsigned int p;

      pagesize = getpagesize ();

#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
      p = __sync_val_compare_and_swap (&static_pagesize, 0, pagesize);
#else
      /* Just hope this assignment is atomic.  */
      static_pagesize = pagesize;
      p = 0;
#endif

      use_guard_page = getenv ("SPLIT_STACK_GUARD") != 0;

      /* FIXME: I'm not sure this assert should be in the released
         code.  */
      assert (p == 0 || p == pagesize);
    }

  overhead = sizeof (struct stack_segment);

  allocate = pagesize;
  if (allocate < MINSIGSTKSZ)
    allocate = ((MINSIGSTKSZ + overhead + pagesize - 1)
                & ~ (pagesize - 1));
  if (allocate < frame_size)
    allocate = ((frame_size + overhead + pagesize - 1)
                & ~ (pagesize - 1));
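  /* A worked example of the rounding above, assuming a 4096 byte page
     and a 56 byte struct stack_segment on a 64-bit target: for a
     frame_size of 10000, allocate becomes
     (10000 + 56 + 4095) & ~4095 == 12288, leaving 12232 usable bytes
     after the header.  */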
  if (use_guard_page)
    allocate += pagesize;

  /* FIXME: If this binary requires an executable stack, then we need
     to set PROT_EXEC.  Unfortunately figuring that out is complicated
     and target dependent.  We would need to use dl_iterate_phdr to
     see if there is any object which does not have a PT_GNU_STACK
     phdr, though only for architectures which use that mechanism.  */
  space = mmap (NULL, allocate, PROT_READ | PROT_WRITE,
                MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (space == MAP_FAILED)
    {
      static const char msg[] =
        "unable to allocate additional stack space: errno ";
      __morestack_fail (msg, sizeof msg - 1, errno);
    }

  if (use_guard_page)
    {
      void *guard;

#ifdef STACK_GROWS_DOWNWARD
      guard = space;
      space = (char *) space + pagesize;
#else
      guard = (char *) space + allocate - pagesize;
#endif

      mprotect (guard, pagesize, PROT_NONE);
      allocate -= pagesize;
    }
  pss = (struct stack_segment *) space;

  pss->prev = __morestack_current_segment;
  pss->next = NULL;
  pss->size = allocate - overhead;
  pss->dynamic_allocation = NULL;
  pss->free_dynamic_allocation = NULL;
  pss->extra = NULL;

  if (__morestack_current_segment != NULL)
    __morestack_current_segment->next = pss;
  else
    __morestack_segments = pss;

  return pss;
}

/* Free a list of dynamic blocks.  */

static void
free_dynamic_blocks (struct dynamic_allocation_blocks *p)
{
  while (p != NULL)
    {
      struct dynamic_allocation_blocks *next;

      next = p->next;
      free (p->block);
      free (p);
      p = next;
    }
}

/* Merge two lists of dynamic blocks.  */

static struct dynamic_allocation_blocks *
merge_dynamic_blocks (struct dynamic_allocation_blocks *a,
                      struct dynamic_allocation_blocks *b)
{
  struct dynamic_allocation_blocks **pp;

  if (a == NULL)
    return b;
  if (b == NULL)
    return a;
  for (pp = &a->next; *pp != NULL; pp = &(*pp)->next)
    ;
  *pp = b;
  return a;
}
/* Release stack segments.  If FREE_DYNAMIC is non-zero, we also free
   any dynamic blocks.  Otherwise we return them.  */

struct dynamic_allocation_blocks *
__morestack_release_segments (struct stack_segment **pp, int free_dynamic)
{
  struct dynamic_allocation_blocks *ret;
  struct stack_segment *pss;

  ret = NULL;
  pss = *pp;
  while (pss != NULL)
    {
      struct stack_segment *next;
      unsigned int allocate;

      next = pss->next;

      if (pss->dynamic_allocation != NULL
          || pss->free_dynamic_allocation != NULL)
        {
          if (free_dynamic)
            {
              free_dynamic_blocks (pss->dynamic_allocation);
              free_dynamic_blocks (pss->free_dynamic_allocation);
            }
          else
            {
              ret = merge_dynamic_blocks (pss->dynamic_allocation, ret);
              ret = merge_dynamic_blocks (pss->free_dynamic_allocation, ret);
            }
        }

      allocate = pss->size + sizeof (struct stack_segment);
      if (munmap (pss, allocate) < 0)
        {
          static const char msg[] = "munmap of stack space failed: errno ";
          __morestack_fail (msg, sizeof msg - 1, errno);
        }

      pss = next;
    }
  *pp = NULL;

  return ret;
}
/* This function is called by a processor specific function to set the
   initial stack pointer for a thread.  The operating system will
   always create a stack for a thread.  Here we record a stack pointer
   near the base of that stack.  The size argument lets the processor
   specific code estimate how much stack space is available on this
   initial stack.  */

void
__generic_morestack_set_initial_sp (void *sp, size_t len)
{
  /* The stack pointer most likely starts on a page boundary.  Adjust
     to the nearest 512 byte boundary.  It's not essential that we be
     precise here; getting it wrong will just leave some stack space
     unused.  */
#ifdef STACK_GROWS_DOWNWARD
  sp = (void *) ((((__UINTPTR_TYPE__) sp + 511U) / 512U) * 512U);
#else
  sp = (void *) ((((__UINTPTR_TYPE__) sp - 511U) / 512U) * 512U);
#endif
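  /* For example, in the STACK_GROWS_DOWNWARD case an sp of 1000 rounds
     up to 1024: (1000 + 511) / 512 is 2, and 2 * 512 is 1024.  */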
  __morestack_initial_sp.sp = sp;
  __morestack_initial_sp.len = len;
  sigemptyset (&__morestack_initial_sp.mask);

  sigfillset (&__morestack_fullmask);
#ifdef __linux__
  /* On Linux, the first two real time signals are used by the NPTL
     threading library.  By taking them out of the set of signals, we
     avoid copying the signal mask in pthread_sigmask.  More
     importantly, pthread_sigmask uses less stack space on x86_64.  */
  sigdelset (&__morestack_fullmask, __SIGRTMIN);
  sigdelset (&__morestack_fullmask, __SIGRTMIN + 1);
#endif
}
/* This function is called by a processor specific function which is
   run in the prologue when more stack is needed.  The processor
   specific function handles the details of saving registers and
   frobbing the actual stack pointer.  This function is responsible
   for allocating a new stack segment and for copying a parameter
   block from the old stack to the new one.  On function entry
   *PFRAME_SIZE is the size of the required stack frame--the returned
   stack must be at least this large.  On function exit *PFRAME_SIZE
   is the amount of space remaining on the allocated stack.  OLD_STACK
   points at the parameters on the old stack (really the current one
   while this function is running).  OLD_STACK is saved so that it can
   be returned by a later call to __generic_releasestack.  PARAM_SIZE
   is the size in bytes of parameters to copy to the new stack.  This
   function returns a pointer to the new stack segment, pointing to
   the memory after the parameters have been copied.  The returned
   value minus the returned *PFRAME_SIZE (or plus if the stack grows
   upward) is the first address on the stack which should not be used.

   This function is running on the old stack and has only a limited
   amount of stack space available.  */
void *
__generic_morestack (size_t *pframe_size, void *old_stack, size_t param_size)
{
  size_t frame_size = *pframe_size;
  struct stack_segment *current;
  struct stack_segment **pp;
  struct dynamic_allocation_blocks *dynamic;
  char *from;
  char *to;
  void *ret;
  size_t i;

  current = __morestack_current_segment;

  pp = current != NULL ? &current->next : &__morestack_segments;
  if (*pp != NULL && (*pp)->size < frame_size)
    dynamic = __morestack_release_segments (pp, 0);
  else
    dynamic = NULL;
  current = *pp;

  if (current == NULL)
    current = allocate_segment (frame_size + param_size);

  current->old_stack = old_stack;

  __morestack_current_segment = current;

  if (dynamic != NULL)
    {
      /* Move the free blocks onto our list.  We don't want to call
         free here, as we are short on stack space.  */
      current->free_dynamic_allocation =
        merge_dynamic_blocks (dynamic, current->free_dynamic_allocation);
    }

  *pframe_size = current->size - param_size;

#ifdef STACK_GROWS_DOWNWARD
  {
    char *bottom = (char *) (current + 1) + current->size;
    to = bottom - param_size;
    ret = bottom - param_size;
  }
#else
  to = (char *) (current + 1);
  ret = (char *) (current + 1) + param_size;
#endif
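  /* For illustration, in the STACK_GROWS_DOWNWARD case the segment now
     looks like this, with the parameter block copied to the highest
     addresses and the returned pointer RET just below it, serving as
     the new stack pointer:

       low  [ header | frame space ...... | param block ]  high
                                          ^ ret == to  */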
  /* We don't call memcpy to avoid worrying about the dynamic linker
     trying to resolve it.  */
  from = (char *) old_stack;
  for (i = 0; i < param_size; i++)
    *to++ = *from++;

  return ret;
}
/* This function is called by a processor specific function when it is
   ready to release a stack segment.  We don't actually release the
   stack segment, we just move back to the previous one.  The current
   stack segment will still be available if we need it in
   __generic_morestack.  This returns a pointer to the new stack
   segment to use, which is the one saved by a previous call to
   __generic_morestack.  The processor specific function is then
   responsible for actually updating the stack pointer.  This sets
   *PAVAILABLE to the amount of stack space now available.  */

void *
__generic_releasestack (size_t *pavailable)
{
  struct stack_segment *current;
  void *old_stack;

  current = __morestack_current_segment;
  old_stack = current->old_stack;
  current = current->prev;
  __morestack_current_segment = current;

  if (current != NULL)
    {
#ifdef STACK_GROWS_DOWNWARD
      *pavailable = (char *) old_stack - (char *) (current + 1);
#else
      *pavailable = (char *) (current + 1) + current->size - (char *) old_stack;
#endif
    }
  else
    {
      size_t used;

      /* We have popped back to the original stack.  */
#ifdef STACK_GROWS_DOWNWARD
      if ((char *) old_stack >= (char *) __morestack_initial_sp.sp)
        used = 0;
      else
        used = (char *) __morestack_initial_sp.sp - (char *) old_stack;
#else
      if ((char *) old_stack <= (char *) __morestack_initial_sp.sp)
        used = 0;
      else
        used = (char *) old_stack - (char *) __morestack_initial_sp.sp;
#endif

      if (used > __morestack_initial_sp.len)
        *pavailable = 0;
      else
        *pavailable = __morestack_initial_sp.len - used;
    }

  return old_stack;
}
/* Block signals while splitting the stack.  This avoids trouble if we
   try to invoke a signal handler which itself wants to split the
   stack.  */

extern int pthread_sigmask (int, const sigset_t *, sigset_t *)
  __attribute__ ((weak));
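/* Because pthread_sigmask is declared weak, referring to it here does
   not force the program to link against libpthread.  In a single
   threaded program the reference resolves to zero, the tests below
   fail, and we fall back to sigprocmask.  */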
void
__morestack_block_signals (void)
{
  if (pthread_sigmask)
    pthread_sigmask (SIG_BLOCK, &__morestack_fullmask,
                     &__morestack_initial_sp.mask);
  else
    sigprocmask (SIG_BLOCK, &__morestack_fullmask,
                 &__morestack_initial_sp.mask);
}

/* Unblock signals while splitting the stack.  */

void
__morestack_unblock_signals (void)
{
  if (pthread_sigmask)
    pthread_sigmask (SIG_SETMASK, &__morestack_initial_sp.mask, NULL);
  else
    sigprocmask (SIG_SETMASK, &__morestack_initial_sp.mask, NULL);
}
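/* As a sketch of the expected calling pattern (the exact sequence is
   in the target's __morestack stub and may differ), the stub brackets
   the stack switch with these two calls:

     __morestack_block_signals ();
     new_stack = __generic_morestack (&frame_size, old_stack, param_size);
     ... switch to new_stack ...
     __morestack_unblock_signals ();

   so that no signal handler can run while the segment chain and the
   stack pointer are inconsistent.  */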
/* This function is called to allocate dynamic stack space, for alloca
   or a variably sized array.  This is a regular function with
   sufficient stack space, so we just use malloc to allocate the
   space.  We attach the allocated blocks to the current stack
   segment, so that they will eventually be reused or freed.  */

void *
__morestack_allocate_stack_space (size_t size)
{
  struct stack_segment *seg, *current;
  struct dynamic_allocation_blocks *p;

  /* We have to block signals to avoid getting confused if we get
     interrupted by a signal whose handler itself uses alloca or a
     variably sized array.  */
  __morestack_block_signals ();

  /* Since we don't want to call free while we are low on stack space,
     we may have a list of already allocated blocks waiting to be
     freed.  Release them all, unless we find one that is large
     enough.  We don't look at every block to see if one is large
     enough, just the first one, because we aren't trying to build a
     memory allocator here, we're just trying to speed up common
     cases.  */

  current = __morestack_current_segment;
  p = NULL;
  for (seg = __morestack_segments; seg != NULL; seg = seg->next)
    {
      p = seg->free_dynamic_allocation;
      if (p != NULL)
        {
          if (p->size >= size)
            {
              seg->free_dynamic_allocation = p->next;
              break;
            }

          free_dynamic_blocks (p);
          seg->free_dynamic_allocation = NULL;
          p = NULL;
        }
    }

  if (p == NULL)
    {
      /* We need to allocate additional memory.  */
      p = malloc (sizeof (*p));
      if (p == NULL)
        abort ();
      p->size = size;
      p->block = malloc (size);
      if (p->block == NULL)
        abort ();
    }

  /* If we are still on the initial stack, then we have a space leak.
     FIXME.  */
  if (current != NULL)
    {
      p->next = current->dynamic_allocation;
      current->dynamic_allocation = p;
    }

  __morestack_unblock_signals ();

  return p->block;
}
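/* So for `buf = alloca (n)' in code compiled with -fsplit-stack, the
   compiler emits, in effect,
     buf = __morestack_allocate_stack_space (n);
   and the block stays attached to the current segment until that
   segment is reused or released.  */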
/* Find the stack segment for STACK and return the amount of space
   available.  This is used when unwinding the stack because of an
   exception, in order to reset the stack guard correctly.  */

size_t
__generic_findstack (void *stack)
{
  struct stack_segment *pss;
  size_t used;

  for (pss = __morestack_current_segment; pss != NULL; pss = pss->prev)
    {
      if ((char *) pss < (char *) stack
          && (char *) pss + pss->size > (char *) stack)
        {
          __morestack_current_segment = pss;
#ifdef STACK_GROWS_DOWNWARD
          return (char *) stack - (char *) (pss + 1);
#else
          return (char *) (pss + 1) + pss->size - (char *) stack;
#endif
        }
    }

  /* We have popped back to the original stack.  */
#ifdef STACK_GROWS_DOWNWARD
  if ((char *) stack >= (char *) __morestack_initial_sp.sp)
    used = 0;
  else
    used = (char *) __morestack_initial_sp.sp - (char *) stack;
#else
  if ((char *) stack <= (char *) __morestack_initial_sp.sp)
    used = 0;
  else
    used = (char *) stack - (char *) __morestack_initial_sp.sp;
#endif

  if (used > __morestack_initial_sp.len)
    return 0;
  else
    return __morestack_initial_sp.len - used;
}
/* This function is called at program startup time to make sure that
   mmap, munmap, and getpagesize are resolved if linking dynamically.
   We want to resolve them while we have enough stack for them, rather
   than calling into the dynamic linker while low on stack space.  */

void
__morestack_load_mmap (void)
{
  /* Call with bogus values to run faster.  We don't care if the call
     fails.  Pass __MORESTACK_CURRENT_SEGMENT to make sure that any
     TLS accessor function is resolved.  */
  mmap (__morestack_current_segment, 0, PROT_READ, MAP_ANONYMOUS, -1, 0);
  mprotect (NULL, 0, 0);
  munmap (0, getpagesize ());
}
/* This function may be used to iterate over the stack segments.
   This can be called like this.
     void *next_segment = NULL;
     void *next_sp = NULL;
     void *initial_sp = NULL;
     void *stack;
     size_t stack_size;
     while ((stack = __splitstack_find (next_segment, next_sp, &stack_size,
                                        &next_segment, &next_sp,
                                        &initial_sp)) != NULL)
       {
         // Stack segment starts at stack and is stack_size bytes long.
       }

   There is no way to iterate over the stack segments of a different
   thread.  However, what is permitted is for one thread to call this
   with the first two values NULL, to pass next_segment, next_sp, and
   initial_sp to a different thread, and then to suspend one way or
   another.  A different thread may run the subsequent
   __splitstack_find iterations.  Of course, this will only work if the
   first thread is suspended during the __splitstack_find iterations.
   If not, the second thread will be looking at the stack while it is
   changing, and anything could happen.

   FIXME: This should be declared in some header file, but where?  */
void *
__splitstack_find (void *segment_arg, void *sp, size_t *len,
                   void **next_segment, void **next_sp,
                   void **initial_sp)
{
  struct stack_segment *segment;
  void *ret;
  char *nsp;

  if (segment_arg == (void *) 1)
    {
      char *isp = (char *) *initial_sp;

      *next_segment = (void *) 2;
      *next_sp = NULL;
#ifdef STACK_GROWS_DOWNWARD
      if ((char *) sp >= isp)
        return NULL;
      *len = (char *) isp - (char *) sp;
      return sp;
#else
      if ((char *) sp <= (char *) isp)
        return NULL;
      *len = (char *) sp - (char *) isp;
      return (void *) isp;
#endif
    }
  else if (segment_arg == (void *) 2)
    return NULL;
  else if (segment_arg != NULL)
    segment = (struct stack_segment *) segment_arg;
  else
    {
      *initial_sp = __morestack_initial_sp.sp;
      segment = __morestack_current_segment;
      sp = (void *) &segment;
      while (1)
        {
          if (segment == NULL)
            return __splitstack_find ((void *) 1, sp, len, next_segment,
                                      next_sp, initial_sp);
          if ((char *) sp >= (char *) (segment + 1)
              && (char *) sp <= (char *) (segment + 1) + segment->size)
            break;
          segment = segment->prev;
        }
    }

  if (segment->prev == NULL)
    *next_segment = (void *) 1;
  else
    *next_segment = segment->prev;

  /* The old_stack value is the address of the function parameters of
     the function which called __morestack.  So if f1 called f2 which
     called __morestack, the stack looks like this:

         parameters       <- old_stack
         return in f1
         return in f2
         registers pushed by __morestack

     The registers pushed by __morestack may not be visible on any
     other stack, if we are being called by a signal handler
     immediately after the call to __morestack_unblock_signals.  We
     want to adjust our return value to include those registers.  This
     is target dependent.  */

  nsp = (char *) segment->old_stack;

#if defined (__x86_64__)
  nsp -= 12 * sizeof (void *);
#elif defined (__i386__)
  nsp -= 6 * sizeof (void *);
#else
#error "unrecognized target"
#endif

  *next_sp = (void *) nsp;

#ifdef STACK_GROWS_DOWNWARD
  *len = (char *) (segment + 1) + segment->size - (char *) sp;
  ret = (void *) sp;
#else
  *len = (char *) sp - (char *) (segment + 1);
  ret = (void *) (segment + 1);
#endif

  return ret;
}

#endif /* !defined (inhibit_libc) */