/* Source: libgcc/generic-morestack.c (GCC), blame view of commit 7458026b,
   contributed by Ian Lance Taylor; introduced with the -fsplit-stack
   option (common.opt).  */
1/* Library support for -fsplit-stack. */
2/* Copyright (C) 2009, 2010 Free Software Foundation, Inc.
3 Contributed by Ian Lance Taylor <iant@google.com>.
4
5This file is part of GCC.
6
7GCC is free software; you can redistribute it and/or modify it under
8the terms of the GNU General Public License as published by the Free
9Software Foundation; either version 3, or (at your option) any later
10version.
11
12GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13WARRANTY; without even the implied warranty of MERCHANTABILITY or
14FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15for more details.
16
17Under Section 7 of GPL version 3, you are granted additional
18permissions described in the GCC Runtime Library Exception, version
193.1, as published by the Free Software Foundation.
20
21You should have received a copy of the GNU General Public License and
22a copy of the GCC Runtime Library Exception along with this program;
23see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
24<http://www.gnu.org/licenses/>. */
25
26#include "tconfig.h"
27#include "tsystem.h"
28#include "coretypes.h"
29#include "tm.h"
30
31/* If inhibit_libc is defined, we can not compile this file. The
32 effect is that people will not be able to use -fsplit-stack. That
33 is much better than failing the build particularly since people
34 will want to define inhibit_libc while building a compiler which
35 can build glibc. */
36
37#ifndef inhibit_libc
38
39#include <assert.h>
40#include <errno.h>
41#include <signal.h>
42#include <stdlib.h>
43#include <unistd.h>
44#include <sys/mman.h>
45#include <sys/uio.h>
46
47#include "generic-morestack.h"
48
49/* This file contains subroutines that are used by code compiled with
50 -fsplit-stack. */
51
/* Declare functions to avoid warnings--there is no header file for
   these internal functions.  We give most of these functions the
   flatten attribute in order to minimize their stack usage--here we
   must minimize stack usage even at the cost of code size, and in
   general inlining everything will do that.  */

/* Record the initial (OS-supplied) stack pointer and stack size for
   this thread.  */

extern void
__generic_morestack_set_initial_sp (void *sp, size_t len)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

/* Switch to a stack segment with at least *FRAME_SIZE bytes, copying
   PARAM_SIZE bytes of parameters from OLD_STACK.  */

extern void *
__generic_morestack (size_t *frame_size, void *old_stack, size_t param_size)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

/* Pop back to the previous stack segment; sets *PAVAILABLE to the
   space remaining there.  */

extern void *
__generic_releasestack (size_t *pavailable)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

/* Block/unblock signals around a stack split, saving the old mask in
   __morestack_initial_sp.mask.  */

extern void
__morestack_block_signals (void)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void
__morestack_unblock_signals (void)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

/* Return the stack space available at address STACK.  */

extern size_t
__generic_findstack (void *stack)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

/* Force early dynamic-linker resolution of mmap and friends.  */

extern void
__morestack_load_mmap (void)
  __attribute__ ((no_split_stack, visibility ("hidden")));

/* Heap-allocate dynamic stack space (alloca / VLA) for split-stack
   code.  */

extern void *
__morestack_allocate_stack_space (size_t size)
  __attribute__ ((visibility ("hidden")));

/* This is a function which -fsplit-stack code can call to get a list
   of the stacks.  Since it is not called only by the compiler, it is
   not hidden.  */

extern void *
__splitstack_find (void *, void *, size_t *, void **, void **, void **)
  __attribute__ ((visibility ("default")));
97
/* When we allocate a stack segment we put this header at the
   start.  The usable stack memory follows immediately after this
   header within the same mmapped region.  */

struct stack_segment
{
  /* The previous stack segment--when a function running on this stack
     segment returns, it will run on the previous one.  */
  struct stack_segment *prev;
  /* The next stack segment, if it has been allocated--when a function
     is running on this stack segment, the next one is not being
     used.  */
  struct stack_segment *next;
  /* The usable size of this stack segment, not including this header
     (allocate_segment stores the mapping size minus the header
     overhead here).  */
  size_t size;
  /* The stack address when this stack was created.  This is used when
     popping the stack.  */
  void *old_stack;
  /* A list of memory blocks allocated by dynamic stack
     allocation.  */
  struct dynamic_allocation_blocks *dynamic_allocation;
  /* A list of dynamic memory blocks no longer needed.  */
  struct dynamic_allocation_blocks *free_dynamic_allocation;
  /* An extra pointer in case we need some more information some
     day.  */
  void *extra;
};
124
/* This structure holds the (approximate) initial stack pointer and
   size for the system supplied stack for a thread.  This is set when
   the thread is created.  We also store a sigset_t here to hold the
   signal mask while splitting the stack, since we don't want to store
   that on the stack.  */

struct initial_sp
{
  /* The initial stack pointer.  */
  void *sp;
  /* The stack length.  */
  size_t len;
  /* A signal mask, put here so that the thread can use it without
     needing stack space.  */
  sigset_t mask;
  /* Some extra space for later extensibility.  */
  void *extra[5];
};
143
/* A list of memory blocks allocated by dynamic stack allocation.
   This is used for code that calls alloca or uses variably sized
   arrays.  */

struct dynamic_allocation_blocks
{
  /* The next block in the list.  */
  struct dynamic_allocation_blocks *next;
  /* The size of the allocated memory.  */
  size_t size;
  /* The allocated memory (a separate malloc'ed buffer).  */
  void *block;
};
157
/* These thread local global variables must be shared by all split
   stack code across shared library boundaries.  Therefore, they have
   default visibility.  They have extensibility fields if needed for
   new versions.  If more radical changes are needed, new code can be
   written using new variable names, while still using the existing
   variables in a backward compatible manner.  Symbol versioning is
   also used, although, since these variables are only referenced by
   code in this file and generic-morestack-thread.c, it is likely that
   simply using new names will suffice.  */

/* The first stack segment allocated for this thread.  */

__thread struct stack_segment *__morestack_segments
  __attribute__ ((visibility ("default")));

/* The stack segment that we think we are currently using.  This will
   be correct in normal usage, but will be incorrect if an exception
   unwinds into a different stack segment or if longjmp jumps to a
   different stack segment.  */

__thread struct stack_segment *__morestack_current_segment
  __attribute__ ((visibility ("default")));

/* The initial stack pointer and size for this thread.  */

__thread struct initial_sp __morestack_initial_sp
  __attribute__ ((visibility ("default")));

/* A static signal mask, to avoid taking up stack space.  Filled in by
   __generic_morestack_set_initial_sp.  */

static sigset_t __morestack_fullmask;
189
/* Convert VAL to a decimal string without using much stack space
   (sprintf would need too much).  The digits are built backwards from
   the end of BUF (length BUFLEN); the length of the formatted text is
   stored in *PRINT_LEN and a pointer to its first character is
   returned.  */

static char *
print_int (int val, char *buf, int buflen, size_t *print_len)
{
  unsigned int magnitude = (unsigned int) val;
  int negative = val < 0;
  int pos = buflen;

  if (negative)
    magnitude = - magnitude;

  /* Emit digits least-significant first, walking backwards.  */
  do
    {
      buf[--pos] = '0' + (magnitude % 10);
      magnitude /= 10;
    }
  while (magnitude != 0 && pos > 0);

  if (negative)
    {
      if (pos > 0)
	--pos;
      buf[pos] = '-';
    }

  *print_len = buflen - pos;
  return buf + pos;
}
230
/* Print the string MSG/LEN, the errno number ERR, and a newline on
   stderr.  Then crash.  */

void
__morestack_fail (const char *, size_t, int) __attribute__ ((noreturn));

void
__morestack_fail (const char *msg, size_t len, int err)
{
  static const char nl[] = "\n";
  char numbuf[24];
  struct iovec vec[3];
  union { char *p; const char *cp; } deconst;

  /* We can't call strerror, because it may try to translate the error
     message, and that would use too much stack space.  */
  deconst.cp = msg;
  vec[0].iov_base = deconst.p;
  vec[0].iov_len = len;
  vec[1].iov_base = print_int (err, numbuf, sizeof numbuf, &vec[1].iov_len);
  deconst.cp = &nl[0];
  vec[2].iov_base = deconst.p;
  vec[2].iov_len = sizeof nl - 1;
  /* FIXME: On systems without writev we need to issue three write
     calls, or punt on printing errno.  For now this is irrelevant
     since stack splitting only works on GNU/Linux anyhow.  */
  writev (2, vec, 3);
  abort ();
}
260
261/* Allocate a new stack segment. FRAME_SIZE is the required frame
262 size. */
263
264static struct stack_segment *
265allocate_segment (size_t frame_size)
266{
267 static unsigned int static_pagesize;
268 static int use_guard_page;
269 unsigned int pagesize;
270 unsigned int overhead;
271 unsigned int allocate;
272 void *space;
273 struct stack_segment *pss;
274
275 pagesize = static_pagesize;
276 if (pagesize == 0)
277 {
278 unsigned int p;
279
280 pagesize = getpagesize ();
281
282#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
283 p = __sync_val_compare_and_swap (&static_pagesize, 0, pagesize);
284#else
285 /* Just hope this assignment is atomic. */
286 static_pagesize = pagesize;
287 p = 0;
288#endif
289
290 use_guard_page = getenv ("SPLIT_STACK_GUARD") != 0;
291
292 /* FIXME: I'm not sure this assert should be in the released
293 code. */
294 assert (p == 0 || p == pagesize);
295 }
296
297 overhead = sizeof (struct stack_segment);
298
299 allocate = pagesize;
300 if (allocate < MINSIGSTKSZ)
301 allocate = ((MINSIGSTKSZ + overhead + pagesize - 1)
302 & ~ (pagesize - 1));
303 if (allocate < frame_size)
304 allocate = ((frame_size + overhead + pagesize - 1)
305 & ~ (pagesize - 1));
306
307 if (use_guard_page)
308 allocate += pagesize;
309
310 /* FIXME: If this binary requires an executable stack, then we need
311 to set PROT_EXEC. Unfortunately figuring that out is complicated
312 and target dependent. We would need to use dl_iterate_phdr to
313 see if there is any object which does not have a PT_GNU_STACK
314 phdr, though only for architectures which use that mechanism. */
315 space = mmap (NULL, allocate, PROT_READ | PROT_WRITE,
316 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
317 if (space == MAP_FAILED)
318 {
319 static const char msg[] =
320 "unable to allocate additional stack space: errno ";
321 __morestack_fail (msg, sizeof msg - 1, errno);
322 }
323
324 if (use_guard_page)
325 {
326 void *guard;
327
328#ifdef STACK_GROWS_DOWNWARD
329 guard = space;
330 space = (char *) space + pagesize;
331#else
332 guard = space + allocate - pagesize;
333#endif
334
335 mprotect (guard, pagesize, PROT_NONE);
336 allocate -= pagesize;
337 }
338
339 pss = (struct stack_segment *) space;
340
341 pss->prev = __morestack_current_segment;
342 pss->next = NULL;
343 pss->size = allocate - overhead;
344 pss->dynamic_allocation = NULL;
345 pss->free_dynamic_allocation = NULL;
346 pss->extra = NULL;
347
348 if (__morestack_current_segment != NULL)
349 __morestack_current_segment->next = pss;
350 else
351 __morestack_segments = pss;
352
353 return pss;
354}
355
356/* Free a list of dynamic blocks. */
357
358static void
359free_dynamic_blocks (struct dynamic_allocation_blocks *p)
360{
361 while (p != NULL)
362 {
363 struct dynamic_allocation_blocks *next;
364
365 next = p->next;
366 free (p->block);
367 free (p);
368 p = next;
369 }
370}
371
372/* Merge two lists of dynamic blocks. */
373
374static struct dynamic_allocation_blocks *
375merge_dynamic_blocks (struct dynamic_allocation_blocks *a,
376 struct dynamic_allocation_blocks *b)
377{
378 struct dynamic_allocation_blocks **pp;
379
380 if (a == NULL)
381 return b;
382 if (b == NULL)
383 return a;
384 for (pp = &a->next; *pp != NULL; pp = &(*pp)->next)
385 ;
386 *pp = b;
387 return a;
388}
389
/* Release stack segments.  If FREE_DYNAMIC is non-zero, we also free
   any dynamic blocks.  Otherwise we return them (merged into one
   list) so a caller short on stack can dispose of them later.  */

struct dynamic_allocation_blocks *
__morestack_release_segments (struct stack_segment **pp, int free_dynamic)
{
  struct dynamic_allocation_blocks *ret;
  struct stack_segment *pss;

  ret = NULL;
  pss = *pp;
  while (pss != NULL)
    {
      struct stack_segment *next;
      unsigned int allocate;

      next = pss->next;

      /* Dispose of, or collect, the dynamic blocks attached to this
	 segment before unmapping it.  */
      if (pss->dynamic_allocation != NULL
	  || pss->free_dynamic_allocation != NULL)
	{
	  if (free_dynamic)
	    {
	      free_dynamic_blocks (pss->dynamic_allocation);
	      free_dynamic_blocks (pss->free_dynamic_allocation);
	    }
	  else
	    {
	      ret = merge_dynamic_blocks (pss->dynamic_allocation, ret);
	      ret = merge_dynamic_blocks (pss->free_dynamic_allocation, ret);
	    }
	}

      /* The segment was mapped with its header included, so unmap the
	 header plus the usable size.  */
      allocate = pss->size + sizeof (struct stack_segment);
      if (munmap (pss, allocate) < 0)
	{
	  static const char msg[] = "munmap of stack space failed: errno ";
	  __morestack_fail (msg, sizeof msg - 1, errno);
	}

      pss = next;
    }
  *pp = NULL;

  return ret;
}
436
/* This function is called by a processor specific function to set the
   initial stack pointer for a thread.  The operating system will
   always create a stack for a thread.  Here we record a stack pointer
   near the base of that stack.  The size argument lets the processor
   specific code estimate how much stack space is available on this
   initial stack.  */

void
__generic_morestack_set_initial_sp (void *sp, size_t len)
{
  /* The stack pointer most likely starts on a page boundary.  Adjust
     to the nearest 512 byte boundary.  It's not essential that we be
     precise here; getting it wrong will just leave some stack space
     unused.  */
#ifdef STACK_GROWS_DOWNWARD
  /* Round SP up to the next 512-byte boundary.  */
  sp = (void *) ((((__UINTPTR_TYPE__) sp + 511U) / 512U) * 512U);
#else
  /* Round SP down to the previous 512-byte boundary.  */
  sp = (void *) ((((__UINTPTR_TYPE__) sp - 511U) / 512U) * 512U);
#endif

  __morestack_initial_sp.sp = sp;
  __morestack_initial_sp.len = len;
  sigemptyset (&__morestack_initial_sp.mask);

  /* Precompute the mask of signals to block while splitting; see
     __morestack_block_signals.  */
  sigfillset (&__morestack_fullmask);
#ifdef __linux__
  /* On Linux, the first two real time signals are used by the NPTL
     threading library.  By taking them out of the set of signals, we
     avoiding copying the signal mask in pthread_sigmask.  More
     importantly, pthread_sigmask uses less stack space on x86_64.  */
  sigdelset (&__morestack_fullmask, __SIGRTMIN);
  sigdelset (&__morestack_fullmask, __SIGRTMIN + 1);
#endif
}
471
472/* This function is called by a processor specific function which is
473 run in the prologue when more stack is needed. The processor
474 specific function handles the details of saving registers and
475 frobbing the actual stack pointer. This function is responsible
476 for allocating a new stack segment and for copying a parameter
477 block from the old stack to the new one. On function entry
478 *PFRAME_SIZE is the size of the required stack frame--the returned
479 stack must be at least this large. On function exit *PFRAME_SIZE
480 is the amount of space remaining on the allocated stack. OLD_STACK
481 points at the parameters the old stack (really the current one
482 while this function is running). OLD_STACK is saved so that it can
483 be returned by a later call to __generic_releasestack. PARAM_SIZE
484 is the size in bytes of parameters to copy to the new stack. This
485 function returns a pointer to the new stack segment, pointing to
486 the memory after the parameters have been copied. The returned
487 value minus the returned *PFRAME_SIZE (or plus if the stack grows
488 upward) is the first address on the stack which should not be used.
489
490 This function is running on the old stack and has only a limited
491 amount of stack space available. */
492
493void *
494__generic_morestack (size_t *pframe_size, void *old_stack, size_t param_size)
495{
496 size_t frame_size = *pframe_size;
497 struct stack_segment *current;
498 struct stack_segment **pp;
499 struct dynamic_allocation_blocks *dynamic;
500 char *from;
501 char *to;
502 void *ret;
503 size_t i;
504
505 current = __morestack_current_segment;
506
507 pp = current != NULL ? &current->next : &__morestack_segments;
508 if (*pp != NULL && (*pp)->size < frame_size)
509 dynamic = __morestack_release_segments (pp, 0);
510 else
511 dynamic = NULL;
512 current = *pp;
513
514 if (current == NULL)
515 current = allocate_segment (frame_size);
516
517 current->old_stack = old_stack;
518
519 __morestack_current_segment = current;
520
521 if (dynamic != NULL)
522 {
523 /* Move the free blocks onto our list. We don't want to call
524 free here, as we are short on stack space. */
525 current->free_dynamic_allocation =
526 merge_dynamic_blocks (dynamic, current->free_dynamic_allocation);
527 }
528
529 *pframe_size = current->size - param_size;
530
531#ifdef STACK_GROWS_DOWNWARD
532 {
533 char *bottom = (char *) (current + 1) + current->size;
534 to = bottom - param_size;
535 ret = bottom - param_size;
536 }
537#else
538 to = current + 1;
539 ret = (char *) (current + 1) + param_size;
540#endif
541
542 /* We don't call memcpy to avoid worrying about the dynamic linker
543 trying to resolve it. */
544 from = (char *) old_stack;
545 for (i = 0; i < param_size; i++)
546 *to++ = *from++;
547
548 return ret;
549}
550
/* This function is called by a processor specific function when it is
   ready to release a stack segment.  We don't actually release the
   stack segment, we just move back to the previous one.  The current
   stack segment will still be available if we need it in
   __generic_morestack.  This returns a pointer to the new stack
   segment to use, which is the one saved by a previous call to
   __generic_morestack.  The processor specific function is then
   responsible for actually updating the stack pointer.  This sets
   *PAVAILABLE to the amount of stack space now available.  */

void *
__generic_releasestack (size_t *pavailable)
{
  struct stack_segment *current;
  void *old_stack;

  current = __morestack_current_segment;
  old_stack = current->old_stack;
  current = current->prev;
  __morestack_current_segment = current;

  if (current != NULL)
    {
      /* Returning onto an earlier segment: the space between the
	 saved stack pointer and the segment's far boundary is free.  */
#ifdef STACK_GROWS_DOWNWARD
      *pavailable = (char *) old_stack - (char *) (current + 1);
#else
      *pavailable = (char *) (current + 1) + current->size - (char *) old_stack;
#endif
    }
  else
    {
      size_t used;

      /* We have popped back to the original stack.  */
#ifdef STACK_GROWS_DOWNWARD
      if ((char *) old_stack >= (char *) __morestack_initial_sp.sp)
	used = 0;
      else
	used = (char *) __morestack_initial_sp.sp - (char *) old_stack;
#else
      if ((char *) old_stack <= (char *) __morestack_initial_sp.sp)
	used = 0;
      else
	used = (char *) old_stack - (char *) __morestack_initial_sp.sp;
#endif

      /* The recorded initial stack size is only an estimate; clamp to
	 zero rather than letting the subtraction wrap.  */
      if (used > __morestack_initial_sp.len)
	*pavailable = 0;
      else
	*pavailable = __morestack_initial_sp.len - used;
    }

  return old_stack;
}
605
606/* Block signals while splitting the stack. This avoids trouble if we
607 try to invoke a signal handler which itself wants to split the
608 stack. */
609
610extern int pthread_sigmask (int, const sigset_t *, sigset_t *)
611 __attribute__ ((weak));
612
613void
614__morestack_block_signals (void)
615{
616 if (pthread_sigmask)
617 pthread_sigmask (SIG_BLOCK, &__morestack_fullmask,
618 &__morestack_initial_sp.mask);
619 else
620 sigprocmask (SIG_BLOCK, &__morestack_fullmask,
621 &__morestack_initial_sp.mask);
622}
623
624/* Unblock signals while splitting the stack. */
625
626void
627__morestack_unblock_signals (void)
628{
629 if (pthread_sigmask)
630 pthread_sigmask (SIG_SETMASK, &__morestack_initial_sp.mask, NULL);
631 else
632 sigprocmask (SIG_SETMASK, &__morestack_initial_sp.mask, NULL);
633}
634
/* This function is called to allocate dynamic stack space, for alloca
   or a variably sized array.  This is a regular function with
   sufficient stack space, so we just use malloc to allocate the
   space.  We attach the allocated blocks to the current stack
   segment, so that they will eventually be reused or freed.  */

void *
__morestack_allocate_stack_space (size_t size)
{
  struct stack_segment *seg, *current;
  struct dynamic_allocation_blocks *p;

  /* We have to block signals to avoid getting confused if we get
     interrupted by a signal whose handler itself uses alloca or a
     variably sized array.  */
  __morestack_block_signals ();

  /* Since we don't want to call free while we are low on stack space,
     we may have a list of already allocated blocks waiting to be
     freed.  Release them all, unless we find one that is large
     enough.  We don't look at every block to see if one is large
     enough, just the first one, because we aren't trying to build a
     memory allocator here, we're just trying to speed up common
     cases.  */

  current = __morestack_current_segment;
  p = NULL;
  for (seg = __morestack_segments; seg != NULL; seg = seg->next)
    {
      p = seg->free_dynamic_allocation;
      if (p != NULL)
	{
	  if (p->size >= size)
	    {
	      /* The first free block is big enough: detach and reuse
		 it.  */
	      seg->free_dynamic_allocation = p->next;
	      break;
	    }

	  /* Otherwise discard this segment's whole free list.  */
	  free_dynamic_blocks (p);
	  seg->free_dynamic_allocation = NULL;
	  p = NULL;
	}
    }

  if (p == NULL)
    {
      /* We need to allocate additional memory.  */
      p = malloc (sizeof (*p));
      if (p == NULL)
	abort ();
      p->size = size;
      p->block = malloc (size);
      if (p->block == NULL)
	abort ();
    }

  /* If we are still on the initial stack, then we have a space leak.
     FIXME.  */
  if (current != NULL)
    {
      p->next = current->dynamic_allocation;
      current->dynamic_allocation = p;
    }

  __morestack_unblock_signals ();

  return p->block;
}
703
/* Find the stack segment for STACK and return the amount of space
   available.  This is used when unwinding the stack because of an
   exception, in order to reset the stack guard correctly.  */

size_t
__generic_findstack (void *stack)
{
  struct stack_segment *pss;
  size_t used;

  /* Walk backwards through the segment list looking for the segment
     containing STACK, and make it current if found.  (The containment
     test is approximate; it measures SIZE from the segment header
     rather than from the usable area.)  */
  for (pss = __morestack_current_segment; pss != NULL; pss = pss->prev)
    {
      if ((char *) pss < (char *) stack
	  && (char *) pss + pss->size > (char *) stack)
	{
	  __morestack_current_segment = pss;
#ifdef STACK_GROWS_DOWNWARD
	  return (char *) stack - (char *) (pss + 1);
#else
	  return (char *) (pss + 1) + pss->size - (char *) stack;
#endif
	}
    }

  /* We have popped back to the original stack.  */
#ifdef STACK_GROWS_DOWNWARD
  if ((char *) stack >= (char *) __morestack_initial_sp.sp)
    used = 0;
  else
    used = (char *) __morestack_initial_sp.sp - (char *) stack;
#else
  if ((char *) stack <= (char *) __morestack_initial_sp.sp)
    used = 0;
  else
    used = (char *) stack - (char *) __morestack_initial_sp.sp;
#endif

  /* The recorded initial stack size is only an estimate; clamp to
     zero.  */
  if (used > __morestack_initial_sp.len)
    return 0;
  else
    return __morestack_initial_sp.len - used;
}
746
747/* This function is called at program startup time to make sure that
748 mmap, munmap, and getpagesize are resolved if linking dynamically.
749 We want to resolve them while we have enough stack for them, rather
750 than calling into the dynamic linker while low on stack space. */
751
752void
753__morestack_load_mmap (void)
754{
755 /* Call with bogus values to run faster. We don't care if the call
756 fails. Pass __MORESTACK_CURRENT_SEGMENT to make sure that any
757 TLS accessor function is resolved. */
758 mmap (__morestack_current_segment, 0, PROT_READ, MAP_ANONYMOUS, -1, 0);
759 mprotect (NULL, 0, 0);
760 munmap (0, getpagesize ());
761}
762
/* This function may be used to iterate over the stack segments.
   This can be called like this.
     void *next_segment = NULL;
     void *next_sp = NULL;
     void *initial_sp = NULL;
     void *stack;
     size_t stack_size;
     while ((stack = __splitstack_find (next_segment, next_sp, &stack_size,
					&next_segment, &next_sp,
					&initial_sp)) != NULL)
       {
	 // Stack segment starts at stack and is stack_size bytes long.
       }

   There is no way to iterate over the stack segments of a different
   thread.  However, what is permitted is for one thread to call this
   with the first two values NULL, to pass next_segment, next_sp, and
   initial_sp to a different thread, and then to suspend one way or
   another.  A different thread may run the subsequent
   __splitstack_find iterations.  Of course, this will only work if the
   first thread is suspended during the __splitstack_find iterations.
   If not, the second thread will be looking at the stack while it is
   changing, and anything could happen.

   FIXME: This should be declared in some header file, but where?  */

void *
__splitstack_find (void *segment_arg, void *sp, size_t *len,
		   void **next_segment, void **next_sp,
		   void **initial_sp)
{
  struct stack_segment *segment;
  void *ret;
  char *nsp;

  /* SEGMENT_ARG == (void *) 1 is a sentinel meaning "report the
     initial, OS-supplied stack"; (void *) 2 means the iteration has
     finished.  */
  if (segment_arg == (void *) 1)
    {
      char *isp = (char *) *initial_sp;

      *next_segment = (void *) 2;
      *next_sp = NULL;
#ifdef STACK_GROWS_DOWNWARD
      if ((char *) sp >= isp)
	return NULL;
      *len = (char *) isp - (char *) sp;
      return sp;
#else
      if ((char *) sp <= (char *) isp)
	return NULL;
      *len = (char *) sp - (char *) isp;
      return (void *) isp;
#endif
    }
  else if (segment_arg == (void *) 2)
    return NULL;
  else if (segment_arg != NULL)
    segment = (struct stack_segment *) segment_arg;
  else
    {
      /* First call: use the address of a local as an approximation of
	 the current stack pointer and walk back through the segment
	 list until we find the segment containing it.  If none does,
	 we are on the initial stack; recurse with the sentinel.  */
      *initial_sp = __morestack_initial_sp.sp;
      segment = __morestack_current_segment;
      sp = (void *) &segment;
      while (1)
	{
	  if (segment == NULL)
	    return __splitstack_find ((void *) 1, sp, len, next_segment,
				      next_sp, initial_sp);
	  if ((char *) sp >= (char *) (segment + 1)
	      && (char *) sp <= (char *) (segment + 1) + segment->size)
	    break;
	  segment = segment->prev;
	}
    }

  if (segment->prev == NULL)
    *next_segment = (void *) 1;
  else
    *next_segment = segment->prev;

  /* The old_stack value is the address of the function parameters of
     the function which called __morestack.  So if f1 called f2 which
     called __morestack, the stack looks like this:

	 parameters       <- old_stack
	 return in f1
	 return in f2
	 data pushed by __morestack

     On x86, the data pushed by __morestack includes the saved value
     of the ebp/rbp register.  We want our caller to be able to see
     that value, which can not be found on any other stack.  So we
     adjust accordingly.  This may need to be tweaked for other
     targets.  */

  nsp = (char *) segment->old_stack;
#ifdef STACK_GROWS_DOWNWARD
  nsp -= 3 * sizeof (void *);
#else
  nsp += 3 * sizeof (void *);
#endif
  *next_sp = (void *) nsp;

#ifdef STACK_GROWS_DOWNWARD
  *len = (char *) (segment + 1) + segment->size - (char *) sp;
  ret = (void *) sp;
#else
  *len = (char *) sp - (char *) (segment + 1);
  ret = (void *) (segment + 1);
#endif

  return ret;
}
875
876#endif /* !defined (inhibit_libc) */