]> git.ipfire.org Git - thirdparty/gcc.git/blame - libgcc/generic-morestack.c
generic-morestack.c: Include <string.h>.
[thirdparty/gcc.git] / libgcc / generic-morestack.c
CommitLineData
7458026b 1/* Library support for -fsplit-stack. */
457186f6 2/* Copyright (C) 2009, 2010, 2011 Free Software Foundation, Inc.
7458026b
ILT
3 Contributed by Ian Lance Taylor <iant@google.com>.
4
5This file is part of GCC.
6
7GCC is free software; you can redistribute it and/or modify it under
8the terms of the GNU General Public License as published by the Free
9Software Foundation; either version 3, or (at your option) any later
10version.
11
12GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13WARRANTY; without even the implied warranty of MERCHANTABILITY or
14FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15for more details.
16
17Under Section 7 of GPL version 3, you are granted additional
18permissions described in the GCC Runtime Library Exception, version
193.1, as published by the Free Software Foundation.
20
21You should have received a copy of the GNU General Public License and
22a copy of the GCC Runtime Library Exception along with this program;
23see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
24<http://www.gnu.org/licenses/>. */
25
26#include "tconfig.h"
27#include "tsystem.h"
28#include "coretypes.h"
29#include "tm.h"
852b75ed 30#include "libgcc_tm.h"
7458026b
ILT
31
32/* If inhibit_libc is defined, we can not compile this file. The
33 effect is that people will not be able to use -fsplit-stack. That
34 is much better than failing the build particularly since people
35 will want to define inhibit_libc while building a compiler which
36 can build glibc. */
37
38#ifndef inhibit_libc
39
40#include <assert.h>
41#include <errno.h>
42#include <signal.h>
43#include <stdlib.h>
e14304ef 44#include <string.h>
7458026b
ILT
45#include <unistd.h>
46#include <sys/mman.h>
47#include <sys/uio.h>
48
49#include "generic-morestack.h"
50
e14304ef
ILT
51typedef unsigned uintptr_type __attribute__ ((mode (pointer)));
52
7458026b
ILT
/* This file contains subroutines that are used by code compiled with
   -fsplit-stack.  */

/* Declare functions to avoid warnings--there is no header file for
   these internal functions.  We give most of these functions the
   flatten attribute in order to minimize their stack usage--here we
   must minimize stack usage even at the cost of code size, and in
   general inlining everything will do that.  */

/* Record the base and length of the system-supplied stack for the
   current thread.  */
extern void
__generic_morestack_set_initial_sp (void *sp, size_t len)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

/* Allocate (or reuse) a stack segment big enough for *FRAME_SIZE and
   copy PARAM_SIZE bytes of parameters from OLD_STACK onto it.  */
extern void *
__generic_morestack (size_t *frame_size, void *old_stack, size_t param_size)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

/* Pop back to the previous stack segment; *PAVAILABLE gets the space
   remaining there.  */
extern void *
__generic_releasestack (size_t *pavailable)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

/* Block/unblock all signals around stack manipulation, so a signal
   handler cannot itself try to split the stack mid-operation.  */
extern void
__morestack_block_signals (void)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void
__morestack_unblock_signals (void)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

/* Locate the segment containing STACK and return the space available
   there; used when unwinding for exceptions.  */
extern size_t
__generic_findstack (void *stack)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

/* Pre-resolve mmap and friends at startup, while stack is plentiful.  */
extern void
__morestack_load_mmap (void)
  __attribute__ ((no_split_stack, visibility ("hidden")));

/* Allocate dynamic (alloca/VLA) space attached to the current
   segment.  */
extern void *
__morestack_allocate_stack_space (size_t size)
  __attribute__ ((visibility ("hidden")));

/* These are functions which -fsplit-stack code can call.  These are
   not called by the compiler, and are not hidden.  FIXME: These
   should be in some header file somewhere, somehow.  */

extern void *
__splitstack_find (void *, void *, size_t *, void **, void **, void **)
  __attribute__ ((visibility ("default")));

extern void
__splitstack_block_signals (int *, int *)
  __attribute__ ((visibility ("default")));

extern void
__splitstack_getcontext (void *context[10])
  __attribute__ ((no_split_stack, visibility ("default")));

extern void
__splitstack_setcontext (void *context[10])
  __attribute__ ((no_split_stack, visibility ("default")));

extern void *
__splitstack_makecontext (size_t, void *context[10], size_t *)
  __attribute__ ((visibility ("default")));

extern void
__splitstack_block_signals_context (void *context[10], int *, int *)
  __attribute__ ((visibility ("default")));

extern void *
__splitstack_find_context (void *context[10], size_t *, void **, void **,
			   void **)
  __attribute__ ((visibility ("default")));

/* These functions must be defined by the processor specific code.  */

extern void *__morestack_get_guard (void)
  __attribute__ ((no_split_stack, visibility ("hidden")));

extern void __morestack_set_guard (void *)
  __attribute__ ((no_split_stack, visibility ("hidden")));

extern void *__morestack_make_guard (void *, size_t)
  __attribute__ ((no_split_stack, visibility ("hidden")));

7458026b
ILT
/* When we allocate a stack segment we put this header at the
   start.  The usable stack area begins immediately after this
   struct (at address (this + 1)).  */

struct stack_segment
{
  /* The previous stack segment--when a function running on this stack
     segment returns, it will run on the previous one.  */
  struct stack_segment *prev;
  /* The next stack segment, if it has been allocated--when a function
     is running on this stack segment, the next one is not being
     used.  Kept around so it can be reused without another mmap.  */
  struct stack_segment *next;
  /* The total size of this stack segment (usable bytes, excluding
     this header).  */
  size_t size;
  /* The stack address when this stack was created.  This is used when
     popping the stack.  */
  void *old_stack;
  /* A list of memory blocks allocated by dynamic stack
     allocation (alloca / variably sized arrays).  */
  struct dynamic_allocation_blocks *dynamic_allocation;
  /* A list of dynamic memory blocks no longer needed.  */
  struct dynamic_allocation_blocks *free_dynamic_allocation;
  /* An extra pointer in case we need some more information some
     day.  */
  void *extra;
};
164
/* This structure holds the (approximate) initial stack pointer and
   size for the system supplied stack for a thread.  This is set when
   the thread is created.  We also store a sigset_t here to hold the
   signal mask while splitting the stack, since we don't want to store
   that on the stack.  */

struct initial_sp
{
  /* The initial stack pointer.  */
  void *sp;
  /* The stack length.  */
  size_t len;
  /* A signal mask, put here so that the thread can use it without
     needing stack space.  */
  sigset_t mask;
  /* Non-zero if we should not block signals.  This is a reversed flag
     so that the default zero value is the safe value.  The type is
     uintptr_type because it replaced one of the void * pointers in
     extra.  */
  uintptr_type dont_block_signals;
  /* Some extra space for later extensibility.  */
  void *extra[4];
};
188
/* A list of memory blocks allocated by dynamic stack allocation.
   This is used for code that calls alloca or uses variably sized
   arrays.  Each node owns both itself and the BLOCK it points to;
   both are released by free_dynamic_blocks.  */

struct dynamic_allocation_blocks
{
  /* The next block in the list.  */
  struct dynamic_allocation_blocks *next;
  /* The size of the allocated memory.  */
  size_t size;
  /* The allocated memory.  */
  void *block;
};
202
/* These thread local global variables must be shared by all split
   stack code across shared library boundaries.  Therefore, they have
   default visibility.  They have extensibility fields if needed for
   new versions.  If more radical changes are needed, new code can be
   written using new variable names, while still using the existing
   variables in a backward compatible manner.  Symbol versioning is
   also used, although, since these variables are only referenced by
   code in this file and generic-morestack-thread.c, it is likely that
   simply using new names will suffice.  */

/* The first stack segment allocated for this thread.  */

__thread struct stack_segment *__morestack_segments
  __attribute__ ((visibility ("default")));

/* The stack segment that we think we are currently using.  This will
   be correct in normal usage, but will be incorrect if an exception
   unwinds into a different stack segment or if longjmp jumps to a
   different stack segment.  */

__thread struct stack_segment *__morestack_current_segment
  __attribute__ ((visibility ("default")));

/* The initial stack pointer and size for this thread.  */

__thread struct initial_sp __morestack_initial_sp
  __attribute__ ((visibility ("default")));

/* A static signal mask, to avoid taking up stack space.  Filled in
   once by __generic_morestack_set_initial_sp.  */

static sigset_t __morestack_fullmask;
234
/* Format VAL as a decimal string without using much stack space.
   Digits are written backward into the tail of BUF (length BUFLEN);
   the number of characters produced is stored in *PRINT_LEN and a
   pointer to the first character of the result (inside BUF) is
   returned.  We use this instead of sprintf because sprintf would
   require too much stack space.  */

static char *
print_int (int val, char *buf, int buflen, size_t *print_len)
{
  unsigned int mag;
  int neg;
  int pos;

  /* Work with the magnitude as unsigned so INT_MIN negates safely.  */
  neg = val < 0;
  mag = (unsigned int) val;
  if (neg)
    mag = - mag;

  pos = buflen;
  do
    {
      --pos;
      buf[pos] = '0' + (mag % 10);
      mag /= 10;
    }
  while (mag != 0 && pos > 0);

  if (neg)
    {
      /* Prepend the sign if there is room; otherwise it overwrites
	 the leading digit, matching the original truncation rule.  */
      if (pos > 0)
	--pos;
      buf[pos] = '-';
    }

  *print_len = buflen - pos;
  return buf + pos;
}
275
/* Print the string MSG/LEN, the errno number ERR, and a newline on
   stderr, then crash.  Uses writev plus print_int rather than stdio,
   because stdio and strerror could consume too much stack space.  */

void
__morestack_fail (const char *, size_t, int) __attribute__ ((noreturn));

void
__morestack_fail (const char *msg, size_t len, int err)
{
  static const char nl[] = "\n";
  char numbuf[24];
  struct iovec vec[3];
  union { char *p; const char *cp; } u;

  /* iov_base is not const-qualified; launder the const away through a
     union rather than a cast.  */
  u.cp = msg;
  vec[0].iov_base = u.p;
  vec[0].iov_len = len;
  /* We can't call strerror, because it may try to translate the error
     message, and that would use too much stack space.  */
  vec[1].iov_base = print_int (err, numbuf, sizeof numbuf, &vec[1].iov_len);
  u.cp = nl;
  vec[2].iov_base = u.p;
  vec[2].iov_len = sizeof nl - 1;
  /* FIXME: On systems without writev we need to issue three write
     calls, or punt on printing errno.  For now this is irrelevant
     since stack splitting only works on GNU/Linux anyhow.  */
  writev (2, vec, 3);
  abort ();
}
305
/* Allocate a new stack segment via mmap.  FRAME_SIZE is the required
   frame size; the segment is rounded up to whole pages and is at
   least MINSIGSTKSZ bytes.  If the SPLIT_STACK_GUARD environment
   variable is set, a PROT_NONE guard page is placed at the limit of
   the stack to catch overruns.  Returns the segment header; the
   usable stack follows it.  */

static struct stack_segment *
allocate_segment (size_t frame_size)
{
  /* Cached page size, shared by all threads; 0 until first use.  */
  static unsigned int static_pagesize;
  /* Whether to append a guard page (set from the environment on the
     first call; benign race if two threads initialize it).  */
  static int use_guard_page;
  unsigned int pagesize;
  unsigned int overhead;
  unsigned int allocate;
  void *space;
  struct stack_segment *pss;

  pagesize = static_pagesize;
  if (pagesize == 0)
    {
      unsigned int p;

      pagesize = getpagesize ();

#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
      p = __sync_val_compare_and_swap (&static_pagesize, 0, pagesize);
#else
      /* Just hope this assignment is atomic.  */
      static_pagesize = pagesize;
      p = 0;
#endif

      use_guard_page = getenv ("SPLIT_STACK_GUARD") != 0;

      /* FIXME: I'm not sure this assert should be in the released
	 code.  */
      assert (p == 0 || p == pagesize);
    }

  /* Room for the segment header itself.  */
  overhead = sizeof (struct stack_segment);

  allocate = pagesize;
  if (allocate < MINSIGSTKSZ)
    allocate = ((MINSIGSTKSZ + overhead + pagesize - 1)
		& ~ (pagesize - 1));
  if (allocate < frame_size)
    allocate = ((frame_size + overhead + pagesize - 1)
		& ~ (pagesize - 1));

  if (use_guard_page)
    allocate += pagesize;

  /* FIXME: If this binary requires an executable stack, then we need
     to set PROT_EXEC.  Unfortunately figuring that out is complicated
     and target dependent.  We would need to use dl_iterate_phdr to
     see if there is any object which does not have a PT_GNU_STACK
     phdr, though only for architectures which use that mechanism.  */
  space = mmap (NULL, allocate, PROT_READ | PROT_WRITE,
		MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (space == MAP_FAILED)
    {
      static const char msg[] =
	"unable to allocate additional stack space: errno ";
      __morestack_fail (msg, sizeof msg - 1, errno);
    }

  if (use_guard_page)
    {
      void *guard;

      /* The guard page goes at the limit toward which the stack
	 grows: the low end for a downward stack, the high end
	 otherwise.  */
#ifdef STACK_GROWS_DOWNWARD
      guard = space;
      space = (char *) space + pagesize;
#else
      /* NOTE(review): void * arithmetic is a GCC extension; fine here
	 since libgcc is built with GCC, but a (char *) cast would be
	 standard C.  */
      guard = space + allocate - pagesize;
#endif

      mprotect (guard, pagesize, PROT_NONE);
      /* ALLOCATE now reflects only the usable mapping; the guard page
	 is no longer counted.  NOTE(review): as a consequence the
	 guard page is not included in the munmap length computed from
	 pss->size in __morestack_release_segments — confirm whether
	 that page is intentionally leaked.  */
      allocate -= pagesize;
    }

  pss = (struct stack_segment *) space;

  pss->prev = NULL;
  pss->next = NULL;
  pss->size = allocate - overhead;
  pss->dynamic_allocation = NULL;
  pss->free_dynamic_allocation = NULL;
  pss->extra = NULL;

  return pss;
}
395
396/* Free a list of dynamic blocks. */
397
398static void
399free_dynamic_blocks (struct dynamic_allocation_blocks *p)
400{
401 while (p != NULL)
402 {
403 struct dynamic_allocation_blocks *next;
404
405 next = p->next;
406 free (p->block);
407 free (p);
408 p = next;
409 }
410}
411
412/* Merge two lists of dynamic blocks. */
413
414static struct dynamic_allocation_blocks *
415merge_dynamic_blocks (struct dynamic_allocation_blocks *a,
416 struct dynamic_allocation_blocks *b)
417{
418 struct dynamic_allocation_blocks **pp;
419
420 if (a == NULL)
421 return b;
422 if (b == NULL)
423 return a;
424 for (pp = &a->next; *pp != NULL; pp = &(*pp)->next)
425 ;
426 *pp = b;
427 return a;
428}
429
/* Release stack segments.  If FREE_DYNAMIC is non-zero, we also free
   any dynamic blocks.  Otherwise we return them (merged into one
   list) so the caller can keep them without calling free while low
   on stack.  *PP is set to NULL on return.  */

struct dynamic_allocation_blocks *
__morestack_release_segments (struct stack_segment **pp, int free_dynamic)
{
  struct dynamic_allocation_blocks *ret;
  struct stack_segment *pss;

  ret = NULL;
  pss = *pp;
  while (pss != NULL)
    {
      struct stack_segment *next;
      unsigned int allocate;

      next = pss->next;

      if (pss->dynamic_allocation != NULL
	  || pss->free_dynamic_allocation != NULL)
	{
	  if (free_dynamic)
	    {
	      free_dynamic_blocks (pss->dynamic_allocation);
	      free_dynamic_blocks (pss->free_dynamic_allocation);
	    }
	  else
	    {
	      /* Hand the blocks back to the caller instead of calling
		 free here.  */
	      ret = merge_dynamic_blocks (pss->dynamic_allocation, ret);
	      ret = merge_dynamic_blocks (pss->free_dynamic_allocation, ret);
	    }
	}

      /* Reconstruct the mapping length from the usable size plus the
	 header.  NOTE(review): if allocate_segment added a guard page
	 it is not counted in pss->size, so that page appears to stay
	 mapped after this munmap — confirm intent.  */
      allocate = pss->size + sizeof (struct stack_segment);
      if (munmap (pss, allocate) < 0)
	{
	  static const char msg[] = "munmap of stack space failed: errno ";
	  __morestack_fail (msg, sizeof msg - 1, errno);
	}

      pss = next;
    }
  *pp = NULL;

  return ret;
}
476
/* This function is called by a processor specific function to set the
   initial stack pointer for a thread.  The operating system will
   always create a stack for a thread.  Here we record a stack pointer
   near the base of that stack.  The size argument lets the processor
   specific code estimate how much stack space is available on this
   initial stack.  Also initializes the full signal mask used while
   splitting the stack.  */

void
__generic_morestack_set_initial_sp (void *sp, size_t len)
{
  /* The stack pointer most likely starts on a page boundary.  Adjust
     to the nearest 512 byte boundary.  It's not essential that we be
     precise here; getting it wrong will just leave some stack space
     unused.  */
#ifdef STACK_GROWS_DOWNWARD
  sp = (void *) ((((__UINTPTR_TYPE__) sp + 511U) / 512U) * 512U);
#else
  sp = (void *) ((((__UINTPTR_TYPE__) sp - 511U) / 512U) * 512U);
#endif

  __morestack_initial_sp.sp = sp;
  __morestack_initial_sp.len = len;
  sigemptyset (&__morestack_initial_sp.mask);

  sigfillset (&__morestack_fullmask);
#ifdef __linux__
  /* On Linux, the first two real time signals are used by the NPTL
     threading library.  By taking them out of the set of signals, we
     avoiding copying the signal mask in pthread_sigmask.  More
     importantly, pthread_sigmask uses less stack space on x86_64.  */
  sigdelset (&__morestack_fullmask, __SIGRTMIN);
  sigdelset (&__morestack_fullmask, __SIGRTMIN + 1);
#endif
}
511
512/* This function is called by a processor specific function which is
513 run in the prologue when more stack is needed. The processor
514 specific function handles the details of saving registers and
515 frobbing the actual stack pointer. This function is responsible
516 for allocating a new stack segment and for copying a parameter
517 block from the old stack to the new one. On function entry
518 *PFRAME_SIZE is the size of the required stack frame--the returned
519 stack must be at least this large. On function exit *PFRAME_SIZE
520 is the amount of space remaining on the allocated stack. OLD_STACK
521 points at the parameters the old stack (really the current one
522 while this function is running). OLD_STACK is saved so that it can
523 be returned by a later call to __generic_releasestack. PARAM_SIZE
524 is the size in bytes of parameters to copy to the new stack. This
525 function returns a pointer to the new stack segment, pointing to
526 the memory after the parameters have been copied. The returned
527 value minus the returned *PFRAME_SIZE (or plus if the stack grows
528 upward) is the first address on the stack which should not be used.
529
530 This function is running on the old stack and has only a limited
531 amount of stack space available. */
532
533void *
534__generic_morestack (size_t *pframe_size, void *old_stack, size_t param_size)
535{
536 size_t frame_size = *pframe_size;
537 struct stack_segment *current;
538 struct stack_segment **pp;
539 struct dynamic_allocation_blocks *dynamic;
540 char *from;
541 char *to;
542 void *ret;
543 size_t i;
544
545 current = __morestack_current_segment;
546
547 pp = current != NULL ? &current->next : &__morestack_segments;
548 if (*pp != NULL && (*pp)->size < frame_size)
549 dynamic = __morestack_release_segments (pp, 0);
550 else
551 dynamic = NULL;
552 current = *pp;
553
554 if (current == NULL)
e14304ef
ILT
555 {
556 current = allocate_segment (frame_size + param_size);
557 current->prev = __morestack_current_segment;
558 *pp = current;
559 }
7458026b
ILT
560
561 current->old_stack = old_stack;
562
563 __morestack_current_segment = current;
564
565 if (dynamic != NULL)
566 {
567 /* Move the free blocks onto our list. We don't want to call
568 free here, as we are short on stack space. */
569 current->free_dynamic_allocation =
570 merge_dynamic_blocks (dynamic, current->free_dynamic_allocation);
571 }
572
573 *pframe_size = current->size - param_size;
574
575#ifdef STACK_GROWS_DOWNWARD
576 {
577 char *bottom = (char *) (current + 1) + current->size;
578 to = bottom - param_size;
579 ret = bottom - param_size;
580 }
581#else
582 to = current + 1;
583 ret = (char *) (current + 1) + param_size;
584#endif
585
586 /* We don't call memcpy to avoid worrying about the dynamic linker
587 trying to resolve it. */
588 from = (char *) old_stack;
589 for (i = 0; i < param_size; i++)
590 *to++ = *from++;
591
592 return ret;
593}
594
595/* This function is called by a processor specific function when it is
596 ready to release a stack segment. We don't actually release the
597 stack segment, we just move back to the previous one. The current
598 stack segment will still be available if we need it in
599 __generic_morestack. This returns a pointer to the new stack
600 segment to use, which is the one saved by a previous call to
601 __generic_morestack. The processor specific function is then
602 responsible for actually updating the stack pointer. This sets
603 *PAVAILABLE to the amount of stack space now available. */
604
605void *
606__generic_releasestack (size_t *pavailable)
607{
608 struct stack_segment *current;
609 void *old_stack;
610
611 current = __morestack_current_segment;
612 old_stack = current->old_stack;
613 current = current->prev;
614 __morestack_current_segment = current;
615
616 if (current != NULL)
617 {
618#ifdef STACK_GROWS_DOWNWARD
619 *pavailable = (char *) old_stack - (char *) (current + 1);
620#else
621 *pavailable = (char *) (current + 1) + current->size - (char *) old_stack;
622#endif
623 }
624 else
625 {
626 size_t used;
627
628 /* We have popped back to the original stack. */
629#ifdef STACK_GROWS_DOWNWARD
630 if ((char *) old_stack >= (char *) __morestack_initial_sp.sp)
631 used = 0;
632 else
633 used = (char *) __morestack_initial_sp.sp - (char *) old_stack;
634#else
635 if ((char *) old_stack <= (char *) __morestack_initial_sp.sp)
636 used = 0;
637 else
638 used = (char *) old_stack - (char *) __morestack_initial_sp.sp;
639#endif
640
641 if (used > __morestack_initial_sp.len)
642 *pavailable = 0;
643 else
644 *pavailable = __morestack_initial_sp.len - used;
645 }
646
647 return old_stack;
648}
649
/* Block signals while splitting the stack.  This avoids trouble if we
   try to invoke a signal handler which itself wants to split the
   stack.  */

/* Weak reference: resolves to NULL when the program is not linked
   against the pthread library, in which case we fall back to
   sigprocmask.  */
extern int pthread_sigmask (int, const sigset_t *, sigset_t *)
  __attribute__ ((weak));

void
__morestack_block_signals (void)
{
  /* Honor the user's request (via __splitstack_block_signals) to skip
     blocking entirely.  */
  if (__morestack_initial_sp.dont_block_signals)
    ;
  else if (pthread_sigmask)
    pthread_sigmask (SIG_BLOCK, &__morestack_fullmask,
		     &__morestack_initial_sp.mask);
  else
    sigprocmask (SIG_BLOCK, &__morestack_fullmask,
		 &__morestack_initial_sp.mask);
}
669
/* Unblock signals while splitting the stack, restoring the mask saved
   by __morestack_block_signals.  */

void
__morestack_unblock_signals (void)
{
  /* Nothing to restore if blocking was disabled.  */
  if (__morestack_initial_sp.dont_block_signals)
    ;
  else if (pthread_sigmask)
    pthread_sigmask (SIG_SETMASK, &__morestack_initial_sp.mask, NULL);
  else
    sigprocmask (SIG_SETMASK, &__morestack_initial_sp.mask, NULL);
}
682
/* This function is called to allocate dynamic stack space, for alloca
   or a variably sized array.  This is a regular function with
   sufficient stack space, so we just use malloc to allocate the
   space.  We attach the allocated blocks to the current stack
   segment, so that they will eventually be reused or freed.  */

void *
__morestack_allocate_stack_space (size_t size)
{
  struct stack_segment *seg, *current;
  struct dynamic_allocation_blocks *p;

  /* We have to block signals to avoid getting confused if we get
     interrupted by a signal whose handler itself uses alloca or a
     variably sized array.  */
  __morestack_block_signals ();

  /* Since we don't want to call free while we are low on stack space,
     we may have a list of already allocated blocks waiting to be
     freed.  Release them all, unless we find one that is large
     enough.  We don't look at every block to see if one is large
     enough, just the first one, because we aren't trying to build a
     memory allocator here, we're just trying to speed up common
     cases.  */

  current = __morestack_current_segment;
  p = NULL;
  for (seg = __morestack_segments; seg != NULL; seg = seg->next)
    {
      p = seg->free_dynamic_allocation;
      if (p != NULL)
	{
	  if (p->size >= size)
	    {
	      /* Reuse this block.  Note p->size keeps the (possibly
		 larger) original allocation size.  */
	      seg->free_dynamic_allocation = p->next;
	      break;
	    }

	  /* First free block is too small: drop the whole free list
	     for this segment and keep looking.  */
	  free_dynamic_blocks (p);
	  seg->free_dynamic_allocation = NULL;
	  p = NULL;
	}
    }

  if (p == NULL)
    {
      /* We need to allocate additional memory.  */
      p = malloc (sizeof (*p));
      if (p == NULL)
	abort ();
      p->size = size;
      p->block = malloc (size);
      if (p->block == NULL)
	abort ();
    }

  /* If we are still on the initial stack, then we have a space leak.
     FIXME.  */
  if (current != NULL)
    {
      p->next = current->dynamic_allocation;
      current->dynamic_allocation = p;
    }

  __morestack_unblock_signals ();

  return p->block;
}
751
/* Find the stack segment for STACK and return the amount of space
   available.  This is used when unwinding the stack because of an
   exception, in order to reset the stack guard correctly.  Also
   updates __morestack_current_segment to the segment found.  */

size_t
__generic_findstack (void *stack)
{
  struct stack_segment *pss;
  size_t used;

  for (pss = __morestack_current_segment; pss != NULL; pss = pss->prev)
    {
      /* NOTE(review): the upper bound uses (char *) pss + pss->size,
	 while the usable area actually spans (pss + 1) .. (char *)
	 (pss + 1) + pss->size; confirm whether excluding the last
	 sizeof (struct stack_segment) bytes from the match is
	 intentional slack.  */
      if ((char *) pss < (char *) stack
	  && (char *) pss + pss->size > (char *) stack)
	{
	  __morestack_current_segment = pss;
#ifdef STACK_GROWS_DOWNWARD
	  return (char *) stack - (char *) (pss + 1);
#else
	  return (char *) (pss + 1) + pss->size - (char *) stack;
#endif
	}
    }

  /* We have popped back to the original stack.  */

  /* The initial sp may not have been recorded yet.  */
  if (__morestack_initial_sp.sp == NULL)
    return 0;

#ifdef STACK_GROWS_DOWNWARD
  if ((char *) stack >= (char *) __morestack_initial_sp.sp)
    used = 0;
  else
    used = (char *) __morestack_initial_sp.sp - (char *) stack;
#else
  if ((char *) stack <= (char *) __morestack_initial_sp.sp)
    used = 0;
  else
    used = (char *) stack - (char *) __morestack_initial_sp.sp;
#endif

  if (used > __morestack_initial_sp.len)
    return 0;
  else
    return __morestack_initial_sp.len - used;
}
798
/* This function is called at program startup time to make sure that
   mmap, munmap, and getpagesize are resolved if linking dynamically.
   We want to resolve them while we have enough stack for them, rather
   than calling into the dynamic linker while low on stack space.  */

void
__morestack_load_mmap (void)
{
  /* Call with bogus values to run faster.  We don't care if the call
     fails.  Pass __MORESTACK_CURRENT_SEGMENT to make sure that any
     TLS accessor function is resolved.  */
  mmap (__morestack_current_segment, 0, PROT_READ, MAP_ANONYMOUS, -1, 0);
  mprotect (NULL, 0, 0);
  munmap (0, getpagesize ());
}
814
/* This function may be used to iterate over the stack segments.
   This can be called like this.
     void *next_segment = NULL;
     void *next_sp = NULL;
     void *initial_sp = NULL;
     void *stack;
     size_t stack_size;
     while ((stack = __splitstack_find (next_segment, next_sp, &stack_size,
					&next_segment, &next_sp,
					&initial_sp)) != NULL)
       {
	 // Stack segment starts at stack and is stack_size bytes long.
       }

   There is no way to iterate over the stack segments of a different
   thread.  However, what is permitted is for one thread to call this
   with the first two values NULL, to pass next_segment, next_sp, and
   initial_sp to a different thread, and then to suspend one way or
   another.  A different thread may run the subsequent
   __morestack_find iterations.  Of course, this will only work if the
   first thread is suspended during the __morestack_find iterations.
   If not, the second thread will be looking at the stack while it is
   changing, and anything could happen.

   FIXME: This should be declared in some header file, but where?  */

void *
__splitstack_find (void *segment_arg, void *sp, size_t *len,
		   void **next_segment, void **next_sp,
		   void **initial_sp)
{
  struct stack_segment *segment;
  void *ret;
  char *nsp;

  /* SEGMENT_ARG protocol: NULL means "start from the current thread's
     state"; the sentinel (void *) 1 means "report the initial system
     stack"; (void *) 2 means "iteration finished"; anything else is a
     struct stack_segment * to report next.  */
  if (segment_arg == (void *) (uintptr_type) 1)
    {
      char *isp = (char *) *initial_sp;

      if (isp == NULL)
	return NULL;

      *next_segment = (void *) (uintptr_type) 2;
      *next_sp = NULL;
#ifdef STACK_GROWS_DOWNWARD
      if ((char *) sp >= isp)
	return NULL;
      *len = (char *) isp - (char *) sp;
      return sp;
#else
      if ((char *) sp <= (char *) isp)
	return NULL;
      *len = (char *) sp - (char *) isp;
      return (void *) isp;
#endif
    }
  else if (segment_arg == (void *) (uintptr_type) 2)
    return NULL;
  else if (segment_arg != NULL)
    segment = (struct stack_segment *) segment_arg;
  else
    {
      /* First call: locate the segment we are currently running on by
	 testing the address of a local variable against each
	 segment's bounds.  */
      *initial_sp = __morestack_initial_sp.sp;
      segment = __morestack_current_segment;
      sp = (void *) &segment;
      while (1)
	{
	  if (segment == NULL)
	    return __splitstack_find ((void *) (uintptr_type) 1, sp, len,
				      next_segment, next_sp, initial_sp);
	  if ((char *) sp >= (char *) (segment + 1)
	      && (char *) sp <= (char *) (segment + 1) + segment->size)
	    break;
	  segment = segment->prev;
	}
    }

  if (segment->prev == NULL)
    *next_segment = (void *) (uintptr_type) 1;
  else
    *next_segment = segment->prev;

  /* The old_stack value is the address of the function parameters of
     the function which called __morestack.  So if f1 called f2 which
     called __morestack, the stack looks like this:

	 parameters       <- old_stack
	 return in f1
	 return in f2
	 registers pushed by __morestack

     The registers pushed by __morestack may not be visible on any
     other stack, if we are being called by a signal handler
     immediately after the call to __morestack_unblock_signals.  We
     want to adjust our return value to include those registers.  This
     is target dependent.  */

  nsp = (char *) segment->old_stack;

  /* Number of registers pushed by the target's __morestack stub.  */
#if defined (__x86_64__)
  nsp -= 12 * sizeof (void *);
#elif defined (__i386__)
  nsp -= 6 * sizeof (void *);
#else
#error "unrecognized target"
#endif

  *next_sp = (void *) nsp;

#ifdef STACK_GROWS_DOWNWARD
  *len = (char *) (segment + 1) + segment->size - (char *) sp;
  ret = (void *) sp;
#else
  *len = (char *) sp - (char *) (segment + 1);
  ret = (void *) (segment + 1);
#endif

  return ret;
}
934
e14304ef
ILT
935/* Tell the split stack code whether it has to block signals while
936 manipulating the stack. This is for programs in which some threads
937 block all signals. If a thread already blocks signals, there is no
938 need for the split stack code to block them as well. If NEW is not
939 NULL, then if *NEW is non-zero signals will be blocked while
940 splitting the stack, otherwise they will not. If OLD is not NULL,
941 *OLD will be set to the old value. */
942
943void
944__splitstack_block_signals (int *new, int *old)
945{
946 if (old != NULL)
947 *old = __morestack_initial_sp.dont_block_signals ? 0 : 1;
948 if (new != NULL)
949 __morestack_initial_sp.dont_block_signals = *new ? 0 : 1;
950}
951
/* The offsets into the arrays used by __splitstack_getcontext and
   __splitstack_setcontext.  Slots 7-9 are reserved for future
   use.  */

enum __splitstack_context_offsets
{
  MORESTACK_SEGMENTS = 0,
  CURRENT_SEGMENT = 1,
  CURRENT_STACK = 2,
  STACK_GUARD = 3,
  INITIAL_SP = 4,
  INITIAL_SP_LEN = 5,
  BLOCK_SIGNALS = 6,

  NUMBER_OFFSETS = 10
};
967
/* Get the current split stack context.  This may be used for
   coroutine switching, similar to getcontext.  The argument should
   have at least 10 void *pointers for extensibility, although we
   don't currently use all of them.  This would normally be called
   immediately before a call to getcontext or swapcontext or
   setjmp.  */

void
__splitstack_getcontext (void *context[NUMBER_OFFSETS])
{
  memset (context, 0, NUMBER_OFFSETS * sizeof (void *));
  context[MORESTACK_SEGMENTS] = (void *) __morestack_segments;
  context[CURRENT_SEGMENT] = (void *) __morestack_current_segment;
  /* The address of a local serves as an approximation of the current
     stack pointer.  */
  context[CURRENT_STACK] = (void *) &context;
  context[STACK_GUARD] = __morestack_get_guard ();
  context[INITIAL_SP] = (void *) __morestack_initial_sp.sp;
  context[INITIAL_SP_LEN] = (void *) (uintptr_type) __morestack_initial_sp.len;
  context[BLOCK_SIGNALS] = (void *) __morestack_initial_sp.dont_block_signals;
}
987
/* Set the current split stack context.  The argument should be a
   context previously passed to __splitstack_getcontext.  This would
   normally be called immediately after a call to getcontext or
   swapcontext or setjmp if something jumped to it.  Note that the
   CURRENT_STACK slot is informational and is not restored here.  */

void
__splitstack_setcontext (void *context[NUMBER_OFFSETS])
{
  __morestack_segments = (struct stack_segment *) context[MORESTACK_SEGMENTS];
  __morestack_current_segment =
    (struct stack_segment *) context[CURRENT_SEGMENT];
  __morestack_set_guard (context[STACK_GUARD]);
  __morestack_initial_sp.sp = context[INITIAL_SP];
  __morestack_initial_sp.len = (size_t) context[INITIAL_SP_LEN];
  __morestack_initial_sp.dont_block_signals =
    (uintptr_type) context[BLOCK_SIGNALS];
}
1005
/* Create a new split stack context.  This will allocate a new stack
   segment which may be used by a coroutine.  STACK_SIZE is the
   minimum size of the new stack.  The caller is responsible for
   actually setting the stack pointer.  This would normally be called
   before a call to makecontext, and the returned stack pointer and
   size would be used to set the uc_stack field.  A function called
   via makecontext on a stack created by __splitstack_makecontext may
   not return.  Note that the returned pointer points to the lowest
   address in the stack space, and thus may not be the value to which
   to set the stack pointer.  */

void *
__splitstack_makecontext (size_t stack_size, void *context[NUMBER_OFFSETS],
			  size_t *size)
{
  struct stack_segment *segment;
  void *initial_sp;

  /* Zero the whole context so unused slots are well defined.  */
  memset (context, 0, NUMBER_OFFSETS * sizeof (void *));
  /* NOTE(review): the result of allocate_segment is not checked for
     NULL -- presumably it aborts on allocation failure; confirm
     against its definition earlier in this file.  */
  segment = allocate_segment (stack_size);
  /* The new segment is both the entire segment list and the current
     segment of this context.  */
  context[MORESTACK_SEGMENTS] = segment;
  context[CURRENT_SEGMENT] = segment;
  /* The usable stack space starts just past the segment descriptor.
     The initial stack pointer is at the high end of that space when
     the stack grows downward, at the low end otherwise.  */
#ifdef STACK_GROWS_DOWNWARD
  initial_sp = (void *) ((char *) (segment + 1) + segment->size);
#else
  initial_sp = (void *) (segment + 1);
#endif
  context[STACK_GUARD] = __morestack_make_guard (initial_sp, segment->size);
  /* INITIAL_SP stays NULL: this context has never run on an initial
     (non-split) stack.  */
  context[INITIAL_SP] = NULL;
  context[INITIAL_SP_LEN] = 0;
  *size = segment->size;
  /* Return the lowest address of the stack space, regardless of the
     direction of stack growth.  */
  return (void *) (segment + 1);
}
1039
1040/* Like __splitstack_block_signals, but operating on CONTEXT, rather
1041 than on the current state. */
1042
1043void
1044__splitstack_block_signals_context (void *context[NUMBER_OFFSETS], int *new,
1045 int *old)
1046{
1047 if (old != NULL)
1048 *old = ((uintptr_type) context[BLOCK_SIGNALS]) != 0 ? 0 : 1;
1049 if (new != NULL)
1050 context[BLOCK_SIGNALS] = (void *) (uintptr_type) (*new ? 0 : 1);
1051}
1052
/* Find the stack segments associated with a split stack context.
   This will return the address of the first stack segment and set
   *STACK_SIZE to its size.  It will set next_segment, next_sp, and
   initial_sp which may be passed to __splitstack_find to find the
   remaining segments.  Returns NULL if the context has no stack to
   report.  */

void *
__splitstack_find_context (void *context[NUMBER_OFFSETS], size_t *stack_size,
			   void **next_segment, void **next_sp,
			   void **initial_sp)
{
  void *sp;
  struct stack_segment *segment;

  *initial_sp = context[INITIAL_SP];

  /* CURRENT_STACK holds the address of a local from the saving call
     to __splitstack_getcontext; NULL means the context was never
     saved from a running stack.  */
  sp = context[CURRENT_STACK];
  if (sp == NULL)
    {
      /* Most likely this context was created but was never used.  The
	 value 2 is a code used by __splitstack_find to mean that we
	 have reached the end of the list of stacks.  */
      *next_segment = (void *) (uintptr_type) 2;
      *next_sp = NULL;
      *initial_sp = NULL;
      return NULL;
    }

  segment = context[CURRENT_SEGMENT];
  if (segment == NULL)
    {
      /* Most likely this context was saved by a thread which was not
	 created using __splitstack_makecontext and which has never
	 split the stack.  The value 1 is a code used by
	 __splitstack_find to look at the initial stack.  */
      segment = (struct stack_segment *) (uintptr_type) 1;
    }

  /* Delegate the actual walk to __splitstack_find, passing the
     sentinel codes established above.  */
  return __splitstack_find (segment, sp, stack_size, next_segment, next_sp,
			    initial_sp);
}
1094
7458026b 1095#endif /* !defined (inhibit_libc) */