/* libgcc/generic-morestack.c -- runtime support for -fsplit-stack.  */
7458026b 1/* Library support for -fsplit-stack. */
8d9254fc 2/* Copyright (C) 2009-2020 Free Software Foundation, Inc.
7458026b
ILT
3 Contributed by Ian Lance Taylor <iant@google.com>.
4
5This file is part of GCC.
6
7GCC is free software; you can redistribute it and/or modify it under
8the terms of the GNU General Public License as published by the Free
9Software Foundation; either version 3, or (at your option) any later
10version.
11
12GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13WARRANTY; without even the implied warranty of MERCHANTABILITY or
14FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15for more details.
16
17Under Section 7 of GPL version 3, you are granted additional
18permissions described in the GCC Runtime Library Exception, version
193.1, as published by the Free Software Foundation.
20
21You should have received a copy of the GNU General Public License and
22a copy of the GCC Runtime Library Exception along with this program;
23see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
24<http://www.gnu.org/licenses/>. */
25
aac9480d
MS
26#pragma GCC optimize ("no-isolate-erroneous-paths-dereference")
27
0f0fd745
AM
28/* powerpc 32-bit not supported. */
29#if !defined __powerpc__ || defined __powerpc64__
30
7458026b
ILT
31#include "tconfig.h"
32#include "tsystem.h"
33#include "coretypes.h"
34#include "tm.h"
852b75ed 35#include "libgcc_tm.h"
7458026b 36
67914693 37/* If inhibit_libc is defined, we cannot compile this file. The
7458026b
ILT
38 effect is that people will not be able to use -fsplit-stack. That
39 is much better than failing the build particularly since people
40 will want to define inhibit_libc while building a compiler which
41 can build glibc. */
42
43#ifndef inhibit_libc
44
45#include <assert.h>
46#include <errno.h>
47#include <signal.h>
48#include <stdlib.h>
e14304ef 49#include <string.h>
7458026b
ILT
50#include <unistd.h>
51#include <sys/mman.h>
52#include <sys/uio.h>
53
54#include "generic-morestack.h"
55
710d54ed
ILT
56/* Some systems use LD_PRELOAD or similar tricks to add hooks to
57 mmap/munmap. That breaks this code, because when we call mmap
58 there is enough stack space for the system call but there is not,
59 in general, enough stack space to run a hook. At least when using
60 glibc on GNU/Linux we can avoid the problem by calling __mmap and
61 __munmap. */
62
458ca332 63#if defined(__gnu_linux__) && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 26))
710d54ed
ILT
64
65extern void *__mmap (void *, size_t, int, int, int, off_t);
66extern int __munmap (void *, size_t);
67
68#define mmap __mmap
69#define munmap __munmap
70
71#endif /* defined(__gnu_linux__) */
72
e14304ef
ILT
/* An unsigned integer type with the same width as a pointer, used to
   round-trip pointer values through integer arithmetic.  */
typedef unsigned uintptr_type __attribute__ ((mode (pointer)));
74
7458026b
ILT
75/* This file contains subroutines that are used by code compiled with
76 -fsplit-stack. */
77
78/* Declare functions to avoid warnings--there is no header file for
79 these internal functions. We give most of these functions the
80 flatten attribute in order to minimize their stack usage--here we
81 must minimize stack usage even at the cost of code size, and in
82 general inlining everything will do that. */
83
/* Record the initial stack pointer and usable length for the current
   thread; called by processor specific startup code.  */
extern void
__generic_morestack_set_initial_sp (void *sp, size_t len)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

/* Allocate (or reuse) a stack segment big enough for *FRAME_SIZE plus
   PARAM_SIZE bytes of copied parameters; called from __morestack.  */
extern void *
__generic_morestack (size_t *frame_size, void *old_stack, size_t param_size)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

/* Pop back to the previous stack segment, reporting the space now
   available in *PAVAILABLE.  */
extern void *
__generic_releasestack (size_t *pavailable)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

/* Block all (maskable) signals while the stack is being split.  */
extern void
__morestack_block_signals (void)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

/* Restore the signal mask saved by __morestack_block_signals.  */
extern void
__morestack_unblock_signals (void)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

/* Return the stack space available at STACK; used when unwinding.  */
extern size_t
__generic_findstack (void *stack)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

/* Force resolution of mmap/munmap/getpagesize at program startup,
   while plenty of stack is available.  */
extern void
__morestack_load_mmap (void)
  __attribute__ ((no_split_stack, visibility ("hidden")));

/* Allocate dynamic (alloca/VLA) space on behalf of split-stack code.  */
extern void *
__morestack_allocate_stack_space (size_t size)
  __attribute__ ((visibility ("hidden")));

/* These are functions which -fsplit-stack code can call.  These are
   not called by the compiler, and are not hidden.  FIXME: These
   should be in some header file somewhere, somehow.  */

extern void *
__splitstack_find (void *, void *, size_t *, void **, void **, void **)
  __attribute__ ((visibility ("default")));

extern void
__splitstack_block_signals (int *, int *)
  __attribute__ ((visibility ("default")));

extern void
__splitstack_getcontext (void *context[10])
  __attribute__ ((no_split_stack, visibility ("default")));

extern void
__splitstack_setcontext (void *context[10])
  __attribute__ ((no_split_stack, visibility ("default")));

extern void *
__splitstack_makecontext (size_t, void *context[10], size_t *)
  __attribute__ ((visibility ("default")));

extern void *
__splitstack_resetcontext (void *context[10], size_t *)
  __attribute__ ((visibility ("default")));

extern void
__splitstack_releasecontext (void *context[10])
  __attribute__ ((visibility ("default")));

extern void
__splitstack_block_signals_context (void *context[10], int *, int *)
  __attribute__ ((visibility ("default")));

extern void *
__splitstack_find_context (void *context[10], size_t *, void **, void **,
			   void **)
  __attribute__ ((visibility ("default")));

/* These functions must be defined by the processor specific code.  */

extern void *__morestack_get_guard (void)
  __attribute__ ((no_split_stack, visibility ("hidden")));

extern void __morestack_set_guard (void *)
  __attribute__ ((no_split_stack, visibility ("hidden")));

extern void *__morestack_make_guard (void *, size_t)
  __attribute__ ((no_split_stack, visibility ("hidden")));
167
7458026b
ILT
168/* When we allocate a stack segment we put this header at the
169 start. */
170
/* Header placed at the start of every mmap'ed stack segment.  The
   usable stack area immediately follows this structure.  */
struct stack_segment
{
  /* The previous stack segment--when a function running on this stack
     segment returns, it will run on the previous one.  */
  struct stack_segment *prev;
  /* The next stack segment, if it has been allocated--when a function
     is running on this stack segment, the next one is not being
     used.  It is kept so it can be reused without another mmap.  */
  struct stack_segment *next;
  /* The total usable size of this stack segment, not counting this
     header.  */
  size_t size;
  /* The stack address when this stack was created.  This is used when
     popping the stack.  */
  void *old_stack;
  /* A list of memory blocks allocated by dynamic stack
     allocation (alloca / variably sized arrays).  */
  struct dynamic_allocation_blocks *dynamic_allocation;
  /* A list of dynamic memory blocks no longer needed, awaiting reuse
     or release.  */
  struct dynamic_allocation_blocks *free_dynamic_allocation;
  /* An extra pointer in case we need some more information some
     day.  */
  void *extra;
};
194
195/* This structure holds the (approximate) initial stack pointer and
196 size for the system supplied stack for a thread. This is set when
197 the thread is created. We also store a sigset_t here to hold the
198 signal mask while splitting the stack, since we don't want to store
199 that on the stack. */
200
/* This structure holds the (approximate) initial stack pointer and
   size for the system supplied stack for a thread.  This is set when
   the thread is created.  We also store a sigset_t here to hold the
   signal mask while splitting the stack, since we don't want to store
   that on the stack.  */
struct initial_sp
{
  /* The initial stack pointer.  */
  void *sp;
  /* The stack length.  */
  size_t len;
  /* A signal mask, put here so that the thread can use it without
     needing stack space.  */
  sigset_t mask;
  /* Non-zero if we should not block signals.  This is a reversed flag
     so that the default zero value is the safe value.  The type is
     uintptr_type because it replaced one of the void * pointers in
     extra.  */
  uintptr_type dont_block_signals;
  /* Some extra space for later extensibility.  */
  void *extra[4];
};
218
219/* A list of memory blocks allocated by dynamic stack allocation.
220 This is used for code that calls alloca or uses variably sized
221 arrays. */
222
/* A list of memory blocks allocated by dynamic stack allocation.
   This is used for code that calls alloca or uses variably sized
   arrays.  Each node owns both itself and the BLOCK it describes.  */
struct dynamic_allocation_blocks
{
  /* The next block in the list.  */
  struct dynamic_allocation_blocks *next;
  /* The size of the allocated memory.  */
  size_t size;
  /* The allocated memory (from malloc).  */
  void *block;
};
232
233/* These thread local global variables must be shared by all split
234 stack code across shared library boundaries. Therefore, they have
235 default visibility. They have extensibility fields if needed for
236 new versions. If more radical changes are needed, new code can be
237 written using new variable names, while still using the existing
238 variables in a backward compatible manner. Symbol versioning is
239 also used, although, since these variables are only referenced by
240 code in this file and generic-morestack-thread.c, it is likely that
241 simply using new names will suffice. */
242
/* The first stack segment allocated for this thread.  */

__thread struct stack_segment *__morestack_segments
  __attribute__ ((visibility ("default")));

/* The stack segment that we think we are currently using.  This will
   be correct in normal usage, but will be incorrect if an exception
   unwinds into a different stack segment or if longjmp jumps to a
   different stack segment.  */

__thread struct stack_segment *__morestack_current_segment
  __attribute__ ((visibility ("default")));

/* The initial stack pointer and size for this thread.  */

__thread struct initial_sp __morestack_initial_sp
  __attribute__ ((visibility ("default")));

/* A static signal mask, to avoid taking up stack space.  Filled in by
   __generic_morestack_set_initial_sp.  */

static sigset_t __morestack_fullmask;

/* Page size, as returned from getpagesize().  Set on startup by
   __morestack_load_mmap.  */
static unsigned int static_pagesize;

/* Set on startup to non-zero value if SPLIT_STACK_GUARD env var is set.  */
static int use_guard_page;
270
7458026b
ILT
/* Convert VAL to a decimal string without using much stack space,
   filling BUF (of length BUFLEN) from the end.  Store the number of
   characters produced in *PRINT_LEN and return a pointer to the first
   character.  We use this instead of sprintf because sprintf would
   require too much stack space.  */

static char *
print_int (int val, char *buf, int buflen, size_t *print_len)
{
  unsigned int magnitude = (unsigned int) val;
  int negative = val < 0;
  int pos = buflen;

  /* Negating the unsigned value is well defined even for INT_MIN.  */
  if (negative)
    magnitude = - magnitude;

  /* Emit digits least-significant first, working backwards through
     the buffer.  */
  do
    {
      --pos;
      buf[pos] = '0' + (magnitude % 10);
      magnitude /= 10;
    }
  while (magnitude != 0 && pos > 0);

  if (negative)
    {
      if (pos > 0)
	--pos;
      buf[pos] = '-';
    }

  *print_len = buflen - pos;
  return buf + pos;
}
311
/* Print the string MSG/LEN, the errno number ERR, and a newline on
   stderr.  Then crash.  */

void
__morestack_fail (const char *, size_t, int) __attribute__ ((noreturn));

void
__morestack_fail (const char *msg, size_t len, int err)
{
  static const char nl[] = "\n";
  char numbuf[24];
  struct iovec vec[3];
  union { char *p; const char *cp; } deconst;

  /* We can't call strerror, because it may try to translate the error
     message, and that would use too much stack space.  */
  deconst.cp = msg;
  vec[0].iov_base = deconst.p;
  vec[0].iov_len = len;
  vec[1].iov_base = print_int (err, numbuf, sizeof numbuf, &vec[1].iov_len);
  deconst.cp = &nl[0];
  vec[2].iov_base = deconst.p;
  vec[2].iov_len = sizeof nl - 1;

  /* FIXME: On systems without writev we need to issue three write
     calls, or punt on printing errno.  For now this is irrelevant
     since stack splitting only works on GNU/Linux anyhow.  */
  writev (2, vec, 3);
  abort ();
}
341
/* Allocate a new stack segment.  FRAME_SIZE is the required frame
   size.  The returned segment is initialized but not yet linked into
   the per-thread segment list; on allocation failure this aborts via
   __morestack_fail.  */

static struct stack_segment *
allocate_segment (size_t frame_size)
{
  unsigned int pagesize;
  unsigned int overhead;
  unsigned int allocate;
  void *space;
  struct stack_segment *pss;

  /* static_pagesize was cached by __morestack_load_mmap at startup.  */
  pagesize = static_pagesize;
  overhead = sizeof (struct stack_segment);

  /* Allocate at least one page, enough for a signal handler stack,
     and enough for the requested frame, rounded up to a page.  */
  allocate = pagesize;
  if (allocate < MINSIGSTKSZ)
    allocate = ((MINSIGSTKSZ + overhead + pagesize - 1)
		& ~ (pagesize - 1));
  if (allocate < frame_size)
    allocate = ((frame_size + overhead + pagesize - 1)
		& ~ (pagesize - 1));

  /* Reserve one extra page for the inaccessible guard page.  */
  if (use_guard_page)
    allocate += pagesize;

  /* FIXME: If this binary requires an executable stack, then we need
     to set PROT_EXEC.  Unfortunately figuring that out is complicated
     and target dependent.  We would need to use dl_iterate_phdr to
     see if there is any object which does not have a PT_GNU_STACK
     phdr, though only for architectures which use that mechanism.  */
  space = mmap (NULL, allocate, PROT_READ | PROT_WRITE,
		MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (space == MAP_FAILED)
    {
      static const char msg[] =
	"unable to allocate additional stack space: errno ";
      __morestack_fail (msg, sizeof msg - 1, errno);
    }

  if (use_guard_page)
    {
      void *guard;

      /* Place the guard page at the overflow end of the segment and
	 hand back the remainder as usable stack.  */
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
      guard = space;
      space = (char *) space + pagesize;
#else
      /* NOTE(review): arithmetic on void * is a GNU extension; this
	 branch is only compiled where the stack grows upward.  */
      guard = space + allocate - pagesize;
#endif

      mprotect (guard, pagesize, PROT_NONE);
      allocate -= pagesize;
    }

  /* The segment header lives at the start of the mapping; the usable
     stack is everything after it.  */
  pss = (struct stack_segment *) space;

  pss->prev = NULL;
  pss->next = NULL;
  pss->size = allocate - overhead;
  pss->dynamic_allocation = NULL;
  pss->free_dynamic_allocation = NULL;
  pss->extra = NULL;

  return pss;
}
408
409/* Free a list of dynamic blocks. */
410
411static void
412free_dynamic_blocks (struct dynamic_allocation_blocks *p)
413{
414 while (p != NULL)
415 {
416 struct dynamic_allocation_blocks *next;
417
418 next = p->next;
419 free (p->block);
420 free (p);
421 p = next;
422 }
423}
424
425/* Merge two lists of dynamic blocks. */
426
427static struct dynamic_allocation_blocks *
428merge_dynamic_blocks (struct dynamic_allocation_blocks *a,
429 struct dynamic_allocation_blocks *b)
430{
431 struct dynamic_allocation_blocks **pp;
432
433 if (a == NULL)
434 return b;
435 if (b == NULL)
436 return a;
437 for (pp = &a->next; *pp != NULL; pp = &(*pp)->next)
438 ;
439 *pp = b;
440 return a;
441}
442
/* Release stack segments.  Unmap every segment on the list at *PP and
   set *PP to NULL.  If FREE_DYNAMIC is non-zero, we also free any
   dynamic blocks attached to those segments.  Otherwise we return
   them (merged into one list) so the caller can dispose of them
   later, when it is safe to call free.  */

struct dynamic_allocation_blocks *
__morestack_release_segments (struct stack_segment **pp, int free_dynamic)
{
  struct dynamic_allocation_blocks *ret;
  struct stack_segment *pss;

  ret = NULL;
  pss = *pp;
  while (pss != NULL)
    {
      struct stack_segment *next;
      unsigned int allocate;

      /* Save the link before the segment is unmapped.  */
      next = pss->next;

      if (pss->dynamic_allocation != NULL
	  || pss->free_dynamic_allocation != NULL)
	{
	  if (free_dynamic)
	    {
	      free_dynamic_blocks (pss->dynamic_allocation);
	      free_dynamic_blocks (pss->free_dynamic_allocation);
	    }
	  else
	    {
	      ret = merge_dynamic_blocks (pss->dynamic_allocation, ret);
	      ret = merge_dynamic_blocks (pss->free_dynamic_allocation, ret);
	    }
	}

      /* size excludes the header, so add it back to unmap the whole
	 mapping.  NOTE(review): when a guard page is in use the extra
	 page does not appear to be included here -- confirm against
	 allocate_segment whether the guard page is leaked.  */
      allocate = pss->size + sizeof (struct stack_segment);
      if (munmap (pss, allocate) < 0)
	{
	  static const char msg[] = "munmap of stack space failed: errno ";
	  __morestack_fail (msg, sizeof msg - 1, errno);
	}

      pss = next;
    }
  *pp = NULL;

  return ret;
}
489
/* This function is called by a processor specific function to set the
   initial stack pointer for a thread.  The operating system will
   always create a stack for a thread.  Here we record a stack pointer
   near the base of that stack.  The size argument lets the processor
   specific code estimate how much stack space is available on this
   initial stack.  */

void
__generic_morestack_set_initial_sp (void *sp, size_t len)
{
  /* The stack pointer most likely starts on a page boundary.  Adjust
     to the nearest 512 byte boundary.  It's not essential that we be
     precise here; getting it wrong will just leave some stack space
     unused.  */
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
  sp = (void *) ((((__UINTPTR_TYPE__) sp + 511U) / 512U) * 512U);
#else
  sp = (void *) ((((__UINTPTR_TYPE__) sp - 511U) / 512U) * 512U);
#endif

  __morestack_initial_sp.sp = sp;
  __morestack_initial_sp.len = len;
  sigemptyset (&__morestack_initial_sp.mask);

  /* Precompute the mask used to block everything while splitting.  */
  sigfillset (&__morestack_fullmask);
#if defined(__GLIBC__) && defined(__linux__)
  /* In glibc, the first two real time signals are used by the NPTL
     threading library.  By taking them out of the set of signals, we
     avoiding copying the signal mask in pthread_sigmask.  More
     importantly, pthread_sigmask uses less stack space on x86_64.  */
  sigdelset (&__morestack_fullmask, __SIGRTMIN);
  sigdelset (&__morestack_fullmask, __SIGRTMIN + 1);
#endif
}
524
/* This function is called by a processor specific function which is
   run in the prologue when more stack is needed.  The processor
   specific function handles the details of saving registers and
   frobbing the actual stack pointer.  This function is responsible
   for allocating a new stack segment and for copying a parameter
   block from the old stack to the new one.  On function entry
   *PFRAME_SIZE is the size of the required stack frame--the returned
   stack must be at least this large.  On function exit *PFRAME_SIZE
   is the amount of space remaining on the allocated stack.  OLD_STACK
   points at the parameters the old stack (really the current one
   while this function is running).  OLD_STACK is saved so that it can
   be returned by a later call to __generic_releasestack.  PARAM_SIZE
   is the size in bytes of parameters to copy to the new stack.  This
   function returns a pointer to the new stack segment, pointing to
   the memory after the parameters have been copied.  The returned
   value minus the returned *PFRAME_SIZE (or plus if the stack grows
   upward) is the first address on the stack which should not be used.

   This function is running on the old stack and has only a limited
   amount of stack space available.  */

void *
__generic_morestack (size_t *pframe_size, void *old_stack, size_t param_size)
{
  size_t frame_size = *pframe_size;
  struct stack_segment *current;
  struct stack_segment **pp;
  struct dynamic_allocation_blocks *dynamic;
  char *from;
  char *to;
  void *ret;
  size_t i;
  size_t aligned;

  current = __morestack_current_segment;

  /* Try to reuse the next cached segment; if it exists but is too
     small, release it (and everything after it) and allocate anew.  */
  pp = current != NULL ? &current->next : &__morestack_segments;
  if (*pp != NULL && (*pp)->size < frame_size)
    dynamic = __morestack_release_segments (pp, 0);
  else
    dynamic = NULL;
  current = *pp;

  if (current == NULL)
    {
      current = allocate_segment (frame_size + param_size);
      current->prev = __morestack_current_segment;
      *pp = current;
    }

  /* Remember where to return to when this segment is popped.  */
  current->old_stack = old_stack;

  __morestack_current_segment = current;

  if (dynamic != NULL)
    {
      /* Move the free blocks onto our list.  We don't want to call
	 free here, as we are short on stack space.  */
      current->free_dynamic_allocation =
	merge_dynamic_blocks (dynamic, current->free_dynamic_allocation);
    }

  *pframe_size = current->size - param_size;

  /* Align the returned stack to a 32-byte boundary.  */
  aligned = (param_size + 31) & ~ (size_t) 31;

#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
  {
    /* Parameters are copied to the very top of the segment; the new
       stack grows down from just below them.  */
    char *bottom = (char *) (current + 1) + current->size;
    to = bottom - aligned;
    ret = bottom - aligned;
  }
#else
  /* NOTE(review): this assignment looks like it is missing a (char *)
     cast (to is char *); this branch only compiles where the stack
     grows upward -- confirm against upstream sources.  */
  to = current + 1;
  to += aligned - param_size;
  ret = (char *) (current + 1) + aligned;
#endif

  /* We don't call memcpy to avoid worrying about the dynamic linker
     trying to resolve it.  */
  from = (char *) old_stack;
  for (i = 0; i < param_size; i++)
    *to++ = *from++;

  return ret;
}
612
/* This function is called by a processor specific function when it is
   ready to release a stack segment.  We don't actually release the
   stack segment, we just move back to the previous one.  The current
   stack segment will still be available if we need it in
   __generic_morestack.  This returns a pointer to the new stack
   segment to use, which is the one saved by a previous call to
   __generic_morestack.  The processor specific function is then
   responsible for actually updating the stack pointer.  This sets
   *PAVAILABLE to the amount of stack space now available.  */

void *
__generic_releasestack (size_t *pavailable)
{
  struct stack_segment *current;
  void *old_stack;

  /* Pop: the saved old_stack of the current segment is where
     execution resumes, on the previous segment.  */
  current = __morestack_current_segment;
  old_stack = current->old_stack;
  current = current->prev;
  __morestack_current_segment = current;

  if (current != NULL)
    {
      /* Space between the resume point and the segment boundary.  */
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
      *pavailable = (char *) old_stack - (char *) (current + 1);
#else
      *pavailable = (char *) (current + 1) + current->size - (char *) old_stack;
#endif
    }
  else
    {
      size_t used;

      /* We have popped back to the original stack.  */
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
      if ((char *) old_stack >= (char *) __morestack_initial_sp.sp)
	used = 0;
      else
	used = (char *) __morestack_initial_sp.sp - (char *) old_stack;
#else
      if ((char *) old_stack <= (char *) __morestack_initial_sp.sp)
	used = 0;
      else
	used = (char *) old_stack - (char *) __morestack_initial_sp.sp;
#endif

      /* The recorded length is only an estimate; clamp at zero.  */
      if (used > __morestack_initial_sp.len)
	*pavailable = 0;
      else
	*pavailable = __morestack_initial_sp.len - used;
    }

  return old_stack;
}
667
668/* Block signals while splitting the stack. This avoids trouble if we
669 try to invoke a signal handler which itself wants to split the
670 stack. */
671
672extern int pthread_sigmask (int, const sigset_t *, sigset_t *)
673 __attribute__ ((weak));
674
675void
676__morestack_block_signals (void)
677{
e14304ef
ILT
678 if (__morestack_initial_sp.dont_block_signals)
679 ;
680 else if (pthread_sigmask)
7458026b
ILT
681 pthread_sigmask (SIG_BLOCK, &__morestack_fullmask,
682 &__morestack_initial_sp.mask);
683 else
684 sigprocmask (SIG_BLOCK, &__morestack_fullmask,
685 &__morestack_initial_sp.mask);
686}
687
688/* Unblock signals while splitting the stack. */
689
690void
691__morestack_unblock_signals (void)
692{
e14304ef
ILT
693 if (__morestack_initial_sp.dont_block_signals)
694 ;
695 else if (pthread_sigmask)
7458026b
ILT
696 pthread_sigmask (SIG_SETMASK, &__morestack_initial_sp.mask, NULL);
697 else
698 sigprocmask (SIG_SETMASK, &__morestack_initial_sp.mask, NULL);
699}
700
/* This function is called to allocate dynamic stack space, for alloca
   or a variably sized array.  This is a regular function with
   sufficient stack space, so we just use malloc to allocate the
   space.  We attach the allocated blocks to the current stack
   segment, so that they will eventually be reused or freed.  Returns
   the usable block of at least SIZE bytes; aborts on allocation
   failure.  */

void *
__morestack_allocate_stack_space (size_t size)
{
  struct stack_segment *seg, *current;
  struct dynamic_allocation_blocks *p;

  /* We have to block signals to avoid getting confused if we get
     interrupted by a signal whose handler itself uses alloca or a
     variably sized array.  */
  __morestack_block_signals ();

  /* Since we don't want to call free while we are low on stack space,
     we may have a list of already allocated blocks waiting to be
     freed.  Release them all, unless we find one that is large
     enough.  We don't look at every block to see if one is large
     enough, just the first one, because we aren't trying to build a
     memory allocator here, we're just trying to speed up common
     cases.  */

  current = __morestack_current_segment;
  p = NULL;
  for (seg = __morestack_segments; seg != NULL; seg = seg->next)
    {
      p = seg->free_dynamic_allocation;
      if (p != NULL)
	{
	  if (p->size >= size)
	    {
	      /* Reuse this block: unlink it from the free list.  */
	      seg->free_dynamic_allocation = p->next;
	      break;
	    }

	  free_dynamic_blocks (p);
	  seg->free_dynamic_allocation = NULL;
	  p = NULL;
	}
    }

  if (p == NULL)
    {
      /* We need to allocate additional memory.  */
      p = malloc (sizeof (*p));
      if (p == NULL)
	abort ();
      p->size = size;
      p->block = malloc (size);
      if (p->block == NULL)
	abort ();
    }

  /* If we are still on the initial stack, then we have a space leak.
     FIXME.  */
  if (current != NULL)
    {
      p->next = current->dynamic_allocation;
      current->dynamic_allocation = p;
    }

  __morestack_unblock_signals ();

  return p->block;
}
769
/* Find the stack segment for STACK and return the amount of space
   available.  This is used when unwinding the stack because of an
   exception, in order to reset the stack guard correctly.  As a side
   effect, __morestack_current_segment is repointed at the segment
   that contains STACK.  */

size_t
__generic_findstack (void *stack)
{
  struct stack_segment *pss;
  size_t used;

  for (pss = __morestack_current_segment; pss != NULL; pss = pss->prev)
    {
      /* NOTE(review): the upper bound uses (char *) pss + pss->size,
	 i.e. it does not add the segment header size the way other
	 range checks in this file do -- confirm whether that is
	 intentional.  */
      if ((char *) pss < (char *) stack
	  && (char *) pss + pss->size > (char *) stack)
	{
	  __morestack_current_segment = pss;
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
	  return (char *) stack - (char *) (pss + 1);
#else
	  return (char *) (pss + 1) + pss->size - (char *) stack;
#endif
	}
    }

  /* We have popped back to the original stack.  */

  if (__morestack_initial_sp.sp == NULL)
    return 0;

#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
  if ((char *) stack >= (char *) __morestack_initial_sp.sp)
    used = 0;
  else
    used = (char *) __morestack_initial_sp.sp - (char *) stack;
#else
  if ((char *) stack <= (char *) __morestack_initial_sp.sp)
    used = 0;
  else
    used = (char *) stack - (char *) __morestack_initial_sp.sp;
#endif

  /* The recorded length is only an estimate; clamp at zero.  */
  if (used > __morestack_initial_sp.len)
    return 0;
  else
    return __morestack_initial_sp.len - used;
}
816
/* This function is called at program startup time to make sure that
   mmap, munmap, and getpagesize are resolved if linking dynamically.
   We want to resolve them while we have enough stack for them, rather
   than calling into the dynamic linker while low on stack space.
   Similarly, invoke getenv here to check for split-stack related control
   variables, since doing do as part of the __morestack path can result
   in unwanted use of SSE/AVX registers (see GCC PR 86213).  */

void
__morestack_load_mmap (void)
{
  /* Call with bogus values to run faster.  We don't care if the call
     fails.  Pass __MORESTACK_CURRENT_SEGMENT to make sure that any
     TLS accessor function is resolved.  */
  mmap (__morestack_current_segment, 0, PROT_READ, MAP_ANONYMOUS, -1, 0);
  mprotect (NULL, 0, 0);
  /* static_pagesize is still zero on the first call, so this is
     munmap (0, 0) -- the call exists only to force resolution.  */
  munmap (0, static_pagesize);

  /* Initialize these values here, so as to avoid dynamic linker
     activity as part of a __morestack call.  */
  static_pagesize = getpagesize();
  use_guard_page = getenv ("SPLIT_STACK_GUARD") != 0;
}
840
/* This function may be used to iterate over the stack segments.
   This can be called like this.
     void *next_segment = NULL;
     void *next_sp = NULL;
     void *initial_sp = NULL;
     void *stack;
     size_t stack_size;
     while ((stack = __splitstack_find (next_segment, next_sp, &stack_size,
					&next_segment, &next_sp,
					&initial_sp)) != NULL)
       {
	 // Stack segment starts at stack and is stack_size bytes long.
       }

   There is no way to iterate over the stack segments of a different
   thread.  However, what is permitted is for one thread to call this
   with the first two values NULL, to pass next_segment, next_sp, and
   initial_sp to a different thread, and then to suspend one way or
   another.  A different thread may run the subsequent
   __morestack_find iterations.  Of course, this will only work if the
   first thread is suspended during the __morestack_find iterations.
   If not, the second thread will be looking at the stack while it is
   changing, and anything could happen.

   The sentinel values (uintptr_type) 1 and 2 in SEGMENT_ARG mean
   "report the initial system stack next" and "iteration finished"
   respectively.

   FIXME: This should be declared in some header file, but where?  */

void *
__splitstack_find (void *segment_arg, void *sp, size_t *len,
		   void **next_segment, void **next_sp,
		   void **initial_sp)
{
  struct stack_segment *segment;
  void *ret;
  char *nsp;

  if (segment_arg == (void *) (uintptr_type) 1)
    {
      /* Report the portion of the initial system stack in use.  */
      char *isp = (char *) *initial_sp;

      if (isp == NULL)
	return NULL;

      *next_segment = (void *) (uintptr_type) 2;
      *next_sp = NULL;
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
      if ((char *) sp >= isp)
	return NULL;
      *len = (char *) isp - (char *) sp;
      return sp;
#else
      if ((char *) sp <= (char *) isp)
	return NULL;
      *len = (char *) sp - (char *) isp;
      return (void *) isp;
#endif
    }
  else if (segment_arg == (void *) (uintptr_type) 2)
    return NULL;
  else if (segment_arg != NULL)
    segment = (struct stack_segment *) segment_arg;
  else
    {
      /* First call: locate the segment containing our own stack
	 pointer (approximated by the address of a local).  */
      *initial_sp = __morestack_initial_sp.sp;
      segment = __morestack_current_segment;
      sp = (void *) &segment;
      while (1)
	{
	  if (segment == NULL)
	    return __splitstack_find ((void *) (uintptr_type) 1, sp, len,
				      next_segment, next_sp, initial_sp);
	  if ((char *) sp >= (char *) (segment + 1)
	      && (char *) sp <= (char *) (segment + 1) + segment->size)
	    break;
	  segment = segment->prev;
	}
    }

  if (segment->prev == NULL)
    *next_segment = (void *) (uintptr_type) 1;
  else
    *next_segment = segment->prev;

  /* The old_stack value is the address of the function parameters of
     the function which called __morestack.  So if f1 called f2 which
     called __morestack, the stack looks like this:

	 parameters       <- old_stack
	 return in f1
	 return in f2
	 registers pushed by __morestack

     The registers pushed by __morestack may not be visible on any
     other stack, if we are being called by a signal handler
     immediately after the call to __morestack_unblock_signals.  We
     want to adjust our return value to include those registers.  This
     is target dependent.  */

  nsp = (char *) segment->old_stack;

  if (nsp == NULL)
    {
      /* We've reached the top of the stack.  */
      *next_segment = (void *) (uintptr_type) 2;
    }
  else
    {
      /* Per-target size of the register save area pushed by
	 __morestack (powerpc64 needs no adjustment).  */
#if defined (__x86_64__)
      nsp -= 12 * sizeof (void *);
#elif defined (__i386__)
      nsp -= 6 * sizeof (void *);
#elif defined __powerpc64__
#elif defined __s390x__
      nsp -= 2 * 160;
#elif defined __s390__
      nsp -= 2 * 96;
#else
#error "unrecognized target"
#endif

      *next_sp = (void *) nsp;
    }

#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
  *len = (char *) (segment + 1) + segment->size - (char *) sp;
  ret = (void *) sp;
#else
  *len = (char *) sp - (char *) (segment + 1);
  ret = (void *) (segment + 1);
#endif

  return ret;
}
973
e14304ef
ILT
974/* Tell the split stack code whether it has to block signals while
975 manipulating the stack. This is for programs in which some threads
976 block all signals. If a thread already blocks signals, there is no
977 need for the split stack code to block them as well. If NEW is not
978 NULL, then if *NEW is non-zero signals will be blocked while
979 splitting the stack, otherwise they will not. If OLD is not NULL,
980 *OLD will be set to the old value. */
981
982void
983__splitstack_block_signals (int *new, int *old)
984{
985 if (old != NULL)
986 *old = __morestack_initial_sp.dont_block_signals ? 0 : 1;
987 if (new != NULL)
988 __morestack_initial_sp.dont_block_signals = *new ? 0 : 1;
989}
990
/* The offsets into the void* arrays used by __splitstack_getcontext
   and __splitstack_setcontext.  */

enum __splitstack_context_offsets
{
  MORESTACK_SEGMENTS = 0,	/* Head of the stack segment list.  */
  CURRENT_SEGMENT = 1,		/* Segment in use when saved.  */
  CURRENT_STACK = 2,		/* Stack address at the time of saving.  */
  STACK_GUARD = 3,		/* Value from __morestack_get_guard.  */
  INITIAL_SP = 4,		/* Initial stack pointer.  */
  INITIAL_SP_LEN = 5,		/* Length of the initial stack.  */
  BLOCK_SIGNALS = 6,		/* Inverted signal-blocking flag.  */

  /* Slots 7-9 are reserved for future extension; callers must always
     supply an array of NUMBER_OFFSETS pointers.  */
  NUMBER_OFFSETS = 10
};
1006
1007/* Get the current split stack context. This may be used for
1008 coroutine switching, similar to getcontext. The argument should
1009 have at least 10 void *pointers for extensibility, although we
1010 don't currently use all of them. This would normally be called
1011 immediately before a call to getcontext or swapcontext or
1012 setjmp. */
1013
1014void
1015__splitstack_getcontext (void *context[NUMBER_OFFSETS])
1016{
1017 memset (context, 0, NUMBER_OFFSETS * sizeof (void *));
1018 context[MORESTACK_SEGMENTS] = (void *) __morestack_segments;
1019 context[CURRENT_SEGMENT] = (void *) __morestack_current_segment;
1020 context[CURRENT_STACK] = (void *) &context;
1021 context[STACK_GUARD] = __morestack_get_guard ();
1022 context[INITIAL_SP] = (void *) __morestack_initial_sp.sp;
1023 context[INITIAL_SP_LEN] = (void *) (uintptr_type) __morestack_initial_sp.len;
1024 context[BLOCK_SIGNALS] = (void *) __morestack_initial_sp.dont_block_signals;
1025}
1026
1027/* Set the current split stack context. The argument should be a
1028 context previously passed to __splitstack_getcontext. This would
1029 normally be called immediately after a call to getcontext or
1030 swapcontext or setjmp if something jumped to it. */
1031
1032void
1033__splitstack_setcontext (void *context[NUMBER_OFFSETS])
1034{
1035 __morestack_segments = (struct stack_segment *) context[MORESTACK_SEGMENTS];
1036 __morestack_current_segment =
1037 (struct stack_segment *) context[CURRENT_SEGMENT];
1038 __morestack_set_guard (context[STACK_GUARD]);
1039 __morestack_initial_sp.sp = context[INITIAL_SP];
1040 __morestack_initial_sp.len = (size_t) context[INITIAL_SP_LEN];
1041 __morestack_initial_sp.dont_block_signals =
1042 (uintptr_type) context[BLOCK_SIGNALS];
1043}
1044
1045/* Create a new split stack context. This will allocate a new stack
1046 segment which may be used by a coroutine. STACK_SIZE is the
1047 minimum size of the new stack. The caller is responsible for
1048 actually setting the stack pointer. This would normally be called
1049 before a call to makecontext, and the returned stack pointer and
1050 size would be used to set the uc_stack field. A function called
1051 via makecontext on a stack created by __splitstack_makecontext may
1052 not return. Note that the returned pointer points to the lowest
1053 address in the stack space, and thus may not be the value to which
1054 to set the stack pointer. */
1055
1056void *
1057__splitstack_makecontext (size_t stack_size, void *context[NUMBER_OFFSETS],
1058 size_t *size)
1059{
1060 struct stack_segment *segment;
1061 void *initial_sp;
1062
1063 memset (context, 0, NUMBER_OFFSETS * sizeof (void *));
1064 segment = allocate_segment (stack_size);
1065 context[MORESTACK_SEGMENTS] = segment;
1066 context[CURRENT_SEGMENT] = segment;
53d68b9f 1067#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
e14304ef
ILT
1068 initial_sp = (void *) ((char *) (segment + 1) + segment->size);
1069#else
1070 initial_sp = (void *) (segment + 1);
1071#endif
1072 context[STACK_GUARD] = __morestack_make_guard (initial_sp, segment->size);
1073 context[INITIAL_SP] = NULL;
1074 context[INITIAL_SP_LEN] = 0;
1075 *size = segment->size;
1076 return (void *) (segment + 1);
1077}
1078
a01207c4
ILT
1079/* Given an existing split stack context, reset it back to the start
1080 of the stack. Return the stack pointer and size, appropriate for
1081 use with makecontext. This may be used if a coroutine exits, in
1082 order to reuse the stack segments for a new coroutine. */
1083
1084void *
1085__splitstack_resetcontext (void *context[10], size_t *size)
1086{
1087 struct stack_segment *segment;
1088 void *initial_sp;
1089 size_t initial_size;
1090 void *ret;
1091
1092 /* Reset the context assuming that MORESTACK_SEGMENTS, INITIAL_SP
1093 and INITIAL_SP_LEN are correct. */
1094
1095 segment = context[MORESTACK_SEGMENTS];
1096 context[CURRENT_SEGMENT] = segment;
1097 context[CURRENT_STACK] = NULL;
1098 if (segment == NULL)
1099 {
1100 initial_sp = context[INITIAL_SP];
1101 initial_size = (uintptr_type) context[INITIAL_SP_LEN];
1102 ret = initial_sp;
53d68b9f 1103#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
a01207c4
ILT
1104 ret = (void *) ((char *) ret - initial_size);
1105#endif
1106 }
1107 else
1108 {
53d68b9f 1109#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
a01207c4
ILT
1110 initial_sp = (void *) ((char *) (segment + 1) + segment->size);
1111#else
1112 initial_sp = (void *) (segment + 1);
1113#endif
1114 initial_size = segment->size;
1115 ret = (void *) (segment + 1);
1116 }
1117 context[STACK_GUARD] = __morestack_make_guard (initial_sp, initial_size);
1118 context[BLOCK_SIGNALS] = NULL;
1119 *size = initial_size;
1120 return ret;
1121}
1122
1123/* Release all the memory associated with a splitstack context. This
1124 may be used if a coroutine exits and the associated stack should be
1125 freed. */
1126
1127void
1128__splitstack_releasecontext (void *context[10])
1129{
e9e053eb
ILT
1130 __morestack_release_segments (((struct stack_segment **)
1131 &context[MORESTACK_SEGMENTS]),
1132 1);
a01207c4
ILT
1133}
1134
e14304ef
ILT
1135/* Like __splitstack_block_signals, but operating on CONTEXT, rather
1136 than on the current state. */
1137
1138void
1139__splitstack_block_signals_context (void *context[NUMBER_OFFSETS], int *new,
1140 int *old)
1141{
1142 if (old != NULL)
1143 *old = ((uintptr_type) context[BLOCK_SIGNALS]) != 0 ? 0 : 1;
1144 if (new != NULL)
1145 context[BLOCK_SIGNALS] = (void *) (uintptr_type) (*new ? 0 : 1);
1146}
1147
1148/* Find the stack segments associated with a split stack context.
1149 This will return the address of the first stack segment and set
1150 *STACK_SIZE to its size. It will set next_segment, next_sp, and
1151 initial_sp which may be passed to __splitstack_find to find the
1152 remaining segments. */
1153
1154void *
1155__splitstack_find_context (void *context[NUMBER_OFFSETS], size_t *stack_size,
1156 void **next_segment, void **next_sp,
1157 void **initial_sp)
1158{
1159 void *sp;
1160 struct stack_segment *segment;
1161
1162 *initial_sp = context[INITIAL_SP];
1163
1164 sp = context[CURRENT_STACK];
1165 if (sp == NULL)
1166 {
1167 /* Most likely this context was created but was never used. The
1168 value 2 is a code used by __splitstack_find to mean that we
1169 have reached the end of the list of stacks. */
1170 *next_segment = (void *) (uintptr_type) 2;
1171 *next_sp = NULL;
1172 *initial_sp = NULL;
1173 return NULL;
1174 }
1175
1176 segment = context[CURRENT_SEGMENT];
1177 if (segment == NULL)
1178 {
1179 /* Most likely this context was saved by a thread which was not
1180 created using __splistack_makecontext and which has never
1181 split the stack. The value 1 is a code used by
1182 __splitstack_find to look at the initial stack. */
1183 segment = (struct stack_segment *) (uintptr_type) 1;
1184 }
1185
1186 return __splitstack_find (segment, sp, stack_size, next_segment, next_sp,
1187 initial_sp);
1188}
1189
7458026b 1190#endif /* !defined (inhibit_libc) */
0f0fd745 1191#endif /* not powerpc 32-bit */