# x86/x86_64 support for -fsplit-stack.
# Copyright (C) 2009, 2010, 2011 Free Software Foundation, Inc.
# Contributed by Ian Lance Taylor <iant@google.com>.

# This file is part of GCC.

# GCC is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3, or (at your option) any later
# version.

# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
# for more details.

# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.

# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
# <http://www.gnu.org/licenses/>.


# Support for allocating more stack space when using -fsplit-stack.
# When a function discovers that it needs more stack space, it will
# call __morestack with the size of the stack frame and the size of
# the parameters to copy from the old stack frame to the new one.
# The __morestack function preserves the parameter registers and
# calls __generic_morestack to actually allocate the stack space.

# When this is called stack space is very low, but we ensure that
# there is enough space to push the parameter registers and to call
# __generic_morestack.

# When calling __generic_morestack, FRAME_SIZE points to the size of
# the desired frame when the function is called, and the function
# sets it to the size of the allocated stack.  OLD_STACK points to
# the parameters on the old stack and PARAM_SIZE is the number of
# bytes of parameters to copy to the new stack.  These are the
# parameters of the function that called __morestack.  The
# __generic_morestack function returns the new stack pointer,
# pointing to the address of the first copied parameter.  The return
# value minus the returned *FRAME_SIZE will be the first address on
# the stack which we should not use.

# void *__generic_morestack (size_t *frame_size, void *old_stack,
#                            size_t param_size);
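
# As a rough illustration of that contract (a sketch only, in C-like
# pseudocode; "needed" and "params" are placeholder names, not part of
# this file):
#	size_t frame_size = needed + BACKOFF;
#	void *new_sp = __generic_morestack (&frame_size, params, param_size);
#	/* new_sp points at the copied parameters; frame_size now holds
#	   the allocated size, so new_sp - frame_size is the first
#	   address on the new stack which must not be used.  */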

# The __morestack routine has to arrange for the caller to return to a
# stub on the new stack.  The stub is responsible for restoring the
# old stack pointer and returning to the caller's caller.  This calls
# __generic_releasestack to retrieve the old stack pointer and release
# the newly allocated stack.

# void *__generic_releasestack (size_t *available);
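
# Again purely as an illustration (a sketch mirroring the code below,
# not a separate API): the return value and *available are combined to
# recompute the stack boundary that is then stored back in the TCB
# slot (%gs:0x30 / %fs:0x70 below):
#	size_t available;
#	void *p = __generic_releasestack (&available);
#	boundary = (char *) p - available + BACKOFF;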

# We do a little dance so that the processor's call/return return
# address prediction works out.  The compiler arranges for the caller
# to look like this:
#	call	__morestack
#	ret
# L:
#	// carry on with function
# After we allocate more stack, we call L, which is in our caller.
# When that returns (to the predicted instruction), we release the
# stack segment and reset the stack pointer.  We then return to the
# predicted instruction, namely the ret instruction immediately after
# the call to __morestack.  That then returns to the caller of the
# original caller.
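
# As an illustration only (a sketch, not the exact code GCC emits; the
# FRAME_SIZE and ARG_SIZE constants, the labels, and the use of %r11 as
# a scratch register are placeholders), the 64-bit prologue of a
# split-stack function looks roughly like this:
#	leaq	-FRAME_SIZE(%rsp),%r11	# Proposed new stack pointer.
#	cmpq	%fs:0x70,%r11		# Still above the stack boundary?
#	jae	.Lhave_stack		# Yes: no need to grow the stack.
#	movq	$FRAME_SIZE,%r10	# New stack frame size.
#	movq	$ARG_SIZE,%r11		# Size of parameters to copy.
#	call	__morestack
#	ret				# __morestack returns here; this
#					# returns to our caller's caller.
# .Lhave_stack:
#	# ... function body ...
# In 32-bit mode the two sizes are pushed on the stack instead, and the
# stack boundary lives at %gs:0x30 rather than %fs:0x70 (x32 uses
# %fs:0x40, as the macro below shows).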


# The amount of extra space we ask for.  In general this has to be
# enough for the dynamic loader to find a symbol and for a signal
# handler to run.

#ifndef __x86_64__
#define BACKOFF (1024)
#else
#define BACKOFF (1536)
#endif


# This entry point is for split-stack code which calls non-split-stack
# code.  When the linker sees this case, it converts the call to
# __morestack to call __morestack_non_split instead.  We just bump the
# requested stack space by 16K.

	.global	__morestack_non_split
	.hidden	__morestack_non_split

#ifdef __ELF__
	.type	__morestack_non_split,@function
#endif

__morestack_non_split:

#ifndef __x86_64__
	addl	$0x4000,4(%esp)
#else
	addq	$0x4000,%r10
#endif

#ifdef __ELF__
	.size	__morestack_non_split, . - __morestack_non_split
#endif

# __morestack_non_split falls through into __morestack.


# The __morestack function.

	.global	__morestack
	.hidden	__morestack

#ifdef __ELF__
	.type	__morestack,@function
#endif

__morestack:
.LFB1:
	.cfi_startproc


#ifndef __x86_64__


# The 32-bit __morestack function.

	# We use a cleanup to restore the stack guard if an exception
	# is thrown through this code.
#ifndef __PIC__
	.cfi_personality 0,__gcc_personality_v0
	.cfi_lsda 0,.LLSDA1
#else
	.cfi_personality 0x9b,DW.ref.__gcc_personality_v0
	.cfi_lsda 0x1b,.LLSDA1
#endif

	# We return below with a ret $8.  We will return to a single
	# return instruction, which will return to the caller of our
	# caller.  We let the unwinder skip that single return
	# instruction, and just return to the real caller.

	# Here CFA points just past the return address on the stack,
	# e.g., on function entry it is %esp + 4.  Later we will
	# change it to %ebp + 8, as set by the .cfi_def_cfa_register
	# and .cfi_def_cfa_offset directives below.  The stack looks
	# like this:
	#	CFA + 12:	stack pointer after two returns
	#	CFA + 8:	return address of morestack caller's caller
	#	CFA + 4:	size of parameters
	#	CFA:		new stack frame size
	#	CFA - 4:	return address of this function
	#	CFA - 8:	previous value of %ebp; %ebp points here
	# We want to set %esp to the stack pointer after the double
	# return, which is CFA + 12.
	.cfi_offset 8, 8		# New PC stored at CFA + 8
	.cfi_escape 0x15, 4, 0x7d	# DW_CFA_val_offset_sf, %esp, 12/-4
					# i.e., next %esp is CFA + 12

	# Set up a normal backtrace.
	pushl	%ebp
	.cfi_def_cfa_offset 8
	.cfi_offset %ebp, -8
	movl	%esp,%ebp
	.cfi_def_cfa_register %ebp

	# In 32-bit mode the parameters are pushed on the stack.  The
	# argument size is pushed then the new stack frame size is
	# pushed.

	# Align stack to 16-byte boundary with enough space for saving
	# registers and passing parameters to functions we call.
	subl	$40,%esp

	# Because our cleanup code may need to clobber %ebx, we need
	# to save it here so the unwinder can restore the value used
	# by the caller.  Note that we don't have to restore the
	# register, since we don't change it, we just have to save it
	# for the unwinder.
	movl	%ebx,-4(%ebp)
	.cfi_offset %ebx, -12

	# In 32-bit mode the registers %eax, %edx, and %ecx may be
	# used for parameters, depending on the regparm and fastcall
	# attributes.

	movl	%eax,-8(%ebp)
	movl	%edx,-12(%ebp)
	movl	%ecx,-16(%ebp)

	call	__morestack_block_signals

	movl	12(%ebp),%eax		# The size of the parameters.
	movl	%eax,8(%esp)
	leal	20(%ebp),%eax		# Address of caller's parameters.
	movl	%eax,4(%esp)
	addl	$BACKOFF,8(%ebp)	# Ask for backoff bytes.
	leal	8(%ebp),%eax		# The address of the new frame size.
	movl	%eax,(%esp)

	call	__generic_morestack

	movl	%eax,%esp		# Switch to the new stack.
	subl	8(%ebp),%eax		# The end of the stack space.
	addl	$BACKOFF,%eax		# Back off BACKOFF bytes.

.LEHB0:
	# FIXME: The offset must match
	# TARGET_THREAD_SPLIT_STACK_OFFSET in
	# gcc/config/i386/linux.h.
	movl	%eax,%gs:0x30		# Save the new stack boundary.

	call	__morestack_unblock_signals

	movl	-12(%ebp),%edx		# Restore registers.
	movl	-16(%ebp),%ecx

	movl	4(%ebp),%eax		# Increment the return address
	cmpb	$0xc3,(%eax)		# to skip the ret instruction;
	je	1f			# see above.
	addl	$2,%eax
1:	inc	%eax

	movl	%eax,-12(%ebp)		# Store return address in an
					# unused slot.

	movl	-8(%ebp),%eax		# Restore the last register.

	call	*-12(%ebp)		# Call our caller!

	# The caller will return here, as predicted.

	# Save the registers which may hold a return value.  We
	# assume that __generic_releasestack does not touch any
	# floating point or vector registers.
	pushl	%eax
	pushl	%edx

	# Push the arguments to __generic_releasestack now so that the
	# stack is at a 16-byte boundary for
	# __morestack_block_signals.
	pushl	$0			# Where the available space is returned.
	leal	0(%esp),%eax		# Push its address.
	push	%eax

	call	__morestack_block_signals

	call	__generic_releasestack

	subl	4(%esp),%eax		# Subtract available space.
	addl	$BACKOFF,%eax		# Back off BACKOFF bytes.
.LEHE0:
	movl	%eax,%gs:0x30		# Save the new stack boundary.

	addl	$8,%esp			# Remove values from stack.

	# We need to restore the old stack pointer, which is in %ebp,
	# before we unblock signals.  We also need to restore %eax and
	# %edx after we unblock signals but before we return.  Do this
	# by moving %eax and %edx from the current stack to the old
	# stack.

	popl	%edx			# Pop return value from current stack.
	popl	%eax

	movl	%ebp,%esp		# Restore stack pointer.

	pushl	%eax			# Push return value on old stack.
	pushl	%edx
	subl	$8,%esp			# Align stack to 16-byte boundary.

	call	__morestack_unblock_signals

	addl	$8,%esp
	popl	%edx			# Restore return value.
	popl	%eax

	.cfi_remember_state

	# We never changed %ebx, so we don't have to actually restore it.
	.cfi_restore %ebx

	popl	%ebp
	.cfi_restore %ebp
	.cfi_def_cfa %esp, 4
	ret	$8			# Return to caller, which will
					# immediately return.  Pop
					# arguments as we go.

# This is the cleanup code called by the stack unwinder when unwinding
# through the code between .LEHB0 and .LEHE0 above.

.L1:
	.cfi_restore_state
	subl	$16,%esp		# Maintain 16-byte alignment.
	movl	%eax,4(%esp)		# Save exception header.
	movl	%ebp,(%esp)		# Stack pointer after resume.
	call	__generic_findstack
	movl	%ebp,%ecx		# Get the stack pointer.
	subl	%eax,%ecx		# Subtract available space.
	addl	$BACKOFF,%ecx		# Back off BACKOFF bytes.
	movl	%ecx,%gs:0x30		# Save new stack boundary.
	movl	4(%esp),%eax		# Function argument.
	movl	%eax,(%esp)
#ifdef __PIC__
	call	__x86.get_pc_thunk.bx	# %ebx may not be set up for us.
	addl	$_GLOBAL_OFFSET_TABLE_, %ebx
	call	_Unwind_Resume@PLT	# Resume unwinding.
#else
	call	_Unwind_Resume
#endif

#else /* defined(__x86_64__) */


# The 64-bit __morestack function.

	# We use a cleanup to restore the stack guard if an exception
	# is thrown through this code.
#ifndef __PIC__
	.cfi_personality 0x3,__gcc_personality_v0
	.cfi_lsda 0x3,.LLSDA1
#else
	.cfi_personality 0x9b,DW.ref.__gcc_personality_v0
	.cfi_lsda 0x1b,.LLSDA1
#endif

	# We will return to a single return instruction, which will
	# return to the caller of our caller.  Let the unwinder skip
	# that single return instruction, and just return to the real
	# caller.
	.cfi_offset 16, 0		# New PC stored at CFA.
	.cfi_escape 0x15, 7, 0x7f	# DW_CFA_val_offset_sf, %esp, 8/-8

	# Set up a normal backtrace.
	pushq	%rbp
	.cfi_def_cfa_offset 16
	.cfi_offset %rbp, -16
	movq	%rsp, %rbp
	.cfi_def_cfa_register %rbp

	# In 64-bit mode the new stack frame size is passed in %r10
	# and the argument size is passed in %r11.

	addq	$BACKOFF,%r10		# Ask for backoff bytes.
	pushq	%r10			# Save new frame size.

	# In 64-bit mode the registers %rdi, %rsi, %rdx, %rcx, %r8,
	# and %r9 may be used for parameters.  We also preserve %rax
	# which the caller may use to hold %r10.

	pushq	%rax
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9

	pushq	%r11
	pushq	$0			# For alignment.

	call	__morestack_block_signals

	leaq	-8(%rbp),%rdi		# Address of new frame size.
	leaq	24(%rbp),%rsi		# The caller's parameters.
	addq	$8,%rsp
	popq	%rdx			# The size of the parameters.

	call	__generic_morestack

	movq	-8(%rbp),%r10		# Reload modified frame size.
	movq	%rax,%rsp		# Switch to the new stack.
	subq	%r10,%rax		# The end of the stack space.
	addq	$BACKOFF,%rax		# Back off BACKOFF bytes.

.LEHB0:
	# FIXME: The offset must match
	# TARGET_THREAD_SPLIT_STACK_OFFSET in
	# gcc/config/i386/linux64.h.
	# Macro to save the new stack boundary.
#ifdef __LP64__
#define X86_64_SAVE_NEW_STACK_BOUNDARY(reg)	movq	%r##reg,%fs:0x70
#else
#define X86_64_SAVE_NEW_STACK_BOUNDARY(reg)	movl	%e##reg,%fs:0x40
#endif
	X86_64_SAVE_NEW_STACK_BOUNDARY (ax)

	call	__morestack_unblock_signals

	movq	-24(%rbp),%rdi		# Restore registers.
	movq	-32(%rbp),%rsi
	movq	-40(%rbp),%rdx
	movq	-48(%rbp),%rcx
	movq	-56(%rbp),%r8
	movq	-64(%rbp),%r9

	movq	8(%rbp),%r10		# Increment the return address
	incq	%r10			# to skip the ret instruction;
					# see above.

	movq	-16(%rbp),%rax		# Restore caller's %rax.

	call	*%r10			# Call our caller!

	# The caller will return here, as predicted.

	# Save the registers which may hold a return value.  We
	# assume that __generic_releasestack does not touch any
	# floating point or vector registers.
	pushq	%rax
	pushq	%rdx

	call	__morestack_block_signals

	pushq	$0			# For alignment.
	pushq	$0			# Where the available space is returned.
	leaq	0(%rsp),%rdi		# Pass its address.

	call	__generic_releasestack

	subq	0(%rsp),%rax		# Subtract available space.
	addq	$BACKOFF,%rax		# Back off BACKOFF bytes.
.LEHE0:
	X86_64_SAVE_NEW_STACK_BOUNDARY (ax)

	addq	$16,%rsp		# Remove values from stack.

	# We need to restore the old stack pointer, which is in %rbp,
	# before we unblock signals.  We also need to restore %rax and
	# %rdx after we unblock signals but before we return.  Do this
	# by moving %rax and %rdx from the current stack to the old
	# stack.

	popq	%rdx			# Pop return value from current stack.
	popq	%rax

	movq	%rbp,%rsp		# Restore stack pointer.

	pushq	%rax			# Push return value on old stack.
	pushq	%rdx

	call	__morestack_unblock_signals

	popq	%rdx			# Restore return value.
	popq	%rax

	.cfi_remember_state
	popq	%rbp
	.cfi_restore %rbp
	.cfi_def_cfa %rsp, 8
	ret				# Return to caller, which will
					# immediately return.

# This is the cleanup code called by the stack unwinder when unwinding
# through the code between .LEHB0 and .LEHE0 above.

.L1:
	.cfi_restore_state
	subq	$16,%rsp		# Maintain 16-byte alignment.
	movq	%rax,(%rsp)		# Save exception header.
	movq	%rbp,%rdi		# Stack pointer after resume.
	call	__generic_findstack
	movq	%rbp,%rcx		# Get the stack pointer.
	subq	%rax,%rcx		# Subtract available space.
	addq	$BACKOFF,%rcx		# Back off BACKOFF bytes.
	X86_64_SAVE_NEW_STACK_BOUNDARY (cx)
	movq	(%rsp),%rdi		# Restore exception data for call.
#ifdef __PIC__
	call	_Unwind_Resume@PLT	# Resume unwinding.
#else
	call	_Unwind_Resume		# Resume unwinding.
#endif

#endif /* defined(__x86_64__) */

	.cfi_endproc
#ifdef __ELF__
	.size	__morestack, . - __morestack
#endif

#if !defined(__x86_64__) && defined(__PIC__)
# Output the thunk to get PC into bx, since we use it above.
	.section	.text.__x86.get_pc_thunk.bx,"axG",@progbits,__x86.get_pc_thunk.bx,comdat
	.globl	__x86.get_pc_thunk.bx
	.hidden	__x86.get_pc_thunk.bx
#ifdef __ELF__
	.type	__x86.get_pc_thunk.bx, @function
#endif
__x86.get_pc_thunk.bx:
	.cfi_startproc
	movl	(%esp), %ebx
	ret
	.cfi_endproc
#ifdef __ELF__
	.size	__x86.get_pc_thunk.bx, . - __x86.get_pc_thunk.bx
#endif
#endif

# The exception table.  This tells the personality routine to execute
# the exception handler.

	.section	.gcc_except_table,"a",@progbits
	.align	4
.LLSDA1:
	.byte	0xff	# @LPStart format (omit)
	.byte	0xff	# @TType format (omit)
	.byte	0x1	# call-site format (uleb128)
	.uleb128 .LLSDACSE1-.LLSDACSB1	# Call-site table length
.LLSDACSB1:
	.uleb128 .LEHB0-.LFB1	# region 0 start
	.uleb128 .LEHE0-.LEHB0	# length
	.uleb128 .L1-.LFB1	# landing pad
	.uleb128 0		# action
.LLSDACSE1:


	.global	__gcc_personality_v0
#ifdef __PIC__
	# Build a position independent reference to the basic
	# personality function.
	.hidden DW.ref.__gcc_personality_v0
	.weak	DW.ref.__gcc_personality_v0
	.section .data.DW.ref.__gcc_personality_v0,"awG",@progbits,DW.ref.__gcc_personality_v0,comdat
	.type	DW.ref.__gcc_personality_v0, @object
DW.ref.__gcc_personality_v0:
#ifndef __LP64__
	.align	4
	.size	DW.ref.__gcc_personality_v0, 4
	.long	__gcc_personality_v0
#else
	.align	8
	.size	DW.ref.__gcc_personality_v0, 8
	.quad	__gcc_personality_v0
#endif
#endif

#if defined __x86_64__ && defined __LP64__

# This entry point is used for the large model.  With this entry point
# the upper 32 bits of %r10 hold the argument size and the lower 32
# bits hold the new stack frame size.  There doesn't seem to be a way
# to know in the assembler code that we are assembling for the large
# model, and there doesn't seem to be a large model multilib anyhow.
# If one is developed, then the non-PIC code is probably OK since we
# will probably be close to the morestack code, but the PIC code
# almost certainly needs to be changed.  FIXME.

	.text
	.global	__morestack_large_model
	.hidden	__morestack_large_model

#ifdef __ELF__
	.type	__morestack_large_model,@function
#endif

__morestack_large_model:

	.cfi_startproc

	movq	%r10, %r11
	andl	$0xffffffff, %r10d
	sarq	$32, %r11
	jmp	__morestack

	.cfi_endproc
#ifdef __ELF__
	.size	__morestack_large_model, . - __morestack_large_model
#endif

#endif /* __x86_64__ && __LP64__ */

# Initialize the stack test value when the program starts or when a
# new thread starts.  We don't know how large the main stack is, so we
# guess conservatively.  We might be able to use getrlimit here.

	.text
	.global	__stack_split_initialize
	.hidden	__stack_split_initialize

#ifdef __ELF__
	.type	__stack_split_initialize, @function
#endif

__stack_split_initialize:

#ifndef __x86_64__

	leal	-16000(%esp),%eax	# We should have at least 16K.
	movl	%eax,%gs:0x30
	pushl	$16000
	pushl	%esp
#ifdef __PIC__
	call	__generic_morestack_set_initial_sp@PLT
#else
	call	__generic_morestack_set_initial_sp
#endif
	addl	$8,%esp
	ret

#else /* defined(__x86_64__) */

	leaq	-16000(%rsp),%rax	# We should have at least 16K.
	X86_64_SAVE_NEW_STACK_BOUNDARY (ax)
	movq	%rsp,%rdi
	movq	$16000,%rsi
#ifdef __PIC__
	call	__generic_morestack_set_initial_sp@PLT
#else
	call	__generic_morestack_set_initial_sp
#endif
	ret

#endif /* defined(__x86_64__) */

#ifdef __ELF__
	.size	__stack_split_initialize, . - __stack_split_initialize
#endif

# Routines to get and set the guard, for __splitstack_getcontext,
# __splitstack_setcontext, and __splitstack_makecontext.

# void *__morestack_get_guard (void) returns the current stack guard.
	.text
	.global	__morestack_get_guard
	.hidden	__morestack_get_guard

#ifdef __ELF__
	.type	__morestack_get_guard,@function
#endif

__morestack_get_guard:

#ifndef __x86_64__
	movl	%gs:0x30,%eax
#else
#ifdef __LP64__
	movq	%fs:0x70,%rax
#else
	movl	%fs:0x40,%eax
#endif
#endif
	ret

#ifdef __ELF__
	.size	__morestack_get_guard, . - __morestack_get_guard
#endif

# void __morestack_set_guard (void *) sets the stack guard.
	.global	__morestack_set_guard
	.hidden	__morestack_set_guard

#ifdef __ELF__
	.type	__morestack_set_guard,@function
#endif

__morestack_set_guard:

#ifndef __x86_64__
	movl	4(%esp),%eax
	movl	%eax,%gs:0x30
#else
	X86_64_SAVE_NEW_STACK_BOUNDARY (di)
#endif
	ret

#ifdef __ELF__
	.size	__morestack_set_guard, . - __morestack_set_guard
#endif

# void *__morestack_make_guard (void *, size_t) returns the stack
# guard value for a stack.
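# As a sketch (mirroring the code below, with "stack" and "size"
# standing in for the two arguments), the computed guard is simply:
#	guard = (char *) stack - size + BACKOFF;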
	.global	__morestack_make_guard
	.hidden	__morestack_make_guard

#ifdef __ELF__
	.type	__morestack_make_guard,@function
#endif

__morestack_make_guard:

#ifndef __x86_64__
	movl	4(%esp),%eax
	subl	8(%esp),%eax
	addl	$BACKOFF,%eax
#else
	subq	%rsi,%rdi
	addq	$BACKOFF,%rdi
	movq	%rdi,%rax
#endif
	ret

#ifdef __ELF__
	.size	__morestack_make_guard, . - __morestack_make_guard
#endif

# Make __stack_split_initialize a high priority constructor.  FIXME:
# This is ELF specific.

	.section	.ctors.65535,"aw",@progbits

#ifndef __LP64__
	.align	4
	.long	__stack_split_initialize
	.long	__morestack_load_mmap
#else
	.align	8
	.quad	__stack_split_initialize
	.quad	__morestack_load_mmap
#endif

#ifdef __ELF__
	.section	.note.GNU-stack,"",@progbits
	.section	.note.GNU-split-stack,"",@progbits
	.section	.note.GNU-no-split-stack,"",@progbits
#endif