/* setjmp for PowerPC.
   Copyright (C) 1995-99, 2000, 2003, 2004 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <sysdep.h>
#define _ASM
#define _SETJMP_H
#ifdef __NO_VMX__
# include <novmxsetjmp.h>
#else
# include <bits/setjmp.h>
#endif
#include <bp-sym.h>
#include <bp-asm.h>


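/* __sigsetjmp (env, savemask): store the caller's register state into
   the jump buffer addressed by r3, then tail-call __sigjmp_save to
   deal with the signal mask.  */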
ENTRY (BP_SYM (__sigsetjmp))
	CHECK_BOUNDS_BOTH_WIDE_LIT (r3, r8, r9, JB_SIZE)

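	/* Save the stack pointer, link register, and condition register,
	   together with the nonvolatile GPRs (r14-r31) and FPRs
	   (fp14-fp31), into the jump buffer addressed by r3.  Integer and
	   floating-point stores are interleaved.  */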
	stw r1,(JB_GPR1*4)(3)
	mflr r0
	stw r14,((JB_GPRS+0)*4)(3)
	stfd fp14,((JB_FPRS+0*2)*4)(3)
	stw r0,(JB_LR*4)(3)
	stw r15,((JB_GPRS+1)*4)(3)
	stfd fp15,((JB_FPRS+1*2)*4)(3)
	mfcr r0
	stw r16,((JB_GPRS+2)*4)(3)
	stfd fp16,((JB_FPRS+2*2)*4)(3)
	stw r0,(JB_CR*4)(3)
	stw r17,((JB_GPRS+3)*4)(3)
	stfd fp17,((JB_FPRS+3*2)*4)(3)
	stw r18,((JB_GPRS+4)*4)(3)
	stfd fp18,((JB_FPRS+4*2)*4)(3)
	stw r19,((JB_GPRS+5)*4)(3)
	stfd fp19,((JB_FPRS+5*2)*4)(3)
	stw r20,((JB_GPRS+6)*4)(3)
	stfd fp20,((JB_FPRS+6*2)*4)(3)
	stw r21,((JB_GPRS+7)*4)(3)
	stfd fp21,((JB_FPRS+7*2)*4)(3)
	stw r22,((JB_GPRS+8)*4)(3)
	stfd fp22,((JB_FPRS+8*2)*4)(3)
	stw r23,((JB_GPRS+9)*4)(3)
	stfd fp23,((JB_FPRS+9*2)*4)(3)
	stw r24,((JB_GPRS+10)*4)(3)
	stfd fp24,((JB_FPRS+10*2)*4)(3)
	stw r25,((JB_GPRS+11)*4)(3)
	stfd fp25,((JB_FPRS+11*2)*4)(3)
	stw r26,((JB_GPRS+12)*4)(3)
	stfd fp26,((JB_FPRS+12*2)*4)(3)
	stw r27,((JB_GPRS+13)*4)(3)
	stfd fp27,((JB_FPRS+13*2)*4)(3)
	stw r28,((JB_GPRS+14)*4)(3)
	stfd fp28,((JB_FPRS+14*2)*4)(3)
	stw r29,((JB_GPRS+15)*4)(3)
	stfd fp29,((JB_FPRS+15*2)*4)(3)
	stw r30,((JB_GPRS+16)*4)(3)
	stfd fp30,((JB_FPRS+16*2)*4)(3)
	stw r31,((JB_GPRS+17)*4)(3)
	stfd fp31,((JB_FPRS+17*2)*4)(3)
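	/* Unless AltiVec support was compiled out, check at run time
	   whether the processor has AltiVec by reading the hwcap word
	   (through _rtld_global when PIC, directly from _dl_hwcap
	   otherwise).  If AltiVec is absent, skip the vector register
	   save.  */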
#ifndef __NO_VMX__
#ifdef PIC
	mflr r6
	bl _GLOBAL_OFFSET_TABLE_@local-4
	mflr r5
#ifdef SHARED
	lwz r5,_rtld_global@got(r5)
	mtlr r6
	lwz r5,RTLD_GLOBAL_DL_HWCAP_OFFSET(r5)
#else
	lwz r5,_rtld_global@got(r5)
	mtlr r6
	lwz r5,0(r5)
#endif
#else
	lis r5,_dl_hwcap@ha
	lwz r5,_dl_hwcap@l(r5)
#endif
	andis. r5,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16)
	beq no_vmx
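	/* AltiVec is available: save VRSAVE and the nonvolatile vector
	   registers v20-v31.  r5 holds the address of the VR save area in
	   the jump buffer; if it is not 16-byte aligned, take the
	   unaligned path below.  */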
	la r5,((JB_VRS)*4)(3)
	andi. r6,r5,0xf
	mfspr r0,VRSAVE
	stw r0,((JB_VRSAVE)*4)(3)
	addi r6,r5,16
	beq+ aligned_save_vmx
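	/* The VR save area is not 16-byte aligned.  stvx ignores the low
	   four address bits, so rotate each register with vperm (control
	   from lvsr) and merge neighbouring registers with vsel before
	   each aligned store; the quadwords at either end are loaded
	   first so that bytes outside the save area are preserved.  */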
	lvsr v0,0,r5
	vspltisb v1,-1 /* set v1 to all 1's */
	vspltisb v2,0 /* set v2 to all 0's */
	vperm v3,v2,v1,v0   /* v3 is the shift mask: as many all-1 bytes
			       on the left as the misalignment.  */


	/* Special case for v20: preserve what is in the save area below
	   v20 before obliterating it.  */
	lvx v5,0,r5
	vperm v20,v20,v20,v0
	vsel v5,v5,v20,v3
	vsel v20,v20,v2,v3
	stvx v5,0,r5

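/* Store the next 16 bytes of the unaligned save: bump addgpr by 32,
   rotate savevr by shiftvr, merge it with the leftover part of
   prev_savevr under maskvr into hivr, and store hivr at savegpr.  */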
#define save_2vmx_partial(savevr,prev_savevr,hivr,shiftvr,maskvr,savegpr,addgpr) \
	addi addgpr,addgpr,32; \
	vperm savevr,savevr,savevr,shiftvr; \
	vsel hivr,prev_savevr,savevr,maskvr; \
	stvx hivr,0,savegpr;

	save_2vmx_partial(v21,v20,v5,v0,v3,r6,r5)
	save_2vmx_partial(v22,v21,v5,v0,v3,r5,r6)
	save_2vmx_partial(v23,v22,v5,v0,v3,r6,r5)
	save_2vmx_partial(v24,v23,v5,v0,v3,r5,r6)
	save_2vmx_partial(v25,v24,v5,v0,v3,r6,r5)
	save_2vmx_partial(v26,v25,v5,v0,v3,r5,r6)
	save_2vmx_partial(v27,v26,v5,v0,v3,r6,r5)
	save_2vmx_partial(v28,v27,v5,v0,v3,r5,r6)
	save_2vmx_partial(v29,v28,v5,v0,v3,r6,r5)
	save_2vmx_partial(v30,v29,v5,v0,v3,r5,r6)

	/* Special case for v31: preserve what is in the save area above
	   v31 before obliterating it.  */
	addi r5,r5,32
	vperm v31,v31,v31,v0
	lvx v4,0,r5
	vsel v5,v30,v31,v3
	stvx v5,0,r6
	vsel v4,v31,v4,v3
	stvx v4,0,r5
	b no_vmx

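	/* The save area is 16-byte aligned, so v20-v31 can be stored
	   directly.  r5 and r6 start 16 bytes apart and leapfrog each
	   other in steps of 32 bytes.  */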
aligned_save_vmx:
	stvx 20,0,r5
	addi r5,r5,32
	stvx 21,0,r6
	addi r6,r6,32
	stvx 22,0,r5
	addi r5,r5,32
	stvx 23,0,r6
	addi r6,r6,32
	stvx 24,0,r5
	addi r5,r5,32
	stvx 25,0,r6
	addi r6,r6,32
	stvx 26,0,r5
	addi r5,r5,32
	stvx 27,0,r6
	addi r6,r6,32
	stvx 28,0,r5
	addi r5,r5,32
	stvx 29,0,r6
	addi r6,r6,32
	stvx 30,0,r5
	stvx 31,0,r6
no_vmx:
#endif
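	/* Finally, tail-call __sigjmp_save to deal with the signal mask.  */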
	b JUMPTARGET (BP_SYM (__sigjmp_save))
END (BP_SYM (__sigsetjmp))