/* Copyright (C) 1992,1997,1998,1999,2000,2001,2002,2003,2004,2005,2006,2012
   Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

/* Alan Modra <amodra@bigpond.net.au> rewrote the INLINE_SYSCALL macro */

21 | #ifndef _LINUX_POWERPC_SYSDEP_H | |
22 | #define _LINUX_POWERPC_SYSDEP_H 1 | |
23 | ||
24 | #include <sysdeps/unix/powerpc/sysdep.h> | |
00c2b3b9 | 25 | #include <tls.h> |
cfc91acd RM |
26 | |
27 | /* Define __set_errno() for INLINE_SYSCALL macro below. */ | |
28 | #ifndef __ASSEMBLER__ | |
29 | #include <errno.h> | |
30 | #endif | |
31 | ||
/* Some system calls got renamed over time, but retained the same semantics.
   Handle them here so they can be caught by both C and assembler stubs in
   glibc.  */

#ifdef __NR_pread64
# ifdef __NR_pread
#  error "__NR_pread and __NR_pread64 both defined???"
# endif
# define __NR_pread __NR_pread64
#endif

#ifdef __NR_pwrite64
# ifdef __NR_pwrite
#  error "__NR_pwrite and __NR_pwrite64 both defined???"
# endif
# define __NR_pwrite __NR_pwrite64
#endif
49 | ||
/* For Linux we can use the system call table in the header file
	/usr/include/asm/unistd.h
   of the kernel.  But these symbols do not follow the SYS_* syntax
   so we have to redefine the `SYS_ify' macro here.  */
#undef SYS_ify
#define SYS_ify(syscall_name)	__NR_##syscall_name

#ifdef __ASSEMBLER__

/* This seems to always be the case on PPC.  */
# define ALIGNARG(log2) log2
/* For ELF we need the `.type' directive to make shared libs work right.  */
# define ASM_TYPE_DIRECTIVE(name,typearg) .type name,typearg;
# define ASM_SIZE_DIRECTIVE(name) .size name,.-name

#endif /* __ASSEMBLER__ */
66 | ||
/* This version is for kernels that implement system calls that
   behave like function calls as far as register saving.
   It falls back to the syscall in the case that the vDSO doesn't
   exist or fails for ENOSYS.  */
#ifdef SHARED
/* Try the vDSO entry point first; on any error other than ENOSYS set
   errno and yield -1.  On ENOSYS (vDSO symbol present but call not
   implemented) retry via the real system call.  */
# define INLINE_VSYSCALL(name, nr, args...) \
  ({ \
    __label__ out; \
    __label__ iserr; \
    INTERNAL_SYSCALL_DECL (sc_err); \
    long int sc_ret; \
 \
    if (__vdso_##name != NULL) \
      { \
	sc_ret = INTERNAL_VSYSCALL_NCS (__vdso_##name, sc_err, nr, ##args); \
	if (!INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err)) \
	  goto out; \
	if (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err) != ENOSYS) \
	  goto iserr; \
      } \
 \
    sc_ret = INTERNAL_SYSCALL (name, sc_err, nr, ##args); \
    if (INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err)) \
      { \
      iserr: \
	__set_errno (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err)); \
	sc_ret = -1L; \
      } \
  out: \
    sc_ret; \
  })
#else
/* Without SHARED there is no vDSO pointer; go straight to the syscall.  */
# define INLINE_VSYSCALL(name, nr, args...) \
  INLINE_SYSCALL (name, nr, ##args)
#endif
102 | ||
#ifdef SHARED
/* Like INLINE_VSYSCALL but never touches errno: the raw error state is
   reported through ERR.  Falls back to the real syscall only when the
   vDSO is absent or returns ENOSYS.  */
# define INTERNAL_VSYSCALL(name, err, nr, args...) \
  ({ \
    __label__ out; \
    long int v_ret; \
 \
    if (__vdso_##name != NULL) \
      { \
	v_ret = INTERNAL_VSYSCALL_NCS (__vdso_##name, err, nr, ##args); \
	if (!INTERNAL_SYSCALL_ERROR_P (v_ret, err) \
	    || INTERNAL_SYSCALL_ERRNO (v_ret, err) != ENOSYS) \
	  goto out; \
      } \
    v_ret = INTERNAL_SYSCALL (name, err, nr, ##args); \
  out: \
    v_ret; \
  })
#else
/* Without SHARED there is no vDSO pointer; go straight to the syscall.  */
# define INTERNAL_VSYSCALL(name, err, nr, args...) \
  INTERNAL_SYSCALL (name, err, nr, ##args)
#endif
124 | ||
/* This version is for internal uses when there is no desire
   to set errno.  If the vDSO entry point is missing, report failure by
   setting the error bit (1 << 28 is CR0.SO in the mfcr image used by
   INTERNAL_SYSCALL_ERROR_P) and yield ENOSYS; there is no syscall
   fallback.  */
#define INTERNAL_VSYSCALL_NO_SYSCALL_FALLBACK(name, err, nr, args...) \
  ({ \
    long int sc_ret = ENOSYS; \
 \
    if (__vdso_##name != NULL) \
      sc_ret = INTERNAL_VSYSCALL_NCS (__vdso_##name, err, nr, ##args); \
    else \
      err = 1 << 28; \
    sc_ret; \
  })

/* List of system calls which are supported as vsyscalls.  */
#define HAVE_CLOCK_GETRES_VSYSCALL 1
#define HAVE_CLOCK_GETTIME_VSYSCALL 1
141 | ||
/* Define a macro which expands inline into the wrapper code for a system
   call.  This use is for internal calls that do not need to handle errors
   normally.  It will never touch errno.  This returns just what the kernel
   gave back in the non-error (CR0.SO cleared) case, otherwise (CR0.SO set)
   the negation of the return value in the kernel gets reverted.

   FUNCPTR is a vDSO entry point: it is moved to CTR and called via
   bctrl (hence "lr" is clobbered), then the CR image saved by mfcr is
   handed back through ERR so the caller can test CR0.SO.  */

#define INTERNAL_VSYSCALL_NCS(funcptr, err, nr, args...) \
  ({ \
    register void *r0 __asm__ ("r0"); \
    register long int r3 __asm__ ("r3"); \
    register long int r4 __asm__ ("r4"); \
    register long int r5 __asm__ ("r5"); \
    register long int r6 __asm__ ("r6"); \
    register long int r7 __asm__ ("r7"); \
    register long int r8 __asm__ ("r8"); \
    LOADARGS_##nr (funcptr, args); \
    __asm__ __volatile__ \
      ("mtctr %0\n\t" \
       "bctrl\n\t" \
       "mfcr %0\n\t" \
       "0:" \
       : "=&r" (r0), \
	 "=&r" (r3), "=&r" (r4), "=&r" (r5), \
	 "=&r" (r6), "=&r" (r7), "=&r" (r8) \
       : ASM_INPUT_##nr \
       : "r9", "r10", "r11", "r12", \
	 "cr0", "ctr", "lr", "memory"); \
    err = (long int) r0; \
    r3; \
  })
cfc91acd RM |
172 | |
#undef INLINE_SYSCALL

/* This version is for kernels that implement system calls that
   behave like function calls as far as register saving.
   On error it stores the kernel error value into errno and yields -1;
   otherwise it yields the kernel return value.  */
#define INLINE_SYSCALL(name, nr, args...) \
  ({ \
    INTERNAL_SYSCALL_DECL (sc_err); \
    long int sc_ret = INTERNAL_SYSCALL (name, sc_err, nr, args); \
    if (INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err)) \
      { \
	__set_errno (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err)); \
	sc_ret = -1L; \
      } \
    sc_ret; \
  })
188 | ||
/* Define a macro which expands inline into the wrapper code for a system
   call.  This use is for internal calls that do not need to handle errors
   normally.  It will never touch errno.  This returns just what the kernel
   gave back in the non-error (CR0.SO cleared) case, otherwise (CR0.SO set)
   the negation of the return value in the kernel gets reverted.

   NCS ("non-constant syscall") takes the syscall number as a value in
   NAME; the kernel is entered with `sc' and the CR image saved by mfcr
   is handed back through ERR so CR0.SO can be tested.  */

#undef INTERNAL_SYSCALL
#define INTERNAL_SYSCALL_NCS(name, err, nr, args...) \
  ({ \
    register long int r0 __asm__ ("r0"); \
    register long int r3 __asm__ ("r3"); \
    register long int r4 __asm__ ("r4"); \
    register long int r5 __asm__ ("r5"); \
    register long int r6 __asm__ ("r6"); \
    register long int r7 __asm__ ("r7"); \
    register long int r8 __asm__ ("r8"); \
    LOADARGS_##nr (name, ##args); \
    __asm__ __volatile__ \
      ("sc\n\t" \
       "mfcr %0\n\t" \
       "0:" \
       : "=&r" (r0), \
	 "=&r" (r3), "=&r" (r4), "=&r" (r5), \
	 "=&r" (r6), "=&r" (r7), "=&r" (r8) \
       : ASM_INPUT_##nr \
       : "r9", "r10", "r11", "r12", \
	 "cr0", "ctr", "memory"); \
    err = r0; \
    r3; \
  })
#define INTERNAL_SYSCALL(name, err, nr, args...) \
  INTERNAL_SYSCALL_NCS (__NR_##name, err, nr, args)

#undef INTERNAL_SYSCALL_DECL
#define INTERNAL_SYSCALL_DECL(err) long int err

/* Bit 28 of the mfcr image is CR0.SO, the kernel's error flag.  */
#undef INTERNAL_SYSCALL_ERROR_P
#define INTERNAL_SYSCALL_ERROR_P(val, err) \
  ((void) (val), __builtin_expect ((err) & (1 << 28), 0))

/* On error the kernel already returns the positive errno value in r3.  */
#undef INTERNAL_SYSCALL_ERRNO
#define INTERNAL_SYSCALL_ERRNO(val, err)     (val)
aff4519d UD |
231 | |
/* LOADARGS_n moves the syscall number/function pointer into r0 and up to
   six arguments into r3..r8.  Each argument is first widened to long int;
   a non-pointer argument wider than 8 bytes provokes a link-time error
   via the undefined __illegally_sized_syscall_argN reference
   (__builtin_classify_type == 5 means "pointer").  */
#define LOADARGS_0(name, dummy) \
	r0 = name
#define LOADARGS_1(name, __arg1) \
	long int arg1 = (long int) (__arg1); \
	LOADARGS_0(name, 0); \
	extern void __illegally_sized_syscall_arg1 (void); \
	if (__builtin_classify_type (__arg1) != 5 && sizeof (__arg1) > 8) \
	  __illegally_sized_syscall_arg1 (); \
	r3 = arg1
#define LOADARGS_2(name, __arg1, __arg2) \
	long int arg2 = (long int) (__arg2); \
	LOADARGS_1(name, __arg1); \
	extern void __illegally_sized_syscall_arg2 (void); \
	if (__builtin_classify_type (__arg2) != 5 && sizeof (__arg2) > 8) \
	  __illegally_sized_syscall_arg2 (); \
	r4 = arg2
#define LOADARGS_3(name, __arg1, __arg2, __arg3) \
	long int arg3 = (long int) (__arg3); \
	LOADARGS_2(name, __arg1, __arg2); \
	extern void __illegally_sized_syscall_arg3 (void); \
	if (__builtin_classify_type (__arg3) != 5 && sizeof (__arg3) > 8) \
	  __illegally_sized_syscall_arg3 (); \
	r5 = arg3
#define LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4) \
	long int arg4 = (long int) (__arg4); \
	LOADARGS_3(name, __arg1, __arg2, __arg3); \
	extern void __illegally_sized_syscall_arg4 (void); \
	if (__builtin_classify_type (__arg4) != 5 && sizeof (__arg4) > 8) \
	  __illegally_sized_syscall_arg4 (); \
	r6 = arg4
#define LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5) \
	long int arg5 = (long int) (__arg5); \
	LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4); \
	extern void __illegally_sized_syscall_arg5 (void); \
	if (__builtin_classify_type (__arg5) != 5 && sizeof (__arg5) > 8) \
	  __illegally_sized_syscall_arg5 (); \
	r7 = arg5
#define LOADARGS_6(name, __arg1, __arg2, __arg3, __arg4, __arg5, __arg6) \
	long int arg6 = (long int) (__arg6); \
	LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5); \
	extern void __illegally_sized_syscall_arg6 (void); \
	if (__builtin_classify_type (__arg6) != 5 && sizeof (__arg6) > 8) \
	  __illegally_sized_syscall_arg6 (); \
	r8 = arg6

/* Tie the asm inputs to the same registers the outputs use
   ("0".."6" are positional constraints on r0, r3..r8).  */
#define ASM_INPUT_0 "0" (r0)
#define ASM_INPUT_1 ASM_INPUT_0, "1" (r3)
#define ASM_INPUT_2 ASM_INPUT_1, "2" (r4)
#define ASM_INPUT_3 ASM_INPUT_2, "3" (r5)
#define ASM_INPUT_4 ASM_INPUT_3, "4" (r6)
#define ASM_INPUT_5 ASM_INPUT_4, "5" (r7)
#define ASM_INPUT_6 ASM_INPUT_5, "6" (r8)
284 | ||
/* Pointer mangling support.  Pointers are XORed with the per-thread
   pointer guard; applying the same XOR again restores the original, so
   PTR_DEMANGLE is just PTR_MANGLE.  */
#if defined NOT_IN_libc && defined IS_IN_rtld
/* We cannot use the thread descriptor because in ld.so we use setjmp
   earlier than the descriptor is initialized.  */
#else
# ifdef __ASSEMBLER__
/* Assembler variants load the guard from the TCB (r13 is the thread
   pointer on powerpc64); the *2 forms assume tmpreg already holds the
   guard, the *3 forms write to a separate destination register.  */
#  define PTR_MANGLE(reg, tmpreg) \
	ld	tmpreg,POINTER_GUARD(r13); \
	xor	reg,tmpreg,reg
#  define PTR_MANGLE2(reg, tmpreg) \
	xor	reg,tmpreg,reg
#  define PTR_MANGLE3(destreg, reg, tmpreg) \
	ld	tmpreg,POINTER_GUARD(r13); \
	xor	destreg,tmpreg,reg
#  define PTR_DEMANGLE(reg, tmpreg) PTR_MANGLE (reg, tmpreg)
#  define PTR_DEMANGLE2(reg, tmpreg) PTR_MANGLE2 (reg, tmpreg)
#  define PTR_DEMANGLE3(destreg, reg, tmpreg) PTR_MANGLE3 (destreg, reg, tmpreg)
# else
#  define PTR_MANGLE(var) \
  (var) = (__typeof (var)) ((uintptr_t) (var) ^ THREAD_GET_POINTER_GUARD ())
#  define PTR_DEMANGLE(var)	PTR_MANGLE (var)
# endif
#endif
309 | ||
cfc91acd | 310 | #endif /* linux/powerpc/powerpc64/sysdep.h */ |