/* Copyright (C) 1992-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _LINUX_POWERPC_SYSDEP_H
#define _LINUX_POWERPC_SYSDEP_H 1

/* Shared unix/powerpc sysdep definitions; TLS support is needed for the
   pointer-guard access below.  */
#include <sysdeps/unix/powerpc/sysdep.h>
#include <tls.h>

/* Some system calls got renamed over time, but retained the same semantics.
   Handle them here so they can be caught by both C and assembler stubs in
   glibc.  */

#ifdef __NR_pread64
# ifdef __NR_pread
#  error "__NR_pread and __NR_pread64 both defined???"
# endif
# define __NR_pread __NR_pread64
#endif

#ifdef __NR_pwrite64
# ifdef __NR_pwrite
#  error "__NR_pwrite and __NR_pwrite64 both defined???"
# endif
# define __NR_pwrite __NR_pwrite64
#endif

/* For Linux we can use the system call table in the header file
	/usr/include/asm/unistd.h
   of the kernel.  But these symbols do not follow the SYS_* syntax
   so we have to redefine the `SYS_ify' macro here.  */
#undef SYS_ify
#define SYS_ify(syscall_name)	__NR_##syscall_name

#ifndef __ASSEMBLER__

# include <errno.h>

/* Define a macro which expands inline into the wrapper code for a VDSO
   call.  This use is for internal calls that do not need to handle errors
   normally.  It will never touch errno.
   On powerpc a system call basically clobbers the same registers like a
   function call, with the exception of LR (which is needed for the
   "sc; bnslr+" sequence) and CR (where only CR0.SO is clobbered to signal
   an error return status).  The function pointer is moved to CTR and
   called via bctrl; afterwards CR is copied into r0 so the caller can
   test the CR0.SO error bit.  */
# define INTERNAL_VSYSCALL_CALL_TYPE(funcptr, err, nr, type, args...)	      \
  ({									      \
    register void *r0 __asm__ ("r0");					      \
    register long int r3 __asm__ ("r3");				      \
    register long int r4 __asm__ ("r4");				      \
    register long int r5 __asm__ ("r5");				      \
    register long int r6 __asm__ ("r6");				      \
    register long int r7 __asm__ ("r7");				      \
    register long int r8 __asm__ ("r8");				      \
    register long int r9 __asm__ ("r9");				      \
    register long int r10 __asm__ ("r10");				      \
    register long int r11 __asm__ ("r11");				      \
    register long int r12 __asm__ ("r12");				      \
    /* rval aliases r3, the PowerPC return-value register.  */		      \
    register type rval __asm__ ("r3");					      \
    LOADARGS_##nr (funcptr, args);					      \
    __asm__ __volatile__						      \
      ("mtctr %0\n\t"							      \
       "bctrl\n\t"							      \
       "mfcr %0"							      \
       : "+r" (r0), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7),    \
	 "+r" (r8), "+r" (r9), "+r" (r10), "+r" (r11), "+r" (r12)	      \
       : : "cr0", "ctr", "lr", "memory");				      \
    err = (long int) r0;						      \
    /* Tie rval to r3 (and r4, for values returned in a register pair)	      \
       without emitting any code.  */					      \
    __asm__ __volatile__ ("" : "=r" (rval) : "r" (r3), "r" (r4));	      \
    rval;								      \
  })

/* Convenience wrapper: a VDSO call whose result is a plain long int.  */
#define INTERNAL_VSYSCALL_CALL(funcptr, err, nr, args...)		      \
  INTERNAL_VSYSCALL_CALL_TYPE(funcptr, err, nr, long int, args)

/* Issue a system call and apply the usual libc error convention: on
   failure set errno from the kernel's error value and yield -1,
   otherwise yield the kernel's return value.  */
# undef INLINE_SYSCALL
# define INLINE_SYSCALL(name, nr, args...)				      \
  ({									      \
    INTERNAL_SYSCALL_DECL (sc_err);					      \
    long int sc_ret = INTERNAL_SYSCALL (name, sc_err, nr, args);	      \
    if (INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err))			      \
      {									      \
	__set_errno (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err));		      \
	sc_ret = -1L;							      \
      }									      \
    sc_ret;								      \
  })

/* Define a macro which expands inline into the wrapper code for a system
   call.  This use is for internal calls that do not need to handle errors
   normally.  It will never touch errno.
   On powerpc a system call basically clobbers the same registers like a
   function call, with the exception of LR (which is needed for the
   "sc; bnslr+" sequence) and CR (where only CR0.SO is clobbered to signal
   an error return status).  */

/* Declares the variable that receives the raw CR value (the error
   indicator) from INTERNAL_SYSCALL; may go unused by some callers.  */
# undef INTERNAL_SYSCALL_DECL
# define INTERNAL_SYSCALL_DECL(err) long int err __attribute__ ((unused))

# undef INTERNAL_SYSCALL
/* Issue syscall NAME (given as a raw, Non-Constant-Syscall number) with
   NR arguments.  Stores the post-syscall CR in ERR (CR0.SO flags error)
   and yields the kernel return value from r3.  */
# define INTERNAL_SYSCALL_NCS(name, err, nr, args...)			      \
  ({									      \
    register long int r0 __asm__ ("r0");				      \
    register long int r3 __asm__ ("r3");				      \
    register long int r4 __asm__ ("r4");				      \
    register long int r5 __asm__ ("r5");				      \
    register long int r6 __asm__ ("r6");				      \
    register long int r7 __asm__ ("r7");				      \
    register long int r8 __asm__ ("r8");				      \
    register long int r9 __asm__ ("r9");				      \
    register long int r10 __asm__ ("r10");				      \
    register long int r11 __asm__ ("r11");				      \
    register long int r12 __asm__ ("r12");				      \
    LOADARGS_##nr(name, args);						      \
    /* Hardware transactional memory must not be active across "sc".  */     \
    ABORT_TRANSACTION;							      \
    __asm__ __volatile__						      \
      ("sc \n\t"							      \
       "mfcr %0"							      \
       : "=&r" (r0),							      \
	 "=&r" (r3), "=&r" (r4), "=&r" (r5), "=&r" (r6), "=&r" (r7),	      \
	 "=&r" (r8), "=&r" (r9), "=&r" (r10), "=&r" (r11), "=&r" (r12)	      \
       : ASM_INPUT_##nr						      	      \
       : "cr0", "ctr", "memory");					      \
    err = r0;								      \
    (int) r3;								      \
  })
/* Same, but NAME is a symbolic syscall name mapped via __NR_##name.  */
# define INTERNAL_SYSCALL(name, err, nr, args...)			      \
  INTERNAL_SYSCALL_NCS (__NR_##name, err, nr, ##args)

# undef INTERNAL_SYSCALL_ERROR_P
/* ERR holds the CR copied by mfcr; bit 1 << 28 is CR0.SO, which the
   kernel sets to signal an error return.  VAL is intentionally unused.  */
# define INTERNAL_SYSCALL_ERROR_P(val, err) \
  ((void) (val), __builtin_expect ((err) & (1 << 28), 0))

# undef INTERNAL_SYSCALL_ERRNO
/* On error the kernel returns the (positive) errno value in r3.  */
# define INTERNAL_SYSCALL_ERRNO(val, err)     (val)

/* Call the VDSO entry __vdso_##name if the kernel provided one; there is
   no regular-syscall fallback.  When the VDSO symbol is absent, fake an
   error return (set CR0.SO in ERR) and yield ENOSYS.  The stored pointer
   is mangled, so demangle it before the NULL check and the call.  */
# define INTERNAL_VSYSCALL_NO_SYSCALL_FALLBACK(name, err, type, nr, args...) \
  ({									      \
    type sc_ret = ENOSYS;						      \
									      \
    __typeof (__vdso_##name) vdsop = __vdso_##name;			      \
    PTR_DEMANGLE (vdsop);						      \
    if (vdsop != NULL)							      \
      sc_ret =								      \
	INTERNAL_VSYSCALL_CALL_TYPE (vdsop, err, nr, type, ##args);	      \
    else								      \
      err = 1 << 28;							      \
    sc_ret;								      \
  })

/* List of system calls which are supported as vsyscalls.  */
# define HAVE_CLOCK_GETRES_VSYSCALL	1
# define HAVE_CLOCK_GETTIME_VSYSCALL	1
# define HAVE_GETCPU_VSYSCALL		1


/* LOADARGS_n binds the syscall number (or function pointer) to r0 and up
   to n arguments to r3..r8.  Each level also rejects, at link time, any
   non-pointer argument wider than 4 bytes: __builtin_classify_type () == 5
   is the pointer category, and referencing the undefined function
   __illegally_sized_syscall_argN makes the build fail if the branch
   survives constant folding.  */
# define LOADARGS_0(name, dummy) \
	r0 = name
# define LOADARGS_1(name, __arg1) \
	long int arg1 = (long int) (__arg1); \
	LOADARGS_0(name, 0); \
	extern void __illegally_sized_syscall_arg1 (void); \
	if (__builtin_classify_type (__arg1) != 5 && sizeof (__arg1) > 4) \
	  __illegally_sized_syscall_arg1 (); \
	r3 = arg1
# define LOADARGS_2(name, __arg1, __arg2) \
	long int arg2 = (long int) (__arg2); \
	LOADARGS_1(name, __arg1); \
	extern void __illegally_sized_syscall_arg2 (void); \
	if (__builtin_classify_type (__arg2) != 5 && sizeof (__arg2) > 4) \
	  __illegally_sized_syscall_arg2 (); \
	r4 = arg2
# define LOADARGS_3(name, __arg1, __arg2, __arg3) \
	long int arg3 = (long int) (__arg3); \
	LOADARGS_2(name, __arg1, __arg2); \
	extern void __illegally_sized_syscall_arg3 (void); \
	if (__builtin_classify_type (__arg3) != 5 && sizeof (__arg3) > 4) \
	  __illegally_sized_syscall_arg3 (); \
	r5 = arg3
# define LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4) \
	long int arg4 = (long int) (__arg4); \
	LOADARGS_3(name, __arg1, __arg2, __arg3); \
	extern void __illegally_sized_syscall_arg4 (void); \
	if (__builtin_classify_type (__arg4) != 5 && sizeof (__arg4) > 4) \
	  __illegally_sized_syscall_arg4 (); \
	r6 = arg4
# define LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5) \
	long int arg5 = (long int) (__arg5); \
	LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4); \
	extern void __illegally_sized_syscall_arg5 (void); \
	if (__builtin_classify_type (__arg5) != 5 && sizeof (__arg5) > 4) \
	  __illegally_sized_syscall_arg5 (); \
	r7 = arg5
# define LOADARGS_6(name, __arg1, __arg2, __arg3, __arg4, __arg5, __arg6) \
	long int arg6 = (long int) (__arg6); \
	LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5); \
	extern void __illegally_sized_syscall_arg6 (void); \
	if (__builtin_classify_type (__arg6) != 5 && sizeof (__arg6) > 4) \
	  __illegally_sized_syscall_arg6 (); \
	r8 = arg6

/* ASM_INPUT_n lists the matching input operands ("0".."6" tie each input
   to the corresponding output register) for an n-argument syscall.  */
# define ASM_INPUT_0 "0" (r0)
# define ASM_INPUT_1 ASM_INPUT_0, "1" (r3)
# define ASM_INPUT_2 ASM_INPUT_1, "2" (r4)
# define ASM_INPUT_3 ASM_INPUT_2, "3" (r5)
# define ASM_INPUT_4 ASM_INPUT_3, "4" (r6)
# define ASM_INPUT_5 ASM_INPUT_4, "5" (r7)
# define ASM_INPUT_6 ASM_INPUT_5, "6" (r8)


#endif /* __ASSEMBLER__ */


/* Pointer mangling support.  Stored code pointers are XORed with the
   per-thread pointer guard; mangle and demangle are the same XOR.  */
#if IS_IN (rtld)
/* We cannot use the thread descriptor because in ld.so we use setjmp
   earlier than the descriptor is initialized.  */
#else
# ifdef __ASSEMBLER__
/* Assembler variants: the guard is loaded from POINTER_GUARD(r2), r2
   being the thread pointer on powerpc32.  */
#  define PTR_MANGLE(reg, tmpreg) \
	lwz	tmpreg,POINTER_GUARD(r2); \
	xor	reg,tmpreg,reg
/* Like PTR_MANGLE but assumes the guard is already in tmpreg.  */
#  define PTR_MANGLE2(reg, tmpreg) \
	xor	reg,tmpreg,reg
/* Like PTR_MANGLE but leaves reg intact, writing the result to destreg.  */
#  define PTR_MANGLE3(destreg, reg, tmpreg) \
	lwz	tmpreg,POINTER_GUARD(r2); \
	xor	destreg,tmpreg,reg
#  define PTR_DEMANGLE(reg, tmpreg) PTR_MANGLE (reg, tmpreg)
#  define PTR_DEMANGLE2(reg, tmpreg) PTR_MANGLE2 (reg, tmpreg)
#  define PTR_DEMANGLE3(destreg, reg, tmpreg) PTR_MANGLE3 (destreg, reg, tmpreg)
# else
/* C variant: fetch the guard from the thread descriptor.  */
#  define PTR_MANGLE(var) \
  (var) = (__typeof (var)) ((uintptr_t) (var) ^ THREAD_GET_POINTER_GUARD ())
#  define PTR_DEMANGLE(var)	PTR_MANGLE (var)
# endif
#endif

#endif /* linux/powerpc/powerpc32/sysdep.h */