/* Copyright (C) 1992-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _LINUX_POWERPC_SYSDEP_H
#define _LINUX_POWERPC_SYSDEP_H 1

#include <sysdeps/unix/powerpc/sysdep.h>
#include <tls.h>

/* Some system calls got renamed over time, but retained the same semantics.
   Handle them here so they can be caught by both C and assembler stubs in
   glibc.  */

#ifdef __NR_pread64
# ifdef __NR_pread
#  error "__NR_pread and __NR_pread64 both defined???"
# endif
# define __NR_pread __NR_pread64
#endif

#ifdef __NR_pwrite64
# ifdef __NR_pwrite
#  error "__NR_pwrite and __NR_pwrite64 both defined???"
# endif
# define __NR_pwrite __NR_pwrite64
#endif
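
/* Illustration (not part of the original header): with the remapping
   above, code that refers to the old name symbolically keeps working on
   kernels that only define the 64-bit variant.  For example,
   SYS_ify (pread), using the macro defined below, expands to __NR_pread
   and therefore resolves to the __NR_pread64 system call number.  */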

/* For Linux we can use the system call table from the kernel header
   /usr/include/asm/unistd.h.  But these symbols do not follow the SYS_*
   naming convention, so we have to redefine the `SYS_ify' macro here.  */
#undef SYS_ify
#define SYS_ify(syscall_name)	__NR_##syscall_name
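/* For example (illustration only), SYS_ify (read) expands to __NR_read,
   the number passed to the sc instruction for the read system call.  */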

#ifndef __ASSEMBLER__

# include <errno.h>

# ifdef SHARED
#  define INLINE_VSYSCALL(name, nr, args...) \
  ({ \
    __label__ out; \
    __label__ iserr; \
    INTERNAL_SYSCALL_DECL (sc_err); \
    long int sc_ret; \
 \
    if (__vdso_##name != NULL) \
      { \
        sc_ret = \
          INTERNAL_VSYSCALL_NCS (__vdso_##name, sc_err, long int, nr, ##args); \
        if (!INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err)) \
          goto out; \
        if (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err) != ENOSYS) \
          goto iserr; \
      } \
 \
    sc_ret = INTERNAL_SYSCALL (name, sc_err, nr, ##args); \
    if (INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err)) \
      { \
      iserr: \
        __set_errno (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err)); \
        sc_ret = -1L; \
      } \
  out: \
    sc_ret; \
  })
# else
#  define INLINE_VSYSCALL(name, nr, args...) \
  INLINE_SYSCALL (name, nr, ##args)
# endif
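
/* Illustrative use (not part of the original source), assuming a
   __vdso_gettimeofday pointer has been set up elsewhere:

     struct timeval tv;
     int ret = INLINE_VSYSCALL (gettimeofday, 2, &tv, NULL);

   The vDSO entry is tried first; on ENOSYS, or when no vDSO pointer is
   available, the plain system call is used, and on failure errno is set
   and -1 is returned, just as with INLINE_SYSCALL.  */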

# ifdef SHARED
#  define INTERNAL_VSYSCALL(name, err, nr, args...) \
  ({ \
    __label__ out; \
    long int v_ret; \
 \
    if (__vdso_##name != NULL) \
      { \
        v_ret = \
          INTERNAL_VSYSCALL_NCS (__vdso_##name, err, long int, nr, ##args); \
        if (!INTERNAL_SYSCALL_ERROR_P (v_ret, err) \
            || INTERNAL_SYSCALL_ERRNO (v_ret, err) != ENOSYS) \
          goto out; \
      } \
    v_ret = INTERNAL_SYSCALL (name, err, nr, ##args); \
  out: \
    v_ret; \
  })
# else
#  define INTERNAL_VSYSCALL(name, err, nr, args...) \
  INTERNAL_SYSCALL (name, err, nr, ##args)
# endif
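
/* Illustrative use (not part of the original source): an internal
   clock_gettime wrapper can be written roughly as

     INTERNAL_SYSCALL_DECL (err);
     long int ret = INTERNAL_VSYSCALL (clock_gettime, err, 2, clock_id, tp);

   which tries __vdso_clock_gettime first and falls back to the sc-based
   system call, without touching errno.  */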
108
471a1672 109# define INTERNAL_VSYSCALL_NO_SYSCALL_FALLBACK(name, err, type, nr, args...) \
8c2e201b 110 ({ \
471a1672 111 type sc_ret = ENOSYS; \
8c2e201b
UD
112 \
113 if (__vdso_##name != NULL) \
471a1672 114 sc_ret = INTERNAL_VSYSCALL_NCS (__vdso_##name, err, type, nr, ##args); \
8c2e201b
UD
115 else \
116 err = 1 << 28; \
117 sc_ret; \
118 })
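
/* Illustrative use (not part of the original source): a vDSO-only call
   such as reading the timebase frequency might look like

     INTERNAL_SYSCALL_DECL (err);
     unsigned long long freq =
       INTERNAL_VSYSCALL_NO_SYSCALL_FALLBACK (get_tbfreq, err,
                                              unsigned long long, 0);

   If the __vdso_get_tbfreq pointer is unavailable, err is set to the
   CR0.SO error bit (1 << 28) and the ENOSYS default is returned.  */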

/* List of system calls which are supported as vsyscalls.  */
# define HAVE_CLOCK_GETRES_VSYSCALL	1
# define HAVE_CLOCK_GETTIME_VSYSCALL	1

/* Define a macro which expands inline into the wrapper code for a VDSO
   call.  This is used for internal calls that do not need to handle
   errors normally.  It will never touch errno.
   On powerpc a system call basically clobbers the same registers as a
   function call, with the exception of LR (which is needed for the
   "sc; bnslr+" sequence) and CR (where only CR0.SO is clobbered to signal
   an error return status).  */
# define INTERNAL_VSYSCALL_NCS(funcptr, err, type, nr, args...) \
  ({ \
    register void *r0 __asm__ ("r0"); \
    register long int r3 __asm__ ("r3"); \
    register long int r4 __asm__ ("r4"); \
    register long int r5 __asm__ ("r5"); \
    register long int r6 __asm__ ("r6"); \
    register long int r7 __asm__ ("r7"); \
    register long int r8 __asm__ ("r8"); \
    register long int r9 __asm__ ("r9"); \
    register long int r10 __asm__ ("r10"); \
    register long int r11 __asm__ ("r11"); \
    register long int r12 __asm__ ("r12"); \
    register type rval __asm__ ("r3"); \
    LOADARGS_##nr (funcptr, args); \
    __asm__ __volatile__ \
      ("mtctr %0\n\t" \
       "bctrl\n\t" \
       "mfcr %0" \
       : "+r" (r0), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7), \
         "+r" (r8), "+r" (r9), "+r" (r10), "+r" (r11), "+r" (r12) \
       : : "cr0", "ctr", "lr", "memory"); \
    err = (long int) r0; \
    __asm__ __volatile__ ("" : "=r" (rval) : "r" (r3), "r" (r4)); \
    rval; \
  })
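
/* Illustrative expansion (not part of the original source): this variant
   takes the vDSO function pointer directly, e.g.

     long int ret = INTERNAL_VSYSCALL_NCS (__vdso_clock_gettime, err,
                                           long int, 2, clock_id, tp);

   It loads the function pointer into r0 and the arguments into r3 and r4,
   calls through CTR, and leaves the CR0.SO-based error indication in err
   via the mfcr result.  */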

# undef INLINE_SYSCALL
# define INLINE_SYSCALL(name, nr, args...) \
  ({ \
    INTERNAL_SYSCALL_DECL (sc_err); \
    long int sc_ret = INTERNAL_SYSCALL (name, sc_err, nr, args); \
    if (INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err)) \
      { \
        __set_errno (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err)); \
        sc_ret = -1L; \
      } \
    sc_ret; \
  })
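
/* Illustrative use (not part of the original source):

     int ret = INLINE_SYSCALL (close, 1, fd);

   behaves like the close system call wrapper: on failure it sets errno
   from the kernel error code and evaluates to -1.  */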

/* Define a macro which expands inline into the wrapper code for a system
   call.  This is used for internal calls that do not need to handle
   errors normally.  It will never touch errno.
   On powerpc a system call basically clobbers the same registers as a
   function call, with the exception of LR (which is needed for the
   "sc; bnslr+" sequence) and CR (where only CR0.SO is clobbered to signal
   an error return status).  */

# undef INTERNAL_SYSCALL_DECL
# define INTERNAL_SYSCALL_DECL(err) long int err __attribute__ ((unused))

# undef INTERNAL_SYSCALL
# define INTERNAL_SYSCALL_NCS(name, err, nr, args...) \
  ({ \
    register long int r0 __asm__ ("r0"); \
    register long int r3 __asm__ ("r3"); \
    register long int r4 __asm__ ("r4"); \
    register long int r5 __asm__ ("r5"); \
    register long int r6 __asm__ ("r6"); \
    register long int r7 __asm__ ("r7"); \
    register long int r8 __asm__ ("r8"); \
    register long int r9 __asm__ ("r9"); \
    register long int r10 __asm__ ("r10"); \
    register long int r11 __asm__ ("r11"); \
    register long int r12 __asm__ ("r12"); \
    LOADARGS_##nr(name, args); \
    __asm__ __volatile__ \
      ("sc\n\t" \
       "mfcr %0" \
       : "=&r" (r0), \
         "=&r" (r3), "=&r" (r4), "=&r" (r5), "=&r" (r6), "=&r" (r7), \
         "=&r" (r8), "=&r" (r9), "=&r" (r10), "=&r" (r11), "=&r" (r12) \
       : ASM_INPUT_##nr \
       : "cr0", "ctr", "memory"); \
    err = r0; \
    (int) r3; \
  })
# define INTERNAL_SYSCALL(name, err, nr, args...) \
  INTERNAL_SYSCALL_NCS (__NR_##name, err, nr, ##args)

# undef INTERNAL_SYSCALL_ERROR_P
# define INTERNAL_SYSCALL_ERROR_P(val, err) \
  ((void) (val), __builtin_expect ((err) & (1 << 28), 0))

# undef INTERNAL_SYSCALL_ERRNO
# define INTERNAL_SYSCALL_ERRNO(val, err)	(val)
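
/* Illustrative use of the error protocol (not part of the original
   source):

     INTERNAL_SYSCALL_DECL (err);
     long int ret = INTERNAL_SYSCALL (getpriority, err, 2, which, who);
     if (INTERNAL_SYSCALL_ERROR_P (ret, err))
       return INTERNAL_SYSCALL_ERRNO (ret, err);

   err carries the CR0.SO bit captured by mfcr, so the error test never
   needs to inspect errno.  */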

# define LOADARGS_0(name, dummy) \
  r0 = name
# define LOADARGS_1(name, __arg1) \
  long int arg1 = (long int) (__arg1); \
  LOADARGS_0(name, 0); \
  extern void __illegally_sized_syscall_arg1 (void); \
  if (__builtin_classify_type (__arg1) != 5 && sizeof (__arg1) > 4) \
    __illegally_sized_syscall_arg1 (); \
  r3 = arg1
# define LOADARGS_2(name, __arg1, __arg2) \
  long int arg2 = (long int) (__arg2); \
  LOADARGS_1(name, __arg1); \
  extern void __illegally_sized_syscall_arg2 (void); \
  if (__builtin_classify_type (__arg2) != 5 && sizeof (__arg2) > 4) \
    __illegally_sized_syscall_arg2 (); \
  r4 = arg2
# define LOADARGS_3(name, __arg1, __arg2, __arg3) \
  long int arg3 = (long int) (__arg3); \
  LOADARGS_2(name, __arg1, __arg2); \
  extern void __illegally_sized_syscall_arg3 (void); \
  if (__builtin_classify_type (__arg3) != 5 && sizeof (__arg3) > 4) \
    __illegally_sized_syscall_arg3 (); \
  r5 = arg3
# define LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4) \
  long int arg4 = (long int) (__arg4); \
  LOADARGS_3(name, __arg1, __arg2, __arg3); \
  extern void __illegally_sized_syscall_arg4 (void); \
  if (__builtin_classify_type (__arg4) != 5 && sizeof (__arg4) > 4) \
    __illegally_sized_syscall_arg4 (); \
  r6 = arg4
# define LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5) \
  long int arg5 = (long int) (__arg5); \
  LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4); \
  extern void __illegally_sized_syscall_arg5 (void); \
  if (__builtin_classify_type (__arg5) != 5 && sizeof (__arg5) > 4) \
    __illegally_sized_syscall_arg5 (); \
  r7 = arg5
# define LOADARGS_6(name, __arg1, __arg2, __arg3, __arg4, __arg5, __arg6) \
  long int arg6 = (long int) (__arg6); \
  LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5); \
  extern void __illegally_sized_syscall_arg6 (void); \
  if (__builtin_classify_type (__arg6) != 5 && sizeof (__arg6) > 4) \
    __illegally_sized_syscall_arg6 (); \
  r8 = arg6

# define ASM_INPUT_0 "0" (r0)
# define ASM_INPUT_1 ASM_INPUT_0, "1" (r3)
# define ASM_INPUT_2 ASM_INPUT_1, "2" (r4)
# define ASM_INPUT_3 ASM_INPUT_2, "3" (r5)
# define ASM_INPUT_4 ASM_INPUT_3, "4" (r6)
# define ASM_INPUT_5 ASM_INPUT_4, "5" (r7)
# define ASM_INPUT_6 ASM_INPUT_5, "6" (r8)
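
/* Illustrative expansion (not part of the original source): for a
   two-argument call, LOADARGS_2(__NR_foo, fd, buf) chains down to
   LOADARGS_0 and ends up performing

     r0 = __NR_foo;  r3 = (long int) fd;  r4 = (long int) buf;

   while ASM_INPUT_2 ties r0, r3 and r4 to the corresponding asm
   operands.  __NR_foo is a made-up name here; the size checks reject
   any non-pointer argument wider than 32 bits.  */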

#endif /* __ASSEMBLER__ */


/* Pointer mangling support.  */
#if IS_IN (rtld)
/* We cannot use the thread descriptor because in ld.so we use setjmp
   before the descriptor is initialized.  */
#else
# ifdef __ASSEMBLER__
#  define PTR_MANGLE(reg, tmpreg) \
	lwz	tmpreg,POINTER_GUARD(r2); \
	xor	reg,tmpreg,reg
#  define PTR_MANGLE2(reg, tmpreg) \
	xor	reg,tmpreg,reg
#  define PTR_MANGLE3(destreg, reg, tmpreg) \
	lwz	tmpreg,POINTER_GUARD(r2); \
	xor	destreg,tmpreg,reg
#  define PTR_DEMANGLE(reg, tmpreg) PTR_MANGLE (reg, tmpreg)
#  define PTR_DEMANGLE2(reg, tmpreg) PTR_MANGLE2 (reg, tmpreg)
#  define PTR_DEMANGLE3(destreg, reg, tmpreg) PTR_MANGLE3 (destreg, reg, tmpreg)
# else
#  define PTR_MANGLE(var) \
  (var) = (__typeof (var)) ((uintptr_t) (var) ^ THREAD_GET_POINTER_GUARD ())
#  define PTR_DEMANGLE(var)	PTR_MANGLE (var)
# endif
#endif
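
/* Illustrative use of the C-level mangling (not part of the original
   source): setjmp-like code protects a saved code pointer by XORing it
   with the per-thread pointer guard,

     PTR_MANGLE (addr);      XORs addr with the guard before it is stored
     PTR_DEMANGLE (addr);    XORing again restores the original value

   where addr stands for any saved pointer; since demangling is the same
   XOR, PTR_DEMANGLE is simply defined as PTR_MANGLE.  */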

#endif /* linux/powerpc/powerpc32/sysdep.h */