]> git.ipfire.org Git - thirdparty/glibc.git/blame - sysdeps/unix/sysv/linux/powerpc/powerpc64/sysdep.h
Update copyright notices with scripts/update-copyrights
[thirdparty/glibc.git] / sysdeps / unix / sysv / linux / powerpc / powerpc64 / sysdep.h
CommitLineData
/* Copyright (C) 1992-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
1e2f8718 17
/* Alan Modra <amodra@bigpond.net.au> rewrote the INLINE_SYSCALL macro.  */
19
20#ifndef _LINUX_POWERPC_SYSDEP_H
21#define _LINUX_POWERPC_SYSDEP_H 1
22
23#include <sysdeps/unix/powerpc/sysdep.h>
00c2b3b9 24#include <tls.h>
cfc91acd
RM
25
26/* Define __set_errno() for INLINE_SYSCALL macro below. */
27#ifndef __ASSEMBLER__
28#include <errno.h>
29#endif
30
/* Some system calls got renamed over time, but retained the same semantics.
   Handle them here so they can be caught by both C and assembler stubs in
   glibc.  */
34
/* The kernel only provides the 64-bit-offset numbers on this port; alias
   the plain names to them so callers can refer to __NR_pread/__NR_pwrite
   uniformly.  The #error guards against a kernel that ever defines both.  */
#ifdef __NR_pread64
# ifdef __NR_pread
#  error "__NR_pread and __NR_pread64 both defined???"
# endif
# define __NR_pread __NR_pread64
#endif

#ifdef __NR_pwrite64
# ifdef __NR_pwrite
#  error "__NR_pwrite and __NR_pwrite64 both defined???"
# endif
# define __NR_pwrite __NR_pwrite64
#endif
48
cfc91acd
RM
/* For Linux we can use the system call table in the header file
   /usr/include/asm/unistd.h
   of the kernel.  But these symbols do not follow the SYS_* syntax
   so we have to redefine the `SYS_ify' macro here.  */
#undef SYS_ify
#define SYS_ify(syscall_name)	__NR_##syscall_name
cfc91acd
RM
55
#ifdef __ASSEMBLER__

/* This seems to always be the case on PPC.  */
/* Alignment argument is already a log2 value; pass it straight through.  */
# define ALIGNARG(log2) log2
/* Record the symbol's extent in the ELF symbol table.  */
# define ASM_SIZE_DIRECTIVE(name) .size name,.-name

#endif /* __ASSEMBLER__ */
63
/* This version is for kernels that implement system calls that
   behave like function calls as far as register saving.
   It falls back to the syscall in the case that the vDSO doesn't
   exist or fails for ENOSYS.  */
#ifdef SHARED
/* Try the vDSO entry __vdso_NAME first (resolved elsewhere at startup;
   may be NULL).  On success return its result; if it failed with an
   error other than ENOSYS, treat that as the final error.  Only on
   ENOSYS (or no vDSO symbol at all) fall back to the real system call.
   Errors set errno and yield -1, mirroring INLINE_SYSCALL.  */
# define INLINE_VSYSCALL(name, nr, args...)				      \
  ({									      \
    __label__ out;							      \
    __label__ iserr;							      \
    INTERNAL_SYSCALL_DECL (sc_err);					      \
    long int sc_ret;							      \
									      \
    if (__vdso_##name != NULL)						      \
      {									      \
	sc_ret =							      \
	  INTERNAL_VSYSCALL_NCS (__vdso_##name, sc_err, long int, nr, ##args);\
	if (!INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err))			      \
	  goto out;							      \
	if (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err) != ENOSYS)		      \
	  goto iserr;							      \
      }									      \
									      \
    sc_ret = INTERNAL_SYSCALL (name, sc_err, nr, ##args);		      \
    if (INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err))			      \
      {									      \
      iserr:								      \
	__set_errno (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err));		      \
	sc_ret = -1L;							      \
      }									      \
  out:									      \
    sc_ret;								      \
  })
#else
/* Static builds have no vDSO; go straight to the system call.  */
# define INLINE_VSYSCALL(name, nr, args...)				      \
  INLINE_SYSCALL (name, nr, ##args)
#endif
100
#ifdef SHARED
/* Like INLINE_VSYSCALL but in INTERNAL_SYSCALL style: never touches
   errno; the caller inspects ERR instead.  The syscall fallback is only
   taken when there is no vDSO entry or the vDSO call failed with ENOSYS.  */
# define INTERNAL_VSYSCALL(name, err, nr, args...)			      \
  ({									      \
    __label__ out;							      \
    long int v_ret;							      \
									      \
    if (__vdso_##name != NULL)						      \
      {									      \
	v_ret =								      \
	  INTERNAL_VSYSCALL_NCS (__vdso_##name, err, long int, nr, ##args);   \
	if (!INTERNAL_SYSCALL_ERROR_P (v_ret, err)			      \
	    || INTERNAL_SYSCALL_ERRNO (v_ret, err) != ENOSYS)		      \
	  goto out;							      \
      }									      \
    v_ret = INTERNAL_SYSCALL (name, err, nr, ##args);			      \
  out:									      \
    v_ret;								      \
  })
#else
/* Static builds have no vDSO; go straight to the system call.  */
# define INTERNAL_VSYSCALL(name, err, nr, args...)			      \
  INTERNAL_SYSCALL (name, err, nr, ##args)
#endif
123
/* This version is for internal uses when there is no desire
   to set errno.  */
/* vDSO-only call: no syscall fallback.  When the vDSO symbol is missing,
   ERR is set to 1 << 28 — the CR0.SO bit tested by
   INTERNAL_SYSCALL_ERROR_P — and the returned value is ENOSYS.  */
#define INTERNAL_VSYSCALL_NO_SYSCALL_FALLBACK(name, err, type, nr, args...)   \
  ({									      \
    type sc_ret = ENOSYS;						      \
									      \
    if (__vdso_##name != NULL)						      \
      sc_ret = INTERNAL_VSYSCALL_NCS (__vdso_##name, err, type, nr, ##args);  \
    else								      \
      err = 1 << 28;							      \
    sc_ret;								      \
  })
136
/* List of system calls which are supported as vsyscalls.  */
#define HAVE_CLOCK_GETRES_VSYSCALL	1
#define HAVE_CLOCK_GETTIME_VSYSCALL	1
140
/* Define a macro which expands inline into the wrapper code for a system
   call.  This use is for internal calls that do not need to handle errors
   normally.  It will never touch errno.  This returns just what the kernel
   gave back in the non-error (CR0.SO cleared) case, otherwise (CR0.SO set)
   the negation of the return value in the kernel gets reverted.  */

/* Indirect-call the vDSO function FUNCPTR with NR arguments.  LOADARGS_n
   puts FUNCPTR in r0 and the arguments in r3..r8; mtctr/bctrl make the
   call; mfcr then copies the condition register into r0 so ERR can be
   tested for the CR0.SO bit (1 << 28) by INTERNAL_SYSCALL_ERROR_P.  RVAL
   is pinned to r3 so the result is re-read with the requested TYPE.  */
#define INTERNAL_VSYSCALL_NCS(funcptr, err, type, nr, args...)		      \
  ({									      \
    register void *r0 __asm__ ("r0");					      \
    register long int r3 __asm__ ("r3");				      \
    register long int r4 __asm__ ("r4");				      \
    register long int r5 __asm__ ("r5");				      \
    register long int r6 __asm__ ("r6");				      \
    register long int r7 __asm__ ("r7");				      \
    register long int r8 __asm__ ("r8");				      \
    register type rval __asm__ ("r3");					      \
    LOADARGS_##nr (funcptr, args);					      \
    __asm__ __volatile__						      \
      ("mtctr %0\n\t"							      \
       "bctrl\n\t"							      \
       "mfcr %0\n\t"							      \
       "0:"								      \
       : "+r" (r0), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6),		      \
	 "+r" (r7), "+r" (r8)						      \
       : : "r9", "r10", "r11", "r12", "cr0", "ctr", "lr", "memory");	      \
    err = (long int) r0;						      \
    /* Dummy asm forces RVAL to be read from r3 after the call.  */	      \
    __asm__ __volatile__ ("" : "=r" (rval) : "r" (r3));			      \
    rval;								      \
  })
cfc91acd
RM
170
#undef INLINE_SYSCALL

/* This version is for kernels that implement system calls that
   behave like function calls as far as register saving.  */
/* Perform the system call NAME with NR arguments; on failure (CR0.SO set)
   store the kernel's error code into errno and yield -1L, otherwise yield
   the kernel's return value.  */
#define INLINE_SYSCALL(name, nr, args...)				      \
  ({									      \
    INTERNAL_SYSCALL_DECL (sc_err);					      \
    long int sc_ret = INTERNAL_SYSCALL (name, sc_err, nr, args);	      \
    if (INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err))			      \
      {									      \
	__set_errno (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err));		      \
	sc_ret = -1L;							      \
      }									      \
    sc_ret;								      \
  })
186
aff4519d
UD
/* Define a macro which expands inline into the wrapper code for a system
   call.  This use is for internal calls that do not need to handle errors
   normally.  It will never touch errno.  This returns just what the kernel
   gave back in the non-error (CR0.SO cleared) case, otherwise (CR0.SO set)
   the negation of the return value in the kernel gets reverted.  */

#undef INTERNAL_SYSCALL
/* NCS variant: NAME is the syscall number as an expression (not a
   symbolic __NR_* name).  LOADARGS_n puts the number in r0 and up to six
   arguments in r3..r8; "sc" enters the kernel, and mfcr copies the
   condition register into r0 so ERR can be tested for CR0.SO (1 << 28)
   by INTERNAL_SYSCALL_ERROR_P.  The kernel's result is left in r3.  */
#define INTERNAL_SYSCALL_NCS(name, err, nr, args...)			      \
  ({									      \
    register long int r0 __asm__ ("r0");				      \
    register long int r3 __asm__ ("r3");				      \
    register long int r4 __asm__ ("r4");				      \
    register long int r5 __asm__ ("r5");				      \
    register long int r6 __asm__ ("r6");				      \
    register long int r7 __asm__ ("r7");				      \
    register long int r8 __asm__ ("r8");				      \
    LOADARGS_##nr (name, ##args);					      \
    __asm__ __volatile__						      \
      ("sc\n\t"								      \
       "mfcr %0\n\t"							      \
       "0:"								      \
       : "=&r" (r0),							      \
	 "=&r" (r3), "=&r" (r4), "=&r" (r5),				      \
	 "=&r" (r6), "=&r" (r7), "=&r" (r8)				      \
       : ASM_INPUT_##nr							      \
       : "r9", "r10", "r11", "r12",					      \
	 "cr0", "ctr", "memory");					      \
    err = r0;								      \
    r3;									      \
  })
/* Usual entry point: map the symbolic NAME onto its __NR_* number.  */
#define INTERNAL_SYSCALL(name, err, nr, args...)			      \
  INTERNAL_SYSCALL_NCS (__NR_##name, err, nr, args)
574b892e 219
#undef INTERNAL_SYSCALL_DECL
/* Declare the error variable filled in by INTERNAL_SYSCALL; marked unused
   so callers that never inspect it compile without warnings.  */
#define INTERNAL_SYSCALL_DECL(err) long int err __attribute__ ((unused))

#undef INTERNAL_SYSCALL_ERROR_P
/* ERR holds a copy of the condition register; bit 1 << 28 is CR0.SO,
   which the kernel sets to flag an error.  VAL is deliberately ignored.  */
#define INTERNAL_SYSCALL_ERROR_P(val, err) \
  ((void) (val), __builtin_expect ((err) & (1 << 28), 0))

#undef INTERNAL_SYSCALL_ERRNO
/* On error the kernel's return value in VAL is already the error code.  */
#define INTERNAL_SYSCALL_ERRNO(val, err)     (val)
aff4519d
UD
229
/* LOADARGS_n load the syscall number (or vDSO function pointer) into r0
   and the n arguments into r3..r8, each converted to a 64-bit long int.
   A call to a deliberately undefined function produces a link-time error
   whenever a non-pointer argument wider than 8 bytes is passed (type
   class 5 is GCC's pointer class — presumably pointers are exempt since
   they always fit a register; confirm against __builtin_classify_type).  */
#define LOADARGS_0(name, dummy) \
	r0 = name
#define LOADARGS_1(name, __arg1) \
	long int arg1 = (long int) (__arg1); \
	LOADARGS_0(name, 0); \
	extern void __illegally_sized_syscall_arg1 (void); \
	if (__builtin_classify_type (__arg1) != 5 && sizeof (__arg1) > 8) \
	  __illegally_sized_syscall_arg1 (); \
	r3 = arg1
#define LOADARGS_2(name, __arg1, __arg2) \
	long int arg2 = (long int) (__arg2); \
	LOADARGS_1(name, __arg1); \
	extern void __illegally_sized_syscall_arg2 (void); \
	if (__builtin_classify_type (__arg2) != 5 && sizeof (__arg2) > 8) \
	  __illegally_sized_syscall_arg2 (); \
	r4 = arg2
#define LOADARGS_3(name, __arg1, __arg2, __arg3) \
	long int arg3 = (long int) (__arg3); \
	LOADARGS_2(name, __arg1, __arg2); \
	extern void __illegally_sized_syscall_arg3 (void); \
	if (__builtin_classify_type (__arg3) != 5 && sizeof (__arg3) > 8) \
	  __illegally_sized_syscall_arg3 (); \
	r5 = arg3
#define LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4) \
	long int arg4 = (long int) (__arg4); \
	LOADARGS_3(name, __arg1, __arg2, __arg3); \
	extern void __illegally_sized_syscall_arg4 (void); \
	if (__builtin_classify_type (__arg4) != 5 && sizeof (__arg4) > 8) \
	  __illegally_sized_syscall_arg4 (); \
	r6 = arg4
#define LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5) \
	long int arg5 = (long int) (__arg5); \
	LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4); \
	extern void __illegally_sized_syscall_arg5 (void); \
	if (__builtin_classify_type (__arg5) != 5 && sizeof (__arg5) > 8) \
	  __illegally_sized_syscall_arg5 (); \
	r7 = arg5
#define LOADARGS_6(name, __arg1, __arg2, __arg3, __arg4, __arg5, __arg6) \
	long int arg6 = (long int) (__arg6); \
	LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5); \
	extern void __illegally_sized_syscall_arg6 (void); \
	if (__builtin_classify_type (__arg6) != 5 && sizeof (__arg6) > 8) \
	  __illegally_sized_syscall_arg6 (); \
	r8 = arg6

/* Tie each asm input operand to the same register as the corresponding
   output operand ("0" = r0, "1" = r3, ...), so the values set up by
   LOADARGS_n are live going into the "sc"/"bctrl".  */
#define ASM_INPUT_0 "0" (r0)
#define ASM_INPUT_1 ASM_INPUT_0, "1" (r3)
#define ASM_INPUT_2 ASM_INPUT_1, "2" (r4)
#define ASM_INPUT_3 ASM_INPUT_2, "3" (r5)
#define ASM_INPUT_4 ASM_INPUT_3, "4" (r6)
#define ASM_INPUT_5 ASM_INPUT_4, "5" (r7)
#define ASM_INPUT_6 ASM_INPUT_5, "6" (r8)
282
00c2b3b9
UD
283
/* Pointer mangling support.  */
#if defined NOT_IN_libc && defined IS_IN_rtld
/* We cannot use the thread descriptor because in ld.so we use setjmp
   earlier than the descriptor is initialized.  */
#else
# ifdef __ASSEMBLER__
/* XOR REG with the per-thread pointer guard loaded from the TCB via r13
   (the thread pointer on powerpc64), clobbering TMPREG.  */
#  define PTR_MANGLE(reg, tmpreg) \
	ld	tmpreg,POINTER_GUARD(r13); \
	xor	reg,tmpreg,reg
/* Variant for when TMPREG already holds the guard value.  */
#  define PTR_MANGLE2(reg, tmpreg) \
	xor	reg,tmpreg,reg
/* Variant writing the mangled value to a separate DESTREG.  */
#  define PTR_MANGLE3(destreg, reg, tmpreg) \
	ld	tmpreg,POINTER_GUARD(r13); \
	xor	destreg,tmpreg,reg
/* XOR is its own inverse, so demangling reuses the mangle macros.  */
#  define PTR_DEMANGLE(reg, tmpreg) PTR_MANGLE (reg, tmpreg)
#  define PTR_DEMANGLE2(reg, tmpreg) PTR_MANGLE2 (reg, tmpreg)
#  define PTR_DEMANGLE3(destreg, reg, tmpreg) PTR_MANGLE3 (destreg, reg, tmpreg)
# else
/* C version: XOR VAR with the thread's pointer guard, preserving its type.  */
#  define PTR_MANGLE(var) \
  (var) = (__typeof (var)) ((uintptr_t) (var) ^ THREAD_GET_POINTER_GUARD ())
#  define PTR_DEMANGLE(var)	PTR_MANGLE (var)
# endif
#endif
307
cfc91acd 308#endif /* linux/powerpc/powerpc64/sysdep.h */