/* Copyright (C) 1992-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

/* Alan Modra <amodra@bigpond.net.au> rewrote the INLINE_SYSCALL macro.  */

#ifndef _LINUX_POWERPC_SYSDEP_H
#define _LINUX_POWERPC_SYSDEP_H 1

#include <sysdeps/unix/sysv/linux/sysdep.h>
#include <sysdeps/unix/powerpc/sysdep.h>
#include <tls.h>

/* Define __set_errno() for the INLINE_SYSCALL macro below.  */
#ifndef __ASSEMBLER__
#include <errno.h>
#endif

/* For Linux we can use the system call numbers from the kernel header
   /usr/include/asm/unistd.h.  But those symbols do not follow the SYS_*
   naming convention, so we have to redefine the `SYS_ify' macro here.  */
#undef SYS_ify
#define SYS_ify(syscall_name)	__NR_##syscall_name
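/* For example, SYS_ify (getpid) expands via token pasting to __NR_getpid,
   the numeric system call constant from <asm/unistd.h>.  */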

#ifdef __ASSEMBLER__

/* This seems to always be the case on PPC.  */
# define ALIGNARG(log2) log2
# define ASM_SIZE_DIRECTIVE(name) .size name,.-name

#endif /* __ASSEMBLER__ */

/* This version is for internal uses when there is no desire
   to set errno.  */
#define INTERNAL_VSYSCALL_NO_SYSCALL_FALLBACK(name, err, type, nr, args...) \
  ({                                                                    \
    type sc_ret = ENOSYS;                                               \
                                                                        \
    __typeof (__vdso_##name) vdsop = __vdso_##name;                     \
    PTR_DEMANGLE (vdsop);                                               \
    if (vdsop != NULL)                                                  \
      sc_ret =                                                          \
        INTERNAL_VSYSCALL_CALL_TYPE (vdsop, err, type, nr, ##args);     \
    else                                                                \
      err = 1 << 28;                                                    \
    sc_ret;                                                             \
  })
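
/* A hypothetical use, as a sketch (assumes a __vdso_clock_gettime symbol
   and a struct timespec ts are in scope):

     INTERNAL_SYSCALL_DECL (err);
     long int ret = INTERNAL_VSYSCALL_NO_SYSCALL_FALLBACK
       (clock_gettime, err, long int, 2, CLOCK_MONOTONIC, &ts);

   If the demangled vDSO pointer is NULL, err gets the CR0.SO-style error
   bit (1 << 28) and the result stays ENOSYS; there is no fallback to the
   real system call.  */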

/* List of system calls which are supported as vsyscalls.  */
#define HAVE_CLOCK_GETRES_VSYSCALL	1
#define HAVE_CLOCK_GETTIME_VSYSCALL	1
#define HAVE_GETCPU_VSYSCALL		1

/* Define a macro which expands inline into the wrapper code for a system
   call.  This use is for internal calls that do not need to handle errors
   normally.  It will never touch errno.  This returns just what the kernel
   gave back in the non-error (CR0.SO cleared) case; otherwise (CR0.SO set)
   the negation that the kernel applied to the return value is reverted.  */

#define INTERNAL_VSYSCALL_CALL_TYPE(funcptr, err, type, nr, args...)    \
  ({                                                                    \
    register void *r0 __asm__ ("r0");                                   \
    register long int r3 __asm__ ("r3");                                \
    register long int r4 __asm__ ("r4");                                \
    register long int r5 __asm__ ("r5");                                \
    register long int r6 __asm__ ("r6");                                \
    register long int r7 __asm__ ("r7");                                \
    register long int r8 __asm__ ("r8");                                \
    register type rval __asm__ ("r3");                                  \
    LOADARGS_##nr (funcptr, args);                                      \
    __asm__ __volatile__                                                \
      ("mtctr %0\n\t"                                                   \
       "bctrl\n\t"                                                      \
       "mfcr %0\n\t"                                                    \
       "0:"                                                             \
       : "+r" (r0), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6),         \
         "+r" (r7), "+r" (r8)                                           \
       : : "r9", "r10", "r11", "r12", "cr0", "ctr", "lr", "memory");    \
    err = (long int) r0;                                                \
    __asm__ __volatile__ ("" : "=r" (rval) : "r" (r3));                 \
    rval;                                                               \
  })
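
/* Note on the error convention: "bctrl" calls the vDSO routine through
   CTR, and "mfcr" then copies the whole 32-bit condition register into
   r0.  In that image the CR0.SO (summary overflow) bit is bit 28 counting
   from the least significant end, which is why INTERNAL_SYSCALL_ERROR_P
   below tests err against (1 << 28).  */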

#define INTERNAL_VSYSCALL_CALL(funcptr, err, nr, args...)               \
  INTERNAL_VSYSCALL_CALL_TYPE (funcptr, err, long int, nr, args)

#undef INLINE_SYSCALL

/* This version is for kernels that implement system calls that
   behave like function calls as far as register saving is concerned.  */
#define INLINE_SYSCALL(name, nr, args...)                               \
  ({                                                                    \
    INTERNAL_SYSCALL_DECL (sc_err);                                     \
    long int sc_ret = INTERNAL_SYSCALL (name, sc_err, nr, args);        \
    if (INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err))                      \
      {                                                                 \
        __set_errno (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err));          \
        sc_ret = -1L;                                                   \
      }                                                                 \
    sc_ret;                                                             \
  })
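
/* Sketch of a typical expansion site (open and its arguments are only an
   illustration): INLINE_SYSCALL (open, 2, pathname, flags) performs the
   system call and, on failure, stores the kernel's error code in errno
   and evaluates to -1, matching the POSIX function-call convention.  */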

/* Define a macro which expands inline into the wrapper code for a system
   call.  This use is for internal calls that do not need to handle errors
   normally.  It will never touch errno.  This returns just what the kernel
   gave back in the non-error (CR0.SO cleared) case; otherwise (CR0.SO set)
   the negation that the kernel applied to the return value is reverted.  */

#undef INTERNAL_SYSCALL
#define INTERNAL_SYSCALL_NCS(name, err, nr, args...)                    \
  ({                                                                    \
    register long int r0 __asm__ ("r0");                                \
    register long int r3 __asm__ ("r3");                                \
    register long int r4 __asm__ ("r4");                                \
    register long int r5 __asm__ ("r5");                                \
    register long int r6 __asm__ ("r6");                                \
    register long int r7 __asm__ ("r7");                                \
    register long int r8 __asm__ ("r8");                                \
    LOADARGS_##nr (name, ##args);                                       \
    ABORT_TRANSACTION;                                                  \
    __asm__ __volatile__                                                \
      ("sc\n\t"                                                         \
       "mfcr %0\n\t"                                                    \
       "0:"                                                             \
       : "=&r" (r0),                                                    \
         "=&r" (r3), "=&r" (r4), "=&r" (r5),                            \
         "=&r" (r6), "=&r" (r7), "=&r" (r8)                             \
       : ASM_INPUT_##nr                                                 \
       : "r9", "r10", "r11", "r12",                                     \
         "cr0", "ctr", "memory");                                       \
    err = r0;                                                           \
    r3;                                                                 \
  })
#define INTERNAL_SYSCALL(name, err, nr, args...)                        \
  INTERNAL_SYSCALL_NCS (__NR_##name, err, nr, args)

#undef INTERNAL_SYSCALL_DECL
#define INTERNAL_SYSCALL_DECL(err) long int err __attribute__ ((unused))

#undef INTERNAL_SYSCALL_ERROR_P
#define INTERNAL_SYSCALL_ERROR_P(val, err)                              \
  ((void) (val), __builtin_expect ((err) & (1 << 28), 0))

#undef INTERNAL_SYSCALL_ERRNO
#define INTERNAL_SYSCALL_ERRNO(val, err) (val)

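/* The usual calling pattern, as a sketch (fd is hypothetical):

     INTERNAL_SYSCALL_DECL (err);
     long int ret = INTERNAL_SYSCALL (close, err, 1, fd);
     if (INTERNAL_SYSCALL_ERROR_P (ret, err))
       ... INTERNAL_SYSCALL_ERRNO (ret, err) is the positive errno value ...

   Unlike INLINE_SYSCALL, nothing here touches the global errno.  */
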
#define LOADARGS_0(name, dummy)                                         \
  r0 = name
#define LOADARGS_1(name, __arg1)                                        \
  long int arg1 = (long int) (__arg1);                                  \
  LOADARGS_0 (name, 0);                                                 \
  extern void __illegally_sized_syscall_arg1 (void);                    \
  if (__builtin_classify_type (__arg1) != 5 && sizeof (__arg1) > 8)     \
    __illegally_sized_syscall_arg1 ();                                  \
  r3 = arg1
#define LOADARGS_2(name, __arg1, __arg2)                                \
  long int arg2 = (long int) (__arg2);                                  \
  LOADARGS_1 (name, __arg1);                                            \
  extern void __illegally_sized_syscall_arg2 (void);                    \
  if (__builtin_classify_type (__arg2) != 5 && sizeof (__arg2) > 8)     \
    __illegally_sized_syscall_arg2 ();                                  \
  r4 = arg2
#define LOADARGS_3(name, __arg1, __arg2, __arg3)                        \
  long int arg3 = (long int) (__arg3);                                  \
  LOADARGS_2 (name, __arg1, __arg2);                                    \
  extern void __illegally_sized_syscall_arg3 (void);                    \
  if (__builtin_classify_type (__arg3) != 5 && sizeof (__arg3) > 8)     \
    __illegally_sized_syscall_arg3 ();                                  \
  r5 = arg3
#define LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4)                \
  long int arg4 = (long int) (__arg4);                                  \
  LOADARGS_3 (name, __arg1, __arg2, __arg3);                            \
  extern void __illegally_sized_syscall_arg4 (void);                    \
  if (__builtin_classify_type (__arg4) != 5 && sizeof (__arg4) > 8)     \
    __illegally_sized_syscall_arg4 ();                                  \
  r6 = arg4
#define LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5)        \
  long int arg5 = (long int) (__arg5);                                  \
  LOADARGS_4 (name, __arg1, __arg2, __arg3, __arg4);                    \
  extern void __illegally_sized_syscall_arg5 (void);                    \
  if (__builtin_classify_type (__arg5) != 5 && sizeof (__arg5) > 8)     \
    __illegally_sized_syscall_arg5 ();                                  \
  r7 = arg5
#define LOADARGS_6(name, __arg1, __arg2, __arg3, __arg4, __arg5, __arg6) \
  long int arg6 = (long int) (__arg6);                                  \
  LOADARGS_5 (name, __arg1, __arg2, __arg3, __arg4, __arg5);            \
  extern void __illegally_sized_syscall_arg6 (void);                    \
  if (__builtin_classify_type (__arg6) != 5 && sizeof (__arg6) > 8)     \
    __illegally_sized_syscall_arg6 ();                                  \
  r8 = arg6
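
/* The classify/sizeof checks above are a compile-time guard: GCC's
   __builtin_classify_type returns 5 (pointer_type_class) for pointers,
   so any non-pointer argument wider than 8 bytes (e.g. a __int128)
   leaves a call to the undefined __illegally_sized_syscall_argN
   function and fails at link time; for valid arguments the condition
   folds to false and the call is optimized away.  */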

#define ASM_INPUT_0 "0" (r0)
#define ASM_INPUT_1 ASM_INPUT_0, "1" (r3)
#define ASM_INPUT_2 ASM_INPUT_1, "2" (r4)
#define ASM_INPUT_3 ASM_INPUT_2, "3" (r5)
#define ASM_INPUT_4 ASM_INPUT_3, "4" (r6)
#define ASM_INPUT_5 ASM_INPUT_4, "5" (r7)
#define ASM_INPUT_6 ASM_INPUT_5, "6" (r8)
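
/* Each "n" (rX) entry is a matching constraint: input operand n must
   occupy the same register as output operand n of the asm above, so the
   values loaded by LOADARGS_* arrive in r0 and r3-r8 and the kernel's
   clobbering of those registers stays visible to the compiler.  */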


/* Pointer mangling support.  */
#if IS_IN (rtld)
/* We cannot use the thread descriptor because in ld.so we use setjmp
   before the thread descriptor is initialized.  */
#else
# ifdef __ASSEMBLER__
#  define PTR_MANGLE(reg, tmpreg)                                       \
	ld	tmpreg,POINTER_GUARD(r13);                               \
	xor	reg,tmpreg,reg
#  define PTR_MANGLE2(reg, tmpreg)                                      \
	xor	reg,tmpreg,reg
#  define PTR_MANGLE3(destreg, reg, tmpreg)                             \
	ld	tmpreg,POINTER_GUARD(r13);                               \
	xor	destreg,tmpreg,reg
#  define PTR_DEMANGLE(reg, tmpreg) PTR_MANGLE (reg, tmpreg)
#  define PTR_DEMANGLE2(reg, tmpreg) PTR_MANGLE2 (reg, tmpreg)
#  define PTR_DEMANGLE3(destreg, reg, tmpreg) PTR_MANGLE3 (destreg, reg, tmpreg)
# else
#  define PTR_MANGLE(var)                                               \
  (var) = (__typeof (var)) ((uintptr_t) (var) ^ THREAD_GET_POINTER_GUARD ())
#  define PTR_DEMANGLE(var) PTR_MANGLE (var)
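
/* Sketch of the C-level use (some_handler is only an illustration):

     void (*handler) (void) = some_handler;
     PTR_MANGLE (handler);      ... store the mangled value ...
     PTR_DEMANGLE (handler);    ... handler == some_handler again ...

   Since XOR with the per-thread guard is its own inverse, demangling
   simply reapplies the same operation.  */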
# endif
#endif

#endif /* linux/powerpc/powerpc64/sysdep.h */