]> git.ipfire.org Git - thirdparty/glibc.git/blob - sysdeps/unix/sysv/linux/powerpc/powerpc64/sysdep.h
e662cca4fe15458692ceded560f5c1265cd08a63
[thirdparty/glibc.git] / sysdeps / unix / sysv / linux / powerpc / powerpc64 / sysdep.h
1 /* Copyright (C) 1992-2019 Free Software Foundation, Inc.
2 This file is part of the GNU C Library.
3
4 The GNU C Library is free software; you can redistribute it and/or
5 modify it under the terms of the GNU Lesser General Public
6 License as published by the Free Software Foundation; either
7 version 2.1 of the License, or (at your option) any later version.
8
9 The GNU C Library is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 Lesser General Public License for more details.
13
14 You should have received a copy of the GNU Lesser General Public
15 License along with the GNU C Library; if not, see
16 <https://www.gnu.org/licenses/>. */
17
18 /* Alan Modra <amodra@bigpond.net.au> rewrote the INLINE_SYSCALL macro */
19
20 #ifndef _LINUX_POWERPC_SYSDEP_H
21 #define _LINUX_POWERPC_SYSDEP_H 1
22
23 #include <sysdeps/unix/sysv/linux/sysdep.h>
24 #include <sysdeps/unix/powerpc/sysdep.h>
25 #include <tls.h>
26
27 /* Define __set_errno() for INLINE_SYSCALL macro below. */
28 #ifndef __ASSEMBLER__
29 #include <errno.h>
30 #endif
31
/* For Linux we can use the system call table in the header file
	/usr/include/asm/unistd.h
   of the kernel.  But these symbols do not follow the SYS_* syntax
   so we have to redefine the `SYS_ify' macro here.  */
#undef SYS_ify
/* Map a bare syscall name (e.g. `read') to the kernel's __NR_read number.  */
#define SYS_ify(syscall_name)	__NR_##syscall_name
38
#ifdef __ASSEMBLER__

/* This seems to always be the case on PPC.  */
/* Alignment arguments are given directly as the log2 value.  */
# define ALIGNARG(log2) log2
/* Record a symbol's extent with a .size directive (current point minus
   the symbol's start).  */
# define ASM_SIZE_DIRECTIVE(name) .size name,.-name

#endif /* __ASSEMBLER__ */
46
/* This version is for internal uses when there is no desire
   to set errno */
/* Call the vDSO implementation of NAME directly, with no regular-syscall
   fallback.  SC_RET starts as ENOSYS; if the (pointer-mangled) vDSO
   symbol is absent, ERR is set to 1 << 28 -- the same bit that
   INTERNAL_SYSCALL_ERROR_P tests -- so the caller sees an error.
   Otherwise the vDSO function is invoked and its result, of TYPE,
   becomes the expression's value.  */
#define INTERNAL_VSYSCALL_NO_SYSCALL_FALLBACK(name, err, type, nr, args...) \
  ({									\
    type sc_ret = ENOSYS;						\
									\
    /* The __vdso_* pointers are stored mangled; undo that before	\
       testing and calling.  */						\
    __typeof (__vdso_##name) vdsop = __vdso_##name;			\
    PTR_DEMANGLE (vdsop);						\
    if (vdsop != NULL)							\
      sc_ret =								\
	INTERNAL_VSYSCALL_CALL_TYPE (vdsop, err, type, nr, ##args);	\
    else								\
      err = 1 << 28;							\
    sc_ret;								\
  })
62
/* List of system calls which are supported as vsyscalls.  */
/* These flags tell generic code which syscalls may be routed through
   the vDSO on this configuration.  */
#define HAVE_CLOCK_GETRES_VSYSCALL	1
#define HAVE_CLOCK_GETTIME_VSYSCALL	1
#define HAVE_GETCPU_VSYSCALL		1
67
/* Define a macro which expands inline into the wrapper code for a system
   call. This use is for internal calls that do not need to handle errors
   normally. It will never touch errno. This returns just what the kernel
   gave back in the non-error (CR0.SO cleared) case, otherwise (CR0.SO set)
   the negation of the return value in the kernel gets reverted. */

/* Invoke the vDSO function FUNCPTR as an indirect call: LOADARGS_nr
   places FUNCPTR in r0 and up to NR arguments in r3..r8, then the asm
   moves r0 to CTR and does bctrl.  Afterwards mfcr copies the whole CR
   into r0, and ERR receives that image so the caller can test the
   CR0.SO error bit.  RVAL aliases r3 so the result can carry a
   caller-chosen TYPE; the final empty asm ties RVAL to r3's value.  */
#define INTERNAL_VSYSCALL_CALL_TYPE(funcptr, err, type, nr, args...)	\
  ({									\
    register void *r0 __asm__ ("r0");					\
    register long int r3 __asm__ ("r3");				\
    register long int r4 __asm__ ("r4");				\
    register long int r5 __asm__ ("r5");				\
    register long int r6 __asm__ ("r6");				\
    register long int r7 __asm__ ("r7");				\
    register long int r8 __asm__ ("r8");				\
    register type rval __asm__ ("r3");					\
    LOADARGS_##nr (funcptr, args);					\
    __asm__ __volatile__						\
      ("mtctr %0\n\t"							\
       "bctrl\n\t"							\
       "mfcr %0\n\t"							\
       "0:"								\
       /* r0 and the argument registers are read and overwritten.  */	\
       : "+r" (r0), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6),		\
	 "+r" (r7), "+r" (r8)						\
       /* Volatile registers the callee may clobber, plus lr from	\
	  bctrl itself.  */						\
       : : "r9", "r10", "r11", "r12", "cr0", "ctr", "lr", "memory");	\
    err = (long int) r0;						\
    __asm__ __volatile__ ("" : "=r" (rval) : "r" (r3));			\
    rval;								\
  })
97
/* Convenience wrapper: the common case of a long int result.  */
#define INTERNAL_VSYSCALL_CALL(funcptr, err, nr, args...)		\
  INTERNAL_VSYSCALL_CALL_TYPE(funcptr, err, long int, nr, args)
100
#undef INLINE_SYSCALL

/* This version is for kernels that implement system calls that
   behave like function calls as far as register saving.  */
/* Perform syscall NAME with NR arguments and apply the POSIX error
   convention: on failure (CR0.SO set, per INTERNAL_SYSCALL_ERROR_P)
   store the kernel's error value in errno and evaluate to -1;
   otherwise evaluate to the syscall's return value.  */
#define INLINE_SYSCALL(name, nr, args...)				\
  ({									\
    INTERNAL_SYSCALL_DECL (sc_err);					\
    long int sc_ret = INTERNAL_SYSCALL (name, sc_err, nr, args);	\
    if (INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err))			\
      {									\
	__set_errno (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err));		\
	sc_ret = -1L;							\
      }									\
    sc_ret;								\
  })
116
/* Define a macro which expands inline into the wrapper code for a system
   call.  This use is for internal calls that do not need to handle errors
   normally.  It never touches errno.  The macro's value is the raw value
   the kernel returned in r3; whether that value denotes an error must be
   checked separately with INTERNAL_SYSCALL_ERROR_P, which tests the
   CR0.SO bit saved into ERR.  */
122
#undef INTERNAL_SYSCALL
/* Issue the `sc' instruction with syscall number NAME -- already a
   numeric expression, hence "NCS" (non-constant syscall) -- in r0 and
   up to NR arguments in r3..r8.  After the syscall, mfcr copies the
   whole CR into r0, so ERR carries the CR0.SO error flag for
   INTERNAL_SYSCALL_ERROR_P.  The expression's value is the raw r3 the
   kernel returned.  r9-r12, cr0 and ctr are declared clobbered across
   the syscall.  */
#define INTERNAL_SYSCALL_NCS(name, err, nr, args...)			\
  ({									\
    register long int r0 __asm__ ("r0");				\
    register long int r3 __asm__ ("r3");				\
    register long int r4 __asm__ ("r4");				\
    register long int r5 __asm__ ("r5");				\
    register long int r6 __asm__ ("r6");				\
    register long int r7 __asm__ ("r7");				\
    register long int r8 __asm__ ("r8");				\
    LOADARGS_##nr (name, ##args);					\
    __asm__ __volatile__						\
      ("sc\n\t"								\
       "mfcr %0\n\t"							\
       "0:"								\
       : "=&r" (r0),							\
	 "=&r" (r3), "=&r" (r4), "=&r" (r5),				\
	 "=&r" (r6), "=&r" (r7), "=&r" (r8)				\
       /* Inputs tie each register to its own output slot; see		\
	  ASM_INPUT_n below.  */					\
       : ASM_INPUT_##nr							\
       : "r9", "r10", "r11", "r12",					\
	 "cr0", "ctr", "memory");					\
    err = r0;								\
    r3;									\
  })
/* Same, but NAME is a bare syscall name pasted onto __NR_.  */
#define INTERNAL_SYSCALL(name, err, nr, args...)			\
  INTERNAL_SYSCALL_NCS (__NR_##name, err, nr, args)
149
#undef INTERNAL_SYSCALL_DECL
/* Declare the ERR variable the INTERNAL_SYSCALL_* macros write the
   saved CR image into.  */
#define INTERNAL_SYSCALL_DECL(err) long int err __attribute__ ((unused))

#undef INTERNAL_SYSCALL_ERROR_P
/* Nonzero when bit 1 << 28 of the saved CR image is set -- the CR0.SO
   position in the 32-bit mfcr result -- i.e. the kernel flagged an
   error.  VAL is evaluated but otherwise unused.  */
#define INTERNAL_SYSCALL_ERROR_P(val, err) \
  ((void) (val), __builtin_expect ((err) & (1 << 28), 0))

#undef INTERNAL_SYSCALL_ERRNO
/* When ERROR_P is true, the syscall's return value VAL is itself the
   error number.  */
#define INTERNAL_SYSCALL_ERRNO(val, err)     (val)
159
/* LOADARGS_n loads the syscall number (or vDSO function pointer) NAME
   into r0 and arguments 1..n, converted to long int, into r3..r8; each
   LOADARGS_n chains to LOADARGS_(n-1).  The reference to the undefined
   __illegally_sized_syscall_argN provokes a link-time error whenever a
   non-pointer argument wider than 8 bytes is passed
   (__builtin_classify_type () == 5 is GCC's pointer class, which is
   always register-sized here).  */
#define LOADARGS_0(name, dummy) \
	r0 = name
#define LOADARGS_1(name, __arg1) \
	long int arg1 = (long int) (__arg1); \
	LOADARGS_0(name, 0); \
	extern void __illegally_sized_syscall_arg1 (void); \
	if (__builtin_classify_type (__arg1) != 5 && sizeof (__arg1) > 8) \
	  __illegally_sized_syscall_arg1 (); \
	r3 = arg1
#define LOADARGS_2(name, __arg1, __arg2) \
	long int arg2 = (long int) (__arg2); \
	LOADARGS_1(name, __arg1); \
	extern void __illegally_sized_syscall_arg2 (void); \
	if (__builtin_classify_type (__arg2) != 5 && sizeof (__arg2) > 8) \
	  __illegally_sized_syscall_arg2 (); \
	r4 = arg2
#define LOADARGS_3(name, __arg1, __arg2, __arg3) \
	long int arg3 = (long int) (__arg3); \
	LOADARGS_2(name, __arg1, __arg2); \
	extern void __illegally_sized_syscall_arg3 (void); \
	if (__builtin_classify_type (__arg3) != 5 && sizeof (__arg3) > 8) \
	  __illegally_sized_syscall_arg3 (); \
	r5 = arg3
#define LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4) \
	long int arg4 = (long int) (__arg4); \
	LOADARGS_3(name, __arg1, __arg2, __arg3); \
	extern void __illegally_sized_syscall_arg4 (void); \
	if (__builtin_classify_type (__arg4) != 5 && sizeof (__arg4) > 8) \
	  __illegally_sized_syscall_arg4 (); \
	r6 = arg4
#define LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5) \
	long int arg5 = (long int) (__arg5); \
	LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4); \
	extern void __illegally_sized_syscall_arg5 (void); \
	if (__builtin_classify_type (__arg5) != 5 && sizeof (__arg5) > 8) \
	  __illegally_sized_syscall_arg5 (); \
	r7 = arg5
#define LOADARGS_6(name, __arg1, __arg2, __arg3, __arg4, __arg5, __arg6) \
	long int arg6 = (long int) (__arg6); \
	LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5); \
	extern void __illegally_sized_syscall_arg6 (void); \
	if (__builtin_classify_type (__arg6) != 5 && sizeof (__arg6) > 8) \
	  __illegally_sized_syscall_arg6 (); \
	r8 = arg6
204
/* ASM_INPUT_n ties r0 and the first n argument registers to the asm's
   matching output operands ("0".."6") so the values set up by
   LOADARGS_n are live inputs to the `sc' sequence.  */
#define ASM_INPUT_0 "0" (r0)
#define ASM_INPUT_1 ASM_INPUT_0, "1" (r3)
#define ASM_INPUT_2 ASM_INPUT_1, "2" (r4)
#define ASM_INPUT_3 ASM_INPUT_2, "3" (r5)
#define ASM_INPUT_4 ASM_INPUT_3, "4" (r6)
#define ASM_INPUT_5 ASM_INPUT_4, "5" (r7)
#define ASM_INPUT_6 ASM_INPUT_5, "6" (r8)
212
213
/* Pointer mangling support.  */
#if IS_IN (rtld)
/* We cannot use the thread descriptor because in ld.so we use setjmp
   earlier than the descriptor is initialized.  */
#else
# ifdef __ASSEMBLER__
/* Assembler variants: XOR REG with the per-thread guard value loaded
   from POINTER_GUARD(r13) (r13: thread pointer in the powerpc64 ABI).
   PTR_MANGLE2 assumes TMPREG already holds the guard; PTR_MANGLE3
   writes the result to DESTREG instead of in place.  Demangling is
   the same XOR, so the PTR_DEMANGLE* macros alias the mangle forms.  */
# define PTR_MANGLE(reg, tmpreg) \
	ld	tmpreg,POINTER_GUARD(r13); \
	xor	reg,tmpreg,reg
# define PTR_MANGLE2(reg, tmpreg) \
	xor	reg,tmpreg,reg
# define PTR_MANGLE3(destreg, reg, tmpreg) \
	ld	tmpreg,POINTER_GUARD(r13); \
	xor	destreg,tmpreg,reg
# define PTR_DEMANGLE(reg, tmpreg) PTR_MANGLE (reg, tmpreg)
# define PTR_DEMANGLE2(reg, tmpreg) PTR_MANGLE2 (reg, tmpreg)
# define PTR_DEMANGLE3(destreg, reg, tmpreg) PTR_MANGLE3 (destreg, reg, tmpreg)
# else
/* C variant: XOR the pointer with the guard fetched from the thread
   descriptor.  */
# define PTR_MANGLE(var) \
  (var) = (__typeof (var)) ((uintptr_t) (var) ^ THREAD_GET_POINTER_GUARD ())
# define PTR_DEMANGLE(var)	PTR_MANGLE (var)
# endif
#endif
237
/* In the PowerPC64 ABI, the unadorned F_GETLK* opcodes should be used
   even by largefile64 code.  */
/* Map the F_*LK64 fcntl commands down to their plain F_*LK
   counterparts; every other command value passes through unchanged.
   The argument is evaluated exactly once.  */
#define FCNTL_ADJUST_CMD(__cmd)						\
  ({ int fcntl_cmd_ = (__cmd);						\
     (fcntl_cmd_ >= F_GETLK64 && fcntl_cmd_ <= F_SETLKW64)		\
       ? fcntl_cmd_ - (F_GETLK64 - F_GETLK)				\
       : fcntl_cmd_; })
245
246
247 #endif /* linux/powerpc/powerpc64/sysdep.h */