/* Copyright (C) 1992-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

/* Alan Modra <amodra@bigpond.net.au> rewrote the INLINE_SYSCALL macro */

#ifndef _LINUX_POWERPC_SYSDEP_H
#define _LINUX_POWERPC_SYSDEP_H 1

#include <sysdeps/unix/sysv/linux/powerpc/sysdep.h>
#include <sysdeps/unix/sysv/linux/sysdep.h>
#include <sysdeps/unix/powerpc/sysdep.h>
#include <tls.h>

/* Define __set_errno() for INLINE_SYSCALL macro below.  */
#ifndef __ASSEMBLER__
#include <errno.h>
#endif

/* For Linux we can use the system call numbers from the kernel header
   /usr/include/asm/unistd.h.  But those symbols do not follow the SYS_*
   naming convention, so we have to redefine the `SYS_ify' macro here.  */
#undef SYS_ify
#define SYS_ify(syscall_name) __NR_##syscall_name
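/* An illustrative sketch, not part of the upstream header: SYS_ify simply
   pastes the bare syscall name onto the kernel's __NR_ prefix.  The function
   name and the use of getpid below are assumptions for illustration only.  */
#if 0
static long int
__example_syscall_number (void)
{
  return SYS_ify (getpid);	/* Expands to __NR_getpid.  */
}
#endif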

#ifdef __ASSEMBLER__

/* This seems to always be the case on PPC.  */
# define ALIGNARG(log2) log2
# define ASM_SIZE_DIRECTIVE(name) .size name,.-name

#endif /* __ASSEMBLER__ */

/* This version is for internal uses when there is no desire
   to set errno.  */
#define INTERNAL_VSYSCALL_NO_SYSCALL_FALLBACK(name, err, type, nr, args...) \
  ({ \
    type sc_ret = ENOSYS; \
    \
    __typeof (__vdso_##name) vdsop = __vdso_##name; \
    PTR_DEMANGLE (vdsop); \
    if (vdsop != NULL) \
      sc_ret = \
        INTERNAL_VSYSCALL_CALL_TYPE (vdsop, err, type, nr, ##args); \
    else \
      err = 1 << 28; \
    sc_ret; \
  })

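/* An illustrative sketch, not part of the upstream header: one way an
   internal wrapper might use the fallback-free vDSO macro above.  The
   __vdso_clock_gettime declaration, the wrapper name, and the parameters
   are assumptions made purely for this example.  */
#if 0
# include <time.h>

extern __typeof (clock_gettime) *__vdso_clock_gettime;

static long int
__example_vdso_clock_gettime (clockid_t clk, struct timespec *ts)
{
  INTERNAL_SYSCALL_DECL (err);
  long int ret = INTERNAL_VSYSCALL_NO_SYSCALL_FALLBACK (clock_gettime, err,
                                                        long int, 2, clk, ts);
  /* Bit 28 of err is set either when the vDSO entry is missing or when the
     call itself failed; there is deliberately no syscall fallback here.  */
  if (INTERNAL_SYSCALL_ERROR_P (ret, err))
    return -INTERNAL_SYSCALL_ERRNO (ret, err);
  return ret;
}
#endif
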
/* Define a macro which expands inline into the wrapper code for a system
   call.  This use is for internal calls that do not need to handle errors
   normally.  It will never touch errno.  This returns just what the kernel
   gave back in the non-error (CR0.SO cleared) case; otherwise (CR0.SO set)
   the negation applied by the kernel is undone, so the positive errno value
   is returned.  */

#define INTERNAL_VSYSCALL_CALL_TYPE(funcptr, err, type, nr, args...) \
  ({ \
    register void *r0 __asm__ ("r0"); \
    register long int r3 __asm__ ("r3"); \
    register long int r4 __asm__ ("r4"); \
    register long int r5 __asm__ ("r5"); \
    register long int r6 __asm__ ("r6"); \
    register long int r7 __asm__ ("r7"); \
    register long int r8 __asm__ ("r8"); \
    register type rval __asm__ ("r3"); \
    LOADARGS_##nr (funcptr, args); \
    __asm__ __volatile__ \
      ("mtctr %0\n\t" \
       "bctrl\n\t" \
       "mfcr %0\n\t" \
       "0:" \
       : "+r" (r0), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), \
         "+r" (r7), "+r" (r8) \
       : : "r9", "r10", "r11", "r12", "cr0", "ctr", "lr", "memory"); \
    err = (long int) r0; \
    __asm__ __volatile__ ("" : "=r" (rval) : "r" (r3)); \
    rval; \
  })

#define INTERNAL_VSYSCALL_CALL(funcptr, err, nr, args...) \
  INTERNAL_VSYSCALL_CALL_TYPE(funcptr, err, long int, nr, args)

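/* An illustrative sketch, not part of the upstream header: calling an
   already-demangled vDSO entry point directly and inspecting the error
   status the macro stores in `err'.  The __vdso_getcpu declaration and the
   surrounding function are assumptions for illustration only.  */
#if 0
extern long int (*__vdso_getcpu) (unsigned int *, unsigned int *, void *);

static long int
__example_vdso_getcpu (unsigned int *cpu, unsigned int *node)
{
  long int err;
  __typeof (__vdso_getcpu) vdsop = __vdso_getcpu;
  PTR_DEMANGLE (vdsop);
  long int ret = INTERNAL_VSYSCALL_CALL (vdsop, err, 3, cpu, node, (void *) 0);
  /* INTERNAL_SYSCALL_ERROR_P just tests bit 28 of err (the CR0.SO copy);
     when set, ret already holds the positive errno value.  */
  if (INTERNAL_SYSCALL_ERROR_P (ret, err))
    return -INTERNAL_SYSCALL_ERRNO (ret, err);
  return ret;
}
#endif
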
#undef INLINE_SYSCALL

/* This version is for kernels that implement system calls that
   behave like function calls as far as register saving is concerned.  */
#define INLINE_SYSCALL(name, nr, args...) \
  ({ \
    INTERNAL_SYSCALL_DECL (sc_err); \
    long int sc_ret = INTERNAL_SYSCALL (name, sc_err, nr, args); \
    if (INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err)) \
      { \
        __set_errno (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err)); \
        sc_ret = -1L; \
      } \
    sc_ret; \
  })

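/* An illustrative sketch, not part of the upstream header: INLINE_SYSCALL is
   the errno-setting flavour, so a wrapper can just return its result.  The
   wrapper name below is hypothetical.  */
#if 0
# include <unistd.h>

static ssize_t
__example_write (int fd, const void *buf, size_t len)
{
  /* On failure this sets errno from the kernel error and yields -1.  */
  return INLINE_SYSCALL (write, 3, fd, buf, len);
}
#endif
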
/* Define a macro which expands inline into the wrapper code for a system
   call.  This use is for internal calls that do not need to handle errors
   normally.  It will never touch errno.  This returns just what the kernel
   gave back in the non-error (CR0.SO cleared) case; otherwise (CR0.SO set)
   the negation applied by the kernel is undone, so the positive errno value
   is returned.  */

#undef INTERNAL_SYSCALL
#define INTERNAL_SYSCALL_NCS(name, err, nr, args...) \
  ({ \
    register long int r0 __asm__ ("r0"); \
    register long int r3 __asm__ ("r3"); \
    register long int r4 __asm__ ("r4"); \
    register long int r5 __asm__ ("r5"); \
    register long int r6 __asm__ ("r6"); \
    register long int r7 __asm__ ("r7"); \
    register long int r8 __asm__ ("r8"); \
    LOADARGS_##nr (name, ##args); \
    __asm__ __volatile__ \
      ("sc\n\t" \
       "mfcr %0\n\t" \
       "0:" \
       : "=&r" (r0), \
         "=&r" (r3), "=&r" (r4), "=&r" (r5), \
         "=&r" (r6), "=&r" (r7), "=&r" (r8) \
       : ASM_INPUT_##nr \
       : "r9", "r10", "r11", "r12", \
         "cr0", "ctr", "memory"); \
    err = r0; \
    r3; \
  })
#define INTERNAL_SYSCALL(name, err, nr, args...) \
  INTERNAL_SYSCALL_NCS (__NR_##name, err, nr, args)

#undef INTERNAL_SYSCALL_DECL
#define INTERNAL_SYSCALL_DECL(err) long int err __attribute__ ((unused))

#undef INTERNAL_SYSCALL_ERROR_P
#define INTERNAL_SYSCALL_ERROR_P(val, err) \
  ((void) (val), __builtin_expect ((err) & (1 << 28), 0))

#undef INTERNAL_SYSCALL_ERRNO
#define INTERNAL_SYSCALL_ERRNO(val, err) (val)

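/* An illustrative sketch, not part of the upstream header: the errno-free
   pattern built from the macros above.  INTERNAL_SYSCALL_NCS takes the
   syscall number as an ordinary value rather than a __NR_* name.  The
   function name and the choice of dup are assumptions for illustration.  */
#if 0
static long int
__example_raw_syscall_dup (long int nr_dup, int fd)
{
  INTERNAL_SYSCALL_DECL (err);
  long int ret = INTERNAL_SYSCALL_NCS (nr_dup, err, 1, fd);
  if (INTERNAL_SYSCALL_ERROR_P (ret, err))
    /* Report the failure as a negative errno value; errno is untouched.  */
    return -INTERNAL_SYSCALL_ERRNO (ret, err);
  return ret;
}
#endif
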
#define LOADARGS_0(name, dummy) \
  r0 = name
#define LOADARGS_1(name, __arg1) \
  long int arg1 = (long int) (__arg1); \
  LOADARGS_0(name, 0); \
  extern void __illegally_sized_syscall_arg1 (void); \
  if (__builtin_classify_type (__arg1) != 5 && sizeof (__arg1) > 8) \
    __illegally_sized_syscall_arg1 (); \
  r3 = arg1
#define LOADARGS_2(name, __arg1, __arg2) \
  long int arg2 = (long int) (__arg2); \
  LOADARGS_1(name, __arg1); \
  extern void __illegally_sized_syscall_arg2 (void); \
  if (__builtin_classify_type (__arg2) != 5 && sizeof (__arg2) > 8) \
    __illegally_sized_syscall_arg2 (); \
  r4 = arg2
#define LOADARGS_3(name, __arg1, __arg2, __arg3) \
  long int arg3 = (long int) (__arg3); \
  LOADARGS_2(name, __arg1, __arg2); \
  extern void __illegally_sized_syscall_arg3 (void); \
  if (__builtin_classify_type (__arg3) != 5 && sizeof (__arg3) > 8) \
    __illegally_sized_syscall_arg3 (); \
  r5 = arg3
#define LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4) \
  long int arg4 = (long int) (__arg4); \
  LOADARGS_3(name, __arg1, __arg2, __arg3); \
  extern void __illegally_sized_syscall_arg4 (void); \
  if (__builtin_classify_type (__arg4) != 5 && sizeof (__arg4) > 8) \
    __illegally_sized_syscall_arg4 (); \
  r6 = arg4
#define LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5) \
  long int arg5 = (long int) (__arg5); \
  LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4); \
  extern void __illegally_sized_syscall_arg5 (void); \
  if (__builtin_classify_type (__arg5) != 5 && sizeof (__arg5) > 8) \
    __illegally_sized_syscall_arg5 (); \
  r7 = arg5
#define LOADARGS_6(name, __arg1, __arg2, __arg3, __arg4, __arg5, __arg6) \
  long int arg6 = (long int) (__arg6); \
  LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5); \
  extern void __illegally_sized_syscall_arg6 (void); \
  if (__builtin_classify_type (__arg6) != 5 && sizeof (__arg6) > 8) \
    __illegally_sized_syscall_arg6 (); \
  r8 = arg6

#define ASM_INPUT_0 "0" (r0)
#define ASM_INPUT_1 ASM_INPUT_0, "1" (r3)
#define ASM_INPUT_2 ASM_INPUT_1, "2" (r4)
#define ASM_INPUT_3 ASM_INPUT_2, "3" (r5)
#define ASM_INPUT_4 ASM_INPUT_3, "4" (r6)
#define ASM_INPUT_5 ASM_INPUT_4, "5" (r7)
#define ASM_INPUT_6 ASM_INPUT_5, "6" (r8)


/* Pointer mangling support.  */
#if IS_IN (rtld)
/* We cannot use the thread descriptor because in ld.so we use setjmp
   before the descriptor is initialized.  */
#else
# ifdef __ASSEMBLER__
#  define PTR_MANGLE(reg, tmpreg) \
	ld tmpreg,POINTER_GUARD(r13); \
	xor reg,tmpreg,reg
#  define PTR_MANGLE2(reg, tmpreg) \
	xor reg,tmpreg,reg
#  define PTR_MANGLE3(destreg, reg, tmpreg) \
	ld tmpreg,POINTER_GUARD(r13); \
	xor destreg,tmpreg,reg
#  define PTR_DEMANGLE(reg, tmpreg) PTR_MANGLE (reg, tmpreg)
#  define PTR_DEMANGLE2(reg, tmpreg) PTR_MANGLE2 (reg, tmpreg)
#  define PTR_DEMANGLE3(destreg, reg, tmpreg) PTR_MANGLE3 (destreg, reg, tmpreg)
# else
#  define PTR_MANGLE(var) \
  (var) = (__typeof (var)) ((uintptr_t) (var) ^ THREAD_GET_POINTER_GUARD ())
#  define PTR_DEMANGLE(var) PTR_MANGLE (var)
# endif
#endif

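/* An illustrative sketch, not part of the upstream header: the C flavour of
   the mangling macros XORs a pointer with the per-thread pointer guard, so a
   value is stored mangled and demangled again before use.  The function name
   below is an assumption for illustration only.  */
#if 0
static void
__example_pointer_guard_roundtrip (void (*fn) (void))
{
  void (*stored) (void) = fn;
  PTR_MANGLE (stored);		/* Obfuscate before keeping it in memory.  */
  /* ... later ... */
  PTR_DEMANGLE (stored);	/* Undo the XOR before using it again.  */
  stored ();
}
#endif
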
/* In the PowerPC64 ABI, the unadorned F_GETLK* opcodes should be used
   even by largefile64 code.  */
#define FCNTL_ADJUST_CMD(__cmd) \
  ({ int cmd_ = (__cmd); \
     if (cmd_ >= F_GETLK64 && cmd_ <= F_SETLKW64) \
       cmd_ -= F_GETLK64 - F_GETLK; \
     cmd_; })
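
/* An illustrative sketch, not part of the upstream header: an fcntl-style
   wrapper would remap the F_*LK64 commands onto the plain F_*LK values
   before trapping into the kernel, since the 64-bit ABI uses only the
   unadorned opcodes.  The wrapper name and its use of INLINE_SYSCALL are
   assumptions for illustration.  */
#if 0
# include <fcntl.h>

static int
__example_fcntl_lock (int fd, int cmd, struct flock *lk)
{
  /* F_GETLK64..F_SETLKW64 are shifted down to F_GETLK..F_SETLKW; commands
     outside that range pass through unchanged.  */
  return INLINE_SYSCALL (fcntl, 3, fd, FCNTL_ADJUST_CMD (cmd), lk);
}
#endif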

#endif /* linux/powerpc/powerpc64/sysdep.h */