/* Copyright (C) 1992-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#ifndef _LINUX_POWERPC_SYSDEP_H
#define _LINUX_POWERPC_SYSDEP_H 1

#include <sysdeps/unix/sysv/linux/powerpc/sysdep.h>
#include <sysdeps/unix/sysv/linux/sysdep.h>
#include <sysdeps/unix/powerpc/sysdep.h>
#include <tls.h>

/* For Linux we can use the system call table in the header file
   /usr/include/asm/unistd.h
   of the kernel.  But these symbols do not follow the SYS_* syntax
   so we have to redefine the `SYS_ify' macro here.  */
#undef SYS_ify
#define SYS_ify(syscall_name) __NR_##syscall_name
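/* Illustration: SYS_ify (read) token-pastes to __NR_read, so generic code
   can name system calls without knowing the kernel's __NR_* spelling.  */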

#ifndef __ASSEMBLER__

# include <errno.h>

/* Define a macro which expands inline into the wrapper code for a VDSO
   call.  This use is for internal calls that do not need to handle errors
   normally.  It will never touch errno.
   On powerpc a system call basically clobbers the same registers as a
   function call, with the exception of LR (which is needed for the
   "sc; bnslr+" sequence) and CR (where only CR0.SO is clobbered to signal
   an error return status).  */
# define INTERNAL_VSYSCALL_CALL_TYPE(funcptr, err, type, nr, args...) \
  ({ \
    register void *r0 __asm__ ("r0"); \
    register long int r3 __asm__ ("r3"); \
    register long int r4 __asm__ ("r4"); \
    register long int r5 __asm__ ("r5"); \
    register long int r6 __asm__ ("r6"); \
    register long int r7 __asm__ ("r7"); \
    register long int r8 __asm__ ("r8"); \
    register long int r9 __asm__ ("r9"); \
    register long int r10 __asm__ ("r10"); \
    register long int r11 __asm__ ("r11"); \
    register long int r12 __asm__ ("r12"); \
    register type rval __asm__ ("r3"); \
    LOADARGS_##nr (funcptr, args); \
    __asm__ __volatile__ \
      ("mtctr %0\n\t" \
       "bctrl\n\t" \
       "mfcr %0" \
       : "+r" (r0), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7), \
	 "+r" (r8), "+r" (r9), "+r" (r10), "+r" (r11), "+r" (r12) \
       : : "cr0", "ctr", "lr", "memory"); \
    err = (long int) r0; \
    __asm__ __volatile__ ("" : "=r" (rval) : "r" (r3), "r" (r4)); \
    rval; \
  })

#define INTERNAL_VSYSCALL_CALL(funcptr, err, nr, args...) \
  INTERNAL_VSYSCALL_CALL_TYPE(funcptr, err, long int, nr, args)

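/* Usage sketch (illustrative only; the vDSO pointer shown here is a
   hypothetical placeholder resolved elsewhere, not part of this header):

     INTERNAL_SYSCALL_DECL (err);
     long int ret = INTERNAL_VSYSCALL_CALL (vdso_gettimeofday, err, 2,
                                            &tv, &tz);
     if (INTERNAL_SYSCALL_ERROR_P (ret, err))
       ... fall back to the real system call ...

   The callee is reached through "mtctr; bctrl", and the CR value copied
   into ERR by "mfcr" carries CR0.SO, so failure is detected exactly as it
   is for the "sc" instruction.  */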
# undef INLINE_SYSCALL
# define INLINE_SYSCALL(name, nr, args...) \
  ({ \
    INTERNAL_SYSCALL_DECL (sc_err); \
    long int sc_ret = INTERNAL_SYSCALL (name, sc_err, nr, args); \
    if (INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err)) \
      { \
        __set_errno (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err)); \
        sc_ret = -1L; \
      } \
    sc_ret; \
  })

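/* Illustration: INLINE_SYSCALL (open, 3, pathname, flags, mode) yields the
   kernel's return value on success; on failure it stores the error code in
   errno and evaluates to -1, matching the usual C library convention.  */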
/* Define a macro which expands inline into the wrapper code for a system
   call.  This use is for internal calls that do not need to handle errors
   normally.  It will never touch errno.
   On powerpc a system call basically clobbers the same registers as a
   function call, with the exception of LR (which is needed for the
   "sc; bnslr+" sequence) and CR (where only CR0.SO is clobbered to signal
   an error return status).  */

# undef INTERNAL_SYSCALL_DECL
# define INTERNAL_SYSCALL_DECL(err) long int err __attribute__ ((unused))

# undef INTERNAL_SYSCALL
# define INTERNAL_SYSCALL_NCS(name, err, nr, args...) \
  ({ \
    register long int r0 __asm__ ("r0"); \
    register long int r3 __asm__ ("r3"); \
    register long int r4 __asm__ ("r4"); \
    register long int r5 __asm__ ("r5"); \
    register long int r6 __asm__ ("r6"); \
    register long int r7 __asm__ ("r7"); \
    register long int r8 __asm__ ("r8"); \
    register long int r9 __asm__ ("r9"); \
    register long int r10 __asm__ ("r10"); \
    register long int r11 __asm__ ("r11"); \
    register long int r12 __asm__ ("r12"); \
    LOADARGS_##nr (name, args); \
    __asm__ __volatile__ \
      ("sc\n\t" \
       "mfcr %0" \
       : "=&r" (r0), \
	 "=&r" (r3), "=&r" (r4), "=&r" (r5), "=&r" (r6), "=&r" (r7), \
	 "=&r" (r8), "=&r" (r9), "=&r" (r10), "=&r" (r11), "=&r" (r12) \
       : ASM_INPUT_##nr \
       : "cr0", "ctr", "memory"); \
    err = r0; \
    (int) r3; \
  })
# define INTERNAL_SYSCALL(name, err, nr, args...) \
  INTERNAL_SYSCALL_NCS (__NR_##name, err, nr, ##args)

# undef INTERNAL_SYSCALL_ERROR_P
# define INTERNAL_SYSCALL_ERROR_P(val, err) \
  ((void) (val), __builtin_expect ((err) & (1 << 28), 0))

# undef INTERNAL_SYSCALL_ERRNO
# define INTERNAL_SYSCALL_ERRNO(val, err) (val)

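/* Typical internal-call pattern (illustrative, not part of the original
   header):

     INTERNAL_SYSCALL_DECL (err);
     long int ret = INTERNAL_SYSCALL (getpid, err, 0);
     if (INTERNAL_SYSCALL_ERROR_P (ret, err))
       return -INTERNAL_SYSCALL_ERRNO (ret, err);

   Unlike INLINE_SYSCALL this never touches errno; the caller checks the
   saved CR bits in ERR and interprets the raw return value itself.  */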
# define LOADARGS_0(name, dummy) \
  r0 = name
# define LOADARGS_1(name, __arg1) \
  long int arg1 = (long int) (__arg1); \
  LOADARGS_0(name, 0); \
  extern void __illegally_sized_syscall_arg1 (void); \
  if (__builtin_classify_type (__arg1) != 5 && sizeof (__arg1) > 4) \
    __illegally_sized_syscall_arg1 (); \
  r3 = arg1
# define LOADARGS_2(name, __arg1, __arg2) \
  long int arg2 = (long int) (__arg2); \
  LOADARGS_1(name, __arg1); \
  extern void __illegally_sized_syscall_arg2 (void); \
  if (__builtin_classify_type (__arg2) != 5 && sizeof (__arg2) > 4) \
    __illegally_sized_syscall_arg2 (); \
  r4 = arg2
# define LOADARGS_3(name, __arg1, __arg2, __arg3) \
  long int arg3 = (long int) (__arg3); \
  LOADARGS_2(name, __arg1, __arg2); \
  extern void __illegally_sized_syscall_arg3 (void); \
  if (__builtin_classify_type (__arg3) != 5 && sizeof (__arg3) > 4) \
    __illegally_sized_syscall_arg3 (); \
  r5 = arg3
# define LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4) \
  long int arg4 = (long int) (__arg4); \
  LOADARGS_3(name, __arg1, __arg2, __arg3); \
  extern void __illegally_sized_syscall_arg4 (void); \
  if (__builtin_classify_type (__arg4) != 5 && sizeof (__arg4) > 4) \
    __illegally_sized_syscall_arg4 (); \
  r6 = arg4
# define LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5) \
  long int arg5 = (long int) (__arg5); \
  LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4); \
  extern void __illegally_sized_syscall_arg5 (void); \
  if (__builtin_classify_type (__arg5) != 5 && sizeof (__arg5) > 4) \
    __illegally_sized_syscall_arg5 (); \
  r7 = arg5
# define LOADARGS_6(name, __arg1, __arg2, __arg3, __arg4, __arg5, __arg6) \
  long int arg6 = (long int) (__arg6); \
  LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5); \
  extern void __illegally_sized_syscall_arg6 (void); \
  if (__builtin_classify_type (__arg6) != 5 && sizeof (__arg6) > 4) \
    __illegally_sized_syscall_arg6 (); \
  r8 = arg6

# define ASM_INPUT_0 "0" (r0)
# define ASM_INPUT_1 ASM_INPUT_0, "1" (r3)
# define ASM_INPUT_2 ASM_INPUT_1, "2" (r4)
# define ASM_INPUT_3 ASM_INPUT_2, "3" (r5)
# define ASM_INPUT_4 ASM_INPUT_3, "4" (r6)
# define ASM_INPUT_5 ASM_INPUT_4, "5" (r7)
# define ASM_INPUT_6 ASM_INPUT_5, "6" (r8)
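/* LOADARGS_<n> puts the system call number (or vDSO function pointer) in r0
   and the arguments in r3..r8, while ASM_INPUT_<n> ties those registers to
   the asm operands above.  The __builtin_classify_type check (class 5 is a
   pointer) rejects any non-pointer argument wider than 4 bytes: the call to
   the never-defined __illegally_sized_syscall_argN survives constant folding
   only in that case and so fails at link time, since a 64-bit value cannot
   be passed in a single 32-bit register.  */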

#endif /* __ASSEMBLER__ */


/* Pointer mangling support.  */
#if IS_IN (rtld)
/* We cannot use the thread descriptor because in ld.so we use setjmp
   earlier than the descriptor is initialized.  */
#else
# ifdef __ASSEMBLER__
#  define PTR_MANGLE(reg, tmpreg) \
	lwz	tmpreg,POINTER_GUARD(r2); \
	xor	reg,tmpreg,reg
#  define PTR_MANGLE2(reg, tmpreg) \
	xor	reg,tmpreg,reg
#  define PTR_MANGLE3(destreg, reg, tmpreg) \
	lwz	tmpreg,POINTER_GUARD(r2); \
	xor	destreg,tmpreg,reg
#  define PTR_DEMANGLE(reg, tmpreg) PTR_MANGLE (reg, tmpreg)
#  define PTR_DEMANGLE2(reg, tmpreg) PTR_MANGLE2 (reg, tmpreg)
#  define PTR_DEMANGLE3(destreg, reg, tmpreg) PTR_MANGLE3 (destreg, reg, tmpreg)
# else
#  define PTR_MANGLE(var) \
  (var) = (__typeof (var)) ((uintptr_t) (var) ^ THREAD_GET_POINTER_GUARD ())
#  define PTR_DEMANGLE(var) PTR_MANGLE (var)
# endif
#endif
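/* Illustration (not part of the original header): mangling is a plain XOR
   with the per-thread pointer guard, so PTR_DEMANGLE is the same operation
   and undoes it:

     void *fn = ...;
     PTR_MANGLE (fn);      stored form; meaningless without the guard value
     PTR_DEMANGLE (fn);    fn is restored before it is used

   setjmp/longjmp and similar code rely on this to protect code and stack
   pointers that are saved in memory.  */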

#endif /* linux/powerpc/powerpc32/sysdep.h */