/* Copyright (C) 2000-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <https://www.gnu.org/licenses/>.  */

#ifndef _LINUX_MIPS_SYSDEP_H
#define _LINUX_MIPS_SYSDEP_H 1

/* There is some commonality.  */
#include <sysdeps/unix/sysv/linux/sysdep.h>
#include <sysdeps/unix/mips/mips64/n32/sysdep.h>

#include <tls.h>

/* In order to get the __set_errno () definition in INLINE_SYSCALL.  */
#ifndef __ASSEMBLER__
#include <errno.h>
#endif

/* For Linux we can use the system call table in the header file
     /usr/include/asm/unistd.h
   of the kernel.  But these symbols do not follow the SYS_* syntax
   so we have to redefine the `SYS_ify' macro here.  */
#undef SYS_ify
#define SYS_ify(syscall_name) __NR_##syscall_name
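/* As an example, SYS_ify (openat) simply expands to __NR_openat, the
   number the kernel headers assign to the openat system call on this
   ABI.  */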

#ifdef __ASSEMBLER__

/* We don't want the label for the error handler to be visible in the symbol
   table when we define it here.  */
# define SYSCALL_ERROR_LABEL 99b

#else /* ! __ASSEMBLER__ */

/* Convert X to a long long, without losing any bits if it is one
   already or warning if it is a 32-bit pointer.  */
#define ARGIFY(X) ((long long) (__typeof__ ((X) - (X))) (X))
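/* The intermediate cast to __typeof__ ((X) - (X)) is what avoids the
   pointer-to-integer warning: for an integer argument it is the argument's
   own promoted type, while for a pointer it is ptrdiff_t, so a 32-bit n32
   pointer is first converted to a 32-bit signed integer and then
   sign-extended into the 64-bit register the kernel expects.  */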

/* Define a macro which expands into the inline wrapper code for a system
   call.  */
#undef INLINE_SYSCALL
#define INLINE_SYSCALL(name, nr, args...) \
  ({ INTERNAL_SYSCALL_DECL (_sc_err); \
     long result_var = INTERNAL_SYSCALL (name, _sc_err, nr, args); \
     if ( INTERNAL_SYSCALL_ERROR_P (result_var, _sc_err) ) \
       { \
         __set_errno (INTERNAL_SYSCALL_ERRNO (result_var, _sc_err)); \
         result_var = -1L; \
       } \
     result_var; })
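
/* As an illustration, INLINE_SYSCALL (close, 1, fd) yields the kernel's
   return value on success; on failure it stores the returned error code
   in errno and evaluates to -1.  */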

#undef INTERNAL_SYSCALL_DECL
#define INTERNAL_SYSCALL_DECL(err) long err __attribute__ ((unused))

#undef INTERNAL_SYSCALL_ERROR_P
#define INTERNAL_SYSCALL_ERROR_P(val, err) ((void) (val), (long) (err))

#undef INTERNAL_SYSCALL_ERRNO
#define INTERNAL_SYSCALL_ERRNO(val, err) ((void) (err), val)
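
/* The Linux/MIPS kernel flags an error by setting $a3 to a non-zero value
   and returning the positive errno code in $v0, so the error predicate
   only has to test ERR and the errno value is VAL itself.  */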

/* Note that the original Linux syscall restart convention required the
   instruction immediately preceding SYSCALL to initialize $v0 with the
   syscall number.  Then if a restart triggered, $v0 would have been
   clobbered by the syscall interrupted, and needed to be reinitialized.
   The kernel would decrement the PC by 4 before switching back to the
   user mode so that $v0 had been reloaded before SYSCALL was executed
   again.  This implied the place $v0 was loaded from must have been
   preserved across a syscall, e.g. an immediate, static register, stack
   slot, etc.

   The convention was relaxed in Linux with a change applied to the kernel
   GIT repository as commit 96187fb0bc30cd7919759d371d810e928048249d, that
   first appeared in the 2.6.36 release.  Since then the kernel has had
   code that reloads $v0 upon syscall restart and resumes right at the
   SYSCALL instruction, so no special arrangement is needed anymore.

   For backwards compatibility with existing kernel binaries we support
   the old convention by choosing the instruction preceding SYSCALL
   carefully.  This also means we have to force a 32-bit encoding of the
   microMIPS MOVE instruction if one is used.  */

#ifdef __mips_micromips
# define MOVE32 "move32"
#else
# define MOVE32 "move"
#endif

#undef INTERNAL_SYSCALL
#define INTERNAL_SYSCALL(name, err, nr, args...) \
  internal_syscall##nr ("li\t%0, %2\t\t\t# " #name "\n\t", \
                        "IK" (SYS_ify (name)), \
                        0, err, args)

#undef INTERNAL_SYSCALL_NCS
#define INTERNAL_SYSCALL_NCS(number, err, nr, args...) \
  internal_syscall##nr (MOVE32 "\t%0, %2\n\t", \
                        "r" (__s0), \
                        number, err, args)
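
/* For instance, INTERNAL_SYSCALL (close, err, 1, fd) expands to
   internal_syscall1 with an LI that loads the constant __NR_close into
   $v0, whereas INTERNAL_SYSCALL_NCS takes a run-time syscall number: the
   number is kept in the call-saved register $s0 and copied into $v0 with
   MOVE32, so it survives a restart under the old kernel convention
   described above.  */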

#define internal_syscall0(v0_init, input, number, err, dummy...) \
({ \
  long _sys_result; \
  \
  { \
    register long long __s0 asm ("$16") __attribute__ ((unused)) \
      = (number); \
    register long long __v0 asm ("$2"); \
    register long long __a3 asm ("$7"); \
    __asm__ volatile ( \
    ".set\tnoreorder\n\t" \
    v0_init \
    "syscall\n\t" \
    ".set reorder" \
    : "=r" (__v0), "=r" (__a3) \
    : input \
    : __SYSCALL_CLOBBERS); \
    err = __a3; \
    _sys_result = __v0; \
  } \
  _sys_result; \
})
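
/* In each of these helpers __s0 is set up even when it is otherwise
   unused so that an INTERNAL_SYSCALL_NCS number lives in the call-saved
   register $s0 across the call, and the .set noreorder region ensures
   v0_init is the instruction immediately preceding SYSCALL, as the old
   restart convention described above requires.  */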

#define internal_syscall1(v0_init, input, number, err, arg1) \
({ \
  long _sys_result; \
  \
  { \
    register long long __s0 asm ("$16") __attribute__ ((unused)) \
      = (number); \
    register long long __v0 asm ("$2"); \
    register long long __a0 asm ("$4") = ARGIFY (arg1); \
    register long long __a3 asm ("$7"); \
    __asm__ volatile ( \
    ".set\tnoreorder\n\t" \
    v0_init \
    "syscall\n\t" \
    ".set reorder" \
    : "=r" (__v0), "=r" (__a3) \
    : input, "r" (__a0) \
    : __SYSCALL_CLOBBERS); \
    err = __a3; \
    _sys_result = __v0; \
  } \
  _sys_result; \
})

#define internal_syscall2(v0_init, input, number, err, arg1, arg2) \
({ \
  long _sys_result; \
  \
  { \
    register long long __s0 asm ("$16") __attribute__ ((unused)) \
      = (number); \
    register long long __v0 asm ("$2"); \
    register long long __a0 asm ("$4") = ARGIFY (arg1); \
    register long long __a1 asm ("$5") = ARGIFY (arg2); \
    register long long __a3 asm ("$7"); \
    __asm__ volatile ( \
    ".set\tnoreorder\n\t" \
    v0_init \
    "syscall\n\t" \
    ".set\treorder" \
    : "=r" (__v0), "=r" (__a3) \
    : input, "r" (__a0), "r" (__a1) \
    : __SYSCALL_CLOBBERS); \
    err = __a3; \
    _sys_result = __v0; \
  } \
  _sys_result; \
})

#define internal_syscall3(v0_init, input, number, err, \
                          arg1, arg2, arg3) \
({ \
  long _sys_result; \
  \
  { \
    register long long __s0 asm ("$16") __attribute__ ((unused)) \
      = (number); \
    register long long __v0 asm ("$2"); \
    register long long __a0 asm ("$4") = ARGIFY (arg1); \
    register long long __a1 asm ("$5") = ARGIFY (arg2); \
    register long long __a2 asm ("$6") = ARGIFY (arg3); \
    register long long __a3 asm ("$7"); \
    __asm__ volatile ( \
    ".set\tnoreorder\n\t" \
    v0_init \
    "syscall\n\t" \
    ".set\treorder" \
    : "=r" (__v0), "=r" (__a3) \
    : input, "r" (__a0), "r" (__a1), "r" (__a2) \
    : __SYSCALL_CLOBBERS); \
    err = __a3; \
    _sys_result = __v0; \
  } \
  _sys_result; \
})

#define internal_syscall4(v0_init, input, number, err, \
                          arg1, arg2, arg3, arg4) \
({ \
  long _sys_result; \
  \
  { \
    register long long __s0 asm ("$16") __attribute__ ((unused)) \
      = (number); \
    register long long __v0 asm ("$2"); \
    register long long __a0 asm ("$4") = ARGIFY (arg1); \
    register long long __a1 asm ("$5") = ARGIFY (arg2); \
    register long long __a2 asm ("$6") = ARGIFY (arg3); \
    register long long __a3 asm ("$7") = ARGIFY (arg4); \
    __asm__ volatile ( \
    ".set\tnoreorder\n\t" \
    v0_init \
    "syscall\n\t" \
    ".set\treorder" \
    : "=r" (__v0), "+r" (__a3) \
    : input, "r" (__a0), "r" (__a1), "r" (__a2) \
    : __SYSCALL_CLOBBERS); \
    err = __a3; \
    _sys_result = __v0; \
  } \
  _sys_result; \
})

#define internal_syscall5(v0_init, input, number, err, \
                          arg1, arg2, arg3, arg4, arg5) \
({ \
  long _sys_result; \
  \
  { \
    register long long __s0 asm ("$16") __attribute__ ((unused)) \
      = (number); \
    register long long __v0 asm ("$2"); \
    register long long __a0 asm ("$4") = ARGIFY (arg1); \
    register long long __a1 asm ("$5") = ARGIFY (arg2); \
    register long long __a2 asm ("$6") = ARGIFY (arg3); \
    register long long __a3 asm ("$7") = ARGIFY (arg4); \
    register long long __a4 asm ("$8") = ARGIFY (arg5); \
    __asm__ volatile ( \
    ".set\tnoreorder\n\t" \
    v0_init \
    "syscall\n\t" \
    ".set\treorder" \
    : "=r" (__v0), "+r" (__a3) \
    : input, "r" (__a0), "r" (__a1), "r" (__a2), "r" (__a4) \
    : __SYSCALL_CLOBBERS); \
    err = __a3; \
    _sys_result = __v0; \
  } \
  _sys_result; \
})

#define internal_syscall6(v0_init, input, number, err, \
                          arg1, arg2, arg3, arg4, arg5, arg6) \
({ \
  long _sys_result; \
  \
  { \
    register long long __s0 asm ("$16") __attribute__ ((unused)) \
      = (number); \
    register long long __v0 asm ("$2"); \
    register long long __a0 asm ("$4") = ARGIFY (arg1); \
    register long long __a1 asm ("$5") = ARGIFY (arg2); \
    register long long __a2 asm ("$6") = ARGIFY (arg3); \
    register long long __a3 asm ("$7") = ARGIFY (arg4); \
    register long long __a4 asm ("$8") = ARGIFY (arg5); \
    register long long __a5 asm ("$9") = ARGIFY (arg6); \
    __asm__ volatile ( \
    ".set\tnoreorder\n\t" \
    v0_init \
    "syscall\n\t" \
    ".set\treorder" \
    : "=r" (__v0), "+r" (__a3) \
    : input, "r" (__a0), "r" (__a1), "r" (__a2), "r" (__a4), \
      "r" (__a5) \
    : __SYSCALL_CLOBBERS); \
    err = __a3; \
    _sys_result = __v0; \
  } \
  _sys_result; \
})

#define __SYSCALL_CLOBBERS "$1", "$3", "$10", "$11", "$12", "$13", \
        "$14", "$15", "$24", "$25", "hi", "lo", "memory"

/* Standard MIPS syscalls have an error flag, and return a positive errno
   when the error flag is set.  Emulate this behaviour for vsyscalls so that
   the INTERNAL_SYSCALL_{ERROR_P,ERRNO} macros work correctly.  */
#define INTERNAL_VSYSCALL_CALL(funcptr, err, nr, args...) \
  ({ \
    long _ret = funcptr (args); \
    err = ((unsigned long) (_ret) >= (unsigned long) -4095L); \
    if (err) \
      _ret = -_ret; \
    _ret; \
  })
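
/* For example, if the vDSO clock_gettime implementation fails and returns
   -EINVAL (-22), the comparison above sets ERR to 1 and _ret becomes the
   positive value 22, matching the $a3/positive-errno convention of a real
   syscall.  */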

/* List of system calls which are supported as vsyscalls.  */
#define HAVE_CLOCK_GETTIME_VSYSCALL 1
#define HAVE_GETTIMEOFDAY_VSYSCALL 1

#endif /* __ASSEMBLER__ */

/* Pointer mangling is not yet supported for MIPS.  */
#define PTR_MANGLE(var) (void) (var)
#define PTR_DEMANGLE(var) (void) (var)

#endif /* linux/mips/sysdep.h */