/* Copyright (C) 2000-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <http://www.gnu.org/licenses/>.  */
17
#ifndef _LINUX_MIPS_MIPS32_SYSDEP_H
#define _LINUX_MIPS_MIPS32_SYSDEP_H 1

/* Always enable vsyscalls on mips32.  */
#define ALWAYS_USE_VSYSCALL 1

/* There is some commonality.  */
#include <sysdeps/unix/sysv/linux/sysdep.h>
#include <sysdeps/unix/mips/mips32/sysdep.h>

#include <tls.h>

/* In order to get __set_errno() definition in INLINE_SYSCALL.  */
#ifndef __ASSEMBLER__
#include <errno.h>
#endif

/* For Linux we can use the system call table in the header file
	/usr/include/asm/unistd.h
   of the kernel.  But these symbols do not follow the SYS_* syntax
   so we have to redefine the `SYS_ify' macro here.  Each SYS_ify(foo)
   expands to the kernel's __NR_foo syscall number.  */
#undef SYS_ify
#define SYS_ify(syscall_name)	__NR_##syscall_name
41
#ifdef __ASSEMBLER__

/* We don't want the label for the error handler to be visible in the
   symbol table when we define it here, so use a backward reference to
   the local numeric label `99' instead of a named symbol.  */
#ifdef __PIC__
# define SYSCALL_ERROR_LABEL 99b
#endif
49
50 #else /* ! __ASSEMBLER__ */
51
/* Inline wrapper for a system call: perform the call and, on failure,
   store the (positive) error code into errno and yield -1; on success
   yield the raw result.  */
#undef INLINE_SYSCALL
#define INLINE_SYSCALL(name, nr, args...)				\
  ({									\
    INTERNAL_SYSCALL_DECL (_inl_err);					\
    long _inl_ret = INTERNAL_SYSCALL (name, _inl_err, nr, args);	\
    if (INTERNAL_SYSCALL_ERROR_P (_inl_ret, _inl_err))			\
      {									\
	__set_errno (INTERNAL_SYSCALL_ERRNO (_inl_ret, _inl_err));	\
	_inl_ret = -1L;							\
      }									\
    _inl_ret;								\
  })
64
/* ERR is a plain long holding the error flag delivered by the kernel
   alongside the result (see the internal_syscallN macros below, which
   copy $a3 into it).  */
#undef INTERNAL_SYSCALL_DECL
#define INTERNAL_SYSCALL_DECL(err) long err __attribute__ ((unused))

/* Nonzero ERR means the syscall failed; VAL is unused here.  */
#undef INTERNAL_SYSCALL_ERROR_P
#define INTERNAL_SYSCALL_ERROR_P(val, err) ((void) (val), (long) (err))

/* When the error flag is set, VAL holds the positive errno value.  */
#undef INTERNAL_SYSCALL_ERRNO
#define INTERNAL_SYSCALL_ERRNO(val, err) ((void) (err), val)
73
/* Note that the original Linux syscall restart convention required the
   instruction immediately preceding SYSCALL to initialize $v0 with the
   syscall number.  Then if a restart triggered, $v0 would have been
   clobbered by the syscall interrupted, and needed to be reinitialized.
   The kernel would decrement the PC by 4 before switching back to the
   user mode so that $v0 had been reloaded before SYSCALL was executed
   again.  This implied the place $v0 was loaded from must have been
   preserved across a syscall, e.g. an immediate, static register, stack
   slot, etc.

   The convention was relaxed in Linux with a change applied to the kernel
   GIT repository as commit 96187fb0bc30cd7919759d371d810e928048249d, that
   first appeared in the 2.6.36 release.  Since then the kernel has had
   code that reloads $v0 upon syscall restart and resumes right at the
   SYSCALL instruction, so no special arrangement is needed anymore.

   For backwards compatibility with existing kernel binaries we support
   the old convention by choosing the instruction preceding SYSCALL
   carefully.  This also means we have to force a 32-bit encoding of the
   microMIPS MOVE instruction if one is used.  */

#ifdef __mips_micromips
# define MOVE32 "move32"
#else
# define MOVE32 "move"
#endif

#undef INTERNAL_SYSCALL
#undef INTERNAL_SYSCALL_NCS

/* Force the standard MIPS encoding for a function; used on the
   out-of-line syscall wrappers, since there is no MIPS16 SYSCALL
   instruction (see below).  */
#define __nomips16 __attribute__ ((nomips16))
105
/* Return convention for the out-of-line syscall wrappers: the result
   and the error flag come back packed in one long long (VAL); the REG
   view splits it into its two halves, with V0 read as the syscall
   result and V1 as the error flag by the macros below.  */
union __mips_syscall_return
{
  long long val;
  struct
  {
    long v0;
    long v1;
  }
  reg;
};
116
#ifdef __mips16
/* There's no MIPS16 syscall instruction, so we go through out-of-line
   standard MIPS wrappers.  These do use inline snippets below though,
   through INTERNAL_SYSCALL_MIPS16.  Spilling the syscall number to
   memory gives the best code in that case, avoiding the need to save
   and restore a static register.  */

# include <mips16-syscall.h>

# define INTERNAL_SYSCALL(name, err, nr, args...)	\
	INTERNAL_SYSCALL_NCS (SYS_ify (name), err, nr, args)

/* Call the out-of-line __mips16_syscallNR wrapper and unpack its
   long long return into the result (expression value) and the error
   flag (stored into ERR).  */
# define INTERNAL_SYSCALL_NCS(number, err, nr, args...)	\
({							\
	union __mips_syscall_return _sc_ret;		\
	_sc_ret.val = __mips16_syscall##nr (args, number);	\
	err = _sc_ret.reg.v1;				\
	_sc_ret.reg.v0;					\
})

/* Inline snippet used from the out-of-line MIPS16 wrappers: reload
   the syscall number from its memory slot ("R" constraint) right
   before SYSCALL, satisfying the old restart convention.  */
# define INTERNAL_SYSCALL_MIPS16(number, err, nr, args...)	\
	internal_syscall##nr ("lw\t%0, %2\n\t",		\
			      "R" (number),		\
			      number, err, args)

#else /* !__mips16 */
/* Constant syscall number: load it as an immediate ("IK") right
   before SYSCALL.  */
# define INTERNAL_SYSCALL(name, err, nr, args...)	\
	internal_syscall##nr ("li\t%0, %2\t\t\t# " #name "\n\t",	\
			      "IK" (SYS_ify (name)),	\
			      SYS_ify (name), err, args)

/* Non-constant syscall number: keep it in the static register $s0
   (set up by internal_syscallN) and copy it to $v0 right before
   SYSCALL, using a 32-bit MOVE encoding where required (see the
   restart-convention comment above).  */
# define INTERNAL_SYSCALL_NCS(number, err, nr, args...)	\
	internal_syscall##nr (MOVE32 "\t%0, %2\n\t",	\
			      "r" (__s0),		\
			      number, err, args)

#endif /* !__mips16 */
154
/* Zero-argument inline syscall.  V0_INIT is the instruction pattern
   that loads the syscall number into $v0; it is placed immediately
   before SYSCALL to honour the old kernel restart convention.  INPUT
   is the matching asm constraint for operand %2.  ERR receives the
   $a3 error flag; the expression's value is the $v0 result.  */
#define internal_syscall0(v0_init, input, number, err, dummy...)	\
({									\
	long _sys_result;						\
									\
	{								\
	/* $s0 keeps the number live across the call in case the	\
	   kernel restarts it (preserved static register).  */		\
	register long __s0 asm ("$16") __attribute__ ((unused))		\
	  = (number);							\
	register long __v0 asm ("$2");					\
	register long __a3 asm ("$7");					\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	/* Use "\t" like internal_syscall2..4 for consistency.  */	\
	".set\treorder"							\
	: "=r" (__v0), "=r" (__a3)					\
	: input								\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}								\
	_sys_result;							\
})
177
/* One-argument inline syscall: argument in $a0.  V0_INIT loads the
   syscall number into $v0 immediately before SYSCALL (old restart
   convention); INPUT is the matching constraint for operand %2.  ERR
   receives the $a3 error flag; the expression's value is the $v0
   result.  */
#define internal_syscall1(v0_init, input, number, err, arg1)		\
({									\
	long _sys_result;						\
									\
	{								\
	/* $s0 keeps the number live across the call in case the	\
	   kernel restarts it (preserved static register).  */		\
	register long __s0 asm ("$16") __attribute__ ((unused))		\
	  = (number);							\
	register long __v0 asm ("$2");					\
	register long __a0 asm ("$4") = (long) (arg1);			\
	register long __a3 asm ("$7");					\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	/* Use "\t" like internal_syscall2..4 for consistency.  */	\
	".set\treorder"							\
	: "=r" (__v0), "=r" (__a3)					\
	: input, "r" (__a0)						\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}								\
	_sys_result;							\
})
201
/* Two-argument inline syscall: arguments in $a0, $a1.  V0_INIT loads
   the syscall number into $v0 immediately before SYSCALL (old restart
   convention); INPUT is the matching constraint for operand %2.  ERR
   receives the $a3 error flag; the expression's value is the $v0
   result.  */
#define internal_syscall2(v0_init, input, number, err, arg1, arg2)	\
({									\
	long _sys_result;						\
									\
	{								\
	/* $s0 keeps the number live across the call in case the	\
	   kernel restarts it (preserved static register).  */		\
	register long __s0 asm ("$16") __attribute__ ((unused))		\
	  = (number);							\
	register long __v0 asm ("$2");					\
	register long __a0 asm ("$4") = (long) (arg1);			\
	register long __a1 asm ("$5") = (long) (arg2);			\
	register long __a3 asm ("$7");					\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	".set\treorder"							\
	: "=r" (__v0), "=r" (__a3)					\
	: input, "r" (__a0), "r" (__a1)					\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}								\
	_sys_result;							\
})
226
/* Three-argument inline syscall: arguments in $a0..$a2.  V0_INIT loads
   the syscall number into $v0 immediately before SYSCALL (old restart
   convention); INPUT is the matching constraint for operand %2.  ERR
   receives the $a3 error flag; the expression's value is the $v0
   result.  */
#define internal_syscall3(v0_init, input, number, err,			\
			  arg1, arg2, arg3)				\
({									\
	long _sys_result;						\
									\
	{								\
	/* $s0 keeps the number live across the call in case the	\
	   kernel restarts it (preserved static register).  */		\
	register long __s0 asm ("$16") __attribute__ ((unused))		\
	  = (number);							\
	register long __v0 asm ("$2");					\
	register long __a0 asm ("$4") = (long) (arg1);			\
	register long __a1 asm ("$5") = (long) (arg2);			\
	register long __a2 asm ("$6") = (long) (arg3);			\
	register long __a3 asm ("$7");					\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	".set\treorder"							\
	: "=r" (__v0), "=r" (__a3)					\
	: input, "r" (__a0), "r" (__a1), "r" (__a2)			\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}								\
	_sys_result;							\
})
253
/* Four-argument inline syscall: arguments in $a0..$a3.  V0_INIT loads
   the syscall number into $v0 immediately before SYSCALL (old restart
   convention); INPUT is the matching constraint for operand %2.  Note
   that $a3 is both an input (ARG4) and an output (the error flag), so
   it uses the "+r" in/out constraint.  ERR receives the $a3 error
   flag; the expression's value is the $v0 result.  */
#define internal_syscall4(v0_init, input, number, err,			\
			  arg1, arg2, arg3, arg4)			\
({									\
	long _sys_result;						\
									\
	{								\
	/* $s0 keeps the number live across the call in case the	\
	   kernel restarts it (preserved static register).  */		\
	register long __s0 asm ("$16") __attribute__ ((unused))		\
	  = (number);							\
	register long __v0 asm ("$2");					\
	register long __a0 asm ("$4") = (long) (arg1);			\
	register long __a1 asm ("$5") = (long) (arg2);			\
	register long __a2 asm ("$6") = (long) (arg3);			\
	register long __a3 asm ("$7") = (long) (arg4);			\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	".set\treorder"							\
	: "=r" (__v0), "+r" (__a3)					\
	: input, "r" (__a0), "r" (__a1), "r" (__a2)			\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}								\
	_sys_result;							\
})
280
/* Standalone MIPS wrappers used for 5, 6, and 7 argument syscalls,
   which require stack arguments.  We rely on the compiler arranging
   wrapper's arguments according to the MIPS o32 function calling
   convention, which is reused by syscalls, except for the syscall
   number passed and the error flag returned (taken care of in the
   wrapper called).  This relieves us from relying on non-guaranteed
   compiler specifics required for the stack arguments to be pushed,
   which would be the case if these syscalls were inlined.  */

long long __nomips16 __mips_syscall5 (long arg1, long arg2, long arg3,
				      long arg4, long arg5,
				      long number);
libc_hidden_proto (__mips_syscall5, nomips16)

/* Five-argument syscall via the out-of-line __mips_syscall5; its
   long long return packs the result and the error flag, which are
   unpacked through union __mips_syscall_return.  V0_INIT and INPUT
   are unused here.  */
#define internal_syscall5(v0_init, input, number, err,			\
			  arg1, arg2, arg3, arg4, arg5)			\
({									\
	union __mips_syscall_return _sc_ret;				\
	_sc_ret.val = __mips_syscall5 ((long) (arg1),			\
				       (long) (arg2),			\
				       (long) (arg3),			\
				       (long) (arg4),			\
				       (long) (arg5),			\
				       (long) (number));		\
	err = _sc_ret.reg.v1;						\
	_sc_ret.reg.v0;							\
})
308
long long __nomips16 __mips_syscall6 (long arg1, long arg2, long arg3,
				      long arg4, long arg5, long arg6,
				      long number);
libc_hidden_proto (__mips_syscall6, nomips16)

/* Six-argument syscall via the out-of-line __mips_syscall6; its
   long long return packs the result and the error flag, which are
   unpacked through union __mips_syscall_return.  V0_INIT and INPUT
   are unused here.  */
#define internal_syscall6(v0_init, input, number, err,			\
			  arg1, arg2, arg3, arg4, arg5, arg6)		\
({									\
	union __mips_syscall_return _sc_ret;				\
	_sc_ret.val = __mips_syscall6 ((long) (arg1),			\
				       (long) (arg2),			\
				       (long) (arg3),			\
				       (long) (arg4),			\
				       (long) (arg5),			\
				       (long) (arg6),			\
				       (long) (number));		\
	err = _sc_ret.reg.v1;						\
	_sc_ret.reg.v0;							\
})
328
long long __nomips16 __mips_syscall7 (long arg1, long arg2, long arg3,
				      long arg4, long arg5, long arg6,
				      long arg7,
				      long number);
libc_hidden_proto (__mips_syscall7, nomips16)

/* Seven-argument syscall via the out-of-line __mips_syscall7; its
   long long return packs the result and the error flag, which are
   unpacked through union __mips_syscall_return.  V0_INIT and INPUT
   are unused here.  */
#define internal_syscall7(v0_init, input, number, err,			\
			  arg1, arg2, arg3, arg4, arg5, arg6, arg7)	\
({									\
	union __mips_syscall_return _sc_ret;				\
	_sc_ret.val = __mips_syscall7 ((long) (arg1),			\
				       (long) (arg2),			\
				       (long) (arg3),			\
				       (long) (arg4),			\
				       (long) (arg5),			\
				       (long) (arg6),			\
				       (long) (arg7),			\
				       (long) (number));		\
	err = _sc_ret.reg.v1;						\
	_sc_ret.reg.v0;							\
})
350
/* Registers the inline SYSCALL sequences list as clobbered: the
   remaining caller-saved GPRs not already used as asm operands, the
   HI/LO accumulator, and "memory" since a syscall may read or write
   through pointer arguments.  */
#define __SYSCALL_CLOBBERS "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", \
	"$14", "$15", "$24", "$25", "hi", "lo", "memory"
353
/* Standard MIPS syscalls have an error flag, and return a positive errno
   when the error flag is set.  Emulate this behaviour for vsyscalls so that
   the INTERNAL_SYSCALL_{ERROR_P,ERRNO} macros work correctly.  */
#define INTERNAL_VSYSCALL_CALL(funcptr, err, nr, args...)		\
({									\
	long _ret = funcptr (args);					\
	/* Returns in [-4095, -1] are negated errno values.  */		\
	err = ((unsigned long) (_ret) >= (unsigned long) -4095L);	\
	if (err)							\
		_ret = -_ret;						\
	_ret;								\
})

/* List of system calls which are supported as vsyscalls.  */
#define HAVE_CLOCK_GETTIME_VSYSCALL 1
#define HAVE_GETTIMEOFDAY_VSYSCALL 1
369
#endif /* __ASSEMBLER__ */

/* Pointer mangling is not yet supported for MIPS, so these are no-ops
   that merely consume their argument.  */
#define PTR_MANGLE(var) (void) (var)
#define PTR_DEMANGLE(var) (void) (var)

#endif /* linux/mips/mips32/sysdep.h */