/* sysdeps/unix/sysv/linux/mips/mips32/sysdep.h (glibc).  */
1 /* Copyright (C) 2000-2018 Free Software Foundation, Inc.
2 This file is part of the GNU C Library.
3
4 The GNU C Library is free software; you can redistribute it and/or
5 modify it under the terms of the GNU Lesser General Public
6 License as published by the Free Software Foundation; either
7 version 2.1 of the License, or (at your option) any later version.
8
9 The GNU C Library is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 Lesser General Public License for more details.
13
14 You should have received a copy of the GNU Lesser General Public
15 License along with the GNU C Library. If not, see
16 <http://www.gnu.org/licenses/>. */
17
18 #ifndef _LINUX_MIPS_MIPS32_SYSDEP_H
19 #define _LINUX_MIPS_MIPS32_SYSDEP_H 1
20
21 /* There is some commonality. */
22 #include <sysdeps/unix/sysv/linux/sysdep.h>
23 #include <sysdeps/unix/mips/mips32/sysdep.h>
24
25 #include <tls.h>
26
27 /* In order to get __set_errno() definition in INLINE_SYSCALL. */
28 #ifndef __ASSEMBLER__
29 #include <errno.h>
30 #endif
31
/* For Linux we can use the system call table in the header file
   /usr/include/asm/unistd.h
   of the kernel.  But these symbols do not follow the SYS_* syntax
   so we have to redefine the `SYS_ify' macro here.  */
/* Map a bare syscall name to the kernel's __NR_* constant by token
   pasting, e.g. SYS_ify (write) -> __NR_write.  */
#undef SYS_ify
#define SYS_ify(syscall_name)	__NR_##syscall_name
38
39 #ifdef __ASSEMBLER__
40
/* We don't want the label for the error handler to be visible in the symbol
   table when we define it here.  */
#ifdef __PIC__
/* `99b' is a backward reference to a local numeric label `99', which never
   appears in the symbol table; presumably emitted by the assembly syscall
   wrapper (PSEUDO) machinery — confirm against the asm sysdep headers.  */
# define SYSCALL_ERROR_LABEL 99b
#endif
46
47 #else /* ! __ASSEMBLER__ */
48
/* Define a macro which expands into the inline wrapper code for a system
   call.  On failure (INTERNAL_SYSCALL_ERROR_P) the kernel's positive
   errno value is stored into `errno' via __set_errno and -1 is returned,
   matching the usual C library convention; on success the raw result is
   the value of the statement expression.  */
#undef INLINE_SYSCALL
#define INLINE_SYSCALL(name, nr, args...)                               \
  ({ INTERNAL_SYSCALL_DECL (_sc_err);					\
     long result_var = INTERNAL_SYSCALL (name, _sc_err, nr, args);      \
     if ( INTERNAL_SYSCALL_ERROR_P (result_var, _sc_err) )		\
       {								\
	 __set_errno (INTERNAL_SYSCALL_ERRNO (result_var, _sc_err));	\
	 result_var = -1L;						\
       }								\
     result_var; })
61
/* Declare the per-call error flag; marked unused because some expansions
   never read it.  */
#undef INTERNAL_SYSCALL_DECL
#define INTERNAL_SYSCALL_DECL(err) long err __attribute__ ((unused))

/* On MIPS the kernel reports failure out of band: the error flag comes
   back in $a3 (copied into ERR by the internal_syscall* macros below),
   and when it is nonzero VAL already holds the positive errno code.  */
#undef INTERNAL_SYSCALL_ERROR_P
#define INTERNAL_SYSCALL_ERROR_P(val, err) ((void) (val), (long) (err))

#undef INTERNAL_SYSCALL_ERRNO
#define INTERNAL_SYSCALL_ERRNO(val, err) ((void) (err), val)
70
71 /* Note that the original Linux syscall restart convention required the
72 instruction immediately preceding SYSCALL to initialize $v0 with the
73 syscall number. Then if a restart triggered, $v0 would have been
74 clobbered by the syscall interrupted, and needed to be reinititalized.
75 The kernel would decrement the PC by 4 before switching back to the
76 user mode so that $v0 had been reloaded before SYSCALL was executed
77 again. This implied the place $v0 was loaded from must have been
78 preserved across a syscall, e.g. an immediate, static register, stack
79 slot, etc.
80
81 The convention was relaxed in Linux with a change applied to the kernel
82 GIT repository as commit 96187fb0bc30cd7919759d371d810e928048249d, that
83 first appeared in the 2.6.36 release. Since then the kernel has had
84 code that reloads $v0 upon syscall restart and resumes right at the
85 SYSCALL instruction, so no special arrangement is needed anymore.
86
87 For backwards compatibility with existing kernel binaries we support
88 the old convention by choosing the instruction preceding SYSCALL
89 carefully. This also means we have to force a 32-bit encoding of the
90 microMIPS MOVE instruction if one is used. */
91
/* Mnemonic used to copy the syscall number into $v0.  Under microMIPS a
   plain MOVE could assemble to a 16-bit encoding; force the 32-bit form
   (`move32') so the kernel's old restart convention of backing the PC up
   by 4 (see the comment above) still lands on this instruction.  */
#ifdef __mips_micromips
# define MOVE32 "move32"
#else
# define MOVE32 "move"
#endif
97
#undef INTERNAL_SYSCALL
#undef INTERNAL_SYSCALL_NCS

/* Force standard MIPS (not MIPS16) code generation; needed for functions
   that must contain the SYSCALL instruction, which has no MIPS16
   encoding (see the __mips16 block below).  */
#define __nomips16 __attribute__ ((nomips16))
102
/* Layout of the 64-bit value returned by the out-of-line syscall
   wrappers (__mips_syscall5/6/7, __mips16_syscall*): the two result
   registers packed into one long long.  Callers read the syscall result
   from reg.v0 and the error flag from reg.v1.  */
union __mips_syscall_return
{
  long long val;
  struct
  {
    long v0;
    long v1;
  }
  reg;
};
113
#ifdef __mips16
/* There's no MIPS16 syscall instruction, so we go through out-of-line
   standard MIPS wrappers.  These do use inline snippets below though,
   through INTERNAL_SYSCALL_MIPS16.  Spilling the syscall number to
   memory gives the best code in that case, avoiding the need to save
   and restore a static register.  */

# include <mips16-syscall.h>

/* Constant syscall name: just resolve it and go through the
   non-constant path.  */
# define INTERNAL_SYSCALL(name, err, nr, args...)			\
	INTERNAL_SYSCALL_NCS (SYS_ify (name), err, nr, args)

/* Dispatch to the out-of-line __mips16_syscall<NR> wrapper, which
   returns result and error flag packed in a union (see
   __mips_syscall_return above).  */
# define INTERNAL_SYSCALL_NCS(number, err, nr, args...)			\
({									\
	union __mips_syscall_return _sc_ret;				\
	_sc_ret.val = __mips16_syscall##nr (args, number);		\
	err = _sc_ret.reg.v1;						\
	_sc_ret.reg.v0;							\
})

/* Used from within the out-of-line wrappers themselves: reload the
   syscall number from a memory operand ("R"), a location that survives
   the syscall as the old restart convention requires.  */
# define INTERNAL_SYSCALL_MIPS16(number, err, nr, args...)		\
	internal_syscall##nr ("lw\t%0, %2\n\t",				\
			      "R" (number),				\
			      number, err, args)

#else /* !__mips16 */
/* Constant syscall number: load it as an immediate ("IK"), annotated
   with the syscall name in the generated assembly.  */
# define INTERNAL_SYSCALL(name, err, nr, args...)			\
	internal_syscall##nr ("li\t%0, %2\t\t\t# " #name "\n\t",	\
			      "IK" (SYS_ify (name)),			\
			      SYS_ify (name), err, args)

/* Non-constant syscall number: copy it from __s0, the call-saved
   register set up by the internal_syscall* macros, using a 32-bit MOVE
   encoding where required (MOVE32 above).  */
# define INTERNAL_SYSCALL_NCS(number, err, nr, args...)			\
	internal_syscall##nr (MOVE32 "\t%0, %2\n\t",			\
			      "r" (__s0),				\
			      number, err, args)

#endif /* !__mips16 */
151
/* Issue a syscall with no arguments.  V0_INIT is the instruction that
   loads the syscall number into $v0 (immediate, register move, or
   memory load; see INTERNAL_SYSCALL* above); NUMBER is also kept in the
   call-saved register $s0 ($16) so the register form survives a
   pre-2.6.36 kernel restart (see the restart-convention comment above).
   The kernel returns the result in $v0 and the error flag in $a3, which
   is copied into ERR.  DUMMY absorbs any stray trailing arguments.

   Consistency fix: use ".set\treorder" (tab) to match
   internal_syscall2/3/4 — the assembler output is unchanged.  */
#define internal_syscall0(v0_init, input, number, err, dummy...)	\
({									\
	long _sys_result;						\
									\
	{								\
	register long __s0 asm ("$16") __attribute__ ((unused))		\
	  = (number);							\
	register long __v0 asm ("$2");					\
	register long __a3 asm ("$7");					\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	".set\treorder"							\
	: "=r" (__v0), "=r" (__a3)					\
	: input								\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}								\
	_sys_result;							\
})
174
/* Issue a one-argument syscall: ARG1 in $a0, number loaded into $v0 by
   V0_INIT (and mirrored in call-saved $s0 for the old kernel restart
   convention — see the comment above).  Result in $v0; the $a3 error
   flag is copied into ERR.

   Consistency fix: use ".set\treorder" (tab) to match
   internal_syscall2/3/4 — the assembler output is unchanged.  */
#define internal_syscall1(v0_init, input, number, err, arg1)		\
({									\
	long _sys_result;						\
									\
	{								\
	register long __s0 asm ("$16") __attribute__ ((unused))		\
	  = (number);							\
	register long __v0 asm ("$2");					\
	register long __a0 asm ("$4") = (long) (arg1);			\
	register long __a3 asm ("$7");					\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	".set\treorder"							\
	: "=r" (__v0), "=r" (__a3)					\
	: input, "r" (__a0)						\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}								\
	_sys_result;							\
})
198
/* Issue a two-argument syscall: ARG1/ARG2 in $a0/$a1, number loaded
   into $v0 by V0_INIT (and mirrored in call-saved $s0 for the old
   kernel restart convention — see the comment above).  Result in $v0;
   the $a3 error flag is copied into ERR.  */
#define internal_syscall2(v0_init, input, number, err, arg1, arg2)	\
({									\
	long _sys_result;						\
									\
	{								\
	register long __s0 asm ("$16") __attribute__ ((unused))		\
	  = (number);							\
	register long __v0 asm ("$2");					\
	register long __a0 asm ("$4") = (long) (arg1);			\
	register long __a1 asm ("$5") = (long) (arg2);			\
	register long __a3 asm ("$7");					\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	".set\treorder"							\
	: "=r" (__v0), "=r" (__a3)					\
	: input, "r" (__a0), "r" (__a1)					\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}								\
	_sys_result;							\
})
223
/* Issue a three-argument syscall: ARG1..ARG3 in $a0..$a2, number loaded
   into $v0 by V0_INIT (and mirrored in call-saved $s0 for the old
   kernel restart convention — see the comment above).  Result in $v0;
   the $a3 error flag is copied into ERR.  */
#define internal_syscall3(v0_init, input, number, err,			\
			  arg1, arg2, arg3)				\
({									\
	long _sys_result;						\
									\
	{								\
	register long __s0 asm ("$16") __attribute__ ((unused))		\
	  = (number);							\
	register long __v0 asm ("$2");					\
	register long __a0 asm ("$4") = (long) (arg1);			\
	register long __a1 asm ("$5") = (long) (arg2);			\
	register long __a2 asm ("$6") = (long) (arg3);			\
	register long __a3 asm ("$7");					\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	".set\treorder"							\
	: "=r" (__v0), "=r" (__a3)					\
	: input, "r" (__a0), "r" (__a1), "r" (__a2)			\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}								\
	_sys_result;							\
})
250
/* Issue a four-argument syscall: ARG1..ARG4 in $a0..$a3.  Since $a3
   carries both the fourth argument in and the error flag out, it uses
   the "+r" read-write constraint here, unlike the 0-3 argument forms.
   Number loaded into $v0 by V0_INIT (and mirrored in call-saved $s0 for
   the old kernel restart convention — see the comment above).  */
#define internal_syscall4(v0_init, input, number, err,			\
			  arg1, arg2, arg3, arg4)			\
({									\
	long _sys_result;						\
									\
	{								\
	register long __s0 asm ("$16") __attribute__ ((unused))		\
	  = (number);							\
	register long __v0 asm ("$2");					\
	register long __a0 asm ("$4") = (long) (arg1);			\
	register long __a1 asm ("$5") = (long) (arg2);			\
	register long __a2 asm ("$6") = (long) (arg3);			\
	register long __a3 asm ("$7") = (long) (arg4);			\
	__asm__ volatile (						\
	".set\tnoreorder\n\t"						\
	v0_init								\
	"syscall\n\t"							\
	".set\treorder"							\
	: "=r" (__v0), "+r" (__a3)					\
	: input, "r" (__a0), "r" (__a1), "r" (__a2)			\
	: __SYSCALL_CLOBBERS);						\
	err = __a3;							\
	_sys_result = __v0;						\
	}								\
	_sys_result;							\
})
277
278 /* Standalone MIPS wrappers used for 5, 6, and 7 argument syscalls,
279 which require stack arguments. We rely on the compiler arranging
280 wrapper's arguments according to the MIPS o32 function calling
281 convention, which is reused by syscalls, except for the syscall
282 number passed and the error flag returned (taken care of in the
283 wrapper called). This relieves us from relying on non-guaranteed
284 compiler specifics required for the stack arguments to be pushed,
285 which would be the case if these syscalls were inlined. */
286
/* Out-of-line wrapper for five-argument syscalls (fifth argument goes on
   the stack per the o32 convention — see the comment above).  Returns
   $v0 and the $a3 error flag packed as union __mips_syscall_return.  */
long long __nomips16 __mips_syscall5 (long arg1, long arg2, long arg3,
				      long arg4, long arg5,
				      long number);
libc_hidden_proto (__mips_syscall5, nomips16)

/* V0_INIT and INPUT are unused here: the wrapper loads the syscall
   number itself from its trailing NUMBER parameter.  */
#define internal_syscall5(v0_init, input, number, err,			\
			  arg1, arg2, arg3, arg4, arg5)			\
({									\
	union __mips_syscall_return _sc_ret;				\
	_sc_ret.val = __mips_syscall5 ((long) (arg1),			\
				       (long) (arg2),			\
				       (long) (arg3),			\
				       (long) (arg4),			\
				       (long) (arg5),			\
				       (long) (number));		\
	err = _sc_ret.reg.v1;						\
	_sc_ret.reg.v0;							\
})
305
/* Out-of-line wrapper for six-argument syscalls; see __mips_syscall5
   for the rationale.  Returns $v0 and the $a3 error flag packed as
   union __mips_syscall_return.  */
long long __nomips16 __mips_syscall6 (long arg1, long arg2, long arg3,
				      long arg4, long arg5, long arg6,
				      long number);
libc_hidden_proto (__mips_syscall6, nomips16)

/* V0_INIT and INPUT are unused: the wrapper handles the number itself.  */
#define internal_syscall6(v0_init, input, number, err,			\
			  arg1, arg2, arg3, arg4, arg5, arg6)		\
({									\
	union __mips_syscall_return _sc_ret;				\
	_sc_ret.val = __mips_syscall6 ((long) (arg1),			\
				       (long) (arg2),			\
				       (long) (arg3),			\
				       (long) (arg4),			\
				       (long) (arg5),			\
				       (long) (arg6),			\
				       (long) (number));		\
	err = _sc_ret.reg.v1;						\
	_sc_ret.reg.v0;							\
})
325
/* Out-of-line wrapper for seven-argument syscalls; see __mips_syscall5
   for the rationale.  Returns $v0 and the $a3 error flag packed as
   union __mips_syscall_return.  */
long long __nomips16 __mips_syscall7 (long arg1, long arg2, long arg3,
				      long arg4, long arg5, long arg6,
				      long arg7,
				      long number);
libc_hidden_proto (__mips_syscall7, nomips16)

/* V0_INIT and INPUT are unused: the wrapper handles the number itself.  */
#define internal_syscall7(v0_init, input, number, err,			\
			  arg1, arg2, arg3, arg4, arg5, arg6, arg7)	\
({									\
	union __mips_syscall_return _sc_ret;				\
	_sc_ret.val = __mips_syscall7 ((long) (arg1),			\
				       (long) (arg2),			\
				       (long) (arg3),			\
				       (long) (arg4),			\
				       (long) (arg5),			\
				       (long) (arg6),			\
				       (long) (arg7),			\
				       (long) (number));		\
	err = _sc_ret.reg.v1;						\
	_sc_ret.reg.v0;							\
})
347
/* Registers the kernel may clobber across SYSCALL and that are not
   already asm operands: $1 (at), $3 (v1), $8-$15 (t0-t7), $24-$25
   (t8-t9), the HI/LO multiply/divide registers, and memory.  Call-saved
   registers (including $s0 holding the syscall number) are preserved.  */
#define __SYSCALL_CLOBBERS "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", \
	"$14", "$15", "$24", "$25", "hi", "lo", "memory"
350
/* Standard MIPS syscalls have an error flag, and return a positive errno
   when the error flag is set.  Emulate this behaviour for vsyscalls so that
   the INTERNAL_SYSCALL_{ERROR_P,ERRNO} macros work correctly.  */
/* vsyscalls follow the Linux convention of returning a negative errno in
   the -4095..-1 range on failure; translate that into the (flag, positive
   errno) pair the MIPS macros above expect.  */
#define INTERNAL_VSYSCALL_CALL(funcptr, err, nr, args...)		\
({									\
	long _ret = funcptr (args);					\
	err = ((unsigned long) (_ret) >= (unsigned long) -4095L);	\
	if (err)							\
	  _ret = -_ret;							\
	_ret;								\
})

/* List of system calls which are supported as vsyscalls.  */
#define HAVE_CLOCK_GETTIME_VSYSCALL	1
#define HAVE_GETTIMEOFDAY_VSYSCALL	1
366
367 #endif /* __ASSEMBLER__ */
368
/* Pointer mangling is not yet supported for MIPS.  These expand to a
   void-cast of VAR so the macros are no-ops with no unused-variable
   warnings; the pointer value is left unobfuscated.  */
#define PTR_MANGLE(var) (void) (var)
#define PTR_DEMANGLE(var) (void) (var)
372
373 #endif /* linux/mips/mips32/sysdep.h */