/* Copyright (C) 2000-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _LINUX_MIPS_MIPS32_SYSDEP_H
#define _LINUX_MIPS_MIPS32_SYSDEP_H 1

/* There is some commonality with the generic Linux and plain MIPS
   sysdep headers.  */
#include <sysdeps/unix/sysv/linux/sysdep.h>
#include <sysdeps/unix/mips/mips32/sysdep.h>

#include <tls.h>

/* In order to get the __set_errno () definition used by INLINE_SYSCALL.  */
#ifndef __ASSEMBLER__
#include <errno.h>
#endif

/* For Linux we can use the system call table in the kernel header file
   /usr/include/asm/unistd.h.  But these symbols do not follow the SYS_*
   naming convention, so we have to redefine the `SYS_ify' macro here.  */
#undef SYS_ify
#define SYS_ify(syscall_name) __NR_##syscall_name
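/* For example (illustration added here, not in the original source):
   SYS_ify (sched_yield) token-pastes to __NR_sched_yield, the raw number
   defined by the kernel's asm/unistd.h.  */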

#ifdef __ASSEMBLER__

/* We don't want the label for the error handler to be visible in the symbol
   table when we define it here.  */
#ifdef __PIC__
# define SYSCALL_ERROR_LABEL 99b
#endif

#else   /* ! __ASSEMBLER__ */

/* Define a macro which expands into the inline wrapper code for a system
   call.  */
#undef INLINE_SYSCALL
#define INLINE_SYSCALL(name, nr, args...) \
  ({ INTERNAL_SYSCALL_DECL (_sc_err); \
     long result_var = INTERNAL_SYSCALL (name, _sc_err, nr, args); \
     if ( INTERNAL_SYSCALL_ERROR_P (result_var, _sc_err) ) \
       { \
         __set_errno (INTERNAL_SYSCALL_ERRNO (result_var, _sc_err)); \
         result_var = -1L; \
       } \
     result_var; })
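
/* Illustrative usage (an added sketch, not part of the original header):
   a hypothetical wrapper invokes the macro with the syscall name and its
   argument count; on failure INLINE_SYSCALL sets errno from the kernel
   return value and evaluates to -1.  */
#if 0
static long
example_getpid (void)
{
  return INLINE_SYSCALL (getpid, 0);
}
#endif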

#undef INTERNAL_SYSCALL_DECL
#define INTERNAL_SYSCALL_DECL(err) long err __attribute__ ((unused))

#undef INTERNAL_SYSCALL_ERROR_P
#define INTERNAL_SYSCALL_ERROR_P(val, err) ((void) (val), (long) (err))

#undef INTERNAL_SYSCALL_ERRNO
#define INTERNAL_SYSCALL_ERRNO(val, err) ((void) (err), val)

/* Note that the original Linux syscall restart convention required the
   instruction immediately preceding SYSCALL to initialize $v0 with the
   syscall number.  Then, if a restart was triggered, $v0 would have been
   clobbered by the interrupted syscall and needed to be reinitialized.
   The kernel would decrement the PC by 4 before switching back to user
   mode, so that $v0 had been reloaded before SYSCALL was executed again.
   This implied that the location $v0 was loaded from had to be preserved
   across a syscall, e.g. an immediate, a static register, a stack slot,
   etc.

   The convention was relaxed in Linux with a change applied to the kernel
   GIT repository as commit 96187fb0bc30cd7919759d371d810e928048249d, which
   first appeared in the 2.6.36 release.  Since then the kernel has had
   code that reloads $v0 upon syscall restart and resumes right at the
   SYSCALL instruction, so no special arrangement is needed anymore.

   For backwards compatibility with existing kernel binaries we support
   the old convention by choosing the instruction preceding SYSCALL
   carefully.  This also means we have to force a 32-bit encoding of the
   microMIPS MOVE instruction if one is used.  */

#ifdef __mips_micromips
# define MOVE32 "move32"
#else
# define MOVE32 "move"
#endif
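
/* For illustration (a note added here, not in the original source): under
   the old convention a sequence such as
       li      $v0, __NR_getpid
       syscall
   could be restarted by backing the PC up by 4 and re-executing the LI
   that reloads $v0.  A 16-bit microMIPS MOVE before SYSCALL would break
   that fixed 4-byte step, which is why MOVE32 forces the 32-bit encoding.  */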

#undef INTERNAL_SYSCALL
#undef INTERNAL_SYSCALL_NCS

#ifdef __mips16
/* There's no MIPS16 syscall instruction, so we go through out-of-line
   standard MIPS wrappers.  These wrappers do, however, use the inline
   snippets below via INTERNAL_SYSCALL_MIPS16.  Spilling the syscall
   number to memory gives the best code in that case, avoiding the need
   to save and restore a static register.  */

# include <mips16-syscall.h>

# define INTERNAL_SYSCALL(name, err, nr, args...) \
  INTERNAL_SYSCALL_NCS (SYS_ify (name), err, nr, args)

# define INTERNAL_SYSCALL_NCS(number, err, nr, args...) \
  ({ \
    union __mips16_syscall_return _sc_ret; \
    _sc_ret.val = __mips16_syscall##nr (args, number); \
    err = _sc_ret.reg.v1; \
    _sc_ret.reg.v0; \
  })

# define INTERNAL_SYSCALL_MIPS16(number, err, nr, args...) \
  internal_syscall##nr ("lw\t%0, %2\n\t", \
                        "R" (number), \
                        0, err, args)

#else /* !__mips16 */
# define INTERNAL_SYSCALL(name, err, nr, args...) \
  internal_syscall##nr ("li\t%0, %2\t\t\t# " #name "\n\t", \
                        "IK" (SYS_ify (name)), \
                        0, err, args)

# define INTERNAL_SYSCALL_NCS(number, err, nr, args...) \
  internal_syscall##nr (MOVE32 "\t%0, %2\n\t", \
                        "r" (__s0), \
                        number, err, args)

#endif /* !__mips16 */
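
/* Illustrative usage (an added sketch, not part of the original header):
   open-coded error handling on top of the macros above; this mirrors what
   INLINE_SYSCALL expands to.  */
#if 0
static long
example_close (int fd)
{
  INTERNAL_SYSCALL_DECL (err);
  long ret = INTERNAL_SYSCALL (close, err, 1, fd);
  if (INTERNAL_SYSCALL_ERROR_P (ret, err))
    {
      __set_errno (INTERNAL_SYSCALL_ERRNO (ret, err));
      return -1;
    }
  return ret;
}
#endif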

#define internal_syscall0(v0_init, input, number, err, dummy...) \
  ({ \
    long _sys_result; \
    \
    { \
      register long __s0 asm ("$16") __attribute__ ((unused)) \
        = (number); \
      register long __v0 asm ("$2"); \
      register long __a3 asm ("$7"); \
      __asm__ volatile ( \
        ".set\tnoreorder\n\t" \
        v0_init \
        "syscall\n\t" \
        ".set\treorder" \
        : "=r" (__v0), "=r" (__a3) \
        : input \
        : __SYSCALL_CLOBBERS); \
      err = __a3; \
      _sys_result = __v0; \
    } \
    _sys_result; \
  })

#define internal_syscall1(v0_init, input, number, err, arg1) \
  ({ \
    long _sys_result; \
    \
    { \
      register long __s0 asm ("$16") __attribute__ ((unused)) \
        = (number); \
      register long __v0 asm ("$2"); \
      register long __a0 asm ("$4") = (long) (arg1); \
      register long __a3 asm ("$7"); \
      __asm__ volatile ( \
        ".set\tnoreorder\n\t" \
        v0_init \
        "syscall\n\t" \
        ".set\treorder" \
        : "=r" (__v0), "=r" (__a3) \
        : input, "r" (__a0) \
        : __SYSCALL_CLOBBERS); \
      err = __a3; \
      _sys_result = __v0; \
    } \
    _sys_result; \
  })

#define internal_syscall2(v0_init, input, number, err, arg1, arg2) \
  ({ \
    long _sys_result; \
    \
    { \
      register long __s0 asm ("$16") __attribute__ ((unused)) \
        = (number); \
      register long __v0 asm ("$2"); \
      register long __a0 asm ("$4") = (long) (arg1); \
      register long __a1 asm ("$5") = (long) (arg2); \
      register long __a3 asm ("$7"); \
      __asm__ volatile ( \
        ".set\tnoreorder\n\t" \
        v0_init \
        "syscall\n\t" \
        ".set\treorder" \
        : "=r" (__v0), "=r" (__a3) \
        : input, "r" (__a0), "r" (__a1) \
        : __SYSCALL_CLOBBERS); \
      err = __a3; \
      _sys_result = __v0; \
    } \
    _sys_result; \
  })

#define internal_syscall3(v0_init, input, number, err, \
                          arg1, arg2, arg3) \
  ({ \
    long _sys_result; \
    \
    { \
      register long __s0 asm ("$16") __attribute__ ((unused)) \
        = (number); \
      register long __v0 asm ("$2"); \
      register long __a0 asm ("$4") = (long) (arg1); \
      register long __a1 asm ("$5") = (long) (arg2); \
      register long __a2 asm ("$6") = (long) (arg3); \
      register long __a3 asm ("$7"); \
      __asm__ volatile ( \
        ".set\tnoreorder\n\t" \
        v0_init \
        "syscall\n\t" \
        ".set\treorder" \
        : "=r" (__v0), "=r" (__a3) \
        : input, "r" (__a0), "r" (__a1), "r" (__a2) \
        : __SYSCALL_CLOBBERS); \
      err = __a3; \
      _sys_result = __v0; \
    } \
    _sys_result; \
  })

#define internal_syscall4(v0_init, input, number, err, \
                          arg1, arg2, arg3, arg4) \
  ({ \
    long _sys_result; \
    \
    { \
      register long __s0 asm ("$16") __attribute__ ((unused)) \
        = (number); \
      register long __v0 asm ("$2"); \
      register long __a0 asm ("$4") = (long) (arg1); \
      register long __a1 asm ("$5") = (long) (arg2); \
      register long __a2 asm ("$6") = (long) (arg3); \
      register long __a3 asm ("$7") = (long) (arg4); \
      __asm__ volatile ( \
        ".set\tnoreorder\n\t" \
        v0_init \
        "syscall\n\t" \
        ".set\treorder" \
        : "=r" (__v0), "+r" (__a3) \
        : input, "r" (__a0), "r" (__a1), "r" (__a2) \
        : __SYSCALL_CLOBBERS); \
      err = __a3; \
      _sys_result = __v0; \
    } \
    _sys_result; \
  })

/* We need to use a frame pointer for the functions in which we
   adjust $sp around the syscall, or debug information and unwind
   information will be $sp relative and thus wrong during the syscall.  As
   of GCC 4.7, this is sufficient.  */
#define FORCE_FRAME_POINTER \
  void *volatile __fp_force __attribute__ ((unused)) = alloca (4)
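
/* Background note (added here, not in the original source): the o32 ABI
   reserves the first 16 bytes of the outgoing argument area for $a0-$a3,
   so the 5th, 6th and 7th syscall arguments are stored at 16($29),
   20($29) and 24($29) after the stack pointer is dropped by 32 bytes in
   the macros below; that explicit $sp adjustment inside the asm is what
   makes the forced frame pointer necessary.  */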

#define internal_syscall5(v0_init, input, number, err, \
                          arg1, arg2, arg3, arg4, arg5) \
  ({ \
    long _sys_result; \
    \
    FORCE_FRAME_POINTER; \
    { \
      register long __s0 asm ("$16") __attribute__ ((unused)) \
        = (number); \
      register long __v0 asm ("$2"); \
      register long __a0 asm ("$4") = (long) (arg1); \
      register long __a1 asm ("$5") = (long) (arg2); \
      register long __a2 asm ("$6") = (long) (arg3); \
      register long __a3 asm ("$7") = (long) (arg4); \
      __asm__ volatile ( \
        ".set\tnoreorder\n\t" \
        "subu\t$29, 32\n\t" \
        "sw\t%6, 16($29)\n\t" \
        v0_init \
        "syscall\n\t" \
        "addiu\t$29, 32\n\t" \
        ".set\treorder" \
        : "=r" (__v0), "+r" (__a3) \
        : input, "r" (__a0), "r" (__a1), "r" (__a2), \
          "r" ((long) (arg5)) \
        : __SYSCALL_CLOBBERS); \
      err = __a3; \
      _sys_result = __v0; \
    } \
    _sys_result; \
  })

#define internal_syscall6(v0_init, input, number, err, \
                          arg1, arg2, arg3, arg4, arg5, arg6) \
  ({ \
    long _sys_result; \
    \
    FORCE_FRAME_POINTER; \
    { \
      register long __s0 asm ("$16") __attribute__ ((unused)) \
        = (number); \
      register long __v0 asm ("$2"); \
      register long __a0 asm ("$4") = (long) (arg1); \
      register long __a1 asm ("$5") = (long) (arg2); \
      register long __a2 asm ("$6") = (long) (arg3); \
      register long __a3 asm ("$7") = (long) (arg4); \
      __asm__ volatile ( \
        ".set\tnoreorder\n\t" \
        "subu\t$29, 32\n\t" \
        "sw\t%6, 16($29)\n\t" \
        "sw\t%7, 20($29)\n\t" \
        v0_init \
        "syscall\n\t" \
        "addiu\t$29, 32\n\t" \
        ".set\treorder" \
        : "=r" (__v0), "+r" (__a3) \
        : input, "r" (__a0), "r" (__a1), "r" (__a2), \
          "r" ((long) (arg5)), "r" ((long) (arg6)) \
        : __SYSCALL_CLOBBERS); \
      err = __a3; \
      _sys_result = __v0; \
    } \
    _sys_result; \
  })

#define internal_syscall7(v0_init, input, number, err, \
                          arg1, arg2, arg3, arg4, arg5, arg6, arg7) \
  ({ \
    long _sys_result; \
    \
    FORCE_FRAME_POINTER; \
    { \
      register long __s0 asm ("$16") __attribute__ ((unused)) \
        = (number); \
      register long __v0 asm ("$2"); \
      register long __a0 asm ("$4") = (long) (arg1); \
      register long __a1 asm ("$5") = (long) (arg2); \
      register long __a2 asm ("$6") = (long) (arg3); \
      register long __a3 asm ("$7") = (long) (arg4); \
      __asm__ volatile ( \
        ".set\tnoreorder\n\t" \
        "subu\t$29, 32\n\t" \
        "sw\t%6, 16($29)\n\t" \
        "sw\t%7, 20($29)\n\t" \
        "sw\t%8, 24($29)\n\t" \
        v0_init \
        "syscall\n\t" \
        "addiu\t$29, 32\n\t" \
        ".set\treorder" \
        : "=r" (__v0), "+r" (__a3) \
        : input, "r" (__a0), "r" (__a1), "r" (__a2), \
          "r" ((long) (arg5)), "r" ((long) (arg6)), "r" ((long) (arg7)) \
        : __SYSCALL_CLOBBERS); \
      err = __a3; \
      _sys_result = __v0; \
    } \
    _sys_result; \
  })

#define __SYSCALL_CLOBBERS "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", \
                           "$14", "$15", "$24", "$25", "hi", "lo", "memory"

/* Standard MIPS syscalls have an error flag, and return a positive errno
   when the error flag is set.  Emulate this behaviour for vsyscalls so that
   the INTERNAL_SYSCALL_{ERROR_P,ERRNO} macros work correctly.  */
#define INTERNAL_VSYSCALL_CALL(funcptr, err, nr, args...) \
  ({ \
    long _ret = funcptr (args); \
    err = ((unsigned long) (_ret) >= (unsigned long) -4095L); \
    if (err) \
      _ret = -_ret; \
    _ret; \
  })
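
/* For example (a note added here, not in the original source): a vDSO
   call failing with -EINVAL returns -22; the range test above sets err
   to 1 and negates the result to 22, the positive-errno form expected by
   INTERNAL_SYSCALL_ERRNO.  */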

/* List of system calls which are supported as vsyscalls.  */
#define HAVE_CLOCK_GETTIME_VSYSCALL 1
#define HAVE_GETTIMEOFDAY_VSYSCALL 1

#endif /* __ASSEMBLER__ */

/* Pointer mangling is not yet supported for MIPS.  */
#define PTR_MANGLE(var) (void) (var)
#define PTR_DEMANGLE(var) (void) (var)

#endif /* linux/mips/mips32/sysdep.h */