/* Copyright (C) 1992-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Brendan Kehoe (brendan@zen.org).

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <http://www.gnu.org/licenses/>.  */
19 #include <sysdeps/unix/sysdep.h>
24 # include <alpha/regdef.h>
30 # include <dl-sysdep.h> /* Defines RTLD_PRIVATE_ERRNO. */
/* Assembler-side support macros (the __ASSEMBLER__ half of the file).
   NOTE(review): this copy is garbled -- original line numbers are fused
   into the text and many lines are missing, including the #if structure
   that selects among the PSEUDO_PROLOGUE/USEPV_PROF alternatives below.
   Compare against the upstream file before relying on it.  */
/* Paste a ':' after X to form an assembler label definition.  */
34 #define __LABEL(x) x##:
/* Open a leaf function NAME with frame size FRAMESIZE; only the .frame
   directive survives in this copy -- the rest of the body is missing.  */
36 #define LEAF(name, framesize) \
41 .frame sp, framesize, ra
50 /* Mark the end of function SYM. */
52 #define END(sym) .end sym
/* Profiling hook: call _mcount through AT.  Presumably guarded by
   #ifdef PROF in the original -- the guard lines are missing here.  */
55 # define PSEUDO_PROF \
58 jsr AT, (AT), _mcount; \
/* Three alternative PSEUDO_PROLOGUE definitions; their bodies and the
   #if conditions selecting them are missing from this copy.  */
65 # define PSEUDO_PROLOGUE \
71 # define PSEUDO_PROLOGUE \
75 # define PSEUDO_PROLOGUE \
/* Two alternative USEPV_PROF values ('std' vs 'no'); the conditional
   that chooses between them is missing from this copy.  */
82 # define USEPV_PROF std
84 # define USEPV_PROF no
/* Error-return plumbing used by the PSEUDO stubs: a branch target
   (SYSCALL_ERROR_LABEL), the code emitted at that target
   (SYSCALL_ERROR_HANDLER), and the glue used when control must reach
   the handler explicitly (SYSCALL_ERROR_FALLTHRU).
   NOTE(review): the #elif/#else lines between the three arms and parts
   of the handler bodies are missing from this copy.  */
87 #if RTLD_PRIVATE_ERRNO
/* Inside ld.so: errno is the private gp-relative rtld_errno; the
   handler stores the error value from v0 there directly.  */
88 # define SYSCALL_ERROR_LABEL $syscall_error
89 # define SYSCALL_ERROR_HANDLER \
91 stl v0, rtld_errno(gp) !gprel; \
94 # define SYSCALL_ERROR_FALLTHRU
/* Second arm (presumably the PIC case -- guard line missing): branch
   straight to __syscall_error, sharing the caller's gp (!samegp).  */
96 # define SYSCALL_ERROR_LABEL __syscall_error !samegp
97 # define SYSCALL_ERROR_HANDLER
98 # define SYSCALL_ERROR_FALLTHRU br SYSCALL_ERROR_LABEL
/* Third arm (presumably the static case): local label that tail-jumps
   to __syscall_error.  */
100 # define SYSCALL_ERROR_LABEL $syscall_error
101 # define SYSCALL_ERROR_HANDLER \
103 jmp $31, __syscall_error
104 # define SYSCALL_ERROR_FALLTHRU
105 #endif /* RTLD_PRIVATE_ERRNO */
107 /* Overridden by specific syscalls. */
108 #undef PSEUDO_PREPARE_ARGS
109 #define PSEUDO_PREPARE_ARGS /* Nothing. */
/* PSEUDO(name, syscall_name, args): emit the body of a syscall stub --
   load the syscall number into v0, trap to the kernel via callsys, and
   branch to the error path if a3 is nonzero.  NOTE(review): the
   .globl/.ent/label/prologue lines of PSEUDO are missing from this
   copy of the file.  */
111 #define PSEUDO(name, syscall_name, args) \
117 PSEUDO_PREPARE_ARGS \
118 lda v0, SYS_ify(syscall_name); \
119 call_pal PAL_callsys; \
120 bne a3, SYSCALL_ERROR_LABEL
/* Close a PSEUDO stub: emit the error handler.  NOTE(review): the
   trailing END(sym) line appears to be missing from this copy.  */
123 #define PSEUDO_END(sym) \
124 SYSCALL_ERROR_HANDLER; \
/* _NOERRNO variant: no a3 test, no error path -- the kernel's error
   indication is simply not examined.  */
127 #define PSEUDO_NOERRNO(name, syscall_name, args) \
133 PSEUDO_PREPARE_ARGS \
134 lda v0, SYS_ify(syscall_name); \
135 call_pal PAL_callsys;
137 #undef PSEUDO_END_NOERRNO
138 #define PSEUDO_END_NOERRNO(sym) END(sym)
140 #define ret_NOERRNO ret
/* _ERRVAL variant: like _NOERRNO, the stub performs no error branch;
   per the usual sysdep.h convention the raw value is returned to the
   caller -- confirm against callers.  */
142 #define PSEUDO_ERRVAL(name, syscall_name, args) \
148 PSEUDO_PREPARE_ARGS \
149 lda v0, SYS_ify(syscall_name); \
150 call_pal PAL_callsys;
152 #undef PSEUDO_END_ERRVAL
153 #define PSEUDO_END_ERRVAL(sym) END(sym)
155 #define ret_ERRVAL ret
/* Register-to-register move helper for assembly sources.  */
160 #define MOVE(x,y) mov x,y
162 #else /* !ASSEMBLER */
/* ??? Linux needs to be able to override INLINE_SYSCALL for one
   particular special case.  Make this easy.  */

#undef INLINE_SYSCALL
#define INLINE_SYSCALL(name, nr, args...) \
	INLINE_SYSCALL1(name, nr, args)

/* Perform syscall NAME with NR arguments.  On failure (the kernel sets
   the a3 error flag, captured in _sc_err), copy the kernel's error
   value into errno and yield -1; otherwise yield the syscall result.
   Written as a GNU statement expression so the macro has a value.
   NOTE(review): the brace/statement-expression lines were missing from
   this garbled copy and have been restored to the canonical form.  */
#define INLINE_SYSCALL1(name, nr, args...)		\
({							\
	long _sc_ret, _sc_err;				\
	inline_syscall##nr(__NR_##name, args);		\
	if (__builtin_expect (_sc_err, 0))		\
	  {						\
	    __set_errno (_sc_ret);			\
	    _sc_ret = -1L;				\
	  }						\
	_sc_ret;					\
})
/* Like INLINE_SYSCALL but leave errno alone: the kernel's error flag is
   stored into ERR_OUT and the raw result is yielded, for callers that
   want to inspect the error themselves.  */
#define INTERNAL_SYSCALL(name, err_out, nr, args...) \
	INTERNAL_SYSCALL1(name, err_out, nr, args)

#define INTERNAL_SYSCALL1(name, err_out, nr, args...) \
	INTERNAL_SYSCALL_NCS(__NR_##name, err_out, nr, args)

/* Non-constant-syscall-number variant: NAME is an expression rather
   than a token to paste after __NR_.  NOTE(review): the statement-
   expression wrapper and the ERR_OUT assignment were missing from this
   garbled copy and have been restored to the canonical form.  */
#define INTERNAL_SYSCALL_NCS(name, err_out, nr, args...)	\
({								\
	long _sc_ret, _sc_err;					\
	inline_syscall##nr(name, args);				\
	err_out = _sc_err;					\
	_sc_ret;						\
})

/* Declare the error variable consumed by INTERNAL_SYSCALL.  Marked
   unused since some callers never read it.  */
#define INTERNAL_SYSCALL_DECL(err) \
	long int err __attribute__((unused))
/* The normal Alpha calling convention sign-extends 32-bit quantities
   no matter what the "real" sign of the 32-bit type.  We want to
   preserve that when filling in values for the kernel: a 4-byte
   argument is first narrowed to int, then sign-extended to long.  */
#define syscall_promote(arg) \
  (sizeof(arg) == 4 ? (long)(int)(long)(arg) : (long)(arg))

/* Make sure and "use" the variable that we're not returning,
   in order to suppress unused variable warnings.  */
#define INTERNAL_SYSCALL_ERROR_P(val, err)	((void)val, err)
#define INTERNAL_SYSCALL_ERRNO(val, err)	((void)err, val)

/* Clobber list shared by all of the inline_syscallN asms below; it
   names the registers not otherwise tied to asm operands, plus
   "memory" for the kernel's side effects.  */
#define inline_syscall_clobbers				\
	"$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8",	\
	"$22", "$23", "$24", "$25", "$27", "$28", "memory"

/* It is moderately important optimization-wise to limit the lifetime
   of the hard-register variables as much as possible.  Thus we copy
   in/out as close to the asm as possible.  */
/* Zero-argument syscall: the number goes in v0 ($0); the kernel leaves
   the result in v0 and the error flag in a3 ($19).  Both are copied
   out to the _sc_ret/_sc_err locals that the INLINE/INTERNAL_SYSCALL
   wrappers declare around this expansion.  The unused argument
   registers are listed as clobbers.  NOTE(review): the enclosing-brace
   lines were missing from this garbled copy; restored.  */
#define inline_syscall0(name, args...)				\
{								\
	register long _sc_19 __asm__("$19");			\
	register long _sc_0 = name;				\
	__asm__ __volatile__					\
	  ("callsys # %0 %1 <= %2"				\
	   : "+v"(_sc_0), "=r"(_sc_19)				\
	   : : inline_syscall_clobbers,				\
	       "$16", "$17", "$18", "$20", "$21");		\
	_sc_ret = _sc_0, _sc_err = _sc_19;			\
}
/* One-argument syscall: promote the argument, bind it to $16, trap.
   a3 ($19) comes back holding the error flag.  NOTE(review): the
   enclosing-brace lines were missing from this garbled copy; restored.  */
#define inline_syscall1(name,arg1)				\
{								\
	register long _tmp_16 = syscall_promote (arg1);		\
	register long _sc_0 = name;				\
	register long _sc_16 __asm__("$16") = _tmp_16;		\
	register long _sc_19 __asm__("$19");			\
	__asm__ __volatile__					\
	  ("callsys # %0 %1 <= %2 %3"				\
	   : "+v"(_sc_0), "=r"(_sc_19), "+r"(_sc_16)		\
	   : : inline_syscall_clobbers,				\
	       "$17", "$18", "$20", "$21");			\
	_sc_ret = _sc_0, _sc_err = _sc_19;			\
}
/* Two-argument syscall: arguments in $16/$17; error flag in $19.
   NOTE(review): the enclosing-brace lines were missing from this
   garbled copy; restored.  */
#define inline_syscall2(name,arg1,arg2)				\
{								\
	register long _tmp_16 = syscall_promote (arg1);		\
	register long _tmp_17 = syscall_promote (arg2);		\
	register long _sc_0 = name;				\
	register long _sc_16 __asm__("$16") = _tmp_16;		\
	register long _sc_17 __asm__("$17") = _tmp_17;		\
	register long _sc_19 __asm__("$19");			\
	__asm__ __volatile__					\
	  ("callsys # %0 %1 <= %2 %3 %4"			\
	   : "+v"(_sc_0), "=r"(_sc_19),				\
	     "+r"(_sc_16), "+r"(_sc_17)				\
	   : : inline_syscall_clobbers,				\
	       "$18", "$20", "$21");				\
	_sc_ret = _sc_0, _sc_err = _sc_19;			\
}
/* Three-argument syscall: arguments in $16-$18; error flag in $19.
   NOTE(review): the enclosing-brace lines were missing from this
   garbled copy; restored.  */
#define inline_syscall3(name,arg1,arg2,arg3)			\
{								\
	register long _tmp_16 = syscall_promote (arg1);		\
	register long _tmp_17 = syscall_promote (arg2);		\
	register long _tmp_18 = syscall_promote (arg3);		\
	register long _sc_0 = name;				\
	register long _sc_16 __asm__("$16") = _tmp_16;		\
	register long _sc_17 __asm__("$17") = _tmp_17;		\
	register long _sc_18 __asm__("$18") = _tmp_18;		\
	register long _sc_19 __asm__("$19");			\
	__asm__ __volatile__					\
	  ("callsys # %0 %1 <= %2 %3 %4 %5"			\
	   : "+v"(_sc_0), "=r"(_sc_19), "+r"(_sc_16),		\
	     "+r"(_sc_17), "+r"(_sc_18)				\
	   : : inline_syscall_clobbers, "$20", "$21");		\
	_sc_ret = _sc_0, _sc_err = _sc_19;			\
}
/* Four-argument syscall.  $19 (a3) doubles as the fourth argument on
   input and the error flag on output, hence the "+r" constraint on
   _sc_19 here, unlike the 0-3 argument forms.  NOTE(review): the
   enclosing-brace lines were missing from this garbled copy; restored.  */
#define inline_syscall4(name,arg1,arg2,arg3,arg4)		\
{								\
	register long _tmp_16 = syscall_promote (arg1);		\
	register long _tmp_17 = syscall_promote (arg2);		\
	register long _tmp_18 = syscall_promote (arg3);		\
	register long _tmp_19 = syscall_promote (arg4);		\
	register long _sc_0 = name;				\
	register long _sc_16 __asm__("$16") = _tmp_16;		\
	register long _sc_17 __asm__("$17") = _tmp_17;		\
	register long _sc_18 __asm__("$18") = _tmp_18;		\
	register long _sc_19 __asm__("$19") = _tmp_19;		\
	__asm__ __volatile__					\
	  ("callsys # %0 %1 <= %2 %3 %4 %5 %6"			\
	   : "+v"(_sc_0), "+r"(_sc_19), "+r"(_sc_16),		\
	     "+r"(_sc_17), "+r"(_sc_18)				\
	   : : inline_syscall_clobbers, "$20", "$21");		\
	_sc_ret = _sc_0, _sc_err = _sc_19;			\
}
/* Five-argument syscall: arguments in $16-$20; $19 is both arg4 and
   the returned error flag.  NOTE(review): the enclosing-brace lines
   were missing from this garbled copy; restored.  */
#define inline_syscall5(name,arg1,arg2,arg3,arg4,arg5)		\
{								\
	register long _tmp_16 = syscall_promote (arg1);		\
	register long _tmp_17 = syscall_promote (arg2);		\
	register long _tmp_18 = syscall_promote (arg3);		\
	register long _tmp_19 = syscall_promote (arg4);		\
	register long _tmp_20 = syscall_promote (arg5);		\
	register long _sc_0 = name;				\
	register long _sc_16 __asm__("$16") = _tmp_16;		\
	register long _sc_17 __asm__("$17") = _tmp_17;		\
	register long _sc_18 __asm__("$18") = _tmp_18;		\
	register long _sc_19 __asm__("$19") = _tmp_19;		\
	register long _sc_20 __asm__("$20") = _tmp_20;		\
	__asm__ __volatile__					\
	  ("callsys # %0 %1 <= %2 %3 %4 %5 %6 %7"		\
	   : "+v"(_sc_0), "+r"(_sc_19), "+r"(_sc_16),		\
	     "+r"(_sc_17), "+r"(_sc_18), "+r"(_sc_20)		\
	   : : inline_syscall_clobbers, "$21");			\
	_sc_ret = _sc_0, _sc_err = _sc_19;			\
}
/* Six-argument syscall: arguments in $16-$21; $19 is both arg4 and the
   returned error flag.  BUG FIX: the "+r"(_sc_21) operand was missing
   from this copy, leaving a dangling comma in the operand list and
   leaving _sc_21 unconnected to the asm (so $21 was not guaranteed to
   hold arg6); restored, following the pattern of inline_syscall0-5.
   NOTE(review): the enclosing-brace lines were also missing; restored.  */
#define inline_syscall6(name,arg1,arg2,arg3,arg4,arg5,arg6)	\
{								\
	register long _tmp_16 = syscall_promote (arg1);		\
	register long _tmp_17 = syscall_promote (arg2);		\
	register long _tmp_18 = syscall_promote (arg3);		\
	register long _tmp_19 = syscall_promote (arg4);		\
	register long _tmp_20 = syscall_promote (arg5);		\
	register long _tmp_21 = syscall_promote (arg6);		\
	register long _sc_0 = name;				\
	register long _sc_16 __asm__("$16") = _tmp_16;		\
	register long _sc_17 __asm__("$17") = _tmp_17;		\
	register long _sc_18 __asm__("$18") = _tmp_18;		\
	register long _sc_19 __asm__("$19") = _tmp_19;		\
	register long _sc_20 __asm__("$20") = _tmp_20;		\
	register long _sc_21 __asm__("$21") = _tmp_21;		\
	__asm__ __volatile__					\
	  ("callsys # %0 %1 <= %2 %3 %4 %5 %6 %7 %8"		\
	   : "+v"(_sc_0), "+r"(_sc_19), "+r"(_sc_16),		\
	     "+r"(_sc_17), "+r"(_sc_18), "+r"(_sc_20),		\
	     "+r"(_sc_21)					\
	   : : inline_syscall_clobbers);			\
	_sc_ret = _sc_0, _sc_err = _sc_19;			\
}
344 /* Pointer mangling support. Note that tls access is slow enough that
345 we don't deoptimize things by placing the pointer check value there. */
/* NOTE(review): several #else/#elif/#endif lines and the xor
   instructions of the assembler PTR_MANGLE/PTR_MANGLE2 bodies are
   missing from this garbled copy -- restore from upstream before use.  */
/* Case 1: building the dynamic loader itself -- use the hidden,
   gp-relative __pointer_chk_guard_local.  */
349 #if defined NOT_IN_libc && defined IS_IN_rtld
350 # ifdef __ASSEMBLER__
/* Load the guard value into TMP via gprel addressing; the instruction
   combining it with SRC into DST is missing from this copy.  */
351 # define PTR_MANGLE(dst, src, tmp) \
352 ldah tmp, __pointer_chk_guard_local($29) !gprelhigh; \
353 ldq tmp, __pointer_chk_guard_local(tmp) !gprellow; \
355 # define PTR_MANGLE2(dst, src, tmp) \
/* Demangling is the same operation applied in place (xor is its own
   inverse, per the C versions below).  */
357 # define PTR_DEMANGLE(dst, tmp) PTR_MANGLE(dst, dst, tmp)
358 # define PTR_DEMANGLE2(dst, tmp) PTR_MANGLE2(dst, dst, tmp)
360 extern uintptr_t __pointer_chk_guard_local attribute_relro attribute_hidden
;
/* C version: xor the pointer with the guard value.  */
361 # define PTR_MANGLE(var) \
362 (var) = (__typeof (var)) ((uintptr_t) (var) ^ __pointer_chk_guard_local)
363 # define PTR_DEMANGLE(var) PTR_MANGLE(var)
/* Case 2 (presumably shared libc -- the guard #else line is missing):
   use the global __pointer_chk_guard.  */
366 # ifdef __ASSEMBLER__
367 # define PTR_MANGLE(dst, src, tmp) \
368 ldq tmp, __pointer_chk_guard; \
370 # define PTR_MANGLE2(dst, src, tmp) \
372 # define PTR_DEMANGLE(dst, tmp) PTR_MANGLE(dst, dst, tmp)
373 # define PTR_DEMANGLE2(dst, tmp) PTR_MANGLE2(dst, dst, tmp)
375 extern const uintptr_t __pointer_chk_guard attribute_relro
;
376 # define PTR_MANGLE(var) \
377 (var) = (__typeof(var)) ((uintptr_t) (var) ^ __pointer_chk_guard)
378 # define PTR_DEMANGLE(var) PTR_MANGLE(var)
381 /* There exists generic C code that assumes that PTR_MANGLE is always
382 defined. When generating code for the static libc, we don't have
383 __pointer_chk_guard defined. Nor is there any place that would
384 initialize it if it were defined, so there's little point in doing
385 anything more than nothing. */
/* Case 3: static libc -- PTR_MANGLE/PTR_DEMANGLE are deliberate no-ops.  */
386 # ifndef __ASSEMBLER__
387 # define PTR_MANGLE(var)
388 # define PTR_DEMANGLE(var)
392 #endif /* ASSEMBLER */