//===-- sanitizer_syscall_linux_arm.inc -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementations of internal_syscall and internal_iserror for Linux/arm.
//
//===----------------------------------------------------------------------===//
12
// Map a syscall name to its __NR_* number from the kernel headers.
#define SYSCALL(name) __NR_ ## name
14
15static uptr __internal_syscall(u32 nr) {
16 register u32 r8 asm("r7") = nr;
17 register u32 r0 asm("r0");
18 asm volatile("swi #0"
19 : "=r"(r0)
20 : "r"(r8)
21 : "memory", "cc");
22 return r0;
23}
24#define __internal_syscall0(n) \
25 (__internal_syscall)(n)
26
27static uptr __internal_syscall(u32 nr, u32 arg1) {
28 register u32 r8 asm("r7") = nr;
29 register u32 r0 asm("r0") = arg1;
30 asm volatile("swi #0"
31 : "=r"(r0)
32 : "r"(r8), "0"(r0)
33 : "memory", "cc");
34 return r0;
35}
36#define __internal_syscall1(n, a1) \
37 (__internal_syscall)(n, (u32)(a1))
38
39static uptr __internal_syscall(u32 nr, u32 arg1, long arg2) {
40 register u32 r8 asm("r7") = nr;
41 register u32 r0 asm("r0") = arg1;
42 register u32 r1 asm("r1") = arg2;
43 asm volatile("swi #0"
44 : "=r"(r0)
45 : "r"(r8), "0"(r0), "r"(r1)
46 : "memory", "cc");
47 return r0;
48}
49#define __internal_syscall2(n, a1, a2) \
50 (__internal_syscall)(n, (u32)(a1), (long)(a2))
51
52static uptr __internal_syscall(u32 nr, u32 arg1, long arg2, long arg3) {
53 register u32 r8 asm("r7") = nr;
54 register u32 r0 asm("r0") = arg1;
55 register u32 r1 asm("r1") = arg2;
56 register u32 r2 asm("r2") = arg3;
57 asm volatile("swi #0"
58 : "=r"(r0)
59 : "r"(r8), "0"(r0), "r"(r1), "r"(r2)
60 : "memory", "cc");
61 return r0;
62}
63#define __internal_syscall3(n, a1, a2, a3) \
64 (__internal_syscall)(n, (u32)(a1), (long)(a2), (long)(a3))
65
66static uptr __internal_syscall(u32 nr, u32 arg1, long arg2, long arg3,
67 u32 arg4) {
68 register u32 r8 asm("r7") = nr;
69 register u32 r0 asm("r0") = arg1;
70 register u32 r1 asm("r1") = arg2;
71 register u32 r2 asm("r2") = arg3;
72 register u32 r3 asm("r3") = arg4;
73 asm volatile("swi #0"
74 : "=r"(r0)
75 : "r"(r8), "0"(r0), "r"(r1), "r"(r2), "r"(r3)
76 : "memory", "cc");
77 return r0;
78}
79#define __internal_syscall4(n, a1, a2, a3, a4) \
80 (__internal_syscall)(n, (u32)(a1), (long)(a2), (long)(a3), (long)(a4))
81
82static uptr __internal_syscall(u32 nr, u32 arg1, long arg2, long arg3,
83 u32 arg4, long arg5) {
84 register u32 r8 asm("r7") = nr;
85 register u32 r0 asm("r0") = arg1;
86 register u32 r1 asm("r1") = arg2;
87 register u32 r2 asm("r2") = arg3;
88 register u32 r3 asm("r3") = arg4;
89 register u32 r4 asm("r4") = arg5;
90 asm volatile("swi #0"
91 : "=r"(r0)
92 : "r"(r8), "0"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4)
93 : "memory", "cc");
94 return r0;
95}
96#define __internal_syscall5(n, a1, a2, a3, a4, a5) \
97 (__internal_syscall)(n, (u32)(a1), (long)(a2), (long)(a3), (long)(a4), \
98 (u32)(a5))
99
100static uptr __internal_syscall(u32 nr, u32 arg1, long arg2, long arg3,
101 u32 arg4, long arg5, long arg6) {
102 register u32 r8 asm("r7") = nr;
103 register u32 r0 asm("r0") = arg1;
104 register u32 r1 asm("r1") = arg2;
105 register u32 r2 asm("r2") = arg3;
106 register u32 r3 asm("r3") = arg4;
107 register u32 r4 asm("r4") = arg5;
108 register u32 r5 asm("r5") = arg6;
109 asm volatile("swi #0"
110 : "=r"(r0)
111 : "r"(r8), "0"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4), "r"(r5)
112 : "memory", "cc");
113 return r0;
114}
115#define __internal_syscall6(n, a1, a2, a3, a4, a5, a6) \
116 (__internal_syscall)(n, (u32)(a1), (long)(a2), (long)(a3), (long)(a4), \
117 (u32)(a5), (long)(a6))
118
// Arity dispatch: internal_syscall(nr, args...) expands to the matching
// __internal_syscallN(nr, args...) macro above.
//
// __SYSCALL_NARGS(...) yields the number of arguments AFTER the syscall
// number (i.e. total arguments minus one): the argument list pushes the
// descending 7..0 sequence rightward through the selector so that the value
// landing in slot `n` is the count of syscall arguments.
#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n
#define __SYSCALL_NARGS(...) \
  __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )
// Two-level concatenation so __SYSCALL_NARGS is expanded before pasting.
#define __SYSCALL_CONCAT_X(a, b) a##b
#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b)
#define __SYSCALL_DISP(b, ...) \
  __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__)

// Public entry point used by the rest of sanitizer_common.
#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__)
128
129// Helper function used to avoid cobbler errno.
130bool internal_iserror(uptr retval, int *rverrno) {
131 if (retval >= (uptr)-4095) {
132 if (rverrno)
133 *rverrno = -retval;
134 return true;
135 }
136 return false;
137}