]> git.ipfire.org Git - thirdparty/gcc.git/blame - libsanitizer/sanitizer_common/sanitizer_syscall_linux_riscv64.inc
Correct a function pre/postcondition [PR102403].
[thirdparty/gcc.git] / libsanitizer / sanitizer_common / sanitizer_syscall_linux_riscv64.inc
CommitLineData
0b997f6e
ML
1//===-- sanitizer_syscall_linux_riscv64.inc ---------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Implementations of internal_syscall and internal_iserror for Linux/riscv64.
10//
11//===----------------------------------------------------------------------===//
12
13// About local register variables:
14// https://gcc.gnu.org/onlinedocs/gcc/Local-Register-Variables.html#Local-Register-Variables
15//
// Kernel ABI:
// Little official documentation exists; the kernel sources and
// http://man7.org/linux/man-pages/man2/syscall.2.html show that:
//   - the syscall number is passed in a7
//   - results are returned in a0 and a1
//   - arguments are passed in a0-a7 (see below)
23//
// Regarding the argument registers: the clearest description found is this
// comment by Bruce Hoult on the RISC-V sw-dev mailing list:
// https://groups.google.com/a/groups.riscv.org/forum/#!topic/sw-dev/exbrzM3GZDQ
// It has been confirmed by inspecting the glibc sources.
29#define SYSCALL(name) __NR_##name
30
31#define INTERNAL_SYSCALL_CLOBBERS "memory"
32
// Issue a syscall with no arguments.
// a7 carries the syscall number; the kernel returns the result in a0
// (see the ABI notes at the top of this file).
static uptr __internal_syscall(u64 nr) {
  register u64 a7 asm("a7") = nr;
  // a0 is output-only here ("=r"): nothing goes in, only the result comes out.
  register u64 a0 asm("a0");
  __asm__ volatile("ecall\n\t"
                   : "=r"(a0)
                   : "r"(a7)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
// Dispatched to by internal_syscall() when called with the number alone.
#define __internal_syscall0(n) (__internal_syscall)(n)
43
// Issue a syscall with one argument.
// a0 carries the argument in and the result out, hence the "+r"
// (read-write) constraint on it.
static uptr __internal_syscall(u64 nr, u64 arg1) {
  register u64 a7 asm("a7") = nr;
  register u64 a0 asm("a0") = arg1;
  __asm__ volatile("ecall\n\t"
                   : "+r"(a0)
                   : "r"(a7)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall1(n, a1) (__internal_syscall)(n, (u64)(a1))
54
// Issue a syscall with two arguments (a0, a1); the result comes back in a0.
// NOTE(review): parameter types mix u64 and long across these overloads; on
// riscv64 (LP64) both are 64-bit, so the generated code is the same.
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2) {
  register u64 a7 asm("a7") = nr;
  register u64 a0 asm("a0") = arg1;
  register u64 a1 asm("a1") = arg2;
  __asm__ volatile("ecall\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall2(n, a1, a2) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2))
67
// Issue a syscall with three arguments (a0-a2); the result comes back in a0.
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3) {
  register u64 a7 asm("a7") = nr;
  register u64 a0 asm("a0") = arg1;
  register u64 a1 asm("a1") = arg2;
  register u64 a2 asm("a2") = arg3;
  __asm__ volatile("ecall\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1), "r"(a2)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall3(n, a1, a2, a3) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3))
81
// Issue a syscall with four arguments (a0-a3); the result comes back in a0.
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
                               u64 arg4) {
  register u64 a7 asm("a7") = nr;
  register u64 a0 asm("a0") = arg1;
  register u64 a1 asm("a1") = arg2;
  register u64 a2 asm("a2") = arg3;
  register u64 a3 asm("a3") = arg4;
  __asm__ volatile("ecall\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall4(n, a1, a2, a3, a4) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4))
97
// Issue a syscall with five arguments (a0-a4); the result comes back in a0.
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
                               long arg5) {
  register u64 a7 asm("a7") = nr;
  register u64 a0 asm("a0") = arg1;
  register u64 a1 asm("a1") = arg2;
  register u64 a2 asm("a2") = arg3;
  register u64 a3 asm("a3") = arg4;
  register u64 a4 asm("a4") = arg5;
  __asm__ volatile("ecall\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall5(n, a1, a2, a3, a4, a5) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
                       (u64)(a5))
115
// Issue a syscall with six arguments (a0-a5); the result comes back in a0.
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
                               long arg5, long arg6) {
  register u64 a7 asm("a7") = nr;
  register u64 a0 asm("a0") = arg1;
  register u64 a1 asm("a1") = arg2;
  register u64 a2 asm("a2") = arg3;
  register u64 a3 asm("a3") = arg4;
  register u64 a4 asm("a4") = arg5;
  register u64 a5 asm("a5") = arg6;
  __asm__ volatile("ecall\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall6(n, a1, a2, a3, a4, a5, a6) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
                       (u64)(a5), (long)(a6))
134
// Issue a syscall with seven arguments (a0-a6); the result comes back in a0.
// NOTE(review): the Linux syscall ABI uses at most six argument registers;
// this seven-argument form mirrors the other ports' maximum arity.
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
                               long arg5, long arg6, long arg7) {
  register u64 a7 asm("a7") = nr;
  register u64 a0 asm("a0") = arg1;
  register u64 a1 asm("a1") = arg2;
  register u64 a2 asm("a2") = arg3;
  register u64 a3 asm("a3") = arg4;
  register u64 a4 asm("a4") = arg5;
  register u64 a5 asm("a5") = arg6;
  register u64 a6 asm("a6") = arg7;
  __asm__ volatile("ecall\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5),
                     "r"(a6)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall7(n, a1, a2, a3, a4, a5, a6, a7) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
                       (u64)(a5), (long)(a6), (long)(a7))
155
// Argument-counting dispatch: internal_syscall(nr, a1, ..., ak) expands to
// __internal_syscallk(nr, a1, ..., ak), which forwards to the matching
// overload of __internal_syscall above.
// Selects the 9th argument; the shifted 7..0 list below makes that the count.
#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n
// Yields the number of syscall arguments (0-7) after the syscall number.
#define __SYSCALL_NARGS(...) \
  __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )
// Two-level concatenation so the count macro expands before pasting.
#define __SYSCALL_CONCAT_X(a, b) a##b
#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b)
#define __SYSCALL_DISP(b, ...) \
  __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__)

// Public entry point used by sanitizer_common.
#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__)
165
166// Helper function used to avoid clobbering of errno.
167bool internal_iserror(uptr retval, int *rverrno) {
168 if (retval >= (uptr)-4095) {
169 if (rverrno)
170 *rverrno = -retval;
171 return true;
172 }
173 return false;
174}