//===-- sanitizer_stoptheworld_linux_libcdep.cc ---------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// See sanitizer_stoptheworld.h for details.
// This implementation was inspired by Markus Gutschke's linuxthreads.cc.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"

#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__) || \
                        defined(__aarch64__) || defined(__powerpc64__) || \
                        defined(__s390__) || defined(__i386__) || \
                        defined(__arm__))

#include "sanitizer_stoptheworld.h"

#include "sanitizer_platform_limits_posix.h"
#include "sanitizer_atomic.h"

#include <errno.h>
#include <sched.h>       // for CLONE_* definitions
#include <stddef.h>
#include <sys/prctl.h>   // for PR_* definitions
#include <sys/ptrace.h>  // for PTRACE_* definitions
#include <sys/types.h>   // for pid_t
#include <sys/uio.h>     // for iovec
#include <elf.h>         // for NT_PRSTATUS
#if defined(__aarch64__) && !SANITIZER_ANDROID
// GLIBC 2.20+ sys/user does not include asm/ptrace.h
# include <asm/ptrace.h>
#endif
#include <sys/user.h>    // for user_regs_struct
#if SANITIZER_ANDROID && SANITIZER_MIPS
# include <asm/reg.h>    // for mips SP register in sys/user.h
#endif
#include <sys/wait.h>    // for signal-related stuff

#ifdef sa_handler
# undef sa_handler
#endif

#ifdef sa_sigaction
# undef sa_sigaction
#endif

#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_libc.h"
#include "sanitizer_linux.h"
#include "sanitizer_mutex.h"
#include "sanitizer_placement_new.h"

// This module works by spawning a Linux task which then attaches to every
// thread in the caller process with ptrace. This suspends the threads, and
// PTRACE_GETREGS can then be used to obtain their register state. The callback
// supplied to StopTheWorld() is run in the tracer task while the threads are
// suspended.
// The tracer task must be placed in a different thread group for ptrace to
// work, so it cannot be spawned as a pthread. Instead, we use the low-level
// clone() interface (we want to share the address space with the caller
// process, so we prefer clone() over fork()).
//
// We don't use any libc functions, relying instead on direct syscalls. There
// are two reasons for this:
// 1. calling a library function while threads are suspended could cause a
//    deadlock, if one of the threads happens to be holding a libc lock;
// 2. it's generally not safe to call libc functions from the tracer task,
//    because clone() does not set up thread-local storage for it. Any
//    thread-local variables used by libc will be shared between the tracer
//    task and the thread which spawned it.
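//
// A typical use looks like the sketch below (illustrative only; the callback
// name and argument are placeholders, the real callers live in the tools that
// use this module):
//
//   static void MyStwCallback(const SuspendedThreadsList &threads, void *arg) {
//     // Inspect the suspended threads' registers and stacks here.
//   }
//   ...
//   StopTheWorld(MyStwCallback, &my_argument);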

namespace __sanitizer {

class SuspendedThreadsListLinux : public SuspendedThreadsList {
 public:
  SuspendedThreadsListLinux() : thread_ids_(1024) {}

  tid_t GetThreadID(uptr index) const;
  uptr ThreadCount() const;
  bool ContainsTid(tid_t thread_id) const;
  void Append(tid_t tid);

  PtraceRegistersStatus GetRegistersAndSP(uptr index, uptr *buffer,
                                          uptr *sp) const;
  uptr RegisterCount() const;

 private:
  InternalMmapVector<tid_t> thread_ids_;
};

// Structure for passing arguments into the tracer thread.
struct TracerThreadArgument {
  StopTheWorldCallback callback;
  void *callback_argument;
  // The tracer thread waits on this mutex while the parent finishes its
  // preparations.
  BlockingMutex mutex;
  // Tracer thread signals its completion by setting done.
  atomic_uintptr_t done;
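  // PID of the process that spawned the tracer; the tracer exits early if its
  // parent changes, i.e. if that process died before the tracer got going.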
  uptr parent_pid;
};

// This class handles thread suspending/unsuspending in the tracer thread.
class ThreadSuspender {
 public:
  explicit ThreadSuspender(pid_t pid, TracerThreadArgument *arg)
    : arg(arg)
    , pid_(pid) {
      CHECK_GE(pid, 0);
    }
  bool SuspendAllThreads();
  void ResumeAllThreads();
  void KillAllThreads();
  SuspendedThreadsListLinux &suspended_threads_list() {
    return suspended_threads_list_;
  }
  TracerThreadArgument *arg;
 private:
  SuspendedThreadsListLinux suspended_threads_list_;
  pid_t pid_;
  bool SuspendThread(tid_t thread_id);
};

5d3805fc | 129 | bool ThreadSuspender::SuspendThread(tid_t tid) { |
ef1b3fda KS |
130 | // Are we already attached to this thread? |
131 | // Currently this check takes linear time, however the number of threads is | |
132 | // usually small. | |
5d3805fc | 133 | if (suspended_threads_list_.ContainsTid(tid)) return false; |
ef1b3fda | 134 | int pterrno; |
696d846a | 135 | if (internal_iserror(internal_ptrace(PTRACE_ATTACH, tid, nullptr, nullptr), |
ef1b3fda KS |
136 | &pterrno)) { |
137 | // Either the thread is dead, or something prevented us from attaching. | |
138 | // Log this event and move on. | |
5d3805fc JJ |
139 | VReport(1, "Could not attach to thread %zu (errno %d).\n", (uptr)tid, |
140 | pterrno); | |
ef1b3fda KS |
141 | return false; |
142 | } else { | |
5d3805fc | 143 | VReport(2, "Attached to thread %zu.\n", (uptr)tid); |
ef1b3fda | 144 | // The thread is not guaranteed to stop before ptrace returns, so we must |
696d846a MO |
145 | // wait on it. Note: if the thread receives a signal concurrently, |
146 | // we can get notification about the signal before notification about stop. | |
147 | // In such case we need to forward the signal to the thread, otherwise | |
148 | // the signal will be missed (as we do PTRACE_DETACH with arg=0) and | |
149 | // any logic relying on signals will break. After forwarding we need to | |
150 | // continue to wait for stopping, because the thread is not stopped yet. | |
151 | // We do ignore delivery of SIGSTOP, because we want to make stop-the-world | |
152 | // as invisible as possible. | |
153 | for (;;) { | |
154 | int status; | |
155 | uptr waitpid_status; | |
156 | HANDLE_EINTR(waitpid_status, internal_waitpid(tid, &status, __WALL)); | |
157 | int wperrno; | |
158 | if (internal_iserror(waitpid_status, &wperrno)) { | |
159 | // Got a ECHILD error. I don't think this situation is possible, but it | |
160 | // doesn't hurt to report it. | |
5d3805fc JJ |
161 | VReport(1, "Waiting on thread %zu failed, detaching (errno %d).\n", |
162 | (uptr)tid, wperrno); | |
696d846a MO |
163 | internal_ptrace(PTRACE_DETACH, tid, nullptr, nullptr); |
164 | return false; | |
165 | } | |
166 | if (WIFSTOPPED(status) && WSTOPSIG(status) != SIGSTOP) { | |
167 | internal_ptrace(PTRACE_CONT, tid, nullptr, | |
168 | (void*)(uptr)WSTOPSIG(status)); | |
169 | continue; | |
170 | } | |
171 | break; | |
ef1b3fda | 172 | } |
696d846a | 173 | suspended_threads_list_.Append(tid); |
ef1b3fda KS |
174 | return true; |
175 | } | |
176 | } | |
177 | ||
void ThreadSuspender::ResumeAllThreads() {
  for (uptr i = 0; i < suspended_threads_list_.ThreadCount(); i++) {
    pid_t tid = suspended_threads_list_.GetThreadID(i);
    int pterrno;
    if (!internal_iserror(internal_ptrace(PTRACE_DETACH, tid, nullptr, nullptr),
                          &pterrno)) {
      VReport(2, "Detached from thread %d.\n", tid);
    } else {
      // Either the thread is dead, or we are already detached.
      // The latter case is possible, for instance, if this function was called
      // from a signal handler.
      VReport(1, "Could not detach from thread %d (errno %d).\n", tid, pterrno);
    }
  }
}

void ThreadSuspender::KillAllThreads() {
  for (uptr i = 0; i < suspended_threads_list_.ThreadCount(); i++)
    internal_ptrace(PTRACE_KILL, suspended_threads_list_.GetThreadID(i),
                    nullptr, nullptr);
}

bool ThreadSuspender::SuspendAllThreads() {
  ThreadLister thread_lister(pid_);
  bool added_threads;
  bool first_iteration = true;
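  // Threads may be created while we are attaching, so rescan the task list
  // until a complete pass attaches no new threads.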
  do {
    // Run through the directory entries once.
    added_threads = false;
    pid_t tid = thread_lister.GetNextTID();
    while (tid >= 0) {
      if (SuspendThread(tid))
        added_threads = true;
      tid = thread_lister.GetNextTID();
    }
    if (thread_lister.error() || (first_iteration && !added_threads)) {
      // Detach threads and fail.
      ResumeAllThreads();
      return false;
    }
    thread_lister.Reset();
    first_iteration = false;
  } while (added_threads);
  return true;
}

// Pointer to the ThreadSuspender instance for use in signal handler.
static ThreadSuspender *thread_suspender_instance = nullptr;

// Synchronous signals that should not be blocked.
static const int kSyncSignals[] = { SIGABRT, SIGILL, SIGFPE, SIGSEGV, SIGBUS,
                                    SIGXCPU, SIGXFSZ };

static void TracerThreadDieCallback() {
  // Generally a call to Die() in the tracer thread should be fatal to the
  // parent process as well, because they share the address space.
  // This really only works correctly if all the threads are suspended at this
  // point. So we correctly handle calls to Die() from within the callback, but
  // not those that happen before or after the callback. Hopefully there aren't
  // a lot of opportunities for that to happen...
  ThreadSuspender *inst = thread_suspender_instance;
  if (inst && stoptheworld_tracer_pid == internal_getpid()) {
    inst->KillAllThreads();
    thread_suspender_instance = nullptr;
  }
}

// Signal handler to wake up suspended threads when the tracer thread dies.
static void TracerThreadSignalHandler(int signum, void *siginfo, void *uctx) {
  SignalContext ctx(siginfo, uctx);
  Printf("Tracer caught signal %d: addr=0x%zx pc=0x%zx sp=0x%zx\n", signum,
         ctx.addr, ctx.pc, ctx.sp);
  ThreadSuspender *inst = thread_suspender_instance;
  if (inst) {
    if (signum == SIGABRT)
      inst->KillAllThreads();
    else
      inst->ResumeAllThreads();
    RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));
    thread_suspender_instance = nullptr;
    atomic_store(&inst->arg->done, 1, memory_order_relaxed);
  }
  internal__exit((signum == SIGABRT) ? 1 : 2);
}

// Size of alternative stack for signal handlers in the tracer thread.
static const int kHandlerStackSize = 8192;

// This function will be run as a cloned task.
static int TracerThread(void* argument) {
  TracerThreadArgument *tracer_thread_argument =
      (TracerThreadArgument *)argument;

  internal_prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
  // Check if parent is already dead.
  if (internal_getppid() != tracer_thread_argument->parent_pid)
    internal__exit(4);

  // Wait for the parent thread to finish preparations.
  tracer_thread_argument->mutex.Lock();
  tracer_thread_argument->mutex.Unlock();

  RAW_CHECK(AddDieCallback(TracerThreadDieCallback));

  ThreadSuspender thread_suspender(internal_getppid(), tracer_thread_argument);
  // Global pointer for the signal handler.
  thread_suspender_instance = &thread_suspender;

  // Alternate stack for signal handling.
  InternalScopedBuffer<char> handler_stack_memory(kHandlerStackSize);
  stack_t handler_stack;
  internal_memset(&handler_stack, 0, sizeof(handler_stack));
  handler_stack.ss_sp = handler_stack_memory.data();
  handler_stack.ss_size = kHandlerStackSize;
  internal_sigaltstack(&handler_stack, nullptr);

  // Install our handler for synchronous signals. Other signals should be
  // blocked by the mask we inherited from the parent thread.
  for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++) {
    __sanitizer_sigaction act;
    internal_memset(&act, 0, sizeof(act));
    act.sigaction = TracerThreadSignalHandler;
    act.sa_flags = SA_ONSTACK | SA_SIGINFO;
    internal_sigaction_norestorer(kSyncSignals[i], &act, 0);
  }

  int exit_code = 0;
  if (!thread_suspender.SuspendAllThreads()) {
    VReport(1, "Failed suspending threads.\n");
    exit_code = 3;
  } else {
    tracer_thread_argument->callback(thread_suspender.suspended_threads_list(),
                                     tracer_thread_argument->callback_argument);
    thread_suspender.ResumeAllThreads();
    exit_code = 0;
  }
  RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));
  thread_suspender_instance = nullptr;
  atomic_store(&tracer_thread_argument->done, 1, memory_order_relaxed);
  return exit_code;
}

class ScopedStackSpaceWithGuard {
 public:
  explicit ScopedStackSpaceWithGuard(uptr stack_size) {
    stack_size_ = stack_size;
    guard_size_ = GetPageSizeCached();
    // FIXME: Omitting MAP_STACK here works in current kernels but might break
    // in the future.
    guard_start_ = (uptr)MmapOrDie(stack_size_ + guard_size_,
                                   "ScopedStackWithGuard");
    CHECK(MprotectNoAccess((uptr)guard_start_, guard_size_));
  }
  ~ScopedStackSpaceWithGuard() {
    UnmapOrDie((void *)guard_start_, stack_size_ + guard_size_);
  }
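  // The guard page occupies the low end of the mapping and the stack grows
  // down on all architectures supported here, so clone() is handed the
  // highest address of the mapping as the initial stack pointer.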
  void *Bottom() const {
    return (void *)(guard_start_ + stack_size_ + guard_size_);
  }

 private:
  uptr stack_size_;
  uptr guard_size_;
  uptr guard_start_;
};

// We have a limitation on the stack frame size, so some stuff had to be moved
// into globals.
static __sanitizer_sigset_t blocked_sigset;
static __sanitizer_sigset_t old_sigset;

class StopTheWorldScope {
 public:
  StopTheWorldScope() {
    // Make this process dumpable. Processes that are not dumpable cannot be
    // attached to.
    process_was_dumpable_ = internal_prctl(PR_GET_DUMPABLE, 0, 0, 0, 0);
    if (!process_was_dumpable_)
      internal_prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);
  }

  ~StopTheWorldScope() {
    // Restore the dumpable flag.
    if (!process_was_dumpable_)
      internal_prctl(PR_SET_DUMPABLE, 0, 0, 0, 0);
  }

 private:
  int process_was_dumpable_;
};

// When sanitizer output is being redirected to a file (i.e. by using
// log_path), the tracer should write to the parent's log instead of trying to
// open a new file. Alert the logging code to the fact that we have a tracer.
struct ScopedSetTracerPID {
  explicit ScopedSetTracerPID(uptr tracer_pid) {
    stoptheworld_tracer_pid = tracer_pid;
    stoptheworld_tracer_ppid = internal_getpid();
  }
  ~ScopedSetTracerPID() {
    stoptheworld_tracer_pid = 0;
    stoptheworld_tracer_ppid = 0;
  }
};

void StopTheWorld(StopTheWorldCallback callback, void *argument) {
  StopTheWorldScope in_stoptheworld;
  // Prepare the arguments for TracerThread.
  struct TracerThreadArgument tracer_thread_argument;
  tracer_thread_argument.callback = callback;
  tracer_thread_argument.callback_argument = argument;
  tracer_thread_argument.parent_pid = internal_getpid();
  atomic_store(&tracer_thread_argument.done, 0, memory_order_relaxed);
  const uptr kTracerStackSize = 2 * 1024 * 1024;
  ScopedStackSpaceWithGuard tracer_stack(kTracerStackSize);
  // Block the execution of TracerThread until after we have set ptrace
  // permissions.
  tracer_thread_argument.mutex.Lock();
  // Signal handling story.
  // We don't want async signals to be delivered to the tracer thread,
  // so we block all async signals before creating the thread. An async signal
  // handler can temporarily modify errno, which is shared with this thread.
  // We ought to use pthread_sigmask here, because sigprocmask has undefined
  // behavior in multithreaded programs. However, on Linux sigprocmask is
  // equivalent to pthread_sigmask with the exception that pthread_sigmask
  // does not allow blocking some signals used internally in the pthread
  // implementation. We are fine with blocking them here; we are really not
  // going to pthread_cancel the thread.
  // The tracer thread should not raise any synchronous signals. But in case it
  // does, we set up a special handler for sync signals that properly kills the
  // parent as well. Note: we don't pass CLONE_SIGHAND to clone, so handlers
  // in the tracer thread won't interfere with the user program. Double note:
  // if a user does something along the lines of 'kill -11 pid', that can kill
  // the process even if the user set up their own handler for SEGV.
  // Thing to watch out for: this code should not change the behavior of user
  // code in any observable way. In particular it should not override user
  // signal handlers.
  internal_sigfillset(&blocked_sigset);
  for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++)
    internal_sigdelset(&blocked_sigset, kSyncSignals[i]);
  int rv = internal_sigprocmask(SIG_BLOCK, &blocked_sigset, &old_sigset);
  CHECK_EQ(rv, 0);
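  // CLONE_VM shares the address space with the parent. Because CLONE_THREAD is
  // not passed, the tracer lands in a separate thread group, which ptrace
  // requires. CLONE_UNTRACED prevents a debugger that is tracing us from
  // forcing a trace on the new task.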
  uptr tracer_pid = internal_clone(
      TracerThread, tracer_stack.Bottom(),
      CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_UNTRACED,
      &tracer_thread_argument, nullptr /* parent_tidptr */,
      nullptr /* newtls */, nullptr /* child_tidptr */);
  internal_sigprocmask(SIG_SETMASK, &old_sigset, 0);
  int local_errno = 0;
  if (internal_iserror(tracer_pid, &local_errno)) {
    VReport(1, "Failed spawning a tracer thread (errno %d).\n", local_errno);
    tracer_thread_argument.mutex.Unlock();
  } else {
    ScopedSetTracerPID scoped_set_tracer_pid(tracer_pid);
    // On some systems we have to explicitly declare that we want to be traced
    // by the tracer thread.
#ifdef PR_SET_PTRACER
    internal_prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0);
#endif
    // Allow the tracer thread to start.
    tracer_thread_argument.mutex.Unlock();
    // NOTE: errno is shared between this thread and the tracer thread.
    // internal_waitpid() may call syscall() which can access/spoil errno,
    // so we can't call it now. Instead we wait for the tracer thread to finish
    // using the spin loop below. The man page for sched_yield() says "In the
    // Linux implementation, sched_yield() always succeeds", so let's hope it
    // does not spoil errno. Note that this spin loop runs only for brief
    // periods before the tracer thread has suspended us and when it starts
    // unblocking threads.
    while (atomic_load(&tracer_thread_argument.done, memory_order_relaxed) == 0)
      sched_yield();
    // Now the tracer thread is about to exit and does not touch errno,
    // wait for it.
    for (;;) {
      uptr waitpid_status = internal_waitpid(tracer_pid, nullptr, __WALL);
      if (!internal_iserror(waitpid_status, &local_errno))
        break;
      if (local_errno == EINTR)
        continue;
      VReport(1, "Waiting on the tracer thread failed (errno %d).\n",
              local_errno);
      break;
    }
  }
}

// Platform-specific methods from SuspendedThreadsList.
#if SANITIZER_ANDROID && defined(__arm__)
typedef pt_regs regs_struct;
#define REG_SP ARM_sp

#elif SANITIZER_LINUX && defined(__arm__)
typedef user_regs regs_struct;
#define REG_SP uregs[13]

#elif defined(__i386__) || defined(__x86_64__)
typedef user_regs_struct regs_struct;
#if defined(__i386__)
#define REG_SP esp
#else
#define REG_SP rsp
#endif

#elif defined(__powerpc__) || defined(__powerpc64__)
typedef pt_regs regs_struct;
#define REG_SP gpr[PT_R1]

#elif defined(__mips__)
typedef struct user regs_struct;
# if SANITIZER_ANDROID
#  define REG_SP regs[EF_R29]
# else
#  define REG_SP regs[EF_REG29]
# endif

#elif defined(__aarch64__)
typedef struct user_pt_regs regs_struct;
#define REG_SP sp
#define ARCH_IOVEC_FOR_GETREGSET

#elif defined(__s390__)
typedef _user_regs_struct regs_struct;
#define REG_SP gprs[15]
#define ARCH_IOVEC_FOR_GETREGSET

#else
#error "Unsupported architecture"
#endif // SANITIZER_ANDROID && defined(__arm__)

tid_t SuspendedThreadsListLinux::GetThreadID(uptr index) const {
  CHECK_LT(index, thread_ids_.size());
  return thread_ids_[index];
}

uptr SuspendedThreadsListLinux::ThreadCount() const {
  return thread_ids_.size();
}

bool SuspendedThreadsListLinux::ContainsTid(tid_t thread_id) const {
  for (uptr i = 0; i < thread_ids_.size(); i++) {
    if (thread_ids_[i] == thread_id) return true;
  }
  return false;
}

void SuspendedThreadsListLinux::Append(tid_t tid) {
  thread_ids_.push_back(tid);
}

PtraceRegistersStatus SuspendedThreadsListLinux::GetRegistersAndSP(
    uptr index, uptr *buffer, uptr *sp) const {
  pid_t tid = GetThreadID(index);
  regs_struct regs;
  int pterrno;
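  // On targets that define ARCH_IOVEC_FOR_GETREGSET above (aarch64, s390),
  // the register file is read with PTRACE_GETREGSET and an iovec describing
  // the destination buffer; elsewhere the older PTRACE_GETREGS request is
  // used.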
#ifdef ARCH_IOVEC_FOR_GETREGSET
  struct iovec regset_io;
  regset_io.iov_base = &regs;
  regset_io.iov_len = sizeof(regs_struct);
  bool isErr = internal_iserror(internal_ptrace(PTRACE_GETREGSET, tid,
                                (void*)NT_PRSTATUS, (void*)&regset_io),
                                &pterrno);
#else
  bool isErr = internal_iserror(internal_ptrace(PTRACE_GETREGS, tid, nullptr,
                                &regs), &pterrno);
#endif
  if (isErr) {
    VReport(1, "Could not get registers from thread %d (errno %d).\n", tid,
            pterrno);
    // ESRCH means that the given thread is not suspended or already dead.
    // Therefore it's unsafe to inspect its data (e.g. walk through stack) and
    // we should notify the caller about this.
    return pterrno == ESRCH ? REGISTERS_UNAVAILABLE_FATAL
                            : REGISTERS_UNAVAILABLE;
  }

  *sp = regs.REG_SP;
  internal_memcpy(buffer, &regs, sizeof(regs));
  return REGISTERS_AVAILABLE;
}

uptr SuspendedThreadsListLinux::RegisterCount() const {
  return sizeof(regs_struct) / sizeof(uptr);
}
}  // namespace __sanitizer

#endif  // SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__)
        // || defined(__aarch64__) || defined(__powerpc64__)
        // || defined(__s390__) || defined(__i386__) || defined(__arm__)