/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001-2024 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "inferior.h"
#include "infrun.h"
#include "target.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdbsupport/gdb_wait.h"
#include <unistd.h>
#include <sys/syscall.h>
#include "nat/gdb_ptrace.h"
#include "linux-nat.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "cli/cli-cmds.h"
#include "regcache.h"
#include "regset.h"
#include "inf-child.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/procfs.h>
#include "elf-bfd.h"
#include "gregset.h"
#include "gdbcore.h"
#include <ctype.h>
#include <sys/stat.h>
#include <fcntl.h>
#include "inf-loop.h"
#include "gdbsupport/event-loop.h"
#include "event-top.h"
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include "xml-support.h"
#include <sys/vfs.h>
#include "solib.h"
#include "nat/linux-osdata.h"
#include "linux-tdep.h"
#include "symfile.h"
#include "gdbsupport/agent.h"
#include "tracepoint.h"
#include "target-descriptions.h"
#include "gdbsupport/filestuff.h"
#include "objfiles.h"
#include "nat/linux-namespaces.h"
#include "gdbsupport/block-signals.h"
#include "gdbsupport/fileio.h"
#include "gdbsupport/scope-exit.h"
#include "gdbsupport/gdb-sigmask.h"
#include "gdbsupport/common-debug.h"
#include <unordered_map>

/* This comment documents high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid,
passing the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid is not quite good:

- If the thread group leader exits while other threads in the thread
  group still exist, waitpid(TGID, ...) hangs.  That waitpid won't
  return an exit status until the other threads in the group are
  reaped.

- When a non-leader thread execs, that thread just vanishes without
  reporting an exit (so we'd hang if we waited for it explicitly in
  that case).  The exec event is instead reported to the TGID pid.

The solution is to always use -1 and WNOHANG, together with
sigsuspend.

First, we use non-blocking waitpid to check for events.  If nothing is
found, we use sigsuspend to wait for SIGCHLD.  When SIGCHLD arrives,
it means something happened to a child process.  As soon as we know
there's an event, we get back to calling nonblocking waitpid.

Note that SIGCHLD should be blocked between waitpid and sigsuspend
calls, so that we don't miss a signal.  If SIGCHLD arrives in between,
when it's blocked, the signal becomes pending and sigsuspend
immediately notices it and returns.
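
In sketch form, the pattern looks roughly like this (illustrative
only; the real wait code in this file additionally handles async
mode and per-LWP bookkeeping, and assumes a SIGCHLD handler is
installed):

  sigset_t chld_mask, suspend_mask, prev_mask;

  sigemptyset (&chld_mask);
  sigaddset (&chld_mask, SIGCHLD);
  sigprocmask (SIG_BLOCK, &chld_mask, &prev_mask); // keep SIGCHLD blocked

  suspend_mask = prev_mask;
  sigdelset (&suspend_mask, SIGCHLD);              // ... except in sigsuspend

  for (;;)
    {
      int status;
      pid_t pid = waitpid (-1, &status, __WALL | WNOHANG);
      if (pid > 0)
        break;                  // got an event, go handle it
      // Atomically unblock SIGCHLD and sleep; returns immediately if
      // a SIGCHLD is already pending.
      sigsuspend (&suspend_mask);
    }

  sigprocmask (SIG_SETMASK, &prev_mask, NULL);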

Waiting for events in async mode (TARGET_WNOHANG)
=================================================

In async mode, GDB should always be ready to handle both user input
and target events, so neither blocking waitpid nor sigsuspend are
viable options.  Instead, we should asynchronously notify the GDB main
event loop whenever there's an unprocessed event from the target.  We
detect asynchronous target events by handling SIGCHLD signals.  To
notify the event loop about target events, an event pipe is used
--- the pipe is registered as waitable event source in the event loop,
the event loop select/poll's on the read end of this pipe (as well on
other event sources, e.g., stdin), and the SIGCHLD handler marks the
event pipe to raise an event.  This is more portable than relying on
pselect/ppoll, since on kernels that lack those syscalls, libc
emulates them with select/poll+sigprocmask, and that is racy
(a.k.a. plain broken).
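
A minimal sketch of that event-pipe (self-pipe) technique follows; it
is illustrative only -- the real registration and marking go through
GDB's event-loop and event-pipe helpers:

  static int event_pipe[2];   // [0] = read end, [1] = write end

  static void
  sigchld_handler (int signo)
  {
    int saved_errno = errno;
    char c = '+';
    // Async-signal-safe: just mark the pipe.  The event loop's
    // select/poll wakes up on the read end and later calls back
    // into the target, which does the actual waitpid.
    write (event_pipe[1], &c, 1);
    errno = saved_errno;
  }

  // Setup (once): pipe (event_pipe); make both ends non-blocking;
  // install sigchld_handler; register event_pipe[0] as a waitable
  // source with the event loop.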

Obviously, if we fail to notify the event loop when there's a target
event, it's bad.  OTOH, if we notify the event loop when there's no
event from the target, linux_nat_wait will detect that there's no real
event to report, and return event of type TARGET_WAITKIND_IGNORE.
This is mostly harmless, but it will waste time and is better avoided.

The main design point is that every time GDB is outside linux-nat.c,
we have a SIGCHLD handler installed that is called when something
happens to the target and notifies the GDB event loop.  Whenever GDB
core decides to handle the event, and calls into linux-nat.c, we
process things as in sync mode, except that we never block in
sigsuspend.

While processing an event, we may end up momentarily blocked in
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
return quickly.  E.g., in all-stop mode, before reporting to the core
that an LWP hit a breakpoint, all LWPs are stopped by sending them
SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
Note that this is different from blocking indefinitely waiting for the
next event --- here, we're already handling an event.

Use of signals
==============

We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of another
signal is not entirely significant; we just need a signal to be delivered,
so that we can intercept it.  SIGSTOP's advantage is that it can not be
blocked.  A disadvantage is that it is not a real-time signal, so it can only
be queued once; we do not keep track of other sources of SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL.  But we can't
use them, because they have special behavior when the signal is generated -
not when it is delivered.  SIGCONT resumes the entire thread group and SIGKILL
kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the thread we
tkill'd.  But we never let the SIGSTOP be delivered; we always intercept and
cancel it (by PTRACE_CONT without passing SIGSTOP).
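
For illustration, stopping one LWP this way and then cancelling the
SIGSTOP looks roughly like (a sketch, not the exact code used here):

  syscall (SYS_tkill, lwpid, SIGSTOP);   // ask just this thread to stop
  waitpid (lwpid, &status, __WALL);      // intercept the stop
  // WSTOPSIG (status) == SIGSTOP; resuming without passing the
  // signal on effectively cancels it:
  ptrace (PTRACE_CONT, lwpid, 0, 0);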

We could use a real-time signal instead.  This would solve those problems; we
could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
generates it, and there are races with trying to find a signal that is not
blocked.

Exec events
===========

The case of a thread group (process) with 3 or more threads, where a
thread other than the leader execs, is worth detailing:

On an exec, the Linux kernel destroys all threads except the execing
one in the thread group, and resets the execing thread's tid to the
tgid.  No exit notification is sent for the execing thread -- from the
ptracer's perspective, it appears as though the execing thread just
vanishes.  Until we reap all other threads except the leader and the
execing thread, the leader will be zombie, and the execing thread will
be in `D (disc sleep)' state.  As soon as all other threads are
reaped, the execing thread changes its tid to the tgid, and the
previous (zombie) leader vanishes, giving way to the "new"
leader.

Accessing inferior memory
=========================

To access inferior memory, we strongly prefer /proc/PID/mem.  We
fall back to ptrace if and only if /proc/PID/mem is not writable, as a
concession for obsolescent kernels (such as found in RHEL6).  For
modern kernels, the fallback shouldn't trigger.  GDBserver has already
dropped the ptrace fallback, and at some point, we'll consider
removing it from native GDB too.

/proc/PID/mem has a few advantages over alternatives like
PTRACE_PEEKTEXT/PTRACE_POKETEXT or process_vm_readv/process_vm_writev:

- Because we can use a single read/write call, /proc/PID/mem can be
  much more efficient than banging away at
  PTRACE_PEEKTEXT/PTRACE_POKETEXT, one word at a time.

- /proc/PID/mem allows writing to read-only pages, which we need to
  e.g., plant breakpoint instructions.  process_vm_writev does not
  allow this.

- /proc/PID/mem allows memory access even if all threads are running.
  OTOH, PTRACE_PEEKTEXT/PTRACE_POKETEXT require passing down the tid
  of a stopped task.  This lets us e.g., install breakpoints while the
  inferior is running, clear a displaced stepping scratch pad when the
  thread that was displaced stepping exits, print inferior globals,
  etc., all without having to worry about temporarily pausing some
  thread.

- /proc/PID/mem does not suffer from a race that could cause us to
  access memory of the wrong address space when the inferior execs.

  process_vm_readv/process_vm_writev have this problem.

  E.g., say GDB decides to write to memory just while the inferior
  execs.  In this scenario, GDB could write memory to the post-exec
  address space thinking it was writing to the pre-exec address space,
  with high probability of corrupting the inferior.  Or if GDB decides
  instead to read memory just while the inferior execs, it could read
  bogus contents out of the wrong address space.

  ptrace used to have this problem too, but no longer has since Linux
  commit dbb5afad100a ("ptrace: make ptrace() fail if the tracee
  changed its pid unexpectedly"), in Linux 5.13.  (And if ptrace were
  ever changed to allow memory access via zombie or running threads,
  it had better not forget to consider this scenario.)

  We avoid this race with /proc/PID/mem, by opening the file as soon
  as we start debugging the inferior, when it is known the inferior is
  stopped, and holding on to the open file descriptor, to be used
  whenever we need to access inferior memory.  If the inferior execs
  or exits, reading/writing from/to the file returns 0 (EOF),
  indicating the address space is gone, and so we return
  TARGET_XFER_EOF to the core.  We close the old file and open a new
  one when we finally see the PTRACE_EVENT_EXEC event.  */
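
/* For illustration only, a minimal sketch of that /proc/PID/mem
   approach; the real code below caches the descriptor per inferior
   and handles errors and the EOF-on-exec case described above.  The
   "1234" pid and ADDR are placeholders.

     // Open once, early, while the inferior is known to be stopped.
     int fd = open ("/proc/1234/mem", O_RDWR | O_LARGEFILE | O_CLOEXEC);

     // Later, transfer memory at an arbitrary inferior address, even
     // while the inferior's threads are running.
     unsigned char buf[64];
     ssize_t n = pread64 (fd, buf, sizeof (buf), (off_t) addr);
     if (n == 0)
       {
         // EOF: the address space is gone (exec or exit); report
         // TARGET_XFER_EOF to the core.
       }
*/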

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

struct linux_nat_target *linux_target;

/* Does the current host support PTRACE_GETREGSET?  */
enum tribool have_ptrace_getregset = TRIBOOL_UNKNOWN;

/* When true, print debug messages relating to the linux native target.  */

static bool debug_linux_nat;

/* Implement 'show debug linux-nat'.  */

static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
                      struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Debugging of GNU/Linux native targets is %s.\n"),
              value);
}

/* Print a linux-nat debug statement.  */

#define linux_nat_debug_printf(fmt, ...) \
  debug_prefixed_printf_cond (debug_linux_nat, "linux-nat", fmt, ##__VA_ARGS__)

/* Print "linux-nat" enter/exit debug statements.  */

#define LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT \
  scoped_debug_enter_exit (debug_linux_nat, "linux-nat")

struct simple_pid_list
{
  int pid;
  int status;
  struct simple_pid_list *next;
};
static struct simple_pid_list *stopped_pids;

/* Whether target_thread_events is in effect.  */
static int report_thread_events;

static int kill_lwp (int lwpid, int signo);

static int stop_callback (struct lwp_info *lp);

static void block_child_signals (sigset_t *prev_mask);
static void restore_child_signals_mask (sigset_t *prev_mask);

struct lwp_info;
static struct lwp_info *add_lwp (ptid_t ptid);
static void purge_lwp_list (int pid);
static void delete_lwp (ptid_t ptid);
static struct lwp_info *find_lwp_pid (ptid_t ptid);

static int lwp_status_pending_p (struct lwp_info *lp);

static bool is_lwp_marked_dead (lwp_info *lp);

static void save_stop_reason (struct lwp_info *lp);

static bool proc_mem_file_is_writable ();
static void close_proc_mem_file (pid_t pid);
static void open_proc_mem_file (ptid_t ptid);

/* Return TRUE if LWP is the leader thread of the process.  */

static bool
is_leader (lwp_info *lp)
{
  return lp->ptid.pid () == lp->ptid.lwp ();
}

/* Convert an LWP's pending status to a std::string.  */

static std::string
pending_status_str (lwp_info *lp)
{
  gdb_assert (lwp_status_pending_p (lp));

  if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
    return lp->waitstatus.to_string ();
  else
    return status_to_str (lp->status);
}

/* Return true if we should report exit events for LP.  */

static bool
report_exit_events_for (lwp_info *lp)
{
  thread_info *thr = linux_target->find_thread (lp->ptid);
  gdb_assert (thr != nullptr);

  return (report_thread_events
          || (thr->thread_options () & GDB_THREAD_OPTION_EXIT) != 0);
}

\f
/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return lwp->ptid;
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
                           struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->step;
}

\f
/* Trivial list manipulation functions to keep track of a list of
   new stopped processes.  */
static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}

/* Return the ptrace options that we want to try to enable.  */

static int
linux_nat_ptrace_options (int attached)
{
  int options = 0;

  if (!attached)
    options |= PTRACE_O_EXITKILL;

  options |= (PTRACE_O_TRACESYSGOOD
              | PTRACE_O_TRACEVFORKDONE
              | PTRACE_O_TRACEVFORK
              | PTRACE_O_TRACEFORK
              | PTRACE_O_TRACEEXEC);

  return options;
}

/* Initialize ptrace and procfs warnings and check for supported
   ptrace features given PID.

   ATTACHED should be nonzero iff we attached to the inferior.  */

static void
linux_init_ptrace_procfs (pid_t pid, int attached)
{
  int options = linux_nat_ptrace_options (attached);

  linux_enable_event_reporting (pid, options);
  linux_ptrace_init_warnings ();
  linux_proc_init_warnings ();
  proc_mem_file_is_writable ();
}

linux_nat_target::~linux_nat_target ()
{}

void
linux_nat_target::post_attach (int pid)
{
  linux_init_ptrace_procfs (pid, 1);
}

/* Implement the virtual inf_ptrace_target::post_startup_inferior method.  */

void
linux_nat_target::post_startup_inferior (ptid_t ptid)
{
  linux_init_ptrace_procfs (ptid.pid (), 0);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;

  for (const lwp_info *lp ATTRIBUTE_UNUSED : all_lwps ())
    if (lp->ptid.pid () == pid)
      count++;

  return count;
}

/* Deleter for lwp_info unique_ptr specialisation.  */

struct lwp_deleter
{
  void operator() (struct lwp_info *lwp) const
  {
    delete_lwp (lwp->ptid);
  }
};

/* A unique_ptr specialisation for lwp_info.  */

typedef std::unique_ptr<struct lwp_info, lwp_deleter> lwp_info_up;

/* Target hook for follow_fork.  */

void
linux_nat_target::follow_fork (inferior *child_inf, ptid_t child_ptid,
                               target_waitkind fork_kind, bool follow_child,
                               bool detach_fork)
{
  inf_ptrace_target::follow_fork (child_inf, child_ptid, fork_kind,
                                  follow_child, detach_fork);

  if (!follow_child)
    {
      bool has_vforked = fork_kind == TARGET_WAITKIND_VFORKED;
      ptid_t parent_ptid = inferior_ptid;
      int parent_pid = parent_ptid.lwp ();
      int child_pid = child_ptid.lwp ();

      /* We're already attached to the parent, by default.  */
      lwp_info *child_lp = add_lwp (child_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;

      /* Detach new forked process?  */
      if (detach_fork)
        {
          int child_stop_signal = 0;
          bool detach_child = true;

          /* Move CHILD_LP into a unique_ptr and clear the source pointer
             to prevent us doing anything stupid with it.  */
          lwp_info_up child_lp_ptr (child_lp);
          child_lp = nullptr;

          linux_target->low_prepare_to_resume (child_lp_ptr.get ());

          /* When debugging an inferior in an architecture that supports
             hardware single stepping on a kernel without commit
             6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
             process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
             set if the parent process had them set.
             To work around this, single step the child process
             once before detaching to clear the flags.  */

          /* Note that we consult the parent's architecture instead of
             the child's because there's no inferior for the child at
             this point.  */
          if (!gdbarch_software_single_step_p (target_thread_architecture
                                               (parent_ptid)))
            {
              int status;

              linux_disable_event_reporting (child_pid);
              if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
                perror_with_name (_("Couldn't do single step"));
              if (my_waitpid (child_pid, &status, 0) < 0)
                perror_with_name (_("Couldn't wait vfork process"));
              else
                {
                  detach_child = WIFSTOPPED (status);
                  child_stop_signal = WSTOPSIG (status);
                }
            }

          if (detach_child)
            {
              int signo = child_stop_signal;

              if (signo != 0
                  && !signal_pass_state (gdb_signal_from_host (signo)))
                signo = 0;
              ptrace (PTRACE_DETACH, child_pid, 0, signo);

              close_proc_mem_file (child_pid);
            }
        }

      if (has_vforked)
        {
          lwp_info *parent_lp = find_lwp_pid (parent_ptid);
          linux_nat_debug_printf ("waiting for VFORK_DONE on %d", parent_pid);
          parent_lp->stopped = 1;

          /* We'll handle the VFORK_DONE event like any other
             event, in target_wait.  */
        }
    }
  else
    {
      struct lwp_info *child_lp;

      child_lp = add_lwp (child_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;
    }
}

\f
int
linux_nat_target::insert_fork_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::remove_fork_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::insert_vfork_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::remove_vfork_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::insert_exec_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::remove_exec_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::set_syscall_catchpoint (int pid, bool needed, int any_count,
                                          gdb::array_view<const int> syscall_counts)
{
  /* On GNU/Linux, we ignore the arguments.  It means that we only
     enable the syscall catchpoints, but do not disable them.

     Also, we do not use the `syscall_counts' information because we do not
     filter system calls here.  We let GDB do the logic for us.  */
  return 0;
}

/* List of known LWPs, keyed by LWP PID.  This speeds up the common
   case of mapping a PID returned from the kernel to our corresponding
   lwp_info data structure.  */
static htab_t lwp_lwpid_htab;

/* Calculate a hash from a lwp_info's LWP PID.  */

static hashval_t
lwp_info_hash (const void *ap)
{
  const struct lwp_info *lp = (struct lwp_info *) ap;
  pid_t pid = lp->ptid.lwp ();

  return iterative_hash_object (pid, 0);
}

/* Equality function for the lwp_info hash table.  Compares the LWP's
   PID.  */

static int
lwp_lwpid_htab_eq (const void *a, const void *b)
{
  const struct lwp_info *entry = (const struct lwp_info *) a;
  const struct lwp_info *element = (const struct lwp_info *) b;

  return entry->ptid.lwp () == element->ptid.lwp ();
}

/* Create the lwp_lwpid_htab hash table.  */

static void
lwp_lwpid_htab_create (void)
{
  lwp_lwpid_htab = htab_create (100, lwp_info_hash, lwp_lwpid_htab_eq, NULL);
}

/* Add LP to the hash table.  */

static void
lwp_lwpid_htab_add_lwp (struct lwp_info *lp)
{
  void **slot;

  slot = htab_find_slot (lwp_lwpid_htab, lp, INSERT);
  gdb_assert (slot != NULL && *slot == NULL);
  *slot = lp;
}

/* Head of doubly-linked list of known LWPs.  Sorted by reverse
   creation order.  This order is assumed in some cases.  E.g.,
   reaping status after killing all lwps of a process: the leader LWP
   must be reaped last.  */

static intrusive_list<lwp_info> lwp_list;

/* See linux-nat.h.  */

lwp_info_range
all_lwps ()
{
  return lwp_info_range (lwp_list.begin ());
}

/* See linux-nat.h.  */

lwp_info_safe_range
all_lwps_safe ()
{
  return lwp_info_safe_range (lwp_list.begin ());
}

/* Add LP to sorted-by-reverse-creation-order doubly-linked list.  */

static void
lwp_list_add (struct lwp_info *lp)
{
  lwp_list.push_front (*lp);
}

/* Remove LP from sorted-by-reverse-creation-order doubly-linked
   list.  */

static void
lwp_list_remove (struct lwp_info *lp)
{
  /* Remove from sorted-by-creation-order list.  */
  lwp_list.erase (lwp_list.iterator_to (*lp));
}

\f

/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
   _initialize_linux_nat.  */
static sigset_t suspend_mask;

/* Signals to block to make sigsuspend work.  */
static sigset_t blocked_mask;

/* SIGCHLD action.  */
static struct sigaction sigchld_action;

/* Block child signals (SIGCHLD and linux threads signals), and store
   the previous mask in PREV_MASK.  */

static void
block_child_signals (sigset_t *prev_mask)
{
  /* Make sure SIGCHLD is blocked.  */
  if (!sigismember (&blocked_mask, SIGCHLD))
    sigaddset (&blocked_mask, SIGCHLD);

  gdb_sigmask (SIG_BLOCK, &blocked_mask, prev_mask);
}

/* Restore child signals mask, previously returned by
   block_child_signals.  */

static void
restore_child_signals_mask (sigset_t *prev_mask)
{
  gdb_sigmask (SIG_SETMASK, prev_mask, NULL);
}

/* Mask of signals to pass directly to the inferior.  */
static sigset_t pass_mask;

/* Update signals to pass to the inferior.  */
void
linux_nat_target::pass_signals
  (gdb::array_view<const unsigned char> pass_signals)
{
  int signo;

  sigemptyset (&pass_mask);

  for (signo = 1; signo < NSIG; signo++)
    {
      int target_signo = gdb_signal_from_host (signo);
      if (target_signo < pass_signals.size () && pass_signals[target_signo])
        sigaddset (&pass_mask, signo);
    }
}

\f

/* Prototypes for local functions.  */
static int stop_wait_callback (struct lwp_info *lp);
static int resume_stopped_resumed_lwps (struct lwp_info *lp, const ptid_t wait_ptid);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);

\f

/* Destroy and free LP.  */

lwp_info::~lwp_info ()
{
  /* Let the arch specific bits release arch_lwp_info.  */
  linux_target->low_delete_thread (this->arch_private);
}

/* Traversal function for purge_lwp_list.  */

static int
lwp_lwpid_htab_remove_pid (void **slot, void *info)
{
  struct lwp_info *lp = (struct lwp_info *) *slot;
  int pid = *(int *) info;

  if (lp->ptid.pid () == pid)
    {
      htab_clear_slot (lwp_lwpid_htab, slot);
      lwp_list_remove (lp);
      delete lp;
    }

  return 1;
}

/* Remove all LWPs belonging to PID from the lwp list.  */

static void
purge_lwp_list (int pid)
{
  htab_traverse_noresize (lwp_lwpid_htab, lwp_lwpid_htab_remove_pid, &pid);
}

/* Add the LWP specified by PTID to the list.  PTID is the first LWP
   in the process.  Return a pointer to the structure describing the
   new LWP.

   This differs from add_lwp in that we don't let the arch specific
   bits know about this new thread.  Current clients of this callback
   take the opportunity to install watchpoints in the new thread, and
   we shouldn't do that for the first thread.  If we're spawning a
   child ("run"), the thread executes the shell wrapper first, and we
   shouldn't touch it until it execs the program we want to debug.
   For "attach", it'd be okay to call the callback, but it's not
   necessary, because watchpoints can't yet have been inserted into
   the inferior.  */

static struct lwp_info *
add_initial_lwp (ptid_t ptid)
{
  gdb_assert (ptid.lwp_p ());

  lwp_info *lp = new lwp_info (ptid);

  /* Add to sorted-by-reverse-creation-order list.  */
  lwp_list_add (lp);

  /* Add to keyed-by-pid htab.  */
  lwp_lwpid_htab_add_lwp (lp);

  return lp;
}

/* Add the LWP specified by PID to the list.  Return a pointer to the
   structure describing the new LWP.  The LWP should already be
   stopped.  */

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  lp = add_initial_lwp (ptid);

  /* Let the arch specific bits know about this new thread.  Current
     clients of this callback take the opportunity to install
     watchpoints in the new thread.  We don't do this for the first
     thread though.  See add_initial_lwp.  */
  linux_target->low_new_thread (lp);

  return lp;
}

/* Remove the LWP specified by PID from the list.  */

static void
delete_lwp (ptid_t ptid)
{
  lwp_info dummy (ptid);

  void **slot = htab_find_slot (lwp_lwpid_htab, &dummy, NO_INSERT);
  if (slot == NULL)
    return;

  lwp_info *lp = *(struct lwp_info **) slot;
  gdb_assert (lp != NULL);

  htab_clear_slot (lwp_lwpid_htab, slot);

  /* Remove from sorted-by-creation-order list.  */
  lwp_list_remove (lp);

  /* Release.  */
  delete lp;
}

/* Return a pointer to the structure describing the LWP corresponding
   to PID.  If no corresponding LWP could be found, return NULL.  */

static struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  int lwp;

  if (ptid.lwp_p ())
    lwp = ptid.lwp ();
  else
    lwp = ptid.pid ();

  lwp_info dummy (ptid_t (0, lwp));
  return (struct lwp_info *) htab_find (lwp_lwpid_htab, &dummy);
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
                   gdb::function_view<iterate_over_lwps_ftype> callback)
{
  for (lwp_info *lp : all_lwps_safe ())
    {
      if (lp->ptid.matches (filter))
        {
          if (callback (lp) != 0)
            return lp;
        }
    }

  return NULL;
}

/* Update our internal state when changing from one checkpoint to
   another indicated by NEW_PTID.  We can only switch single-threaded
   applications, so we only create one new LWP, and the previous list
   is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  purge_lwp_list (inferior_ptid.pid ());

  lp = add_lwp (new_ptid);
  lp->stopped = 1;

  /* This changes the thread's ptid while preserving the gdb thread
     num.  Also changes the inferior pid, while preserving the
     inferior num.  */
  thread_change_ptid (linux_target, inferior_ptid, new_ptid);

  /* We've just told GDB core that the thread changed target id, but,
     in fact, it really is a different thread, with different register
     contents.  */
  registers_changed ();
}

/* Handle the exit of a single thread LP.  If DEL_THREAD is true,
   delete the thread_info associated to LP, if it exists.  */

static void
exit_lwp (struct lwp_info *lp, bool del_thread = true)
{
  struct thread_info *th = linux_target->find_thread (lp->ptid);

  if (th != nullptr && del_thread)
    delete_thread (th);

  delete_lwp (lp->ptid);
}

/* Wait for the LWP specified by LP, which we have just attached to.
   Returns a wait status for that LWP, to cache.  */

static int
linux_nat_post_attach_wait (ptid_t ptid, int *signalled)
{
  pid_t new_pid, pid = ptid.lwp ();
  int status;

  if (linux_proc_pid_is_stopped (pid))
    {
      linux_nat_debug_printf ("Attaching to a stopped process");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
         is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the SIGSTOP
         (or a higher priority signal, just like normal PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, __WALL);
  gdb_assert (pid == new_pid);

  if (!WIFSTOPPED (status))
    {
      /* The pid we tried to attach to has apparently just exited.  */
      linux_nat_debug_printf ("Failed to stop %d: %s", pid,
                              status_to_str (status).c_str ());
      return status;
    }

  if (WSTOPSIG (status) != SIGSTOP)
    {
      *signalled = 1;
      linux_nat_debug_printf ("Received %s after attaching",
                              status_to_str (status).c_str ());
    }

  return status;
}

void
linux_nat_target::create_inferior (const char *exec_file,
                                   const std::string &allargs,
                                   char **env, int from_tty)
{
  maybe_disable_address_space_randomization restore_personality
    (disable_randomization);

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

  /* Make sure we report all signals during startup.  */
  pass_signals ({});

  inf_ptrace_target::create_inferior (exec_file, allargs, env, from_tty);

  open_proc_mem_file (inferior_ptid);
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  struct lwp_info *lp;

  /* Ignore LWPs we're already attached to.  */
  lp = find_lwp_pid (ptid);
  if (lp == NULL)
    {
      int lwpid = ptid.lwp ();

      if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
        {
          int err = errno;

          /* Be quiet if we simply raced with the thread exiting.
             EPERM is returned if the thread's task still exists, and
             is marked as exited or zombie, as well as other
             conditions, so in that case, confirm the status in
             /proc/PID/status.  */
          if (err == ESRCH
              || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
            {
              linux_nat_debug_printf
                ("Cannot attach to lwp %d: thread is gone (%d: %s)",
                 lwpid, err, safe_strerror (err));
            }
          else
            {
              std::string reason
                = linux_ptrace_attach_fail_reason_string (ptid, err);

              error (_("Cannot attach to lwp %d: %s"),
                     lwpid, reason.c_str ());
            }
        }
      else
        {
          linux_nat_debug_printf ("PTRACE_ATTACH %s, 0, 0 (OK)",
                                  ptid.to_string ().c_str ());

          lp = add_lwp (ptid);

          /* The next time we wait for this LWP we'll see a SIGSTOP as
             PTRACE_ATTACH brings it to a halt.  */
          lp->signalled = 1;

          /* We need to wait for a stop before being able to make the
             next ptrace call on this LWP.  */
          lp->must_set_ptrace_flags = 1;

          /* So that wait collects the SIGSTOP.  */
          lp->resumed = 1;
        }

      return 1;
    }
  return 0;
}

void
linux_nat_target::attach (const char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;
  ptid_t ptid;

  /* Make sure we report all signals during attach.  */
  pass_signals ({});

  try
    {
      inf_ptrace_target::attach (args, from_tty);
    }
  catch (const gdb_exception_error &ex)
    {
      pid_t pid = parse_pid_to_attach (args);
      std::string reason = linux_ptrace_attach_fail_reason (pid);

      if (!reason.empty ())
        throw_error (ex.error, "warning: %s\n%s", reason.c_str (),
                     ex.what ());
      else
        throw_error (ex.error, "%s", ex.what ());
    }

  /* The ptrace base target adds the main thread with (pid,0,0)
     format.  Decorate it with lwp info.  */
  ptid = ptid_t (inferior_ptid.pid (),
                 inferior_ptid.pid ());
  thread_change_ptid (linux_target, inferior_ptid, ptid);

  /* Add the initial process as the first LWP to the list.  */
  lp = add_initial_lwp (ptid);

  status = linux_nat_post_attach_wait (lp->ptid, &lp->signalled);
  if (!WIFSTOPPED (status))
    {
      if (WIFEXITED (status))
        {
          int exit_code = WEXITSTATUS (status);

          target_terminal::ours ();
          target_mourn_inferior (inferior_ptid);
          if (exit_code == 0)
            error (_("Unable to attach: program exited normally."));
          else
            error (_("Unable to attach: program exited with code %d."),
                   exit_code);
        }
      else if (WIFSIGNALED (status))
        {
          enum gdb_signal signo;

          target_terminal::ours ();
          target_mourn_inferior (inferior_ptid);

          signo = gdb_signal_from_host (WTERMSIG (status));
          error (_("Unable to attach: program terminated with signal "
                   "%s, %s."),
                 gdb_signal_to_name (signo),
                 gdb_signal_to_string (signo));
        }

      internal_error (_("unexpected status %d for PID %ld"),
                      status, (long) ptid.lwp ());
    }

  lp->stopped = 1;

  open_proc_mem_file (lp->ptid);

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  linux_nat_debug_printf ("waitpid %ld, saving status %s",
                          (long) lp->ptid.pid (),
                          status_to_str (status).c_str ());

  lp->status = status;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  The inferior may be using raw clone instead of
     using pthreads.  But even if it is using pthreads, thread_db
     walks structures in the inferior's address space to find the list
     of threads/LWPs, and those structures may well be corrupted.
     Note that once thread_db is loaded, we'll still use it to list
     threads and associate pthread info with each LWP.  */
  try
    {
      linux_proc_attach_tgid_threads (lp->ptid.pid (),
                                      attach_proc_task_lwp_callback);
    }
  catch (const gdb_exception_error &)
    {
      /* Failed to attach to some LWP.  Detach any we've already
         attached to.  */
      iterate_over_lwps (ptid_t (ptid.pid ()),
                         [] (struct lwp_info *lwp) -> int
                         {
                           /* Ignore errors when detaching.  */
                           ptrace (PTRACE_DETACH, lwp->ptid.lwp (), 0, 0);
                           delete_lwp (lwp->ptid);
                           return 0;
                         });

      target_terminal::ours ();
      target_mourn_inferior (inferior_ptid);

      throw;
    }

  /* Add all the LWPs to gdb's thread list.  */
  iterate_over_lwps (ptid_t (ptid.pid ()),
                     [] (struct lwp_info *lwp) -> int
                     {
                       if (lwp->ptid.pid () != lwp->ptid.lwp ())
                         {
                           add_thread (linux_target, lwp->ptid);
                           set_running (linux_target, lwp->ptid, true);
                           set_executing (linux_target, lwp->ptid, true);
                         }
                       return 0;
                     });
}

/* Ptrace-detach the thread with pid PID.  */

static void
detach_one_pid (int pid, int signo)
{
  if (ptrace (PTRACE_DETACH, pid, 0, signo) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
         zombie.  This can happen if one of the already-detached
         threads exits the whole thread group.  In that case we're
         still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
        {
          int ret, status;

          ret = my_waitpid (pid, &status, __WALL);
          if (ret == -1)
            {
              warning (_("Couldn't reap LWP %d while detaching: %s"),
                       pid, safe_strerror (errno));
            }
          else if (!WIFEXITED (status) && !WIFSIGNALED (status))
            {
              warning (_("Reaping LWP %d while detaching "
                         "returned unexpected status 0x%x"),
                       pid, status);
            }
        }
      else
        error (_("Can't detach %d: %s"),
               pid, safe_strerror (save_errno));
    }
  else
    linux_nat_debug_printf ("PTRACE_DETACH (%d, %s, 0) (OK)",
                            pid, strsignal (signo));
}

/* Get pending signal of THREAD as a host signal number, for detaching
   purposes.  This is the signal the thread last stopped for, which we
   need to deliver to the thread when detaching, otherwise, it'd be
   suppressed/lost.  */

static int
get_detach_signal (struct lwp_info *lp)
{
  enum gdb_signal signo = GDB_SIGNAL_0;

  /* If we paused threads momentarily, we may have stored pending
     events in lp->status or lp->waitstatus (see stop_wait_callback),
     and GDB core hasn't seen any signal for those threads.
     Otherwise, the last signal reported to the core is found in the
     thread object's stop_signal.

     There's a corner case that isn't handled here at present.  Only
     if the thread stopped with a TARGET_WAITKIND_STOPPED does
     stop_signal make sense as a real signal to pass to the inferior.
     Some catchpoint related events, like
     TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
     to GDB_SIGNAL_SIGTRAP when the catchpoint triggers.  But,
     those traps are debug API (ptrace in our case) related and
     induced; the inferior wouldn't see them if it wasn't being
     traced.  Hence, we should never pass them to the inferior, even
     when set to pass state.  Since this corner case isn't handled by
     infrun.c when proceeding with a signal, for consistency, neither
     do we handle it here (or elsewhere in the file we check for
     signal pass state).  Normally SIGTRAP isn't set to pass state, so
     this is really a corner case.  */

  if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
    signo = GDB_SIGNAL_0;	/* a pending ptrace event, not a real signal.  */
  else if (lp->status)
    signo = gdb_signal_from_host (WSTOPSIG (lp->status));
  else
    {
      thread_info *tp = linux_target->find_thread (lp->ptid);

      if (target_is_non_stop_p () && !tp->executing ())
        {
          if (tp->has_pending_waitstatus ())
            {
              /* If the thread has a pending event, and it was stopped with a
                 signal, use that signal to resume it.  If it has a pending
                 event of another kind, it was not stopped with a signal, so
                 resume it without a signal.  */
              if (tp->pending_waitstatus ().kind () == TARGET_WAITKIND_STOPPED)
                signo = tp->pending_waitstatus ().sig ();
              else
                signo = GDB_SIGNAL_0;
            }
          else
            signo = tp->stop_signal ();
        }
      else if (!target_is_non_stop_p ())
        {
          ptid_t last_ptid;
          process_stratum_target *last_target;

          get_last_target_status (&last_target, &last_ptid, nullptr);

          if (last_target == linux_target
              && lp->ptid.lwp () == last_ptid.lwp ())
            signo = tp->stop_signal ();
        }
    }

  if (signo == GDB_SIGNAL_0)
    {
      linux_nat_debug_printf ("lwp %s has no pending signal",
                              lp->ptid.to_string ().c_str ());
    }
  else if (!signal_pass_state (signo))
    {
      linux_nat_debug_printf
        ("lwp %s had signal %s but it is in no pass state",
         lp->ptid.to_string ().c_str (), gdb_signal_to_string (signo));
    }
  else
    {
      linux_nat_debug_printf ("lwp %s has pending signal %s",
                              lp->ptid.to_string ().c_str (),
                              gdb_signal_to_string (signo));

      return gdb_signal_to_host (signo);
    }

  return 0;
}

/* If LP has a pending fork/vfork/clone status, return it.  */

static std::optional<target_waitstatus>
get_pending_child_status (lwp_info *lp)
{
  LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;

  linux_nat_debug_printf ("lwp %s (stopped = %d)",
                          lp->ptid.to_string ().c_str (), lp->stopped);

  /* Check in lwp_info::status.  */
  if (WIFSTOPPED (lp->status) && linux_is_extended_waitstatus (lp->status))
    {
      int event = linux_ptrace_get_extended_event (lp->status);

      if (event == PTRACE_EVENT_FORK
          || event == PTRACE_EVENT_VFORK
          || event == PTRACE_EVENT_CLONE)
        {
          unsigned long child_pid;
          int ret = ptrace (PTRACE_GETEVENTMSG, lp->ptid.lwp (), 0, &child_pid);
          if (ret == 0)
            {
              target_waitstatus ws;

              if (event == PTRACE_EVENT_FORK)
                ws.set_forked (ptid_t (child_pid, child_pid));
              else if (event == PTRACE_EVENT_VFORK)
                ws.set_vforked (ptid_t (child_pid, child_pid));
              else if (event == PTRACE_EVENT_CLONE)
                ws.set_thread_cloned (ptid_t (lp->ptid.pid (), child_pid));
              else
                gdb_assert_not_reached ("unhandled");

              return ws;
            }
          else
            {
              perror_warning_with_name (_("Failed to retrieve event msg"));
              return {};
            }
        }
    }

  /* Check in lwp_info::waitstatus.  */
  if (is_new_child_status (lp->waitstatus.kind ()))
    return lp->waitstatus;

  thread_info *tp = linux_target->find_thread (lp->ptid);

  /* Check in thread_info::pending_waitstatus.  */
  if (tp->has_pending_waitstatus ()
      && is_new_child_status (tp->pending_waitstatus ().kind ()))
    return tp->pending_waitstatus ();

  /* Check in thread_info::pending_follow.  */
  if (is_new_child_status (tp->pending_follow.kind ()))
    return tp->pending_follow;

  return {};
}

/* Detach from LP.  If SIGNO_P is non-NULL, then it points to the
   signal number that should be passed to the LWP when detaching.
   Otherwise pass any pending signal the LWP may have, if any.  */

static void
detach_one_lwp (struct lwp_info *lp, int *signo_p)
{
  int lwpid = lp->ptid.lwp ();
  int signo;

  /* If the lwp/thread we are about to detach has a pending fork/clone
     event, there is a process/thread GDB is attached to that the core
     of GDB doesn't know about.  Detach from it.  */

  std::optional<target_waitstatus> ws = get_pending_child_status (lp);
  if (ws.has_value ())
    detach_one_pid (ws->child_ptid ().lwp (), 0);

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lp->signalled)
    {
      linux_nat_debug_printf ("Sending SIGCONT to %s",
                              lp->ptid.to_string ().c_str ());

      kill_lwp (lwpid, SIGCONT);
      lp->signalled = 0;
    }

  /* If the lwp has exited or was terminated due to a signal, there's
     nothing left to do.  */
  if (is_lwp_marked_dead (lp))
    {
      linux_nat_debug_printf
        ("Can't detach %s - it has exited or was terminated: %s.",
         lp->ptid.to_string ().c_str (),
         lp->waitstatus.to_string ().c_str ());
      delete_lwp (lp->ptid);
      return;
    }

  if (signo_p == NULL)
    {
      /* Pass on any pending signal for this LWP.  */
      signo = get_detach_signal (lp);
    }
  else
    signo = *signo_p;

  linux_nat_debug_printf ("preparing to resume lwp %s (stopped = %d)",
                          lp->ptid.to_string ().c_str (),
                          lp->stopped);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      linux_target->low_prepare_to_resume (lp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lp))
        throw;
    }

  detach_one_pid (lwpid, signo);

  delete_lwp (lp->ptid);
}

static int
detach_callback (struct lwp_info *lp)
{
  /* We don't actually detach from the thread group leader just yet.
     If the thread group exits, we must reap the zombie clone lwps
     before we're able to reap the leader.  */
  if (lp->ptid.lwp () != lp->ptid.pid ())
    detach_one_lwp (lp, NULL);
  return 0;
}

void
linux_nat_target::detach (inferior *inf, int from_tty)
{
  LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;

  struct lwp_info *main_lwp;
  int pid = inf->pid;

  /* Don't unregister from the event loop, as there may be other
     inferiors running.  */

  /* Stop all threads before detaching.  ptrace requires that the
     thread is stopped to successfully detach.  */
  iterate_over_lwps (ptid_t (pid), stop_callback);
  /* ... and wait until all of them have reported back that
     they're no longer running.  */
  iterate_over_lwps (ptid_t (pid), stop_wait_callback);

  /* We can now safely remove breakpoints.  We don't do this earlier
     in common code because this target doesn't currently support
     writing memory while the inferior is running.  */
  remove_breakpoints_inf (current_inferior ());

  iterate_over_lwps (ptid_t (pid), detach_callback);

  /* We have detached from everything except the main thread now, so
     should only have one thread left.  However, in non-stop mode the
     main thread might have exited, in which case we'll have no threads
     left.  */
  gdb_assert (num_lwps (pid) == 1
              || (target_is_non_stop_p () && num_lwps (pid) == 0));

  if (pid == inferior_ptid.pid () && forks_exist_p ())
    {
      /* Multi-fork case.  The current inferior_ptid is being detached
         from, but there are other viable forks to debug.  Detach from
         the current fork, and context-switch to the first
         available.  */
      linux_fork_detach (from_tty, find_lwp_pid (ptid_t (pid)));
    }
  else
    {
      target_announce_detach (from_tty);

      /* In non-stop mode it is possible that the main thread has exited,
         in which case we don't try to detach.  */
      main_lwp = find_lwp_pid (ptid_t (pid));
      if (main_lwp != nullptr)
        {
          /* Pass on any pending signal for the last LWP.  */
          int signo = get_detach_signal (main_lwp);

          detach_one_lwp (main_lwp, &signo);
        }
      else
        gdb_assert (target_is_non_stop_p ());

      detach_success (inf);
    }

  close_proc_mem_file (pid);
}

8a99810d
PA
1586/* Resume execution of the inferior process. If STEP is nonzero,
1587 single-step it. If SIGNAL is nonzero, give it that signal. */
1588
1589static void
23f238d3
PA
1590linux_resume_one_lwp_throw (struct lwp_info *lp, int step,
1591 enum gdb_signal signo)
8a99810d 1592{
8a99810d 1593 lp->step = step;
9c02b525
PA
1594
1595 /* stop_pc doubles as the PC the LWP had when it was last resumed.
1596 We only presently need that if the LWP is stepped though (to
1597 handle the case of stepping a breakpoint instruction). */
1598 if (step)
1599 {
5b6d1e4f 1600 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
9c02b525
PA
1601
1602 lp->stop_pc = regcache_read_pc (regcache);
1603 }
1604 else
1605 lp->stop_pc = 0;
1606
135340af 1607 linux_target->low_prepare_to_resume (lp);
f6ac5f3d 1608 linux_target->low_resume (lp->ptid, step, signo);
23f238d3
PA
1609
1610 /* Successfully resumed. Clear state that no longer makes sense,
1611 and mark the LWP as running. Must not do this before resuming
1612 otherwise if that fails other code will be confused. E.g., we'd
1613 later try to stop the LWP and hang forever waiting for a stop
1614 status. Note that we must not throw after this is cleared,
1615 otherwise handle_zombie_lwp_error would get confused. */
8a99810d 1616 lp->stopped = 0;
1ad3de98 1617 lp->core = -1;
23f238d3 1618 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
5b6d1e4f 1619 registers_changed_ptid (linux_target, lp->ptid);
8a99810d
PA
1620}
1621
23f238d3
PA
1622/* Called when we try to resume a stopped LWP and that errors out. If
1623 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
1624 or about to become), discard the error, clear any pending status
1625 the LWP may have, and return true (we'll collect the exit status
1626 soon enough). Otherwise, return false. */
1627
1628static int
1629check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
1630{
1631 /* If we get an error after resuming the LWP successfully, we'd
1632 confuse !T state for the LWP being gone. */
1633 gdb_assert (lp->stopped);
1634
1635 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
1636 because even if ptrace failed with ESRCH, the tracee may be "not
1637 yet fully dead", but already refusing ptrace requests. In that
1638 case the tracee has 'R (Running)' state for a little bit
1639 (observed in Linux 3.18). See also the note on ESRCH in the
1640 ptrace(2) man page. Instead, check whether the LWP has any state
1641 other than ptrace-stopped. */
1642
1643 /* Don't assume anything if /proc/PID/status can't be read. */
e38504b3 1644 if (linux_proc_pid_is_trace_stopped_nowarn (lp->ptid.lwp ()) == 0)
23f238d3
PA
1645 {
1646 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
1647 lp->status = 0;
183be222 1648 lp->waitstatus.set_ignore ();
23f238d3
PA
1649 return 1;
1650 }
1651 return 0;
1652}
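
For illustration, here is a minimal sketch of the kind of /proc check that
linux_proc_pid_is_trace_stopped_nowarn performs, assuming it parses the
"State:" line of /proc/PID/status; the helper name, buffer sizes and exact
string matching below are hypothetical, not part of this file, and the
sketch needs <stdio.h>, <string.h> and <sys/types.h>:

   static int
   proc_state_is_tracing_stop (pid_t pid)
   {
     char path[64], line[256];
     int result = 0;
     FILE *f;

     snprintf (path, sizeof (path), "/proc/%d/status", (int) pid);
     f = fopen (path, "r");
     if (f == NULL)
       return 0;
     while (fgets (line, sizeof (line), f) != NULL)
       if (strncmp (line, "State:", 6) == 0)
         {
           /* Current kernels report "t (tracing stop)"; older kernels
              used "T (tracing stop)".  Anything else (R, S, Z, ...)
              means the LWP is not ptrace-stopped.  */
           result = strstr (line, "(tracing stop)") != NULL;
           break;
         }
     fclose (f);
     return result;
   }
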
1653
1654/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
1655 disappears while we try to resume it. */
1656
1657static void
1658linux_resume_one_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1659{
a70b8144 1660 try
23f238d3
PA
1661 {
1662 linux_resume_one_lwp_throw (lp, step, signo);
1663 }
230d2906 1664 catch (const gdb_exception_error &ex)
23f238d3
PA
1665 {
1666 if (!check_ptrace_stopped_lwp_gone (lp))
eedc3f4f 1667 throw;
23f238d3 1668 }
23f238d3
PA
1669}
1670
d6b0e80f
AC
1671/* Resume LP. */
1672
25289eb2 1673static void
e5ef252a 1674resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
d6b0e80f 1675{
25289eb2 1676 if (lp->stopped)
6c95b8df 1677 {
5b6d1e4f 1678 struct inferior *inf = find_inferior_ptid (linux_target, lp->ptid);
25289eb2
PA
1679
1680 if (inf->vfork_child != NULL)
1681 {
8a9da63e 1682 linux_nat_debug_printf ("Not resuming sibling %s (vfork parent)",
e53c95d4 1683 lp->ptid.to_string ().c_str ());
25289eb2 1684 }
8a99810d 1685 else if (!lwp_status_pending_p (lp))
25289eb2 1686 {
9327494e 1687 linux_nat_debug_printf ("Resuming sibling %s, %s, %s",
e53c95d4 1688 lp->ptid.to_string ().c_str (),
9327494e
SM
1689 (signo != GDB_SIGNAL_0
1690 ? strsignal (gdb_signal_to_host (signo))
1691 : "0"),
1692 step ? "step" : "resume");
25289eb2 1693
8a99810d 1694 linux_resume_one_lwp (lp, step, signo);
25289eb2
PA
1695 }
1696 else
1697 {
9327494e 1698 linux_nat_debug_printf ("Not resuming sibling %s (has pending)",
e53c95d4 1699 lp->ptid.to_string ().c_str ());
25289eb2 1700 }
6c95b8df 1701 }
25289eb2 1702 else
9327494e 1703 linux_nat_debug_printf ("Not resuming sibling %s (not stopped)",
e53c95d4 1704 lp->ptid.to_string ().c_str ());
25289eb2 1705}
d6b0e80f 1706
8817a6f2
PA
1707/* Callback for iterate_over_lwps. If LWP is EXCEPT, do nothing.
1708 Resume LWP with the last stop signal, if it is in pass state. */
e5ef252a 1709
25289eb2 1710static int
d3a70e03 1711linux_nat_resume_callback (struct lwp_info *lp, struct lwp_info *except)
25289eb2 1712{
e5ef252a
PA
1713 enum gdb_signal signo = GDB_SIGNAL_0;
1714
8817a6f2
PA
1715 if (lp == except)
1716 return 0;
1717
e5ef252a
PA
1718 if (lp->stopped)
1719 {
1720 struct thread_info *thread;
1721
9213a6d7 1722 thread = linux_target->find_thread (lp->ptid);
e5ef252a
PA
1723 if (thread != NULL)
1724 {
1edb66d8
SM
1725 signo = thread->stop_signal ();
1726 thread->set_stop_signal (GDB_SIGNAL_0);
e5ef252a
PA
1727 }
1728 }
1729
1730 resume_lwp (lp, 0, signo);
d6b0e80f
AC
1731 return 0;
1732}
1733
1734static int
d3a70e03 1735resume_clear_callback (struct lwp_info *lp)
d6b0e80f
AC
1736{
1737 lp->resumed = 0;
25289eb2 1738 lp->last_resume_kind = resume_stop;
d6b0e80f
AC
1739 return 0;
1740}
1741
1742static int
d3a70e03 1743resume_set_callback (struct lwp_info *lp)
d6b0e80f
AC
1744{
1745 lp->resumed = 1;
25289eb2 1746 lp->last_resume_kind = resume_continue;
d6b0e80f
AC
1747 return 0;
1748}
1749
f6ac5f3d 1750void
d51926f0 1751linux_nat_target::resume (ptid_t scope_ptid, int step, enum gdb_signal signo)
d6b0e80f
AC
1752{
1753 struct lwp_info *lp;
d6b0e80f 1754
9327494e
SM
1755 linux_nat_debug_printf ("Preparing to %s %s, %s, inferior_ptid %s",
1756 step ? "step" : "resume",
d51926f0 1757 scope_ptid.to_string ().c_str (),
9327494e
SM
1758 (signo != GDB_SIGNAL_0
1759 ? strsignal (gdb_signal_to_host (signo)) : "0"),
e53c95d4 1760 inferior_ptid.to_string ().c_str ());
76f50ad1 1761
7da6a5b9
LM
1762 /* Mark the lwps we're resuming as resumed and update their
1763 last_resume_kind to resume_continue. */
d51926f0 1764 iterate_over_lwps (scope_ptid, resume_set_callback);
d6b0e80f 1765
d51926f0 1766 lp = find_lwp_pid (inferior_ptid);
9f0bdab8 1767 gdb_assert (lp != NULL);
d6b0e80f 1768
9f0bdab8 1769 /* Remember if we're stepping. */
25289eb2 1770 lp->last_resume_kind = step ? resume_step : resume_continue;
d6b0e80f 1771
9f0bdab8
DJ
1772 /* If we have a pending wait status for this thread, there is no
1773 point in resuming the process. But first make sure that
1774 linux_nat_wait won't preemptively handle the event - we
1775 should never take this short-circuit if we are going to
1776 leave LP running, since we have skipped resuming all the
1777 other threads. This bit of code needs to be synchronized
1778 with linux_nat_wait. */
76f50ad1 1779
9f0bdab8
DJ
1780 if (lp->status && WIFSTOPPED (lp->status))
1781 {
2455069d
UW
1782 if (!lp->step
1783 && WSTOPSIG (lp->status)
1784 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
d6b0e80f 1785 {
9327494e
SM
1786 linux_nat_debug_printf
1787 ("Not short circuiting for ignored status 0x%x", lp->status);
9f0bdab8 1788
d6b0e80f
AC
1789 /* FIXME: What should we do if we are supposed to continue
1790 this thread with a signal? */
a493e3e2 1791 gdb_assert (signo == GDB_SIGNAL_0);
2ea28649 1792 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
9f0bdab8
DJ
1793 lp->status = 0;
1794 }
1795 }
76f50ad1 1796
8a99810d 1797 if (lwp_status_pending_p (lp))
9f0bdab8
DJ
1798 {
1799 /* FIXME: What should we do if we are supposed to continue
1800 this thread with a signal? */
a493e3e2 1801 gdb_assert (signo == GDB_SIGNAL_0);
76f50ad1 1802
57573e54
PA
1803 linux_nat_debug_printf ("Short circuiting for status %s",
1804 pending_status_str (lp).c_str ());
d6b0e80f 1805
7feb7d06
PA
1806 if (target_can_async_p ())
1807 {
4a570176 1808 target_async (true);
7feb7d06
PA
1809 /* Tell the event loop we have something to process. */
1810 async_file_mark ();
1811 }
9f0bdab8 1812 return;
d6b0e80f
AC
1813 }
1814
d51926f0
PA
1815 /* No use iterating unless we're resuming other threads. */
1816 if (scope_ptid != lp->ptid)
1817 iterate_over_lwps (scope_ptid, [=] (struct lwp_info *info)
1818 {
1819 return linux_nat_resume_callback (info, lp);
1820 });
d90e17a7 1821
9327494e
SM
1822 linux_nat_debug_printf ("%s %s, %s (resume event thread)",
1823 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
e53c95d4 1824 lp->ptid.to_string ().c_str (),
9327494e
SM
1825 (signo != GDB_SIGNAL_0
1826 ? strsignal (gdb_signal_to_host (signo)) : "0"));
b84876c2 1827
2bf6fb9d 1828 linux_resume_one_lwp (lp, step, signo);
d6b0e80f
AC
1829}
1830
c5f62d5f 1831/* Send a signal to an LWP. */
d6b0e80f
AC
1832
1833static int
1834kill_lwp (int lwpid, int signo)
1835{
4a6ed09b 1836 int ret;
d6b0e80f 1837
4a6ed09b
PA
1838 errno = 0;
1839 ret = syscall (__NR_tkill, lwpid, signo);
1840 if (errno == ENOSYS)
1841 {
1842 /* If tkill fails, then we are not using nptl threads, a
1843 configuration we no longer support. */
1844 perror_with_name (("tkill"));
1845 }
1846 return ret;
d6b0e80f
AC
1847}
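
As an aside on the choice of syscall: tkill(2) directs a signal at a single
thread, whereas kill(2) signals the whole thread group, which is why the
per-LWP stop machinery in this file cannot simply use kill.  A hedged sketch
of the equivalent call through the newer tgkill(2) interface (not what this
file uses; TGID here is an assumed variable holding the thread-group id)
would be:

   /* tgkill also guards against the LWP id having been recycled by an
      unrelated process, at the cost of needing to know the TGID.  */
   ret = syscall (__NR_tgkill, tgid, lwpid, signo);
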
1848
ca2163eb
PA
1849/* Handle a GNU/Linux syscall trap wait response. If we see a syscall
1850 event, check if the core is interested in it: if not, ignore the
1851 event, and keep waiting; otherwise, we need to toggle the LWP's
1852 syscall entry/exit status, since the ptrace event itself doesn't
1853 indicate it, and report the trap to higher layers. */
1854
1855static int
1856linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
1857{
1858 struct target_waitstatus *ourstatus = &lp->waitstatus;
1859 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
9213a6d7 1860 thread_info *thread = linux_target->find_thread (lp->ptid);
00431a78 1861 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, thread);
ca2163eb
PA
1862
1863 if (stopping)
1864 {
1865 /* If we're stopping threads, there's a SIGSTOP pending, which
1866 makes it so that the LWP reports an immediate syscall return,
1867 followed by the SIGSTOP. Skip seeing that "return" using
1868 PTRACE_CONT directly, and let stop_wait_callback collect the
 1869 SIGSTOP. Later, when the thread is resumed, a new syscall
 1870 entry event will be reported. If we didn't do this (and returned 0), we'd
1871 leave a syscall entry pending, and our caller, by using
1872 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
1873 itself. Later, when the user re-resumes this LWP, we'd see
1874 another syscall entry event and we'd mistake it for a return.
1875
1876 If stop_wait_callback didn't force the SIGSTOP out of the LWP
1877 (leaving immediately with LWP->signalled set, without issuing
1878 a PTRACE_CONT), it would still be problematic to leave this
1879 syscall enter pending, as later when the thread is resumed,
1880 it would then see the same syscall exit mentioned above,
1881 followed by the delayed SIGSTOP, while the syscall didn't
1882 actually get to execute. It seems it would be even more
1883 confusing to the user. */
1884
9327494e
SM
1885 linux_nat_debug_printf
1886 ("ignoring syscall %d for LWP %ld (stopping threads), resuming with "
1887 "PTRACE_CONT for SIGSTOP", syscall_number, lp->ptid.lwp ());
ca2163eb
PA
1888
1889 lp->syscall_state = TARGET_WAITKIND_IGNORE;
e38504b3 1890 ptrace (PTRACE_CONT, lp->ptid.lwp (), 0, 0);
8817a6f2 1891 lp->stopped = 0;
ca2163eb
PA
1892 return 1;
1893 }
1894
bfd09d20
JS
1895 /* Always update the entry/return state, even if this particular
1896 syscall isn't interesting to the core now. In async mode,
1897 the user could install a new catchpoint for this syscall
1898 between syscall enter/return, and we'll need to know to
1899 report a syscall return if that happens. */
1900 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1901 ? TARGET_WAITKIND_SYSCALL_RETURN
1902 : TARGET_WAITKIND_SYSCALL_ENTRY);
1903
ca2163eb
PA
1904 if (catch_syscall_enabled ())
1905 {
ca2163eb
PA
1906 if (catching_syscall_number (syscall_number))
1907 {
1908 /* Alright, an event to report. */
183be222
SM
1909 if (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
1910 ourstatus->set_syscall_entry (syscall_number);
1911 else if (lp->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
1912 ourstatus->set_syscall_return (syscall_number);
1913 else
1914 gdb_assert_not_reached ("unexpected syscall state");
ca2163eb 1915
9327494e
SM
1916 linux_nat_debug_printf
1917 ("stopping for %s of syscall %d for LWP %ld",
1918 (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1919 ? "entry" : "return"), syscall_number, lp->ptid.lwp ());
1920
ca2163eb
PA
1921 return 0;
1922 }
1923
9327494e
SM
1924 linux_nat_debug_printf
1925 ("ignoring %s of syscall %d for LWP %ld",
1926 (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1927 ? "entry" : "return"), syscall_number, lp->ptid.lwp ());
ca2163eb
PA
1928 }
1929 else
1930 {
1931 /* If we had been syscall tracing, and hence used PT_SYSCALL
1932 before on this LWP, it could happen that the user removes all
1933 syscall catchpoints before we get to process this event.
1934 There are two noteworthy issues here:
1935
1936 - When stopped at a syscall entry event, resuming with
1937 PT_STEP still resumes executing the syscall and reports a
1938 syscall return.
1939
 1940 - Only PT_SYSCALL catches syscall enters. If we last
 1941 single-stepped this thread, then this event can't be a
 1942 syscall enter; having just stepped, the only possibility
 1943 is that this is a syscall exit.
1944
1945 The points above mean that the next resume, be it PT_STEP or
1946 PT_CONTINUE, can not trigger a syscall trace event. */
9327494e
SM
1947 linux_nat_debug_printf
1948 ("caught syscall event with no syscall catchpoints. %d for LWP %ld, "
1949 "ignoring", syscall_number, lp->ptid.lwp ());
ca2163eb
PA
1950 lp->syscall_state = TARGET_WAITKIND_IGNORE;
1951 }
1952
1953 /* The core isn't interested in this event. For efficiency, avoid
1954 stopping all threads only to have the core resume them all again.
1955 Since we're not stopping threads, if we're still syscall tracing
1956 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
1957 subsequent syscall. Simply resume using the inf-ptrace layer,
1958 which knows when to use PT_SYSCALL or PT_CONTINUE. */
1959
8a99810d 1960 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
ca2163eb
PA
1961 return 1;
1962}
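
For context on the entry/return toggling above: with PTRACE_O_TRACESYSGOOD,
a syscall stop is reported as SIGTRAP | 0x80 (SYSCALL_SIGTRAP in this file)
both on entry and on exit, and the wait status itself carries nothing that
distinguishes the two.  The only reliable way to tell them apart is to count
stops per LWP - entry, exit, entry, exit - which is what the lp->syscall_state
toggle implements.
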
1963
0d36baa9
PA
1964/* See target.h. */
1965
1966void
1967linux_nat_target::follow_clone (ptid_t child_ptid)
1968{
1969 lwp_info *new_lp = add_lwp (child_ptid);
1970 new_lp->stopped = 1;
1971
1972 /* If the thread_db layer is active, let it record the user
1973 level thread id and status, and add the thread to GDB's
1974 list. */
1975 if (!thread_db_notice_clone (inferior_ptid, new_lp->ptid))
1976 {
1977 /* The process is not using thread_db. Add the LWP to
1978 GDB's list. */
1979 add_thread (linux_target, new_lp->ptid);
1980 }
1981
1982 /* We just created NEW_LP so it cannot yet contain STATUS. */
1983 gdb_assert (new_lp->status == 0);
1984
1985 if (!pull_pid_from_list (&stopped_pids, child_ptid.lwp (), &new_lp->status))
1986 internal_error (_("no saved status for clone lwp"));
1987
1988 if (WSTOPSIG (new_lp->status) != SIGSTOP)
1989 {
 1990 /* This can happen if someone starts sending signals to
 1991 the new thread before it gets a chance to run, and those
 1992 signals have a lower number than SIGSTOP (e.g. SIGUSR1).
1993 This is an unlikely case, and harder to handle for
1994 fork / vfork than for clone, so we do not try - but
1995 we handle it for clone events here. */
1996
1997 new_lp->signalled = 1;
1998
1999 /* Save the wait status to report later. */
2000 linux_nat_debug_printf
2001 ("waitpid of new LWP %ld, saving status %s",
2002 (long) new_lp->ptid.lwp (), status_to_str (new_lp->status).c_str ());
2003 }
2004 else
2005 {
2006 new_lp->status = 0;
2007
2008 if (report_thread_events)
2009 new_lp->waitstatus.set_thread_created ();
2010 }
2011}
2012
3d799a95
DJ
2013/* Handle a GNU/Linux extended wait response. If we see a clone
2014 event, we need to add the new LWP to our list (and not report the
2015 trap to higher layers). This function returns non-zero if the
2016 event should be ignored and we should wait again. If STOPPING is
2017 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
2018
2019static int
4dd63d48 2020linux_handle_extended_wait (struct lwp_info *lp, int status)
d6b0e80f 2021{
e38504b3 2022 int pid = lp->ptid.lwp ();
3d799a95 2023 struct target_waitstatus *ourstatus = &lp->waitstatus;
89a5711c 2024 int event = linux_ptrace_get_extended_event (status);
d6b0e80f 2025
bfd09d20
JS
2026 /* All extended events we currently use are mid-syscall. Only
2027 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
2028 you have to be using PTRACE_SEIZE to get that. */
2029 lp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
2030
3d799a95
DJ
2031 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
2032 || event == PTRACE_EVENT_CLONE)
d6b0e80f 2033 {
3d799a95
DJ
2034 unsigned long new_pid;
2035 int ret;
2036
2037 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 2038
3d799a95
DJ
2039 /* If we haven't already seen the new PID stop, wait for it now. */
2040 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
2041 {
2042 /* The new child has a pending SIGSTOP. We can't affect it until it
2043 hits the SIGSTOP, but we're already attached. */
4a6ed09b 2044 ret = my_waitpid (new_pid, &status, __WALL);
3d799a95
DJ
2045 if (ret == -1)
2046 perror_with_name (_("waiting for new child"));
2047 else if (ret != new_pid)
f34652de 2048 internal_error (_("wait returned unexpected PID %d"), ret);
3d799a95 2049 else if (!WIFSTOPPED (status))
f34652de 2050 internal_error (_("wait returned unexpected status 0x%x"), status);
3d799a95
DJ
2051 }
2052
26cb8b7c
PA
2053 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
2054 {
0d36baa9 2055 open_proc_mem_file (ptid_t (new_pid, new_pid));
8a89ddbd 2056
26cb8b7c
PA
2057 /* The arch-specific native code may need to know about new
2058 forks even if those end up never mapped to an
2059 inferior. */
135340af 2060 linux_target->low_new_fork (lp, new_pid);
26cb8b7c 2061 }
1310c1b0
PFC
2062 else if (event == PTRACE_EVENT_CLONE)
2063 {
2064 linux_target->low_new_clone (lp, new_pid);
2065 }
26cb8b7c 2066
2277426b 2067 if (event == PTRACE_EVENT_FORK
e99b03dc 2068 && linux_fork_checkpointing_p (lp->ptid.pid ()))
2277426b 2069 {
2277426b
PA
2070 /* Handle checkpointing by linux-fork.c here as a special
2071 case. We don't want the follow-fork-mode or 'catch fork'
2072 to interfere with this. */
2073
2074 /* This won't actually modify the breakpoint list, but will
2075 physically remove the breakpoints from the child. */
184ea2f7 2076 detach_breakpoints (ptid_t (new_pid, new_pid));
2277426b
PA
2077
2078 /* Retain child fork in ptrace (stopped) state. */
14571dad
MS
2079 if (!find_fork_pid (new_pid))
2080 add_fork (new_pid);
2277426b
PA
2081
2082 /* Report as spurious, so that infrun doesn't want to follow
2083 this fork. We're actually doing an infcall in
2084 linux-fork.c. */
183be222 2085 ourstatus->set_spurious ();
2277426b
PA
2086
2087 /* Report the stop to the core. */
2088 return 0;
2089 }
2090
3d799a95 2091 if (event == PTRACE_EVENT_FORK)
0d36baa9 2092 ourstatus->set_forked (ptid_t (new_pid, new_pid));
3d799a95 2093 else if (event == PTRACE_EVENT_VFORK)
0d36baa9 2094 ourstatus->set_vforked (ptid_t (new_pid, new_pid));
4dd63d48 2095 else if (event == PTRACE_EVENT_CLONE)
3d799a95 2096 {
9327494e
SM
2097 linux_nat_debug_printf
2098 ("Got clone event from LWP %d, new child is LWP %ld", pid, new_pid);
3c4d7e12 2099
0d36baa9
PA
2100 /* Save the status again, we'll use it in follow_clone. */
2101 add_to_pid_list (&stopped_pids, new_pid, status);
4dd63d48 2102
0d36baa9 2103 ourstatus->set_thread_cloned (ptid_t (lp->ptid.pid (), new_pid));
3d799a95
DJ
2104 }
2105
2106 return 0;
d6b0e80f
AC
2107 }
2108
3d799a95
DJ
2109 if (event == PTRACE_EVENT_EXEC)
2110 {
9327494e 2111 linux_nat_debug_printf ("Got exec event from LWP %ld", lp->ptid.lwp ());
a75724bc 2112
8a89ddbd
PA
2113 /* Close the previous /proc/PID/mem file for this inferior,
2114 which was using the address space which is now gone.
2115 Reading/writing from this file would return 0/EOF. */
2116 close_proc_mem_file (lp->ptid.pid ());
2117
2118 /* Open a new file for the new address space. */
2119 open_proc_mem_file (lp->ptid);
05c06f31 2120
183be222
SM
2121 ourstatus->set_execd
2122 (make_unique_xstrdup (linux_proc_pid_to_exec_file (pid)));
3d799a95 2123
8af756ef
PA
2124 /* The thread that execed must have been resumed, but, when a
2125 thread execs, it changes its tid to the tgid, and the old
 2126 tgid thread might not have been resumed. */
2127 lp->resumed = 1;
6a534f85
PA
2128
2129 /* All other LWPs are gone now. We'll have received a thread
 2130 exit notification for all threads other than the execing one.
2131 That one, if it wasn't the leader, just silently changes its
2132 tid to the tgid, and the previous leader vanishes. Since
2133 Linux 3.0, the former thread ID can be retrieved with
2134 PTRACE_GETEVENTMSG, but since we support older kernels, don't
2135 bother with it, and just walk the LWP list. Even with
2136 PTRACE_GETEVENTMSG, we'd still need to lookup the
2137 corresponding LWP object, and it would be an extra ptrace
2138 syscall, so this way may even be more efficient. */
2139 for (lwp_info *other_lp : all_lwps_safe ())
2140 if (other_lp != lp && other_lp->ptid.pid () == lp->ptid.pid ())
2141 exit_lwp (other_lp);
2142
6c95b8df
PA
2143 return 0;
2144 }
2145
2146 if (event == PTRACE_EVENT_VFORK_DONE)
2147 {
9327494e 2148 linux_nat_debug_printf
5a0c4a06
SM
2149 ("Got PTRACE_EVENT_VFORK_DONE from LWP %ld",
2150 lp->ptid.lwp ());
2151 ourstatus->set_vfork_done ();
2152 return 0;
3d799a95
DJ
2153 }
2154
f34652de 2155 internal_error (_("unknown ptrace event %d"), event);
d6b0e80f
AC
2156}
2157
9c3a5d93
PA
2158/* Suspend waiting for a signal. We're mostly interested in
2159 SIGCHLD/SIGINT. */
2160
2161static void
2162wait_for_signal ()
2163{
9327494e 2164 linux_nat_debug_printf ("about to sigsuspend");
9c3a5d93
PA
2165 sigsuspend (&suspend_mask);
2166
2167 /* If the quit flag is set, it means that the user pressed Ctrl-C
2168 and we're debugging a process that is running on a separate
2169 terminal, so we must forward the Ctrl-C to the inferior. (If the
2170 inferior is sharing GDB's terminal, then the Ctrl-C reaches the
2171 inferior directly.) We must do this here because functions that
2172 need to block waiting for a signal loop forever until there's an
2173 event to report before returning back to the event loop. */
2174 if (!target_terminal::is_ours ())
2175 {
2176 if (check_quit_flag ())
2177 target_pass_ctrlc ();
2178 }
2179}
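
A note on the sigsuspend pattern used above: sigsuspend atomically installs
SUSPEND_MASK and waits for a signal, restoring the previous mask when it
returns.  Because the mask swap and the wait happen as one step, a SIGCHLD
that arrives between the caller's last waitpid poll and this call is not
lost; it is delivered as soon as sigsuspend unblocks it.  (SUSPEND_MASK is
set up elsewhere in this file to leave SIGCHLD unblocked.)
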
2180
3d2d2172
PA
2181/* Mark LWP dead, with STATUS as exit status pending to report
2182 later. */
2183
2184static void
2185mark_lwp_dead (lwp_info *lp, int status)
2186{
 2187 /* Store the exit status in lp->waitstatus, because lp->status would be
 2188 ambiguous (W_EXITCODE(0,0) == 0). */
2189 lp->waitstatus = host_status_to_waitstatus (status);
2190
2191 /* If we're processing LP's status, there should be no other event
2192 already recorded as pending. */
2193 gdb_assert (lp->status == 0);
2194
2195 /* Dead LWPs aren't expected to report a pending sigstop. */
2196 lp->signalled = 0;
2197
2198 /* Prevent trying to stop it. */
2199 lp->stopped = 1;
2200}
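
To make the ambiguity mentioned above concrete: W_EXITCODE(ret, sig) expands
to roughly ((ret) << 8 | (sig)), so a normal exit with code 0 and no signal
encodes to the integer 0 - the same value lp->status uses to mean "no event
pending".  Storing the exit in lp->waitstatus, which carries an explicit
kind, avoids conflating the two.
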
2201
5e86aab8
PA
2202/* Return true if LP is dead, with a pending exit/signalled event. */
2203
2204static bool
2205is_lwp_marked_dead (lwp_info *lp)
2206{
2207 switch (lp->waitstatus.kind ())
2208 {
2209 case TARGET_WAITKIND_EXITED:
2210 case TARGET_WAITKIND_THREAD_EXITED:
2211 case TARGET_WAITKIND_SIGNALLED:
2212 return true;
2213 }
2214 return false;
2215}
2216
d6b0e80f
AC
2217/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2218 exited. */
2219
2220static int
2221wait_lwp (struct lwp_info *lp)
2222{
2223 pid_t pid;
432b4d03 2224 int status = 0;
d6b0e80f 2225 int thread_dead = 0;
432b4d03 2226 sigset_t prev_mask;
d6b0e80f
AC
2227
2228 gdb_assert (!lp->stopped);
2229 gdb_assert (lp->status == 0);
2230
432b4d03
JK
2231 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2232 block_child_signals (&prev_mask);
2233
2234 for (;;)
d6b0e80f 2235 {
e38504b3 2236 pid = my_waitpid (lp->ptid.lwp (), &status, __WALL | WNOHANG);
a9f4bb21
PA
2237 if (pid == -1 && errno == ECHILD)
2238 {
2239 /* The thread has previously exited. We need to delete it
4a6ed09b
PA
2240 now because if this was a non-leader thread execing, we
2241 won't get an exit event. See comments on exec events at
2242 the top of the file. */
a9f4bb21 2243 thread_dead = 1;
9327494e 2244 linux_nat_debug_printf ("%s vanished.",
e53c95d4 2245 lp->ptid.to_string ().c_str ());
a9f4bb21 2246 }
432b4d03
JK
2247 if (pid != 0)
2248 break;
2249
2250 /* Bugs 10970, 12702.
2251 Thread group leader may have exited in which case we'll lock up in
2252 waitpid if there are other threads, even if they are all zombies too.
2253 Basically, we're not supposed to use waitpid this way.
4a6ed09b
PA
 2254 tkill(pid,0) cannot be used here as it gets ESRCH for both
 2255 zombie and running processes.
432b4d03
JK
2256
2257 As a workaround, check if we're waiting for the thread group leader and
2258 if it's a zombie, and avoid calling waitpid if it is.
2259
 2260 This is racy: what if the tgl becomes a zombie right after we check?
 2261 Therefore always use WNOHANG with sigsuspend - it is equivalent to
 2262 a blocking waitpid, but linux_proc_pid_is_zombie stays safe this way. */
432b4d03 2263
e38504b3
TT
2264 if (lp->ptid.pid () == lp->ptid.lwp ()
2265 && linux_proc_pid_is_zombie (lp->ptid.lwp ()))
d6b0e80f 2266 {
d6b0e80f 2267 thread_dead = 1;
9327494e 2268 linux_nat_debug_printf ("Thread group leader %s vanished.",
e53c95d4 2269 lp->ptid.to_string ().c_str ());
432b4d03 2270 break;
d6b0e80f 2271 }
432b4d03
JK
2272
2273 /* Wait for next SIGCHLD and try again. This may let SIGCHLD handlers
 2274 get invoked even though our caller intentionally blocked them with
 2275 block_child_signals. This matters only to the loop in
 2276 linux_nat_wait_1, and there, if we get called, my_waitpid gets called
 2277 again before it gets to sigsuspend, so we can safely let the handlers
 2278 get executed here. */
9c3a5d93 2279 wait_for_signal ();
432b4d03
JK
2280 }
2281
2282 restore_child_signals_mask (&prev_mask);
2283
d6b0e80f
AC
2284 if (!thread_dead)
2285 {
e38504b3 2286 gdb_assert (pid == lp->ptid.lwp ());
d6b0e80f 2287
9327494e 2288 linux_nat_debug_printf ("waitpid %s received %s",
e53c95d4 2289 lp->ptid.to_string ().c_str (),
8d06918f 2290 status_to_str (status).c_str ());
d6b0e80f 2291
a9f4bb21
PA
2292 /* Check if the thread has exited. */
2293 if (WIFEXITED (status) || WIFSIGNALED (status))
2294 {
a51e14ef 2295 if (report_exit_events_for (lp) || is_leader (lp))
69dde7dc 2296 {
9327494e 2297 linux_nat_debug_printf ("LWP %d exited.", lp->ptid.pid ());
69dde7dc 2298
aa01bd36 2299 /* If this is the leader exiting, it means the whole
69dde7dc 2300 process is gone. Store the status to report to the
3d2d2172
PA
2301 core. */
2302 mark_lwp_dead (lp, status);
69dde7dc
PA
2303 return 0;
2304 }
2305
a9f4bb21 2306 thread_dead = 1;
9327494e 2307 linux_nat_debug_printf ("%s exited.",
e53c95d4 2308 lp->ptid.to_string ().c_str ());
a9f4bb21 2309 }
d6b0e80f
AC
2310 }
2311
2312 if (thread_dead)
2313 {
e26af52f 2314 exit_lwp (lp);
d6b0e80f
AC
2315 return 0;
2316 }
2317
2318 gdb_assert (WIFSTOPPED (status));
8817a6f2 2319 lp->stopped = 1;
d6b0e80f 2320
8784d563
PA
2321 if (lp->must_set_ptrace_flags)
2322 {
5b6d1e4f 2323 inferior *inf = find_inferior_pid (linux_target, lp->ptid.pid ());
de0d863e 2324 int options = linux_nat_ptrace_options (inf->attach_flag);
8784d563 2325
e38504b3 2326 linux_enable_event_reporting (lp->ptid.lwp (), options);
8784d563
PA
2327 lp->must_set_ptrace_flags = 0;
2328 }
2329
ca2163eb
PA
2330 /* Handle GNU/Linux's syscall SIGTRAPs. */
2331 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2332 {
2333 /* No longer need the sysgood bit. The ptrace event ends up
2334 recorded in lp->waitstatus if we care for it. We can carry
2335 on handling the event like a regular SIGTRAP from here
2336 on. */
2337 status = W_STOPCODE (SIGTRAP);
2338 if (linux_handle_syscall_trap (lp, 1))
2339 return wait_lwp (lp);
2340 }
bfd09d20
JS
2341 else
2342 {
2343 /* Almost all other ptrace-stops are known to be outside of system
2344 calls, with further exceptions in linux_handle_extended_wait. */
2345 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2346 }
ca2163eb 2347
d6b0e80f 2348 /* Handle GNU/Linux's extended waitstatus for trace events. */
89a5711c
DB
2349 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2350 && linux_is_extended_waitstatus (status))
d6b0e80f 2351 {
9327494e 2352 linux_nat_debug_printf ("Handling extended status 0x%06x", status);
4dd63d48 2353 linux_handle_extended_wait (lp, status);
20ba1ce6 2354 return 0;
d6b0e80f
AC
2355 }
2356
2357 return status;
2358}
2359
2360/* Send a SIGSTOP to LP. */
2361
2362static int
d3a70e03 2363stop_callback (struct lwp_info *lp)
d6b0e80f
AC
2364{
2365 if (!lp->stopped && !lp->signalled)
2366 {
2367 int ret;
2368
9327494e 2369 linux_nat_debug_printf ("kill %s **<SIGSTOP>**",
e53c95d4 2370 lp->ptid.to_string ().c_str ());
9327494e 2371
d6b0e80f 2372 errno = 0;
e38504b3 2373 ret = kill_lwp (lp->ptid.lwp (), SIGSTOP);
9327494e 2374 linux_nat_debug_printf ("lwp kill %d %s", ret,
d6b0e80f 2375 errno ? safe_strerror (errno) : "ERRNO-OK");
d6b0e80f
AC
2376
2377 lp->signalled = 1;
2378 gdb_assert (lp->status == 0);
2379 }
2380
2381 return 0;
2382}
2383
7b50312a
PA
2384/* Request a stop on LWP. */
2385
2386void
2387linux_stop_lwp (struct lwp_info *lwp)
2388{
d3a70e03 2389 stop_callback (lwp);
7b50312a
PA
2390}
2391
2db9a427
PA
2392/* See linux-nat.h */
2393
2394void
2395linux_stop_and_wait_all_lwps (void)
2396{
2397 /* Stop all LWP's ... */
d3a70e03 2398 iterate_over_lwps (minus_one_ptid, stop_callback);
2db9a427
PA
2399
2400 /* ... and wait until all of them have reported back that
2401 they're no longer running. */
d3a70e03 2402 iterate_over_lwps (minus_one_ptid, stop_wait_callback);
2db9a427
PA
2403}
2404
2405/* See linux-nat.h */
2406
2407void
2408linux_unstop_all_lwps (void)
2409{
2410 iterate_over_lwps (minus_one_ptid,
d3a70e03
TT
2411 [] (struct lwp_info *info)
2412 {
2413 return resume_stopped_resumed_lwps (info, minus_one_ptid);
2414 });
2db9a427
PA
2415}
2416
57380f4e 2417/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2418
2419static int
57380f4e
DJ
2420linux_nat_has_pending_sigint (int pid)
2421{
2422 sigset_t pending, blocked, ignored;
57380f4e
DJ
2423
2424 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2425
2426 if (sigismember (&pending, SIGINT)
2427 && !sigismember (&ignored, SIGINT))
2428 return 1;
2429
2430 return 0;
2431}
2432
2433/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2434
2435static int
d3a70e03 2436set_ignore_sigint (struct lwp_info *lp)
d6b0e80f 2437{
57380f4e
DJ
2438 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2439 flag to consume the next one. */
2440 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2441 && WSTOPSIG (lp->status) == SIGINT)
2442 lp->status = 0;
2443 else
2444 lp->ignore_sigint = 1;
2445
2446 return 0;
2447}
2448
2449/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2450 This function is called after we know the LWP has stopped; if the LWP
2451 stopped before the expected SIGINT was delivered, then it will never have
2452 arrived. Also, if the signal was delivered to a shared queue and consumed
2453 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2454
57380f4e
DJ
2455static void
2456maybe_clear_ignore_sigint (struct lwp_info *lp)
2457{
2458 if (!lp->ignore_sigint)
2459 return;
2460
e38504b3 2461 if (!linux_nat_has_pending_sigint (lp->ptid.lwp ()))
57380f4e 2462 {
9327494e 2463 linux_nat_debug_printf ("Clearing bogus flag for %s",
e53c95d4 2464 lp->ptid.to_string ().c_str ());
57380f4e
DJ
2465 lp->ignore_sigint = 0;
2466 }
2467}
2468
ebec9a0f
PA
2469/* Fetch the possible triggered data watchpoint info and store it in
2470 LP.
2471
2472 On some archs, like x86, that use debug registers to set
2473 watchpoints, it's possible that the way to know which watched
 2474 address trapped is to check the register that is used to select
2475 which address to watch. Problem is, between setting the watchpoint
2476 and reading back which data address trapped, the user may change
2477 the set of watchpoints, and, as a consequence, GDB changes the
2478 debug registers in the inferior. To avoid reading back a stale
2479 stopped-data-address when that happens, we cache in LP the fact
2480 that a watchpoint trapped, and the corresponding data address, as
2481 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2482 registers meanwhile, we have the cached data we can rely on. */
2483
9c02b525
PA
2484static int
2485check_stopped_by_watchpoint (struct lwp_info *lp)
ebec9a0f 2486{
2989a365 2487 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
ebec9a0f
PA
2488 inferior_ptid = lp->ptid;
2489
f6ac5f3d 2490 if (linux_target->low_stopped_by_watchpoint ())
ebec9a0f 2491 {
15c66dd6 2492 lp->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
f6ac5f3d
PA
2493 lp->stopped_data_address_p
2494 = linux_target->low_stopped_data_address (&lp->stopped_data_address);
ebec9a0f
PA
2495 }
2496
15c66dd6 2497 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
9c02b525
PA
2498}
2499
9c02b525 2500/* Returns true if the LWP had stopped for a watchpoint. */
ebec9a0f 2501
57810aa7 2502bool
f6ac5f3d 2503linux_nat_target::stopped_by_watchpoint ()
ebec9a0f
PA
2504{
2505 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2506
2507 gdb_assert (lp != NULL);
2508
15c66dd6 2509 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
ebec9a0f
PA
2510}
2511
57810aa7 2512bool
f6ac5f3d 2513linux_nat_target::stopped_data_address (CORE_ADDR *addr_p)
ebec9a0f
PA
2514{
2515 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2516
2517 gdb_assert (lp != NULL);
2518
2519 *addr_p = lp->stopped_data_address;
2520
2521 return lp->stopped_data_address_p;
2522}
2523
26ab7092
JK
 2524/* Commonly, any breakpoint / watchpoint generates only SIGTRAP. */
2525
135340af
PA
2526bool
2527linux_nat_target::low_status_is_event (int status)
26ab7092
JK
2528{
2529 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2530}
2531
57380f4e
DJ
2532/* Wait until LP is stopped. */
2533
2534static int
d3a70e03 2535stop_wait_callback (struct lwp_info *lp)
57380f4e 2536{
5b6d1e4f 2537 inferior *inf = find_inferior_ptid (linux_target, lp->ptid);
6c95b8df
PA
2538
 2539 /* If this is a vfork parent, bail out; it is not going to report
 2540 any SIGSTOP until the vfork is over. */
2541 if (inf->vfork_child != NULL)
2542 return 0;
2543
d6b0e80f
AC
2544 if (!lp->stopped)
2545 {
2546 int status;
2547
2548 status = wait_lwp (lp);
2549 if (status == 0)
2550 return 0;
2551
57380f4e
DJ
2552 if (lp->ignore_sigint && WIFSTOPPED (status)
2553 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2554 {
57380f4e 2555 lp->ignore_sigint = 0;
d6b0e80f
AC
2556
2557 errno = 0;
e38504b3 2558 ptrace (PTRACE_CONT, lp->ptid.lwp (), 0, 0);
8817a6f2 2559 lp->stopped = 0;
9327494e
SM
2560 linux_nat_debug_printf
2561 ("PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)",
e53c95d4 2562 lp->ptid.to_string ().c_str (),
9327494e 2563 errno ? safe_strerror (errno) : "OK");
d6b0e80f 2564
d3a70e03 2565 return stop_wait_callback (lp);
d6b0e80f
AC
2566 }
2567
57380f4e
DJ
2568 maybe_clear_ignore_sigint (lp);
2569
d6b0e80f
AC
2570 if (WSTOPSIG (status) != SIGSTOP)
2571 {
e5ef252a 2572 /* The thread was stopped with a signal other than SIGSTOP. */
7feb7d06 2573
9327494e 2574 linux_nat_debug_printf ("Pending event %s in %s",
8d06918f 2575 status_to_str ((int) status).c_str (),
e53c95d4 2576 lp->ptid.to_string ().c_str ());
e5ef252a
PA
2577
2578 /* Save the sigtrap event. */
2579 lp->status = status;
e5ef252a 2580 gdb_assert (lp->signalled);
e7ad2f14 2581 save_stop_reason (lp);
d6b0e80f
AC
2582 }
2583 else
2584 {
7010835a 2585 /* We caught the SIGSTOP that we intended to catch. */
e5ef252a 2586
9327494e 2587 linux_nat_debug_printf ("Expected SIGSTOP caught for %s.",
e53c95d4 2588 lp->ptid.to_string ().c_str ());
e5ef252a 2589
d6b0e80f 2590 lp->signalled = 0;
7010835a
AB
2591
2592 /* If we are waiting for this stop so we can report the thread
2593 stopped then we need to record this status. Otherwise, we can
2594 now discard this stop event. */
2595 if (lp->last_resume_kind == resume_stop)
2596 {
2597 lp->status = status;
2598 save_stop_reason (lp);
2599 }
d6b0e80f
AC
2600 }
2601 }
2602
2603 return 0;
2604}
2605
74387712
SM
 2606/* Get the inferior associated with LWP. Must be called with an LWP that has
 2607 an associated inferior. Always returns non-nullptr. */
2608
2609static inferior *
2610lwp_inferior (const lwp_info *lwp)
2611{
2612 inferior *inf = find_inferior_ptid (linux_target, lwp->ptid);
2613 gdb_assert (inf != nullptr);
2614 return inf;
2615}
2616
9c02b525
PA
2617/* Return non-zero if LP has a wait status pending. Discard the
2618 pending event and resume the LWP if the event that originally
2619 caused the stop became uninteresting. */
d6b0e80f
AC
2620
2621static int
d3a70e03 2622status_callback (struct lwp_info *lp)
d6b0e80f
AC
2623{
2624 /* Only report a pending wait status if we pretend that this has
2625 indeed been resumed. */
ca2163eb
PA
2626 if (!lp->resumed)
2627 return 0;
2628
eb54c8bf
PA
2629 if (!lwp_status_pending_p (lp))
2630 return 0;
2631
15c66dd6
PA
2632 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
2633 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
9c02b525 2634 {
5b6d1e4f 2635 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
9c02b525
PA
2636 CORE_ADDR pc;
2637 int discard = 0;
2638
9c02b525
PA
2639 pc = regcache_read_pc (regcache);
2640
2641 if (pc != lp->stop_pc)
2642 {
9327494e 2643 linux_nat_debug_printf ("PC of %s changed. was=%s, now=%s",
e53c95d4 2644 lp->ptid.to_string ().c_str (),
99d9c3b9
SM
2645 paddress (current_inferior ()->arch (),
2646 lp->stop_pc),
2647 paddress (current_inferior ()->arch (), pc));
9c02b525
PA
2648 discard = 1;
2649 }
faf09f01 2650
9c02b525
PA
2651 if (discard)
2652 {
9327494e 2653 linux_nat_debug_printf ("pending event of %s cancelled.",
e53c95d4 2654 lp->ptid.to_string ().c_str ());
9c02b525
PA
2655
2656 lp->status = 0;
2657 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
2658 return 0;
2659 }
9c02b525
PA
2660 }
2661
eb54c8bf 2662 return 1;
d6b0e80f
AC
2663}
2664
d6b0e80f
AC
2665/* Count the LWP's that have had events. */
2666
2667static int
d3a70e03 2668count_events_callback (struct lwp_info *lp, int *count)
d6b0e80f 2669{
d6b0e80f
AC
2670 gdb_assert (count != NULL);
2671
9c02b525
PA
2672 /* Select only resumed LWPs that have an event pending. */
2673 if (lp->resumed && lwp_status_pending_p (lp))
d6b0e80f
AC
2674 (*count)++;
2675
2676 return 0;
2677}
2678
2679/* Select the LWP (if any) that is currently being single-stepped. */
2680
2681static int
d3a70e03 2682select_singlestep_lwp_callback (struct lwp_info *lp)
d6b0e80f 2683{
25289eb2
PA
2684 if (lp->last_resume_kind == resume_step
2685 && lp->status != 0)
d6b0e80f
AC
2686 return 1;
2687 else
2688 return 0;
2689}
2690
8a99810d
PA
2691/* Returns true if LP has a status pending. */
2692
2693static int
2694lwp_status_pending_p (struct lwp_info *lp)
2695{
2696 /* We check for lp->waitstatus in addition to lp->status, because we
2697 can have pending process exits recorded in lp->status and
2698 W_EXITCODE(0,0) happens to be 0. */
183be222 2699 return lp->status != 0 || lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE;
8a99810d
PA
2700}
2701
b90fc188 2702/* Select the Nth LWP that has had an event. */
d6b0e80f
AC
2703
2704static int
d3a70e03 2705select_event_lwp_callback (struct lwp_info *lp, int *selector)
d6b0e80f 2706{
d6b0e80f
AC
2707 gdb_assert (selector != NULL);
2708
9c02b525
PA
2709 /* Select only resumed LWPs that have an event pending. */
2710 if (lp->resumed && lwp_status_pending_p (lp))
d6b0e80f
AC
2711 if ((*selector)-- == 0)
2712 return 1;
2713
2714 return 0;
2715}
2716
e7ad2f14
PA
2717/* Called when the LWP stopped for a signal/trap. If it stopped for a
2718 trap check what caused it (breakpoint, watchpoint, trace, etc.),
2719 and save the result in the LWP's stop_reason field. If it stopped
2720 for a breakpoint, decrement the PC if necessary on the lwp's
2721 architecture. */
9c02b525 2722
e7ad2f14
PA
2723static void
2724save_stop_reason (struct lwp_info *lp)
710151dd 2725{
e7ad2f14
PA
2726 struct regcache *regcache;
2727 struct gdbarch *gdbarch;
515630c5 2728 CORE_ADDR pc;
9c02b525 2729 CORE_ADDR sw_bp_pc;
faf09f01 2730 siginfo_t siginfo;
9c02b525 2731
e7ad2f14
PA
2732 gdb_assert (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON);
2733 gdb_assert (lp->status != 0);
2734
135340af 2735 if (!linux_target->low_status_is_event (lp->status))
e7ad2f14
PA
2736 return;
2737
74387712 2738 inferior *inf = lwp_inferior (lp);
a9deee17
PA
2739 if (inf->starting_up)
2740 return;
2741
5b6d1e4f 2742 regcache = get_thread_regcache (linux_target, lp->ptid);
ac7936df 2743 gdbarch = regcache->arch ();
e7ad2f14 2744
9c02b525 2745 pc = regcache_read_pc (regcache);
527a273a 2746 sw_bp_pc = pc - gdbarch_decr_pc_after_break (gdbarch);
515630c5 2747
faf09f01
PA
2748 if (linux_nat_get_siginfo (lp->ptid, &siginfo))
2749 {
2750 if (siginfo.si_signo == SIGTRAP)
2751 {
e7ad2f14
PA
2752 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
2753 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
faf09f01 2754 {
e7ad2f14
PA
2755 /* The si_code is ambiguous on this arch -- check debug
2756 registers. */
2757 if (!check_stopped_by_watchpoint (lp))
2758 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
2759 }
2760 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
2761 {
2762 /* If we determine the LWP stopped for a SW breakpoint,
2763 trust it. Particularly don't check watchpoint
7da6a5b9 2764 registers, because, at least on s390, we'd find
e7ad2f14
PA
2765 stopped-by-watchpoint as long as there's a watchpoint
2766 set. */
faf09f01 2767 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
faf09f01 2768 }
e7ad2f14 2769 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
faf09f01 2770 {
e7ad2f14
PA
2771 /* This can indicate either a hardware breakpoint or
2772 hardware watchpoint. Check debug registers. */
2773 if (!check_stopped_by_watchpoint (lp))
2774 lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
faf09f01 2775 }
2bf6fb9d
PA
2776 else if (siginfo.si_code == TRAP_TRACE)
2777 {
9327494e 2778 linux_nat_debug_printf ("%s stopped by trace",
e53c95d4 2779 lp->ptid.to_string ().c_str ());
e7ad2f14
PA
2780
2781 /* We may have single stepped an instruction that
2782 triggered a watchpoint. In that case, on some
2783 architectures (such as x86), instead of TRAP_HWBKPT,
2784 si_code indicates TRAP_TRACE, and we need to check
2785 the debug registers separately. */
2786 check_stopped_by_watchpoint (lp);
2bf6fb9d 2787 }
faf09f01
PA
2788 }
2789 }
e7ad2f14
PA
2790
2791 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2792 {
9327494e 2793 linux_nat_debug_printf ("%s stopped by software breakpoint",
e53c95d4 2794 lp->ptid.to_string ().c_str ());
710151dd
PA
2795
2796 /* Back up the PC if necessary. */
9c02b525
PA
2797 if (pc != sw_bp_pc)
2798 regcache_write_pc (regcache, sw_bp_pc);
515630c5 2799
e7ad2f14
PA
2800 /* Update this so we record the correct stop PC below. */
2801 pc = sw_bp_pc;
710151dd 2802 }
e7ad2f14 2803 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
9c02b525 2804 {
9327494e 2805 linux_nat_debug_printf ("%s stopped by hardware breakpoint",
e53c95d4 2806 lp->ptid.to_string ().c_str ());
e7ad2f14
PA
2807 }
2808 else if (lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
2809 {
9327494e 2810 linux_nat_debug_printf ("%s stopped by hardware watchpoint",
e53c95d4 2811 lp->ptid.to_string ().c_str ());
9c02b525 2812 }
d6b0e80f 2813
e7ad2f14 2814 lp->stop_pc = pc;
d6b0e80f
AC
2815}
2816
faf09f01
PA
2817
2818/* Returns true if the LWP had stopped for a software breakpoint. */
2819
57810aa7 2820bool
f6ac5f3d 2821linux_nat_target::stopped_by_sw_breakpoint ()
faf09f01
PA
2822{
2823 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2824
2825 gdb_assert (lp != NULL);
2826
2827 return lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2828}
2829
2830/* Implement the supports_stopped_by_sw_breakpoint method. */
2831
57810aa7 2832bool
f6ac5f3d 2833linux_nat_target::supports_stopped_by_sw_breakpoint ()
faf09f01 2834{
5739a1b9 2835 return true;
faf09f01
PA
2836}
2837
2838/* Returns true if the LWP had stopped for a hardware
2839 breakpoint/watchpoint. */
2840
57810aa7 2841bool
f6ac5f3d 2842linux_nat_target::stopped_by_hw_breakpoint ()
faf09f01
PA
2843{
2844 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2845
2846 gdb_assert (lp != NULL);
2847
2848 return lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2849}
2850
2851/* Implement the supports_stopped_by_hw_breakpoint method. */
2852
57810aa7 2853bool
f6ac5f3d 2854linux_nat_target::supports_stopped_by_hw_breakpoint ()
faf09f01 2855{
5739a1b9 2856 return true;
faf09f01
PA
2857}
2858
d6b0e80f
AC
2859/* Select one LWP out of those that have events pending. */
2860
2861static void
d90e17a7 2862select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
d6b0e80f
AC
2863{
2864 int num_events = 0;
2865 int random_selector;
9c02b525 2866 struct lwp_info *event_lp = NULL;
d6b0e80f 2867
ac264b3b 2868 /* Record the wait status for the original LWP. */
d6b0e80f
AC
2869 (*orig_lp)->status = *status;
2870
9c02b525
PA
2871 /* In all-stop, give preference to the LWP that is being
2872 single-stepped. There will be at most one, and it will be the
2873 LWP that the core is most interested in. If we didn't do this,
2874 then we'd have to handle pending step SIGTRAPs somehow in case
2875 the core later continues the previously-stepped thread, as
2876 otherwise we'd report the pending SIGTRAP then, and the core, not
2877 having stepped the thread, wouldn't understand what the trap was
2878 for, and therefore would report it to the user as a random
2879 signal. */
fbea99ea 2880 if (!target_is_non_stop_p ())
d6b0e80f 2881 {
d3a70e03 2882 event_lp = iterate_over_lwps (filter, select_singlestep_lwp_callback);
9c02b525
PA
2883 if (event_lp != NULL)
2884 {
9327494e 2885 linux_nat_debug_printf ("Select single-step %s",
e53c95d4 2886 event_lp->ptid.to_string ().c_str ());
9c02b525 2887 }
d6b0e80f 2888 }
9c02b525
PA
2889
2890 if (event_lp == NULL)
d6b0e80f 2891 {
9c02b525 2892 /* Pick one at random, out of those which have had events. */
d6b0e80f 2893
9c02b525 2894 /* First see how many events we have. */
d3a70e03
TT
2895 iterate_over_lwps (filter,
2896 [&] (struct lwp_info *info)
2897 {
2898 return count_events_callback (info, &num_events);
2899 });
8bf3b159 2900 gdb_assert (num_events > 0);
d6b0e80f 2901
9c02b525
PA
2902 /* Now randomly pick a LWP out of those that have had
2903 events. */
d6b0e80f
AC
2904 random_selector = (int)
2905 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
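      /* The expression above maps rand () (uniform over [0, RAND_MAX])
         onto an integer uniform over [0, num_events - 1]: the product
         divided by RAND_MAX + 1.0 is a real number in [0, num_events),
         and the (int) cast truncates toward zero.  */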
2906
9327494e
SM
2907 if (num_events > 1)
2908 linux_nat_debug_printf ("Found %d events, selecting #%d",
2909 num_events, random_selector);
d6b0e80f 2910
d3a70e03
TT
2911 event_lp
2912 = (iterate_over_lwps
2913 (filter,
2914 [&] (struct lwp_info *info)
2915 {
2916 return select_event_lwp_callback (info,
2917 &random_selector);
2918 }));
d6b0e80f
AC
2919 }
2920
2921 if (event_lp != NULL)
2922 {
2923 /* Switch the event LWP. */
2924 *orig_lp = event_lp;
2925 *status = event_lp->status;
2926 }
2927
2928 /* Flush the wait status for the event LWP. */
2929 (*orig_lp)->status = 0;
2930}
2931
2932/* Return non-zero if LP has been resumed. */
2933
2934static int
d3a70e03 2935resumed_callback (struct lwp_info *lp)
d6b0e80f
AC
2936{
2937 return lp->resumed;
2938}
2939
02f3fc28 2940/* Check if we should go on and pass this event to common code.
12d9289a 2941
897608ed
SM
2942 If so, save the status to the lwp_info structure associated to LWPID. */
2943
2944static void
9c02b525 2945linux_nat_filter_event (int lwpid, int status)
02f3fc28
PA
2946{
2947 struct lwp_info *lp;
89a5711c 2948 int event = linux_ptrace_get_extended_event (status);
02f3fc28 2949
f2907e49 2950 lp = find_lwp_pid (ptid_t (lwpid));
02f3fc28 2951
1abeb1e9
PA
2952 /* Check for events reported by anything not in our LWP list. */
2953 if (lp == nullptr)
0e5bf2a8 2954 {
1abeb1e9
PA
2955 if (WIFSTOPPED (status))
2956 {
2957 if (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC)
2958 {
2959 /* A non-leader thread exec'ed after we've seen the
2960 leader zombie, and removed it from our lists (in
2961 check_zombie_leaders). The non-leader thread changes
2962 its tid to the tgid. */
2963 linux_nat_debug_printf
2964 ("Re-adding thread group leader LWP %d after exec.",
2965 lwpid);
0e5bf2a8 2966
1abeb1e9
PA
2967 lp = add_lwp (ptid_t (lwpid, lwpid));
2968 lp->stopped = 1;
2969 lp->resumed = 1;
2970 add_thread (linux_target, lp->ptid);
2971 }
2972 else
2973 {
2974 /* A process we are controlling has forked and the new
2975 child's stop was reported to us by the kernel. Save
2976 its PID and go back to waiting for the fork event to
2977 be reported - the stopped process might be returned
2978 from waitpid before or after the fork event is. */
2979 linux_nat_debug_printf
2980 ("Saving LWP %d status %s in stopped_pids list",
2981 lwpid, status_to_str (status).c_str ());
2982 add_to_pid_list (&stopped_pids, lwpid, status);
2983 }
2984 }
2985 else
2986 {
2987 /* Don't report an event for the exit of an LWP not in our
2988 list, i.e. not part of any inferior we're debugging.
2989 This can happen if we detach from a program we originally
6cf20c46
PA
2990 forked and then it exits. However, note that we may have
2991 earlier deleted a leader of an inferior we're debugging,
2992 in check_zombie_leaders. Re-add it back here if so. */
2993 for (inferior *inf : all_inferiors (linux_target))
2994 {
2995 if (inf->pid == lwpid)
2996 {
2997 linux_nat_debug_printf
2998 ("Re-adding thread group leader LWP %d after exit.",
2999 lwpid);
3000
3001 lp = add_lwp (ptid_t (lwpid, lwpid));
3002 lp->resumed = 1;
3003 add_thread (linux_target, lp->ptid);
3004 break;
3005 }
3006 }
1abeb1e9 3007 }
0e5bf2a8 3008
1abeb1e9
PA
3009 if (lp == nullptr)
3010 return;
02f3fc28
PA
3011 }
3012
8817a6f2
PA
3013 /* This LWP is stopped now. (And if dead, this prevents it from
3014 ever being continued.) */
3015 lp->stopped = 1;
3016
8784d563
PA
3017 if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
3018 {
5b6d1e4f 3019 inferior *inf = find_inferior_pid (linux_target, lp->ptid.pid ());
de0d863e 3020 int options = linux_nat_ptrace_options (inf->attach_flag);
8784d563 3021
e38504b3 3022 linux_enable_event_reporting (lp->ptid.lwp (), options);
8784d563
PA
3023 lp->must_set_ptrace_flags = 0;
3024 }
3025
ca2163eb
PA
3026 /* Handle GNU/Linux's syscall SIGTRAPs. */
3027 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
3028 {
3029 /* No longer need the sysgood bit. The ptrace event ends up
3030 recorded in lp->waitstatus if we care for it. We can carry
3031 on handling the event like a regular SIGTRAP from here
3032 on. */
3033 status = W_STOPCODE (SIGTRAP);
3034 if (linux_handle_syscall_trap (lp, 0))
897608ed 3035 return;
ca2163eb 3036 }
bfd09d20
JS
3037 else
3038 {
3039 /* Almost all other ptrace-stops are known to be outside of system
3040 calls, with further exceptions in linux_handle_extended_wait. */
3041 lp->syscall_state = TARGET_WAITKIND_IGNORE;
3042 }
02f3fc28 3043
ca2163eb 3044 /* Handle GNU/Linux's extended waitstatus for trace events. */
89a5711c
DB
3045 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
3046 && linux_is_extended_waitstatus (status))
02f3fc28 3047 {
9327494e
SM
3048 linux_nat_debug_printf ("Handling extended status 0x%06x", status);
3049
4dd63d48 3050 if (linux_handle_extended_wait (lp, status))
897608ed 3051 return;
02f3fc28
PA
3052 }
3053
3054 /* Check if the thread has exited. */
9c02b525
PA
3055 if (WIFEXITED (status) || WIFSIGNALED (status))
3056 {
a51e14ef 3057 if (!report_exit_events_for (lp) && !is_leader (lp))
02f3fc28 3058 {
9327494e 3059 linux_nat_debug_printf ("%s exited.",
e53c95d4 3060 lp->ptid.to_string ().c_str ());
9c02b525 3061
6cf20c46 3062 /* If this was not the leader exiting, then the exit signal
4a6ed09b
PA
3063 was not the end of the debugged application and should be
3064 ignored. */
3065 exit_lwp (lp);
897608ed 3066 return;
02f3fc28
PA
3067 }
3068
77598427
PA
3069 /* Note that even if the leader was ptrace-stopped, it can still
3070 exit, if e.g., some other thread brings down the whole
3071 process (calls `exit'). So don't assert that the lwp is
3072 resumed. */
9327494e
SM
3073 linux_nat_debug_printf ("LWP %ld exited (resumed=%d)",
3074 lp->ptid.lwp (), lp->resumed);
02f3fc28 3075
3d2d2172 3076 mark_lwp_dead (lp, status);
897608ed 3077 return;
02f3fc28
PA
3078 }
3079
02f3fc28
PA
3080 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3081 an attempt to stop an LWP. */
3082 if (lp->signalled
3083 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3084 {
02f3fc28
PA
3085 lp->signalled = 0;
3086
2bf6fb9d 3087 if (lp->last_resume_kind == resume_stop)
25289eb2 3088 {
9327494e 3089 linux_nat_debug_printf ("resume_stop SIGSTOP caught for %s.",
e53c95d4 3090 lp->ptid.to_string ().c_str ());
2bf6fb9d
PA
3091 }
3092 else
3093 {
3094 /* This is a delayed SIGSTOP. Filter out the event. */
02f3fc28 3095
9327494e
SM
3096 linux_nat_debug_printf
3097 ("%s %s, 0, 0 (discard delayed SIGSTOP)",
3098 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
e53c95d4 3099 lp->ptid.to_string ().c_str ());
02f3fc28 3100
2bf6fb9d 3101 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
25289eb2 3102 gdb_assert (lp->resumed);
897608ed 3103 return;
25289eb2 3104 }
02f3fc28
PA
3105 }
3106
57380f4e
DJ
3107 /* Make sure we don't report a SIGINT that we have already displayed
3108 for another thread. */
3109 if (lp->ignore_sigint
3110 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3111 {
9327494e 3112 linux_nat_debug_printf ("Delayed SIGINT caught for %s.",
e53c95d4 3113 lp->ptid.to_string ().c_str ());
57380f4e
DJ
3114
3115 /* This is a delayed SIGINT. */
3116 lp->ignore_sigint = 0;
3117
8a99810d 3118 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
9327494e
SM
3119 linux_nat_debug_printf ("%s %s, 0, 0 (discard SIGINT)",
3120 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
e53c95d4 3121 lp->ptid.to_string ().c_str ());
57380f4e
DJ
3122 gdb_assert (lp->resumed);
3123
3124 /* Discard the event. */
897608ed 3125 return;
57380f4e
DJ
3126 }
3127
9c02b525
PA
3128 /* Don't report signals that GDB isn't interested in, such as
3129 signals that are neither printed nor stopped upon. Stopping all
7da6a5b9 3130 threads can be a bit time-consuming, so if we want decent
9c02b525
PA
3131 performance with heavily multi-threaded programs, especially when
3132 they're using a high frequency timer, we'd better avoid it if we
3133 can. */
3134 if (WIFSTOPPED (status))
3135 {
3136 enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
3137
fbea99ea 3138 if (!target_is_non_stop_p ())
9c02b525
PA
3139 {
3140 /* Only do the below in all-stop, as we currently use SIGSTOP
3141 to implement target_stop (see linux_nat_stop) in
3142 non-stop. */
3143 if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
3144 {
3145 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3146 forwarded to the entire process group, that is, all LWPs
3147 will receive it - unless they're using CLONE_THREAD to
3148 share signals. Since we only want to report it once, we
3149 mark it as ignored for all LWPs except this one. */
d3a70e03 3150 iterate_over_lwps (ptid_t (lp->ptid.pid ()), set_ignore_sigint);
9c02b525
PA
3151 lp->ignore_sigint = 0;
3152 }
3153 else
3154 maybe_clear_ignore_sigint (lp);
3155 }
3156
3157 /* When using hardware single-step, we need to report every signal.
c9587f88 3158 Otherwise, signals in pass_mask may be short-circuited
d8c06f22
AB
3159 except signals that might be caused by a breakpoint, or SIGSTOP
3160 if we sent the SIGSTOP and are waiting for it to arrive. */
9c02b525 3161 if (!lp->step
c9587f88 3162 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status))
d8c06f22 3163 && (WSTOPSIG (status) != SIGSTOP
9213a6d7 3164 || !linux_target->find_thread (lp->ptid)->stop_requested)
c9587f88 3165 && !linux_wstatus_maybe_breakpoint (status))
9c02b525
PA
3166 {
3167 linux_resume_one_lwp (lp, lp->step, signo);
9327494e
SM
3168 linux_nat_debug_printf
3169 ("%s %s, %s (preempt 'handle')",
3170 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
e53c95d4 3171 lp->ptid.to_string ().c_str (),
9327494e
SM
3172 (signo != GDB_SIGNAL_0
3173 ? strsignal (gdb_signal_to_host (signo)) : "0"));
897608ed 3174 return;
9c02b525
PA
3175 }
3176 }
3177
02f3fc28
PA
3178 /* An interesting event. */
3179 gdb_assert (lp);
ca2163eb 3180 lp->status = status;
e7ad2f14 3181 save_stop_reason (lp);
02f3fc28
PA
3182}
3183
0e5bf2a8
PA
3184/* Detect zombie thread group leaders, and "exit" them. We can't reap
3185 their exits until all other threads in the group have exited. */
3186
3187static void
3188check_zombie_leaders (void)
3189{
08036331 3190 for (inferior *inf : all_inferiors ())
0e5bf2a8
PA
3191 {
3192 struct lwp_info *leader_lp;
3193
3194 if (inf->pid == 0)
3195 continue;
3196
f2907e49 3197 leader_lp = find_lwp_pid (ptid_t (inf->pid));
0e5bf2a8
PA
3198 if (leader_lp != NULL
3199 /* Check if there are other threads in the group, as we may
6cf20c46
PA
3200 have raced with the inferior simply exiting. Note this
3201 isn't a watertight check. If the inferior is
3202 multi-threaded and is exiting, it may be we see the
3203 leader as zombie before we reap all the non-leader
3204 threads. See comments below. */
0e5bf2a8 3205 && num_lwps (inf->pid) > 1
5f572dec 3206 && linux_proc_pid_is_zombie (inf->pid))
0e5bf2a8 3207 {
6cf20c46
PA
3208 /* A zombie leader in a multi-threaded program can mean one
3209 of three things:
3210
3211 #1 - Only the leader exited, not the whole program, e.g.,
3212 with pthread_exit. Since we can't reap the leader's exit
3213 status until all other threads are gone and reaped too,
3214 we want to delete the zombie leader right away, as it
3215 can't be debugged, we can't read its registers, etc.
3216 This is the main reason we check for zombie leaders
3217 disappearing.
3218
3219 #2 - The whole thread-group/process exited (a group exit,
 3220	     via e.g. exit(3)), and there is (or will be shortly) an
3221 exit reported for each thread in the process, and then
3222 finally an exit for the leader once the non-leaders are
3223 reaped.
3224
3225 #3 - There are 3 or more threads in the group, and a
3226 thread other than the leader exec'd. See comments on
3227 exec events at the top of the file.
3228
3229 Ideally we would never delete the leader for case #2.
3230 Instead, we want to collect the exit status of each
3231 non-leader thread, and then finally collect the exit
3232 status of the leader as normal and use its exit code as
3233 whole-process exit code. Unfortunately, there's no
3234 race-free way to distinguish cases #1 and #2. We can't
 3235	     assume the exit events for the non-leader threads are
3236 already pending in the kernel, nor can we assume the
3237 non-leader threads are in zombie state already. Between
3238 the leader becoming zombie and the non-leaders exiting
3239 and becoming zombie themselves, there's a small time
3240 window, so such a check would be racy. Temporarily
3241 pausing all threads and checking to see if all threads
3242 exit or not before re-resuming them would work in the
3243 case that all threads are running right now, but it
3244 wouldn't work if some thread is currently already
3245 ptrace-stopped, e.g., due to scheduler-locking.
3246
3247 So what we do is we delete the leader anyhow, and then
3248 later on when we see its exit status, we re-add it back.
3249 We also make sure that we only report a whole-process
3250 exit when we see the leader exiting, as opposed to when
3251 the last LWP in the LWP list exits, which can be a
3252 non-leader if we deleted the leader here. */
9327494e 3253 linux_nat_debug_printf ("Thread group leader %d zombie "
3254 "(it exited, or another thread execd), "
3255 "deleting it.",
9327494e 3256 inf->pid);
3257 exit_lwp (leader_lp);
3258 }
3259 }
3260}
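
To make the zombie-leader check above concrete, here is a small standalone sketch (not GDB code; the real check is done by linux_proc_pid_is_zombie, defined elsewhere in the tree) of what such a test boils down to: read the State: field of /proc/PID/status and look for 'Z'. The helper name and buffer sizes are illustrative assumptions.

#include <stdio.h>
#include <string.h>

static int
pid_is_zombie (int pid)
{
  char path[64], line[256];
  FILE *f;
  int zombie = 0;

  snprintf (path, sizeof path, "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;
  while (fgets (line, sizeof line, f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
        /* The field looks like "State:\tZ (zombie)".  */
        const char *p = line + 6;
        while (*p == ' ' || *p == '\t')
          p++;
        zombie = (*p == 'Z');
        break;
      }
  fclose (f);
  return zombie;
}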
3261
3262/* Convenience function that is called when we're about to return an
3263 event to the core. If the event is an exit or signalled event,
 3264   then this decides whether to report it as a process-wide event, as a
3265 thread exit event, or to suppress it. All other event kinds are
3266 passed through unmodified. */
3267
3268static ptid_t
3269filter_exit_event (struct lwp_info *event_child,
3270 struct target_waitstatus *ourstatus)
3271{
3272 ptid_t ptid = event_child->ptid;
3273
3274 /* Note we must filter TARGET_WAITKIND_SIGNALLED as well, otherwise
3275 if a non-leader thread exits with a signal, we'd report it to the
3276 core which would interpret it as the whole-process exiting.
3277 There is no TARGET_WAITKIND_THREAD_SIGNALLED event kind. */
3278 if (ourstatus->kind () != TARGET_WAITKIND_EXITED
3279 && ourstatus->kind () != TARGET_WAITKIND_SIGNALLED)
3280 return ptid;
3281
6cf20c46 3282 if (!is_leader (event_child))
aa01bd36 3283 {
a51e14ef 3284 if (report_exit_events_for (event_child))
3285 {
3286 ourstatus->set_thread_exited (0);
3287 /* Delete lwp, but not thread_info, infrun will need it to
3288 process the event. */
3289 exit_lwp (event_child, false);
3290 }
aa01bd36 3291 else
3292 {
3293 ourstatus->set_ignore ();
3294 exit_lwp (event_child);
3295 }
3296 }
3297
3298 return ptid;
3299}
3300
d6b0e80f 3301static ptid_t
f6ac5f3d 3302linux_nat_wait_1 (ptid_t ptid, struct target_waitstatus *ourstatus,
b60cea74 3303 target_wait_flags target_options)
d6b0e80f 3304{
3305 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
3306
fc9b8e47 3307 sigset_t prev_mask;
4b60df3d 3308 enum resume_kind last_resume_kind;
12d9289a 3309 struct lwp_info *lp;
12d9289a 3310 int status;
d6b0e80f 3311
3312 /* The first time we get here after starting a new inferior, we may
3313 not have added it to the LWP list yet - this is the earliest
3314 moment at which we know its PID. */
677c92fe 3315 if (ptid.is_pid () && find_lwp_pid (ptid) == nullptr)
f973ed9c 3316 {
677c92fe 3317 ptid_t lwp_ptid (ptid.pid (), ptid.pid ());
27c9d204 3318
3319 /* Upgrade the main thread's ptid. */
3320 thread_change_ptid (linux_target, ptid, lwp_ptid);
3321 lp = add_initial_lwp (lwp_ptid);
3322 lp->resumed = 1;
3323 }
3324
12696c10 3325 /* Make sure SIGCHLD is blocked until the sigsuspend below. */
7feb7d06 3326 block_child_signals (&prev_mask);
d6b0e80f 3327
d6b0e80f 3328 /* First check if there is a LWP with a wait status pending. */
d3a70e03 3329 lp = iterate_over_lwps (ptid, status_callback);
8a99810d 3330 if (lp != NULL)
d6b0e80f 3331 {
9327494e 3332 linux_nat_debug_printf ("Using pending wait status %s for %s.",
57573e54 3333 pending_status_str (lp).c_str (),
e53c95d4 3334 lp->ptid.to_string ().c_str ());
3335 }
3336
3337 /* But if we don't find a pending event, we'll have to wait. Always
3338 pull all events out of the kernel. We'll randomly select an
3339 event LWP out of all that have events, to prevent starvation. */
7feb7d06 3340
d90e17a7 3341 while (lp == NULL)
3342 {
3343 pid_t lwpid;
3344
 3345	      /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
3346 quirks:
3347
3348 - If the thread group leader exits while other threads in the
3349 thread group still exist, waitpid(TGID, ...) hangs. That
3350 waitpid won't return an exit status until the other threads
85102364 3351 in the group are reaped.
3352
3353 - When a non-leader thread execs, that thread just vanishes
3354 without reporting an exit (so we'd hang if we waited for it
3355 explicitly in that case). The exec event is reported to
3356 the TGID pid. */
3357
3358 errno = 0;
4a6ed09b 3359 lwpid = my_waitpid (-1, &status, __WALL | WNOHANG);
0e5bf2a8 3360
3361 linux_nat_debug_printf ("waitpid(-1, ...) returned %d, %s",
3362 lwpid,
3363 errno ? safe_strerror (errno) : "ERRNO-OK");
b84876c2 3364
3365 if (lwpid > 0)
3366 {
9327494e 3367 linux_nat_debug_printf ("waitpid %ld received %s",
3368 (long) lwpid,
3369 status_to_str (status).c_str ());
d6b0e80f 3370
9c02b525 3371 linux_nat_filter_event (lwpid, status);
3372 /* Retry until nothing comes out of waitpid. A single
3373 SIGCHLD can indicate more than one child stopped. */
3374 continue;
3375 }
3376
3377 /* Now that we've pulled all events out of the kernel, resume
3378 LWPs that don't have an interesting event to report. */
3379 iterate_over_lwps (minus_one_ptid,
3380 [] (struct lwp_info *info)
3381 {
3382 return resume_stopped_resumed_lwps (info, minus_one_ptid);
3383 });
3384
3385 /* ... and find an LWP with a status to report to the core, if
3386 any. */
d3a70e03 3387 lp = iterate_over_lwps (ptid, status_callback);
3388 if (lp != NULL)
3389 break;
3390
3391 /* Check for zombie thread group leaders. Those can't be reaped
3392 until all other threads in the thread group are. */
3393 check_zombie_leaders ();
d6b0e80f 3394
3395 /* If there are no resumed children left, bail. We'd be stuck
3396 forever in the sigsuspend call below otherwise. */
d3a70e03 3397 if (iterate_over_lwps (ptid, resumed_callback) == NULL)
0e5bf2a8 3398 {
9327494e 3399 linux_nat_debug_printf ("exit (no resumed LWP)");
b84876c2 3400
183be222 3401 ourstatus->set_no_resumed ();
b84876c2 3402
3403 restore_child_signals_mask (&prev_mask);
3404 return minus_one_ptid;
d6b0e80f 3405 }
28736962 3406
3407 /* No interesting event to report to the core. */
3408
3409 if (target_options & TARGET_WNOHANG)
3410 {
b26b06dd 3411 linux_nat_debug_printf ("no interesting events found");
28736962 3412
183be222 3413 ourstatus->set_ignore ();
3414 restore_child_signals_mask (&prev_mask);
3415 return minus_one_ptid;
3416 }
3417
3418 /* We shouldn't end up here unless we want to try again. */
d90e17a7 3419 gdb_assert (lp == NULL);
3420
3421 /* Block until we get an event reported with SIGCHLD. */
9c3a5d93 3422 wait_for_signal ();
3423 }
3424
d6b0e80f 3425 gdb_assert (lp);
3d2d2172 3426 gdb_assert (lp->stopped);
d6b0e80f 3427
3428 status = lp->status;
3429 lp->status = 0;
3430
fbea99ea 3431 if (!target_is_non_stop_p ())
3432 {
3433 /* Now stop all other LWP's ... */
d3a70e03 3434 iterate_over_lwps (minus_one_ptid, stop_callback);
3435
3436 /* ... and wait until all of them have reported back that
3437 they're no longer running. */
d3a70e03 3438 iterate_over_lwps (minus_one_ptid, stop_wait_callback);
3439 }
3440
3441 /* If we're not waiting for a specific LWP, choose an event LWP from
3442 among those that have had events. Giving equal priority to all
3443 LWPs that have had events helps prevent starvation. */
d7e15655 3444 if (ptid == minus_one_ptid || ptid.is_pid ())
3445 select_event_lwp (ptid, &lp, &status);
3446
3447 gdb_assert (lp != NULL);
3448
3449 /* We'll need this to determine whether to report a SIGSTOP as
3450 GDB_SIGNAL_0. Need to take a copy because resume_clear_callback
3451 clears it. */
3452 last_resume_kind = lp->last_resume_kind;
4b60df3d 3453
fbea99ea 3454 if (!target_is_non_stop_p ())
9c02b525 3455 {
3456 /* In all-stop, from the core's perspective, all LWPs are now
3457 stopped until a new resume action is sent over. */
d3a70e03 3458 iterate_over_lwps (minus_one_ptid, resume_clear_callback);
3459 }
3460 else
25289eb2 3461 {
d3a70e03 3462 resume_clear_callback (lp);
25289eb2 3463 }
d6b0e80f 3464
135340af 3465 if (linux_target->low_status_is_event (status))
d6b0e80f 3466 {
9327494e 3467 linux_nat_debug_printf ("trap ptid is %s.",
e53c95d4 3468 lp->ptid.to_string ().c_str ());
d6b0e80f 3469 }
d6b0e80f 3470
183be222 3471 if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3472 {
3473 *ourstatus = lp->waitstatus;
183be222 3474 lp->waitstatus.set_ignore ();
3475 }
3476 else
7509b829 3477 *ourstatus = host_status_to_waitstatus (status);
d6b0e80f 3478
b26b06dd 3479 linux_nat_debug_printf ("event found");
b84876c2 3480
7feb7d06 3481 restore_child_signals_mask (&prev_mask);
1e225492 3482
4b60df3d 3483 if (last_resume_kind == resume_stop
183be222 3484 && ourstatus->kind () == TARGET_WAITKIND_STOPPED
3485 && WSTOPSIG (status) == SIGSTOP)
3486 {
3487 /* A thread that has been requested to stop by GDB with
3488 target_stop, and it stopped cleanly, so report as SIG0. The
3489 use of SIGSTOP is an implementation detail. */
183be222 3490 ourstatus->set_stopped (GDB_SIGNAL_0);
3491 }
3492
3493 if (ourstatus->kind () == TARGET_WAITKIND_EXITED
3494 || ourstatus->kind () == TARGET_WAITKIND_SIGNALLED)
3495 lp->core = -1;
3496 else
2e794194 3497 lp->core = linux_common_core_of_thread (lp->ptid);
1e225492 3498
a51e14ef 3499 return filter_exit_event (lp, ourstatus);
3500}
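
The wait loop above reads more clearly once it is reduced to its signal/waitpid skeleton. The following standalone sketch shows that pattern in isolation: keep SIGCHLD blocked, drain every pending event with waitpid (-1, ..., __WALL | WNOHANG), and only then sleep in sigsuspend, which atomically unblocks SIGCHLD. record_event and have_interesting_event are hypothetical stand-ins for the per-LWP bookkeeping done above (status_callback, select_event_lwp, and friends), not real functions.

#define _GNU_SOURCE
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>

extern void record_event (pid_t pid, int status);   /* stand-in */
extern int have_interesting_event (void);           /* stand-in */

static void
wait_loop (void)
{
  sigset_t chld, prev;
  int status;
  pid_t pid;

  /* Keep SIGCHLD blocked except inside sigsuspend, so a SIGCHLD that
     arrives right after the waitpid drain cannot be lost.  */
  sigemptyset (&chld);
  sigaddset (&chld, SIGCHLD);
  sigprocmask (SIG_BLOCK, &chld, &prev);

  for (;;)
    {
      /* Drain: one SIGCHLD can stand for many stopped children.  */
      while ((pid = waitpid (-1, &status, __WALL | WNOHANG)) > 0)
        record_event (pid, status);

      if (have_interesting_event ())
        break;

      /* Nothing to report yet: atomically unblock SIGCHLD and sleep
         until the next one arrives.  */
      sigsuspend (&prev);
    }

  sigprocmask (SIG_SETMASK, &prev, NULL);
}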
3501
e3e9f5a2
PA
3502/* Resume LWPs that are currently stopped without any pending status
3503 to report, but are resumed from the core's perspective. */
3504
3505static int
d3a70e03 3506resume_stopped_resumed_lwps (struct lwp_info *lp, const ptid_t wait_ptid)
e3e9f5a2 3507{
74387712 3508 inferior *inf = lwp_inferior (lp);
14ec4172 3509
8a9da63e 3510 if (!lp->stopped)
4dd63d48 3511 {
9327494e 3512 linux_nat_debug_printf ("NOT resuming LWP %s, not stopped",
e53c95d4 3513 lp->ptid.to_string ().c_str ());
4dd63d48
PA
3514 }
3515 else if (!lp->resumed)
3516 {
9327494e 3517 linux_nat_debug_printf ("NOT resuming LWP %s, not resumed",
e53c95d4 3518 lp->ptid.to_string ().c_str ());
4dd63d48
PA
3519 }
3520 else if (lwp_status_pending_p (lp))
3521 {
9327494e 3522 linux_nat_debug_printf ("NOT resuming LWP %s, has pending status",
e53c95d4 3523 lp->ptid.to_string ().c_str ());
4dd63d48 3524 }
8a9da63e
AB
3525 else if (inf->vfork_child != nullptr)
3526 {
3527 linux_nat_debug_printf ("NOT resuming LWP %s (vfork parent)",
3528 lp->ptid.to_string ().c_str ());
3529 }
4dd63d48 3530 else
e3e9f5a2 3531 {
5b6d1e4f 3532 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
ac7936df 3533 struct gdbarch *gdbarch = regcache->arch ();
336060f3 3534
a70b8144 3535 try
e3e9f5a2 3536 {
23f238d3
PA
3537 CORE_ADDR pc = regcache_read_pc (regcache);
3538 int leave_stopped = 0;
e3e9f5a2 3539
23f238d3
PA
3540 /* Don't bother if there's a breakpoint at PC that we'd hit
3541 immediately, and we're not waiting for this LWP. */
d3a70e03 3542 if (!lp->ptid.matches (wait_ptid))
23f238d3 3543 {
f9582a22 3544 if (breakpoint_inserted_here_p (inf->aspace.get (), pc))
23f238d3
PA
3545 leave_stopped = 1;
3546 }
e3e9f5a2 3547
23f238d3
PA
3548 if (!leave_stopped)
3549 {
9327494e
SM
3550 linux_nat_debug_printf
3551 ("resuming stopped-resumed LWP %s at %s: step=%d",
e53c95d4 3552 lp->ptid.to_string ().c_str (), paddress (gdbarch, pc),
9327494e 3553 lp->step);
23f238d3
PA
3554
3555 linux_resume_one_lwp_throw (lp, lp->step, GDB_SIGNAL_0);
3556 }
3557 }
230d2906 3558 catch (const gdb_exception_error &ex)
23f238d3
PA
3559 {
3560 if (!check_ptrace_stopped_lwp_gone (lp))
eedc3f4f 3561 throw;
23f238d3 3562 }
e3e9f5a2
PA
3563 }
3564
3565 return 0;
3566}
3567
f6ac5f3d
PA
3568ptid_t
3569linux_nat_target::wait (ptid_t ptid, struct target_waitstatus *ourstatus,
b60cea74 3570 target_wait_flags target_options)
7feb7d06 3571{
b26b06dd
AB
3572 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
3573
7feb7d06
PA
3574 ptid_t event_ptid;
3575
e53c95d4 3576 linux_nat_debug_printf ("[%s], [%s]", ptid.to_string ().c_str (),
9327494e 3577 target_options_to_string (target_options).c_str ());
7feb7d06
PA
3578
3579 /* Flush the async file first. */
d9d41e78 3580 if (target_is_async_p ())
7feb7d06
PA
3581 async_file_flush ();
3582
e3e9f5a2
PA
3583 /* Resume LWPs that are currently stopped without any pending status
3584 to report, but are resumed from the core's perspective. LWPs get
3585 in this state if we find them stopping at a time we're not
3586 interested in reporting the event (target_wait on a
3587 specific_process, for example, see linux_nat_wait_1), and
3588 meanwhile the event became uninteresting. Don't bother resuming
3589 LWPs we're not going to wait for if they'd stop immediately. */
fbea99ea 3590 if (target_is_non_stop_p ())
d3a70e03
TT
3591 iterate_over_lwps (minus_one_ptid,
3592 [=] (struct lwp_info *info)
3593 {
3594 return resume_stopped_resumed_lwps (info, ptid);
3595 });
e3e9f5a2 3596
f6ac5f3d 3597 event_ptid = linux_nat_wait_1 (ptid, ourstatus, target_options);
7feb7d06
PA
3598
3599 /* If we requested any event, and something came out, assume there
3600 may be more. If we requested a specific lwp or process, also
3601 assume there may be more. */
d9d41e78 3602 if (target_is_async_p ()
183be222
SM
3603 && ((ourstatus->kind () != TARGET_WAITKIND_IGNORE
3604 && ourstatus->kind () != TARGET_WAITKIND_NO_RESUMED)
d7e15655 3605 || ptid != minus_one_ptid))
7feb7d06
PA
3606 async_file_mark ();
3607
7feb7d06
PA
3608 return event_ptid;
3609}
3610
1d2736d4
PA
3611/* Kill one LWP. */
3612
3613static void
3614kill_one_lwp (pid_t pid)
d6b0e80f 3615{
ed731959
JK
3616 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3617
3618 errno = 0;
1d2736d4 3619 kill_lwp (pid, SIGKILL);
9327494e 3620
ed731959 3621 if (debug_linux_nat)
57745c90
PA
3622 {
3623 int save_errno = errno;
3624
9327494e
SM
3625 linux_nat_debug_printf
3626 ("kill (SIGKILL) %ld, 0, 0 (%s)", (long) pid,
3627 save_errno != 0 ? safe_strerror (save_errno) : "OK");
57745c90 3628 }
ed731959
JK
3629
3630 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3631
d6b0e80f 3632 errno = 0;
1d2736d4 3633 ptrace (PTRACE_KILL, pid, 0, 0);
d6b0e80f 3634 if (debug_linux_nat)
57745c90
PA
3635 {
3636 int save_errno = errno;
3637
9327494e
SM
3638 linux_nat_debug_printf
3639 ("PTRACE_KILL %ld, 0, 0 (%s)", (long) pid,
3640 save_errno ? safe_strerror (save_errno) : "OK");
57745c90 3641 }
d6b0e80f
AC
3642}
3643
1d2736d4
PA
3644/* Wait for an LWP to die. */
3645
3646static void
3647kill_wait_one_lwp (pid_t pid)
d6b0e80f 3648{
1d2736d4 3649 pid_t res;
d6b0e80f
AC
3650
3651 /* We must make sure that there are no pending events (delayed
3652 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
3653 program doesn't interfere with any following debugging session. */
3654
d6b0e80f
AC
3655 do
3656 {
1d2736d4
PA
3657 res = my_waitpid (pid, NULL, __WALL);
3658 if (res != (pid_t) -1)
d6b0e80f 3659 {
9327494e
SM
3660 linux_nat_debug_printf ("wait %ld received unknown.", (long) pid);
3661
4a6ed09b
PA
3662 /* The Linux kernel sometimes fails to kill a thread
3663 completely after PTRACE_KILL; that goes from the stop
3664 point in do_fork out to the one in get_signal_to_deliver
3665 and waits again. So kill it again. */
1d2736d4 3666 kill_one_lwp (pid);
d6b0e80f
AC
3667 }
3668 }
1d2736d4
PA
3669 while (res == pid);
3670
3671 gdb_assert (res == -1 && errno == ECHILD);
3672}
3673
3674/* Callback for iterate_over_lwps. */
d6b0e80f 3675
1d2736d4 3676static int
d3a70e03 3677kill_callback (struct lwp_info *lp)
1d2736d4 3678{
e38504b3 3679 kill_one_lwp (lp->ptid.lwp ());
d6b0e80f
AC
3680 return 0;
3681}
3682
1d2736d4
PA
3683/* Callback for iterate_over_lwps. */
3684
3685static int
d3a70e03 3686kill_wait_callback (struct lwp_info *lp)
1d2736d4 3687{
e38504b3 3688 kill_wait_one_lwp (lp->ptid.lwp ());
1d2736d4
PA
3689 return 0;
3690}
3691
0d36baa9 3692/* Kill the fork/clone child of LP if it has an unfollowed child. */
1d2736d4 3693
0d36baa9
PA
3694static int
3695kill_unfollowed_child_callback (lwp_info *lp)
1d2736d4 3696{
6b09f134 3697 std::optional<target_waitstatus> ws = get_pending_child_status (lp);
0d36baa9 3698 if (ws.has_value ())
08036331 3699 {
0d36baa9
PA
3700 ptid_t child_ptid = ws->child_ptid ();
3701 int child_pid = child_ptid.pid ();
3702 int child_lwp = child_ptid.lwp ();
08036331 3703
0d36baa9
PA
3704 kill_one_lwp (child_lwp);
3705 kill_wait_one_lwp (child_lwp);
08036331 3706
0d36baa9
PA
3707 /* Let the arch-specific native code know this process is
3708 gone. */
3709 if (ws->kind () != TARGET_WAITKIND_THREAD_CLONED)
3710 linux_target->low_forget_process (child_pid);
08036331 3711 }
0d36baa9
PA
3712
3713 return 0;
1d2736d4
PA
3714}
3715
f6ac5f3d
PA
3716void
3717linux_nat_target::kill ()
d6b0e80f 3718{
0d36baa9
PA
3719 ptid_t pid_ptid (inferior_ptid.pid ());
3720
3721 /* If we're stopped while forking/cloning and we haven't followed
3722 yet, kill the child task. We need to do this first because the
f973ed9c 3723 parent will be sleeping if this is a vfork. */
0d36baa9 3724 iterate_over_lwps (pid_ptid, kill_unfollowed_child_callback);
f973ed9c
DJ
3725
3726 if (forks_exist_p ())
7feb7d06 3727 linux_fork_killall ();
f973ed9c
DJ
3728 else
3729 {
4c28f408 3730 /* Stop all threads before killing them, since ptrace requires
30baf67b 3731	 that the thread be stopped for PTRACE_KILL to succeed.  */
0d36baa9 3732 iterate_over_lwps (pid_ptid, stop_callback);
4c28f408
PA
3733 /* ... and wait until all of them have reported back that
3734 they're no longer running. */
0d36baa9 3735 iterate_over_lwps (pid_ptid, stop_wait_callback);
4c28f408 3736
f973ed9c 3737 /* Kill all LWP's ... */
0d36baa9 3738 iterate_over_lwps (pid_ptid, kill_callback);
f973ed9c
DJ
3739
3740 /* ... and wait until we've flushed all events. */
0d36baa9 3741 iterate_over_lwps (pid_ptid, kill_wait_callback);
f973ed9c
DJ
3742 }
3743
bc1e6c81 3744 target_mourn_inferior (inferior_ptid);
d6b0e80f
AC
3745}
3746
f6ac5f3d
PA
3747void
3748linux_nat_target::mourn_inferior ()
d6b0e80f 3749{
b26b06dd
AB
3750 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
3751
e99b03dc 3752 int pid = inferior_ptid.pid ();
26cb8b7c
PA
3753
3754 purge_lwp_list (pid);
d6b0e80f 3755
8a89ddbd 3756 close_proc_mem_file (pid);
05c06f31 3757
f973ed9c 3758 if (! forks_exist_p ())
d90e17a7 3759 /* Normal case, no other forks available. */
f6ac5f3d 3760 inf_ptrace_target::mourn_inferior ();
f973ed9c
DJ
3761 else
3762 /* Multi-fork case. The current inferior_ptid has exited, but
3763 there are other viable forks to debug. Delete the exiting
3764 one and context-switch to the first available. */
3765 linux_fork_mourn_inferior ();
26cb8b7c
PA
3766
3767 /* Let the arch-specific native code know this process is gone. */
135340af 3768 linux_target->low_forget_process (pid);
d6b0e80f
AC
3769}
3770
5b009018
PA
3771/* Convert a native/host siginfo object, into/from the siginfo in the
3772 layout of the inferiors' architecture. */
3773
3774static void
a5362b9a 3775siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
5b009018 3776{
135340af
PA
3777 /* If the low target didn't do anything, then just do a straight
3778 memcpy. */
3779 if (!linux_target->low_siginfo_fixup (siginfo, inf_siginfo, direction))
5b009018
PA
3780 {
3781 if (direction == 1)
a5362b9a 3782 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5b009018 3783 else
a5362b9a 3784 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5b009018
PA
3785 }
3786}
3787
9b409511 3788static enum target_xfer_status
7154e786 3789linux_xfer_siginfo (ptid_t ptid, enum target_object object,
dda83cd7 3790 const char *annex, gdb_byte *readbuf,
9b409511
YQ
3791 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3792 ULONGEST *xfered_len)
4aa995e1 3793{
a5362b9a
TS
3794 siginfo_t siginfo;
3795 gdb_byte inf_siginfo[sizeof (siginfo_t)];
4aa995e1
PA
3796
3797 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
3798 gdb_assert (readbuf || writebuf);
3799
4aa995e1 3800 if (offset > sizeof (siginfo))
2ed4b548 3801 return TARGET_XFER_E_IO;
4aa995e1 3802
7154e786 3803 if (!linux_nat_get_siginfo (ptid, &siginfo))
2ed4b548 3804 return TARGET_XFER_E_IO;
4aa995e1 3805
5b009018
PA
3806 /* When GDB is built as a 64-bit application, ptrace writes into
3807 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3808 inferior with a 64-bit GDB should look the same as debugging it
3809 with a 32-bit GDB, we need to convert it. GDB core always sees
3810 the converted layout, so any read/write will have to be done
3811 post-conversion. */
3812 siginfo_fixup (&siginfo, inf_siginfo, 0);
3813
4aa995e1
PA
3814 if (offset + len > sizeof (siginfo))
3815 len = sizeof (siginfo) - offset;
3816
3817 if (readbuf != NULL)
5b009018 3818 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
3819 else
3820 {
5b009018
PA
3821 memcpy (inf_siginfo + offset, writebuf, len);
3822
3823 /* Convert back to ptrace layout before flushing it out. */
3824 siginfo_fixup (&siginfo, inf_siginfo, 1);
3825
7154e786 3826 int pid = get_ptrace_pid (ptid);
4aa995e1
PA
3827 errno = 0;
3828 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3829 if (errno != 0)
2ed4b548 3830 return TARGET_XFER_E_IO;
4aa995e1
PA
3831 }
3832
9b409511
YQ
3833 *xfered_len = len;
3834 return TARGET_XFER_OK;
4aa995e1
PA
3835}
3836
9b409511 3837static enum target_xfer_status
f6ac5f3d
PA
3838linux_nat_xfer_osdata (enum target_object object,
3839 const char *annex, gdb_byte *readbuf,
3840 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3841 ULONGEST *xfered_len);
3842
f6ac5f3d 3843static enum target_xfer_status
f9f593dd
SM
3844linux_proc_xfer_memory_partial (int pid, gdb_byte *readbuf,
3845 const gdb_byte *writebuf, ULONGEST offset,
3846 LONGEST len, ULONGEST *xfered_len);
f6ac5f3d 3847
5e86aab8
PA
3848/* Look for an LWP of PID that we know is ptrace-stopped. Returns
3849 NULL if none is found. */
3850
3851static lwp_info *
3852find_stopped_lwp (int pid)
3853{
3854 for (lwp_info *lp : all_lwps ())
3855 if (lp->ptid.pid () == pid
3856 && lp->stopped
3857 && !is_lwp_marked_dead (lp))
3858 return lp;
3859 return nullptr;
3860}
3861
f6ac5f3d
PA
3862enum target_xfer_status
3863linux_nat_target::xfer_partial (enum target_object object,
3864 const char *annex, gdb_byte *readbuf,
3865 const gdb_byte *writebuf,
3866 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
d6b0e80f 3867{
4aa995e1 3868 if (object == TARGET_OBJECT_SIGNAL_INFO)
7154e786 3869 return linux_xfer_siginfo (inferior_ptid, object, annex, readbuf, writebuf,
9b409511 3870 offset, len, xfered_len);
4aa995e1 3871
c35b1492
PA
3872 /* The target is connected but no live inferior is selected. Pass
3873 this request down to a lower stratum (e.g., the executable
3874 file). */
d7e15655 3875 if (object == TARGET_OBJECT_MEMORY && inferior_ptid == null_ptid)
9b409511 3876 return TARGET_XFER_EOF;
c35b1492 3877
f6ac5f3d
PA
3878 if (object == TARGET_OBJECT_AUXV)
3879 return memory_xfer_auxv (this, object, annex, readbuf, writebuf,
3880 offset, len, xfered_len);
3881
3882 if (object == TARGET_OBJECT_OSDATA)
3883 return linux_nat_xfer_osdata (object, annex, readbuf, writebuf,
3884 offset, len, xfered_len);
d6b0e80f 3885
f6ac5f3d
PA
3886 if (object == TARGET_OBJECT_MEMORY)
3887 {
05c06f31
PA
3888 /* GDB calculates all addresses in the largest possible address
3889 width. The address width must be masked before its final use
3890 by linux_proc_xfer_partial.
3891
3892 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
99d9c3b9 3893 int addr_bit = gdbarch_addr_bit (current_inferior ()->arch ());
f6ac5f3d
PA
3894
3895 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
3896 offset &= ((ULONGEST) 1 << addr_bit) - 1;
f6ac5f3d 3897
dd09fe0d
KS
3898 /* If /proc/pid/mem is writable, don't fallback to ptrace. If
3899 the write via /proc/pid/mem fails because the inferior execed
3900 (and we haven't seen the exec event yet), a subsequent ptrace
3901 poke would incorrectly write memory to the post-exec address
3902 space, while the core was trying to write to the pre-exec
3903 address space. */
3904 if (proc_mem_file_is_writable ())
f9f593dd
SM
3905 return linux_proc_xfer_memory_partial (inferior_ptid.pid (), readbuf,
3906 writebuf, offset, len,
3907 xfered_len);
5e86aab8
PA
3908
3909 /* Fallback to ptrace. This should only really trigger on old
3910 systems. See "Accessing inferior memory" at the top.
3911
3912 The target_xfer interface for memory access uses
3913 inferior_ptid as sideband argument to indicate which process
3914 to access. Memory access is process-wide, it is not
3915 thread-specific, so inferior_ptid sometimes points at a
3916 process ptid_t. If we fallback to inf_ptrace_target with
3917 that inferior_ptid, then the ptrace code will do the ptrace
3918 call targeting inferior_ptid.pid(), the leader LWP. That
3919 may fail with ESRCH if the leader is currently running, or
3920 zombie. So if we get a pid-ptid, we try to find a stopped
3921 LWP to use with ptrace.
3922
3923 Note that inferior_ptid may not exist in the lwp / thread /
3924 inferior lists. This can happen when we're removing
3925 breakpoints from a fork child that we're not going to stay
3926 attached to. So if we don't find a stopped LWP, still do the
3927 ptrace call, targeting the inferior_ptid we had on entry. */
3928 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
3929 lwp_info *stopped = find_stopped_lwp (inferior_ptid.pid ());
3930 if (stopped != nullptr)
3931 inferior_ptid = stopped->ptid;
3932 return inf_ptrace_target::xfer_partial (object, annex, readbuf, writebuf,
3933 offset, len, xfered_len);
05c06f31 3934 }
f6ac5f3d
PA
3935
3936 return inf_ptrace_target::xfer_partial (object, annex, readbuf, writebuf,
3937 offset, len, xfered_len);
d6b0e80f
AC
3938}
3939
57810aa7 3940bool
f6ac5f3d 3941linux_nat_target::thread_alive (ptid_t ptid)
28439f5e 3942{
4a6ed09b
PA
3943 /* As long as a PTID is in lwp list, consider it alive. */
3944 return find_lwp_pid (ptid) != NULL;
28439f5e
PA
3945}
3946
8a06aea7
PA
3947/* Implement the to_update_thread_list target method for this
3948 target. */
3949
f6ac5f3d
PA
3950void
3951linux_nat_target::update_thread_list ()
8a06aea7 3952{
4a6ed09b
PA
3953 /* We add/delete threads from the list as clone/exit events are
3954 processed, so just try deleting exited threads still in the
3955 thread list. */
3956 delete_exited_threads ();
a6904d5a
PA
3957
3958 /* Update the processor core that each lwp/thread was last seen
3959 running on. */
901b9821 3960 for (lwp_info *lwp : all_lwps ())
1ad3de98
PA
3961 {
3962 /* Avoid accessing /proc if the thread hasn't run since we last
3963 time we fetched the thread's core. Accessing /proc becomes
3964 noticeably expensive when we have thousands of LWPs. */
3965 if (lwp->core == -1)
3966 lwp->core = linux_common_core_of_thread (lwp->ptid);
3967 }
3968}
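
linux_common_core_of_thread is defined elsewhere in the tree; as a rough illustration of the /proc parsing it involves, here is a standalone sketch that extracts the last-run CPU from /proc/PID/task/TID/stat. The helper name is made up, and the field number (39, the "processor" field) is taken from proc(5) rather than from the GDB sources.

#include <stdio.h>
#include <string.h>

static int
core_of_lwp (int pid, long tid)
{
  char path[64], buf[1024];
  FILE *f;
  size_t n;
  char *p;
  int field = 2, core = -1;

  snprintf (path, sizeof path, "/proc/%d/task/%ld/stat", pid, tid);
  f = fopen (path, "r");
  if (f == NULL)
    return -1;
  n = fread (buf, 1, sizeof buf - 1, f);
  fclose (f);
  buf[n] = '\0';

  /* Field 2 (the command name) may itself contain spaces and
     parentheses, so start counting after the last ')'.  */
  p = strrchr (buf, ')');
  if (p == NULL)
    return -1;
  for (p++; *p != '\0'; p++)
    if (*p == ' ')
      {
        field++;
        if (field == 39)        /* "processor", per proc(5).  */
          {
            sscanf (p + 1, "%d", &core);
            break;
          }
      }
  return core;
}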
3969
a068643d 3970std::string
f6ac5f3d 3971linux_nat_target::pid_to_str (ptid_t ptid)
d6b0e80f 3972{
15a9e13e 3973 if (ptid.lwp_p ()
e38504b3 3974 && (ptid.pid () != ptid.lwp ()
e99b03dc 3975 || num_lwps (ptid.pid ()) > 1))
a068643d 3976 return string_printf ("LWP %ld", ptid.lwp ());
d6b0e80f
AC
3977
3978 return normal_pid_to_str (ptid);
3979}
3980
f6ac5f3d
PA
3981const char *
3982linux_nat_target::thread_name (struct thread_info *thr)
4694da01 3983{
79efa585 3984 return linux_proc_tid_get_name (thr->ptid);
4694da01
TT
3985}
3986
dba24537
AC
3987/* Accepts an integer PID; Returns a string representing a file that
3988 can be opened to get the symbols for the child process. */
3989
0e90c441 3990const char *
f6ac5f3d 3991linux_nat_target::pid_to_exec_file (int pid)
dba24537 3992{
e0d86d2c 3993 return linux_proc_pid_to_exec_file (pid);
dba24537
AC
3994}
3995
 3996/* Object representing a /proc/PID/mem open file.  We keep one such
3997 file open per inferior.
3998
3999 It might be tempting to think about only ever opening one file at
4000 most for all inferiors, closing/reopening the file as we access
4001 memory of different inferiors, to minimize number of file
4002 descriptors open, which can otherwise run into resource limits.
4003 However, that does not work correctly -- if the inferior execs and
4004 we haven't processed the exec event yet, and, we opened a
4005 /proc/PID/mem file, we will get a mem file accessing the post-exec
4006 address space, thinking we're opening it for the pre-exec address
4007 space. That is dangerous as we can poke memory (e.g. clearing
4008 breakpoints) in the post-exec memory by mistake, corrupting the
4009 inferior. For that reason, we open the mem file as early as
4010 possible, right after spawning, forking or attaching to the
4011 inferior, when the inferior is stopped and thus before it has a
4012 chance of execing.
4013
4014 Note that after opening the file, even if the thread we opened it
4015 for subsequently exits, the open file is still usable for accessing
4016 memory. It's only when the whole process exits or execs that the
4017 file becomes invalid, at which point reads/writes return EOF. */
4018
4019class proc_mem_file
4020{
4021public:
4022 proc_mem_file (ptid_t ptid, int fd)
4023 : m_ptid (ptid), m_fd (fd)
4024 {
4025 gdb_assert (m_fd != -1);
4026 }
05c06f31 4027
8a89ddbd 4028 ~proc_mem_file ()
05c06f31 4029 {
89662f69 4030 linux_nat_debug_printf ("closing fd %d for /proc/%d/task/%ld/mem",
8a89ddbd
PA
4031 m_fd, m_ptid.pid (), m_ptid.lwp ());
4032 close (m_fd);
05c06f31 4033 }
05c06f31 4034
8a89ddbd
PA
4035 DISABLE_COPY_AND_ASSIGN (proc_mem_file);
4036
4037 int fd ()
4038 {
4039 return m_fd;
4040 }
4041
4042private:
4043 /* The LWP this file was opened for. Just for debugging
4044 purposes. */
4045 ptid_t m_ptid;
4046
4047 /* The file descriptor. */
4048 int m_fd = -1;
4049};
4050
4051/* The map between an inferior process id, and the open /proc/PID/mem
4052 file. This is stored in a map instead of in a per-inferior
4053 structure because we need to be able to access memory of processes
4054 which don't have a corresponding struct inferior object. E.g.,
4055 with "detach-on-fork on" (the default), and "follow-fork parent"
4056 (also default), we don't create an inferior for the fork child, but
4057 we still need to remove breakpoints from the fork child's
4058 memory. */
4059static std::unordered_map<int, proc_mem_file> proc_mem_file_map;
4060
4061/* Close the /proc/PID/mem file for PID. */
05c06f31
PA
4062
4063static void
8a89ddbd 4064close_proc_mem_file (pid_t pid)
dba24537 4065{
8a89ddbd 4066 proc_mem_file_map.erase (pid);
05c06f31 4067}
dba24537 4068
8a89ddbd
PA
4069/* Open the /proc/PID/mem file for the process (thread group) of PTID.
4070 We actually open /proc/PID/task/LWP/mem, as that's the LWP we know
4071 exists and is stopped right now. We prefer the
4072 /proc/PID/task/LWP/mem form over /proc/LWP/mem to avoid tid-reuse
4073 races, just in case this is ever called on an already-waited
4074 LWP. */
dba24537 4075
8a89ddbd
PA
4076static void
4077open_proc_mem_file (ptid_t ptid)
05c06f31 4078{
8a89ddbd
PA
4079 auto iter = proc_mem_file_map.find (ptid.pid ());
4080 gdb_assert (iter == proc_mem_file_map.end ());
dba24537 4081
8a89ddbd
PA
4082 char filename[64];
4083 xsnprintf (filename, sizeof filename,
4084 "/proc/%d/task/%ld/mem", ptid.pid (), ptid.lwp ());
4085
4086 int fd = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();
05c06f31 4087
8a89ddbd
PA
4088 if (fd == -1)
4089 {
4090 warning (_("opening /proc/PID/mem file for lwp %d.%ld failed: %s (%d)"),
4091 ptid.pid (), ptid.lwp (),
4092 safe_strerror (errno), errno);
4093 return;
05c06f31
PA
4094 }
4095
8a89ddbd
PA
4096 proc_mem_file_map.emplace (std::piecewise_construct,
4097 std::forward_as_tuple (ptid.pid ()),
4098 std::forward_as_tuple (ptid, fd));
4099
9221923c 4100 linux_nat_debug_printf ("opened fd %d for lwp %d.%ld",
8a89ddbd
PA
4101 fd, ptid.pid (), ptid.lwp ());
4102}
4103
1bcb0708
PA
4104/* Helper for linux_proc_xfer_memory_partial and
4105 proc_mem_file_is_writable. FD is the already opened /proc/pid/mem
4106 file, and PID is the pid of the corresponding process. The rest of
4107 the arguments are like linux_proc_xfer_memory_partial's. */
8a89ddbd
PA
4108
4109static enum target_xfer_status
1bcb0708
PA
4110linux_proc_xfer_memory_partial_fd (int fd, int pid,
4111 gdb_byte *readbuf, const gdb_byte *writebuf,
4112 ULONGEST offset, LONGEST len,
4113 ULONGEST *xfered_len)
8a89ddbd
PA
4114{
4115 ssize_t ret;
4116
8a89ddbd 4117 gdb_assert (fd != -1);
dba24537 4118
31a56a22
PA
4119 /* Use pread64/pwrite64 if available, since they save a syscall and
4120 can handle 64-bit offsets even on 32-bit platforms (for instance,
4121 SPARC debugging a SPARC64 application). But only use them if the
4122 offset isn't so high that when cast to off_t it'd be negative, as
4123 seen on SPARC64. pread64/pwrite64 outright reject such offsets.
4124 lseek does not. */
dba24537 4125#ifdef HAVE_PREAD64
31a56a22
PA
4126 if ((off_t) offset >= 0)
4127 ret = (readbuf != nullptr
4128 ? pread64 (fd, readbuf, len, offset)
4129 : pwrite64 (fd, writebuf, len, offset));
4130 else
dba24537 4131#endif
31a56a22
PA
4132 {
4133 ret = lseek (fd, offset, SEEK_SET);
4134 if (ret != -1)
4135 ret = (readbuf != nullptr
4136 ? read (fd, readbuf, len)
4137 : write (fd, writebuf, len));
4138 }
dba24537 4139
05c06f31
PA
4140 if (ret == -1)
4141 {
9221923c 4142 linux_nat_debug_printf ("accessing fd %d for pid %d failed: %s (%d)",
1bcb0708 4143 fd, pid, safe_strerror (errno), errno);
284b6bb5 4144 return TARGET_XFER_E_IO;
05c06f31
PA
4145 }
4146 else if (ret == 0)
4147 {
8a89ddbd
PA
4148 /* EOF means the address space is gone, the whole process exited
4149 or execed. */
9221923c 4150 linux_nat_debug_printf ("accessing fd %d for pid %d got EOF",
1bcb0708 4151 fd, pid);
05c06f31
PA
4152 return TARGET_XFER_EOF;
4153 }
9b409511
YQ
4154 else
4155 {
8a89ddbd 4156 *xfered_len = ret;
4157 return TARGET_XFER_OK;
4158 }
05c06f31 4159}
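
Outside of GDB, the same /proc/PID/mem access pattern takes only a few lines. A minimal sketch, assuming the caller is allowed to ptrace the target (otherwise the open or the pread fails with EPERM/EACCES); for full 64-bit offsets on a 32-bit host you would additionally want pread64 or _FILE_OFFSET_BITS=64, as the function above explains. The helper name is illustrative.

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

/* Read LEN bytes at ADDR from PID's address space; returns the
   number of bytes read, or -1 on error.  */
static ssize_t
read_process_memory (int pid, uint64_t addr, void *buf, size_t len)
{
  char path[64];
  int fd;
  ssize_t n;

  snprintf (path, sizeof path, "/proc/%d/mem", pid);
  fd = open (path, O_RDONLY);
  if (fd == -1)
    return -1;
  n = pread (fd, buf, len, (off_t) addr);
  close (fd);
  return n;
}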
efcbbd14 4160
1bcb0708
PA
4161/* Implement the to_xfer_partial target method using /proc/PID/mem.
4162 Because we can use a single read/write call, this can be much more
4163 efficient than banging away at PTRACE_PEEKTEXT. Also, unlike
4164 PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running
4165 threads. */
4166
4167static enum target_xfer_status
f9f593dd
SM
4168linux_proc_xfer_memory_partial (int pid, gdb_byte *readbuf,
4169 const gdb_byte *writebuf, ULONGEST offset,
4170 LONGEST len, ULONGEST *xfered_len)
1bcb0708 4171{
1bcb0708
PA
4172 auto iter = proc_mem_file_map.find (pid);
4173 if (iter == proc_mem_file_map.end ())
4174 return TARGET_XFER_EOF;
4175
4176 int fd = iter->second.fd ();
4177
4178 return linux_proc_xfer_memory_partial_fd (fd, pid, readbuf, writebuf, offset,
4179 len, xfered_len);
4180}
4181
4182/* Check whether /proc/pid/mem is writable in the current kernel, and
4183 return true if so. It wasn't writable before Linux 2.6.39, but
4184 there's no way to know whether the feature was backported to older
4185 kernels. So we check to see if it works. The result is cached,
3bfdcabb 4186 and this is guaranteed to be called once early during inferior
9dff6a5d
PA
4187 startup, so that any warning is printed out consistently between
4188 GDB invocations. Note we don't call it during GDB startup instead
4189 though, because then we might warn with e.g. just "gdb --version"
4190 on sandboxed systems. See PR gdb/29907. */
1bcb0708
PA
4191
4192static bool
4193proc_mem_file_is_writable ()
4194{
6b09f134 4195 static std::optional<bool> writable;
1bcb0708
PA
4196
4197 if (writable.has_value ())
4198 return *writable;
4199
4200 writable.emplace (false);
4201
4202 /* We check whether /proc/pid/mem is writable by trying to write to
4203 one of our variables via /proc/self/mem. */
4204
4205 int fd = gdb_open_cloexec ("/proc/self/mem", O_RDWR | O_LARGEFILE, 0).release ();
4206
4207 if (fd == -1)
4208 {
4209 warning (_("opening /proc/self/mem file failed: %s (%d)"),
4210 safe_strerror (errno), errno);
4211 return *writable;
4212 }
4213
4214 SCOPE_EXIT { close (fd); };
4215
4216 /* This is the variable we try to write to. Note OFFSET below. */
4217 volatile gdb_byte test_var = 0;
4218
4219 gdb_byte writebuf[] = {0x55};
4220 ULONGEST offset = (uintptr_t) &test_var;
4221 ULONGEST xfered_len;
4222
4223 enum target_xfer_status res
4224 = linux_proc_xfer_memory_partial_fd (fd, getpid (), nullptr, writebuf,
4225 offset, 1, &xfered_len);
4226
4227 if (res == TARGET_XFER_OK)
4228 {
4229 gdb_assert (xfered_len == 1);
4230 gdb_assert (test_var == 0x55);
4231 /* Success. */
4232 *writable = true;
4233 }
4234
4235 return *writable;
4236}
4237
dba24537
AC
4238/* Parse LINE as a signal set and add its set bits to SIGS. */
4239
4240static void
4241add_line_to_sigset (const char *line, sigset_t *sigs)
4242{
4243 int len = strlen (line) - 1;
4244 const char *p;
4245 int signum;
4246
4247 if (line[len] != '\n')
8a3fe4f8 4248 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4249
4250 p = line;
4251 signum = len * 4;
4252 while (len-- > 0)
4253 {
4254 int digit;
4255
4256 if (*p >= '0' && *p <= '9')
4257 digit = *p - '0';
4258 else if (*p >= 'a' && *p <= 'f')
4259 digit = *p - 'a' + 10;
4260 else
8a3fe4f8 4261 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4262
4263 signum -= 4;
4264
4265 if (digit & 1)
4266 sigaddset (sigs, signum + 1);
4267 if (digit & 2)
4268 sigaddset (sigs, signum + 2);
4269 if (digit & 4)
4270 sigaddset (sigs, signum + 3);
4271 if (digit & 8)
4272 sigaddset (sigs, signum + 4);
4273
4274 p++;
4275 }
4276}
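
A worked example of the decoding above (illustrative only): in a line such as "SigPnd:\t0000000000000200" the hex value has bit 9 set, and since bit N corresponds to signal N + 1, the routine marks signal 10 (SIGUSR1 on most Linux architectures) as pending. A hypothetical caller would look like this:

  sigset_t pending;

  sigemptyset (&pending);
  add_line_to_sigset ("0000000000000200\n", &pending);
  /* sigismember (&pending, 10) now returns 1.  */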
4277
4278/* Find process PID's pending signals from /proc/pid/status and set
4279 SIGS to match. */
4280
4281void
3e43a32a
MS
4282linux_proc_pending_signals (int pid, sigset_t *pending,
4283 sigset_t *blocked, sigset_t *ignored)
dba24537 4284{
d8d2a3ee 4285 char buffer[PATH_MAX], fname[PATH_MAX];
dba24537
AC
4286
4287 sigemptyset (pending);
4288 sigemptyset (blocked);
4289 sigemptyset (ignored);
cde33bf1 4290 xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
d419f42d 4291 gdb_file_up procfile = gdb_fopen_cloexec (fname, "r");
dba24537 4292 if (procfile == NULL)
8a3fe4f8 4293 error (_("Could not open %s"), fname);
dba24537 4294
d419f42d 4295 while (fgets (buffer, PATH_MAX, procfile.get ()) != NULL)
dba24537
AC
4296 {
4297 /* Normal queued signals are on the SigPnd line in the status
4298 file. However, 2.6 kernels also have a "shared" pending
4299 queue for delivering signals to a thread group, so check for
4300 a ShdPnd line also.
4301
4302 Unfortunately some Red Hat kernels include the shared pending
4303 queue but not the ShdPnd status field. */
4304
61012eef 4305 if (startswith (buffer, "SigPnd:\t"))
dba24537 4306 add_line_to_sigset (buffer + 8, pending);
61012eef 4307 else if (startswith (buffer, "ShdPnd:\t"))
dba24537 4308 add_line_to_sigset (buffer + 8, pending);
61012eef 4309 else if (startswith (buffer, "SigBlk:\t"))
dba24537 4310 add_line_to_sigset (buffer + 8, blocked);
61012eef 4311 else if (startswith (buffer, "SigIgn:\t"))
dba24537
AC
4312 add_line_to_sigset (buffer + 8, ignored);
4313 }
dba24537
AC
4314}
4315
9b409511 4316static enum target_xfer_status
f6ac5f3d 4317linux_nat_xfer_osdata (enum target_object object,
e0881a8e 4318 const char *annex, gdb_byte *readbuf,
9b409511
YQ
4319 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4320 ULONGEST *xfered_len)
07e059b5 4321{
07e059b5
VP
4322 gdb_assert (object == TARGET_OBJECT_OSDATA);
4323
9b409511
YQ
4324 *xfered_len = linux_common_xfer_osdata (annex, readbuf, offset, len);
4325 if (*xfered_len == 0)
4326 return TARGET_XFER_EOF;
4327 else
4328 return TARGET_XFER_OK;
07e059b5
VP
4329}
4330
f6ac5f3d
PA
4331std::vector<static_tracepoint_marker>
4332linux_nat_target::static_tracepoint_markers_by_strid (const char *strid)
5808517f
YQ
4333{
4334 char s[IPA_CMD_BUF_SIZE];
e99b03dc 4335 int pid = inferior_ptid.pid ();
5d9310c4 4336 std::vector<static_tracepoint_marker> markers;
256642e8 4337 const char *p = s;
184ea2f7 4338 ptid_t ptid = ptid_t (pid, 0);
5d9310c4 4339 static_tracepoint_marker marker;
5808517f
YQ
4340
4341 /* Pause all */
4342 target_stop (ptid);
4343
81aa19c3 4344 strcpy (s, "qTfSTM");
42476b70 4345 agent_run_command (pid, s, strlen (s) + 1);
5808517f 4346
1db93f14
TT
4347 /* Unpause all. */
4348 SCOPE_EXIT { target_continue_no_signal (ptid); };
5808517f
YQ
4349
4350 while (*p++ == 'm')
4351 {
5808517f
YQ
4352 do
4353 {
5d9310c4 4354 parse_static_tracepoint_marker_definition (p, &p, &marker);
5808517f 4355
5d9310c4
SM
4356 if (strid == NULL || marker.str_id == strid)
4357 markers.push_back (std::move (marker));
5808517f
YQ
4358 }
4359 while (*p++ == ','); /* comma-separated list */
4360
81aa19c3 4361 strcpy (s, "qTsSTM");
42476b70 4362 agent_run_command (pid, s, strlen (s) + 1);
5808517f
YQ
4363 p = s;
4364 }
4365
5808517f
YQ
4366 return markers;
4367}
4368
b84876c2
PA
4369/* target_can_async_p implementation. */
4370
57810aa7 4371bool
f6ac5f3d 4372linux_nat_target::can_async_p ()
b84876c2 4373{
4374 /* This flag should be checked in the common target.c code. */
4375 gdb_assert (target_async_permitted);
4376
 4377  /* Otherwise, this target is always able to support async mode.  */
4378 return true;
b84876c2
PA
4379}
4380
57810aa7 4381bool
f6ac5f3d 4382linux_nat_target::supports_non_stop ()
9908b566 4383{
f80c8ec4 4384 return true;
9908b566
VP
4385}
4386
fbea99ea
PA
4387/* to_always_non_stop_p implementation. */
4388
57810aa7 4389bool
f6ac5f3d 4390linux_nat_target::always_non_stop_p ()
fbea99ea 4391{
f80c8ec4 4392 return true;
fbea99ea
PA
4393}
4394
57810aa7 4395bool
f6ac5f3d 4396linux_nat_target::supports_multi_process ()
d90e17a7 4397{
aee91db3 4398 return true;
d90e17a7
PA
4399}
4400
57810aa7 4401bool
f6ac5f3d 4402linux_nat_target::supports_disable_randomization ()
03583c20 4403{
f80c8ec4 4404 return true;
03583c20
UW
4405}
4406
 4407/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
 4408   it notifies the event loop when any child changes state; and it
 4409   allows us to use sigsuspend in linux_nat_wait_1
4410 above to wait for the arrival of a SIGCHLD. */
4411
b84876c2 4412static void
7feb7d06 4413sigchld_handler (int signo)
b84876c2 4414{
7feb7d06
PA
4415 int old_errno = errno;
4416
01124a23 4417 if (debug_linux_nat)
da5bd37e 4418 gdb_stdlog->write_async_safe ("sigchld\n", sizeof ("sigchld\n") - 1);
7feb7d06 4419
b146ba14
JB
4420 if (signo == SIGCHLD)
4421 {
4422 /* Let the event loop know that there are events to handle. */
4423 linux_nat_target::async_file_mark_if_open ();
4424 }
7feb7d06
PA
4425
4426 errno = old_errno;
4427}
4428
4429/* Callback registered with the target events file descriptor. */
4430
4431static void
4432handle_target_event (int error, gdb_client_data client_data)
4433{
b1a35af2 4434 inferior_event_handler (INF_REG_EVENT);
7feb7d06
PA
4435}
4436
b84876c2
PA
4437/* target_async implementation. */
4438
f6ac5f3d 4439void
4a570176 4440linux_nat_target::async (bool enable)
b84876c2 4441{
4a570176 4442 if (enable == is_async_p ())
b146ba14
JB
4443 return;
4444
4445 /* Block child signals while we create/destroy the pipe, as their
4446 handler writes to it. */
4447 gdb::block_signals blocker;
4448
6a3753b3 4449 if (enable)
b84876c2 4450 {
b146ba14 4451 if (!async_file_open ())
f34652de 4452 internal_error ("creating event pipe failed.");
b146ba14
JB
4453
4454 add_file_handler (async_wait_fd (), handle_target_event, NULL,
4455 "linux-nat");
4456
4457 /* There may be pending events to handle. Tell the event loop
4458 to poll them. */
4459 async_file_mark ();
b84876c2
PA
4460 }
4461 else
4462 {
b146ba14
JB
4463 delete_file_handler (async_wait_fd ());
4464 async_file_close ();
b84876c2 4465 }
4466}
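
The async_file_* helpers used above are defined elsewhere in GDB; conceptually they provide a self-pipe style wake-up, where an async-signal-safe write from the SIGCHLD handler makes the file descriptor watched by the event loop readable. A minimal standalone sketch of that pattern follows (the names and setup below are illustrative assumptions, not GDB's actual implementation).

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static int event_pipe[2];  /* [0] is watched by the event loop.  */

static int
event_pipe_init (void)
{
  return pipe2 (event_pipe, O_CLOEXEC | O_NONBLOCK);
}

/* Async-signal-safe: make the event loop wake up.  */
static void
event_pipe_mark (void)
{
  int old_errno = errno;
  char c = '+';

  while (write (event_pipe[1], &c, 1) == -1 && errno == EINTR)
    ;
  errno = old_errno;
}

static void
example_sigchld_handler (int signo)
{
  event_pipe_mark ();
}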
4467
a493e3e2 4468/* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
252fbfc8
PA
4469 event came out. */
4470
4c28f408 4471static int
d3a70e03 4472linux_nat_stop_lwp (struct lwp_info *lwp)
4c28f408 4473{
d90e17a7 4474 if (!lwp->stopped)
252fbfc8 4475 {
9327494e 4476 linux_nat_debug_printf ("running -> suspending %s",
e53c95d4 4477 lwp->ptid.to_string ().c_str ());
252fbfc8 4478
252fbfc8 4479
25289eb2
PA
4480 if (lwp->last_resume_kind == resume_stop)
4481 {
9327494e
SM
4482 linux_nat_debug_printf ("already stopping LWP %ld at GDB's request",
4483 lwp->ptid.lwp ());
25289eb2
PA
4484 return 0;
4485 }
252fbfc8 4486
d3a70e03 4487 stop_callback (lwp);
25289eb2 4488 lwp->last_resume_kind = resume_stop;
d90e17a7
PA
4489 }
4490 else
4491 {
4492 /* Already known to be stopped; do nothing. */
252fbfc8 4493
d90e17a7
PA
4494 if (debug_linux_nat)
4495 {
9213a6d7 4496 if (linux_target->find_thread (lwp->ptid)->stop_requested)
9327494e 4497 linux_nat_debug_printf ("already stopped/stop_requested %s",
e53c95d4 4498 lwp->ptid.to_string ().c_str ());
d90e17a7 4499 else
9327494e 4500 linux_nat_debug_printf ("already stopped/no stop_requested yet %s",
e53c95d4 4501 lwp->ptid.to_string ().c_str ());
252fbfc8
PA
4502 }
4503 }
4c28f408
PA
4504 return 0;
4505}
4506
f6ac5f3d
PA
4507void
4508linux_nat_target::stop (ptid_t ptid)
4c28f408 4509{
b6e52a0b 4510 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
d3a70e03 4511 iterate_over_lwps (ptid, linux_nat_stop_lwp);
bfedc46a
PA
4512}
4513
dc146f7c
VP
4514/* Return the cached value of the processor core for thread PTID. */
4515
f6ac5f3d
PA
4516int
4517linux_nat_target::core_of_thread (ptid_t ptid)
dc146f7c
VP
4518{
4519 struct lwp_info *info = find_lwp_pid (ptid);
e0881a8e 4520
dc146f7c
VP
4521 if (info)
4522 return info->core;
4523 return -1;
4524}
4525
7a6a1731
GB
4526/* Implementation of to_filesystem_is_local. */
4527
57810aa7 4528bool
f6ac5f3d 4529linux_nat_target::filesystem_is_local ()
7a6a1731
GB
4530{
4531 struct inferior *inf = current_inferior ();
4532
4533 if (inf->fake_pid_p || inf->pid == 0)
57810aa7 4534 return true;
7a6a1731
GB
4535
4536 return linux_ns_same (inf->pid, LINUX_NS_MNT);
4537}
4538
4539/* Convert the INF argument passed to a to_fileio_* method
4540 to a process ID suitable for passing to its corresponding
4541 linux_mntns_* function. If INF is non-NULL then the
4542 caller is requesting the filesystem seen by INF. If INF
4543 is NULL then the caller is requesting the filesystem seen
 4544   by GDB.  We fall back to GDB's filesystem in the case
4545 that INF is non-NULL but its PID is unknown. */
4546
4547static pid_t
4548linux_nat_fileio_pid_of (struct inferior *inf)
4549{
4550 if (inf == NULL || inf->fake_pid_p || inf->pid == 0)
4551 return getpid ();
4552 else
4553 return inf->pid;
4554}
4555
4556/* Implementation of to_fileio_open. */
4557
f6ac5f3d
PA
4558int
4559linux_nat_target::fileio_open (struct inferior *inf, const char *filename,
4560 int flags, int mode, int warn_if_slow,
b872057a 4561 fileio_error *target_errno)
7a6a1731
GB
4562{
4563 int nat_flags;
4564 mode_t nat_mode;
4565 int fd;
4566
4567 if (fileio_to_host_openflags (flags, &nat_flags) == -1
4568 || fileio_to_host_mode (mode, &nat_mode) == -1)
4569 {
4570 *target_errno = FILEIO_EINVAL;
4571 return -1;
4572 }
4573
4574 fd = linux_mntns_open_cloexec (linux_nat_fileio_pid_of (inf),
4575 filename, nat_flags, nat_mode);
4576 if (fd == -1)
4577 *target_errno = host_to_fileio_error (errno);
4578
4579 return fd;
4580}
4581
4582/* Implementation of to_fileio_readlink. */
4583
6b09f134 4584std::optional<std::string>
f6ac5f3d 4585linux_nat_target::fileio_readlink (struct inferior *inf, const char *filename,
b872057a 4586 fileio_error *target_errno)
7a6a1731
GB
4587{
4588 char buf[PATH_MAX];
4589 int len;
7a6a1731
GB
4590
4591 len = linux_mntns_readlink (linux_nat_fileio_pid_of (inf),
4592 filename, buf, sizeof (buf));
4593 if (len < 0)
4594 {
4595 *target_errno = host_to_fileio_error (errno);
e0d3522b 4596 return {};
7a6a1731
GB
4597 }
4598
e0d3522b 4599 return std::string (buf, len);
7a6a1731
GB
4600}
4601
4602/* Implementation of to_fileio_unlink. */
4603
f6ac5f3d
PA
4604int
4605linux_nat_target::fileio_unlink (struct inferior *inf, const char *filename,
b872057a 4606 fileio_error *target_errno)
7a6a1731
GB
4607{
4608 int ret;
4609
4610 ret = linux_mntns_unlink (linux_nat_fileio_pid_of (inf),
4611 filename);
4612 if (ret == -1)
4613 *target_errno = host_to_fileio_error (errno);
4614
4615 return ret;
4616}
4617
aa01bd36
PA
4618/* Implementation of the to_thread_events method. */
4619
f6ac5f3d
PA
4620void
4621linux_nat_target::thread_events (int enable)
aa01bd36
PA
4622{
4623 report_thread_events = enable;
4624}
4625
25b16bc9
PA
4626bool
4627linux_nat_target::supports_set_thread_options (gdb_thread_options options)
4628{
a51e14ef
PA
4629 constexpr gdb_thread_options supported_options
4630 = GDB_THREAD_OPTION_CLONE | GDB_THREAD_OPTION_EXIT;
25b16bc9
PA
4631 return ((options & supported_options) == options);
4632}
4633
f6ac5f3d
PA
4634linux_nat_target::linux_nat_target ()
4635{
f973ed9c
DJ
4636 /* We don't change the stratum; this target will sit at
4637 process_stratum and thread_db will set at thread_stratum. This
4638 is a little strange, since this is a multi-threaded-capable
4639 target, but we want to be on the stack below thread_db, and we
4640 also want to be used for single-threaded processes. */
f973ed9c
DJ
4641}
4642
f865ee35
JK
4643/* See linux-nat.h. */
4644
ef632b4b 4645bool
f865ee35 4646linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
9f0bdab8 4647{
0acd1110 4648 int pid = get_ptrace_pid (ptid);
7cc662bc 4649 return ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo) == 0;
9f0bdab8
DJ
4650}
4651
7b669087
GB
4652/* See nat/linux-nat.h. */
4653
4654ptid_t
4655current_lwp_ptid (void)
4656{
15a9e13e 4657 gdb_assert (inferior_ptid.lwp_p ());
7b669087
GB
4658 return inferior_ptid;
4659}
4660
0ae5b8fa
AB
4661/* Implement 'maintenance info linux-lwps'. Displays some basic
4662 information about all the current lwp_info objects. */
4663
4664static void
4665maintenance_info_lwps (const char *arg, int from_tty)
4666{
4667 if (all_lwps ().size () == 0)
4668 {
4669 gdb_printf ("No Linux LWPs\n");
4670 return;
4671 }
4672
4673 /* Start the width at 8 to match the column heading below, then
4674 figure out the widest ptid string. We'll use this to build our
4675 output table below. */
4676 size_t ptid_width = 8;
4677 for (lwp_info *lp : all_lwps ())
4678 ptid_width = std::max (ptid_width, lp->ptid.to_string ().size ());
4679
4680 /* Setup the table headers. */
4681 struct ui_out *uiout = current_uiout;
4682 ui_out_emit_table table_emitter (uiout, 2, -1, "linux-lwps");
4683 uiout->table_header (ptid_width, ui_left, "lwp-ptid", _("LWP Ptid"));
4684 uiout->table_header (9, ui_left, "thread-info", _("Thread ID"));
4685 uiout->table_body ();
4686
4687 /* Display one table row for each lwp_info. */
4688 for (lwp_info *lp : all_lwps ())
4689 {
4690 ui_out_emit_tuple tuple_emitter (uiout, "lwp-entry");
4691
4692 thread_info *th = linux_target->find_thread (lp->ptid);
4693
4694 uiout->field_string ("lwp-ptid", lp->ptid.to_string ().c_str ());
4695 if (th == nullptr)
4696 uiout->field_string ("thread-info", "None");
4697 else
4698 uiout->field_string ("thread-info", print_full_thread_id (th));
4699
4700 uiout->message ("\n");
4701 }
4702}
4703
6c265988 4704void _initialize_linux_nat ();
d6b0e80f 4705void
6c265988 4706_initialize_linux_nat ()
d6b0e80f 4707{
8864ef42 4708 add_setshow_boolean_cmd ("linux-nat", class_maintenance,
b6e52a0b 4709 &debug_linux_nat, _("\
6a2dbb74
EZ
4710Set debugging of GNU/Linux native target."), _("\
4711Show debugging of GNU/Linux native target."), _("\
b6e52a0b
AB
4712When on, print debug messages relating to the GNU/Linux native target."),
4713 nullptr,
4714 show_debug_linux_nat,
4715 &setdebuglist, &showdebuglist);
b84876c2 4716
7a6a1731
GB
4717 add_setshow_boolean_cmd ("linux-namespaces", class_maintenance,
4718 &debug_linux_namespaces, _("\
4719Set debugging of GNU/Linux namespaces module."), _("\
4720Show debugging of GNU/Linux namespaces module."), _("\
4721Enables printf debugging output."),
4722 NULL,
4723 NULL,
4724 &setdebuglist, &showdebuglist);
4725
7feb7d06
PA
4726 /* Install a SIGCHLD handler. */
4727 sigchld_action.sa_handler = sigchld_handler;
4728 sigemptyset (&sigchld_action.sa_mask);
4729 sigchld_action.sa_flags = SA_RESTART;
b84876c2
PA
4730
4731 /* Make it the default. */
7feb7d06 4732 sigaction (SIGCHLD, &sigchld_action, NULL);
d6b0e80f
AC
4733
4734 /* Make sure we don't block SIGCHLD during a sigsuspend. */
21987b9c 4735 gdb_sigmask (SIG_SETMASK, NULL, &suspend_mask);
d6b0e80f
AC
4736 sigdelset (&suspend_mask, SIGCHLD);
4737
7feb7d06 4738 sigemptyset (&blocked_mask);
774113b0
PA
4739
4740 lwp_lwpid_htab_create ();
0ae5b8fa
AB
4741
4742 add_cmd ("linux-lwps", class_maintenance, maintenance_info_lwps,
4743 _("List the Linux LWPS."), &maintenanceinfolist);
d6b0e80f
AC
4744}
4745\f
4746
4747/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4748 the GNU/Linux Threads library and therefore doesn't really belong
4749 here. */
4750
089436f7
TV
4751/* NPTL reserves the first two RT signals, but does not provide any
4752 way for the debugger to query the signal numbers - fortunately
4753 they don't change. */
4754static int lin_thread_signals[] = { __SIGRTMIN, __SIGRTMIN + 1 };
d6b0e80f 4755
089436f7
TV
4756/* See linux-nat.h. */
4757
4758unsigned int
4759lin_thread_get_thread_signal_num (void)
d6b0e80f 4760{
089436f7
TV
4761 return sizeof (lin_thread_signals) / sizeof (lin_thread_signals[0]);
4762}
d6b0e80f 4763
089436f7
TV
4764/* See linux-nat.h. */
4765
4766int
4767lin_thread_get_thread_signal (unsigned int i)
4768{
4769 gdb_assert (i < lin_thread_get_thread_signal_num ());
4770 return lin_thread_signals[i];
d6b0e80f 4771}