/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001-2023 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "inferior.h"
#include "infrun.h"
#include "target.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdbsupport/gdb_wait.h"
#include <unistd.h>
#include <sys/syscall.h>
#include "nat/gdb_ptrace.h"
#include "linux-nat.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "gdbcmd.h"
#include "regcache.h"
#include "regset.h"
#include "inf-child.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/procfs.h>
#include "elf-bfd.h"
#include "gregset.h"
#include "gdbcore.h"
#include <ctype.h>
#include <sys/stat.h>
#include <fcntl.h>
#include "inf-loop.h"
#include "gdbsupport/event-loop.h"
#include "event-top.h"
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include "xml-support.h"
#include <sys/vfs.h>
#include "solib.h"
#include "nat/linux-osdata.h"
#include "linux-tdep.h"
#include "symfile.h"
#include "gdbsupport/agent.h"
#include "tracepoint.h"
#include "target-descriptions.h"
#include "gdbsupport/filestuff.h"
#include "objfiles.h"
#include "nat/linux-namespaces.h"
#include "gdbsupport/block-signals.h"
#include "gdbsupport/fileio.h"
#include "gdbsupport/scope-exit.h"
#include "gdbsupport/gdb-sigmask.h"
#include "gdbsupport/common-debug.h"
#include <unordered_map>

/* This comment documents high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid,
passing the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid is not quite good:

- If the thread group leader exits while other threads in the thread
  group still exist, waitpid(TGID, ...) hangs.  That waitpid won't
  return an exit status until the other threads in the group are
  reaped.

- When a non-leader thread execs, that thread just vanishes without
  reporting an exit (so we'd hang if we waited for it explicitly in
  that case).  The exec event is instead reported to the TGID pid.

The solution is to always use -1 and WNOHANG, together with
sigsuspend.

First, we use non-blocking waitpid to check for events.  If nothing is
found, we use sigsuspend to wait for SIGCHLD.  When SIGCHLD arrives,
it means something happened to a child process.  As soon as we know
there's an event, we get back to calling nonblocking waitpid.

Note that SIGCHLD should be blocked between waitpid and sigsuspend
calls, so that we don't miss a signal.  If SIGCHLD arrives in between,
when it's blocked, the signal becomes pending and sigsuspend
immediately notices it and returns.

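As a rough sketch (simplified, with error handling and the actual
event processing omitted -- illustrative only, not the literal
implementation), the sync wait loop therefore looks like:

    sigset_t prev_mask;
    int status;
    pid_t pid;

    block_child_signals (&prev_mask);
    for (;;)
      {
        pid = waitpid (-1, &status, WNOHANG | __WALL);
        if (pid > 0)
          break;
        sigsuspend (&suspend_mask);
      }
    ... process the event in STATUS for PID ...
    restore_child_signals_mask (&prev_mask);

block_child_signals, restore_child_signals_mask and suspend_mask are
defined further down in this file.
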
Waiting for events in async mode (TARGET_WNOHANG)
=================================================

In async mode, GDB should always be ready to handle both user input
and target events, so neither blocking waitpid nor sigsuspend are
viable options.  Instead, we should asynchronously notify the GDB main
event loop whenever there's an unprocessed event from the target.  We
detect asynchronous target events by handling SIGCHLD signals.  To
notify the event loop about target events, an event pipe is used
--- the pipe is registered as a waitable event source in the event loop,
the event loop select/poll's on the read end of this pipe (as well as on
other event sources, e.g., stdin), and the SIGCHLD handler marks the
event pipe to raise an event.  This is more portable than relying on
pselect/ppoll, since on kernels that lack those syscalls, libc
emulates them with select/poll+sigprocmask, and that is racy
(a.k.a. plain broken).

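In outline (again a simplified sketch, not the exact code found
elsewhere in this file), the signal handler side of this scheme is
just:

    static void
    sigchld_handler (int signo)
    {
      ... mark the event pipe, using only async-signal-safe calls ...
    }

The event loop watches the read end of that pipe; when it becomes
readable, the loop calls back into linux-nat.c, which then does the
same non-blocking waitpid processing as in sync mode.
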
Obviously, if we fail to notify the event loop when there's a target
event, it's bad.  OTOH, if we notify the event loop when there's no
event from the target, linux_nat_wait will detect that there's no real
event to report, and return an event of type TARGET_WAITKIND_IGNORE.
This is mostly harmless, but it will waste time and is better avoided.

The main design point is that every time GDB is outside linux-nat.c,
we have a SIGCHLD handler installed that is called when something
happens to the target and notifies the GDB event loop.  Whenever GDB
core decides to handle the event, and calls into linux-nat.c, we
process things as in sync mode, except that we never block in
sigsuspend.

While processing an event, we may end up momentarily blocked in
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
return quickly.  E.g., in all-stop mode, before reporting to the core
that an LWP hit a breakpoint, all LWPs are stopped by sending them
SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
Note that this is different from blocking indefinitely waiting for the
next event --- here, we're already handling an event.

Use of signals
==============

We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of another
signal is not entirely significant; we just need a signal to be delivered,
so that we can intercept it.  SIGSTOP's advantage is that it cannot be
blocked.  A disadvantage is that it is not a real-time signal, so it can only
be queued once; we do not keep track of other sources of SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL.  But we can't
use them, because they have special behavior when the signal is generated -
not when it is delivered.  SIGCONT resumes the entire thread group and SIGKILL
kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the thread we
tkill'd.  But we never let the SIGSTOP be delivered; we always intercept and
cancel it (by PTRACE_CONT without passing SIGSTOP).

We could use a real-time signal instead.  This would solve those problems; we
could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
generates it, and there are races with trying to find a signal that is not
blocked.

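Concretely (a simplified sketch -- the real code lives in
stop_callback, stop_wait_callback and friends below), stopping one LWP
and later resuming it without it ever seeing the SIGSTOP goes roughly
like this:

    kill_lwp (lwpid, SIGSTOP);
    my_waitpid (lwpid, &status, __WALL);    collects the stop report
    ... inspect the now-stopped LWP ...
    ptrace (PTRACE_CONT, lwpid, 0, 0);      resume, not passing SIGSTOP
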
Exec events
===========

The case of a thread group (process) with 3 or more threads, where a
thread other than the leader execs, is worth detailing:

On an exec, the Linux kernel destroys all threads except the execing
one in the thread group, and resets the execing thread's tid to the
tgid.  No exit notification is sent for the execing thread -- from the
ptracer's perspective, it appears as though the execing thread just
vanishes.  Until we reap all other threads except the leader and the
execing thread, the leader will be zombie, and the execing thread will
be in `D (disc sleep)' state.  As soon as all other threads are
reaped, the execing thread changes its tid to the tgid, and the
previous (zombie) leader vanishes, giving place to the "new"
leader.  */

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

struct linux_nat_target *linux_target;

/* Does the current host support PTRACE_GETREGSET?  */
enum tribool have_ptrace_getregset = TRIBOOL_UNKNOWN;

/* When true, print debug messages relating to the linux native target.  */

static bool debug_linux_nat;

/* Implement 'show debug linux-nat'.  */

static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
                      struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Debugging of GNU/Linux native targets is %s.\n"),
              value);
}

/* Print a linux-nat debug statement.  */

#define linux_nat_debug_printf(fmt, ...) \
  debug_prefixed_printf_cond (debug_linux_nat, "linux-nat", fmt, ##__VA_ARGS__)

/* Print "linux-nat" enter/exit debug statements.  */

#define LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT \
  scoped_debug_enter_exit (debug_linux_nat, "linux-nat")

struct simple_pid_list
{
  int pid;
  int status;
  struct simple_pid_list *next;
};
static struct simple_pid_list *stopped_pids;

/* Whether target_thread_events is in effect.  */
static int report_thread_events;

static int kill_lwp (int lwpid, int signo);

static int stop_callback (struct lwp_info *lp);

static void block_child_signals (sigset_t *prev_mask);
static void restore_child_signals_mask (sigset_t *prev_mask);

struct lwp_info;
static struct lwp_info *add_lwp (ptid_t ptid);
static void purge_lwp_list (int pid);
static void delete_lwp (ptid_t ptid);
static struct lwp_info *find_lwp_pid (ptid_t ptid);

static int lwp_status_pending_p (struct lwp_info *lp);

static void save_stop_reason (struct lwp_info *lp);

static bool proc_mem_file_is_writable ();
static void close_proc_mem_file (pid_t pid);
static void open_proc_mem_file (ptid_t ptid);

/* Return TRUE if LWP is the leader thread of the process.  */

static bool
is_leader (lwp_info *lp)
{
  return lp->ptid.pid () == lp->ptid.lwp ();
}

/* Convert an LWP's pending status to a std::string.  */

static std::string
pending_status_str (lwp_info *lp)
{
  gdb_assert (lwp_status_pending_p (lp));

  if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
    return lp->waitstatus.to_string ();
  else
    return status_to_str (lp->status);
}

/* Return true if we should report exit events for LP.  */

static bool
report_exit_events_for (lwp_info *lp)
{
  thread_info *thr = linux_target->find_thread (lp->ptid);
  gdb_assert (thr != nullptr);

  return (report_thread_events
          || (thr->thread_options () & GDB_THREAD_OPTION_EXIT) != 0);
}

\f
/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return lwp->ptid;
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
                           struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->step;
}

\f
/* Trivial list manipulation functions to keep track of a list of
   new stopped processes.  */
static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}

/* Return the ptrace options that we want to try to enable.  */

static int
linux_nat_ptrace_options (int attached)
{
  int options = 0;

  if (!attached)
    options |= PTRACE_O_EXITKILL;

  options |= (PTRACE_O_TRACESYSGOOD
              | PTRACE_O_TRACEVFORKDONE
              | PTRACE_O_TRACEVFORK
              | PTRACE_O_TRACEFORK
              | PTRACE_O_TRACEEXEC);

  return options;
}

/* Initialize ptrace and procfs warnings and check for supported
   ptrace features given PID.

   ATTACHED should be nonzero iff we attached to the inferior.  */

static void
linux_init_ptrace_procfs (pid_t pid, int attached)
{
  int options = linux_nat_ptrace_options (attached);

  linux_enable_event_reporting (pid, options);
  linux_ptrace_init_warnings ();
  linux_proc_init_warnings ();
  proc_mem_file_is_writable ();
}

linux_nat_target::~linux_nat_target ()
{}

void
linux_nat_target::post_attach (int pid)
{
  linux_init_ptrace_procfs (pid, 1);
}

/* Implement the virtual inf_ptrace_target::post_startup_inferior method.  */

void
linux_nat_target::post_startup_inferior (ptid_t ptid)
{
  linux_init_ptrace_procfs (ptid.pid (), 0);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;

  for (const lwp_info *lp ATTRIBUTE_UNUSED : all_lwps ())
    if (lp->ptid.pid () == pid)
      count++;

  return count;
}

/* Deleter for lwp_info unique_ptr specialisation.  */

struct lwp_deleter
{
  void operator() (struct lwp_info *lwp) const
  {
    delete_lwp (lwp->ptid);
  }
};

/* A unique_ptr specialisation for lwp_info.  */

typedef std::unique_ptr<struct lwp_info, lwp_deleter> lwp_info_up;

/* Target hook for follow_fork.  */

void
linux_nat_target::follow_fork (inferior *child_inf, ptid_t child_ptid,
                               target_waitkind fork_kind, bool follow_child,
                               bool detach_fork)
{
  inf_ptrace_target::follow_fork (child_inf, child_ptid, fork_kind,
                                  follow_child, detach_fork);

  if (!follow_child)
    {
      bool has_vforked = fork_kind == TARGET_WAITKIND_VFORKED;
      ptid_t parent_ptid = inferior_ptid;
      int parent_pid = parent_ptid.lwp ();
      int child_pid = child_ptid.lwp ();

      /* We're already attached to the parent, by default.  */
      lwp_info *child_lp = add_lwp (child_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;

      /* Detach new forked process?  */
      if (detach_fork)
        {
          int child_stop_signal = 0;
          bool detach_child = true;

          /* Move CHILD_LP into a unique_ptr and clear the source pointer
             to prevent us doing anything stupid with it.  */
          lwp_info_up child_lp_ptr (child_lp);
          child_lp = nullptr;

          linux_target->low_prepare_to_resume (child_lp_ptr.get ());

          /* When debugging an inferior in an architecture that supports
             hardware single stepping on a kernel without commit
             6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
             process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
             set if the parent process had them set.
             To work around this, single step the child process
             once before detaching to clear the flags.  */

          /* Note that we consult the parent's architecture instead of
             the child's because there's no inferior for the child at
             this point.  */
          if (!gdbarch_software_single_step_p (target_thread_architecture
                                               (parent_ptid)))
            {
              int status;

              linux_disable_event_reporting (child_pid);
              if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
                perror_with_name (_("Couldn't do single step"));
              if (my_waitpid (child_pid, &status, 0) < 0)
                perror_with_name (_("Couldn't wait vfork process"));
              else
                {
                  detach_child = WIFSTOPPED (status);
                  child_stop_signal = WSTOPSIG (status);
                }
            }

          if (detach_child)
            {
              int signo = child_stop_signal;

              if (signo != 0
                  && !signal_pass_state (gdb_signal_from_host (signo)))
                signo = 0;
              ptrace (PTRACE_DETACH, child_pid, 0, signo);

              close_proc_mem_file (child_pid);
            }
        }

      if (has_vforked)
        {
          lwp_info *parent_lp = find_lwp_pid (parent_ptid);
          linux_nat_debug_printf ("waiting for VFORK_DONE on %d", parent_pid);
          parent_lp->stopped = 1;

          /* We'll handle the VFORK_DONE event like any other
             event, in target_wait.  */
        }
    }
  else
    {
      struct lwp_info *child_lp;

      child_lp = add_lwp (child_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;
    }
}

\f
int
linux_nat_target::insert_fork_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::remove_fork_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::insert_vfork_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::remove_vfork_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::insert_exec_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::remove_exec_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::set_syscall_catchpoint (int pid, bool needed, int any_count,
                                          gdb::array_view<const int> syscall_counts)
{
  /* On GNU/Linux, we ignore the arguments.  It means that we only
     enable the syscall catchpoints, but do not disable them.

     Also, we do not use the `syscall_counts' information because we do not
     filter system calls here.  We let GDB do the logic for us.  */
  return 0;
}

/* List of known LWPs, keyed by LWP PID.  This speeds up the common
   case of mapping a PID returned from the kernel to our corresponding
   lwp_info data structure.  */
static htab_t lwp_lwpid_htab;

/* Calculate a hash from a lwp_info's LWP PID.  */

static hashval_t
lwp_info_hash (const void *ap)
{
  const struct lwp_info *lp = (struct lwp_info *) ap;
  pid_t pid = lp->ptid.lwp ();

  return iterative_hash_object (pid, 0);
}

/* Equality function for the lwp_info hash table.  Compares the LWP's
   PID.  */

static int
lwp_lwpid_htab_eq (const void *a, const void *b)
{
  const struct lwp_info *entry = (const struct lwp_info *) a;
  const struct lwp_info *element = (const struct lwp_info *) b;

  return entry->ptid.lwp () == element->ptid.lwp ();
}

/* Create the lwp_lwpid_htab hash table.  */

static void
lwp_lwpid_htab_create (void)
{
  lwp_lwpid_htab = htab_create (100, lwp_info_hash, lwp_lwpid_htab_eq, NULL);
}

/* Add LP to the hash table.  */

static void
lwp_lwpid_htab_add_lwp (struct lwp_info *lp)
{
  void **slot;

  slot = htab_find_slot (lwp_lwpid_htab, lp, INSERT);
  gdb_assert (slot != NULL && *slot == NULL);
  *slot = lp;
}

/* Head of doubly-linked list of known LWPs.  Sorted by reverse
   creation order.  This order is assumed in some cases.  E.g.,
   reaping status after killing all lwps of a process: the leader LWP
   must be reaped last.  */

static intrusive_list<lwp_info> lwp_list;

/* See linux-nat.h.  */

lwp_info_range
all_lwps ()
{
  return lwp_info_range (lwp_list.begin ());
}

/* See linux-nat.h.  */

lwp_info_safe_range
all_lwps_safe ()
{
  return lwp_info_safe_range (lwp_list.begin ());
}

/* Add LP to sorted-by-reverse-creation-order doubly-linked list.  */

static void
lwp_list_add (struct lwp_info *lp)
{
  lwp_list.push_front (*lp);
}

/* Remove LP from sorted-by-reverse-creation-order doubly-linked
   list.  */

static void
lwp_list_remove (struct lwp_info *lp)
{
  /* Remove from sorted-by-creation-order list.  */
  lwp_list.erase (lwp_list.iterator_to (*lp));
}

\f

/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
   _initialize_linux_nat.  */
static sigset_t suspend_mask;

/* Signals to block to make sigsuspend work.  */
static sigset_t blocked_mask;

/* SIGCHLD action.  */
static struct sigaction sigchld_action;

/* Block child signals (SIGCHLD and linux threads signals), and store
   the previous mask in PREV_MASK.  */

static void
block_child_signals (sigset_t *prev_mask)
{
  /* Make sure SIGCHLD is blocked.  */
  if (!sigismember (&blocked_mask, SIGCHLD))
    sigaddset (&blocked_mask, SIGCHLD);

  gdb_sigmask (SIG_BLOCK, &blocked_mask, prev_mask);
}

/* Restore child signals mask, previously returned by
   block_child_signals.  */

static void
restore_child_signals_mask (sigset_t *prev_mask)
{
  gdb_sigmask (SIG_SETMASK, prev_mask, NULL);
}

/* Mask of signals to pass directly to the inferior.  */
static sigset_t pass_mask;

/* Update signals to pass to the inferior.  */
void
linux_nat_target::pass_signals
  (gdb::array_view<const unsigned char> pass_signals)
{
  int signo;

  sigemptyset (&pass_mask);

  for (signo = 1; signo < NSIG; signo++)
    {
      int target_signo = gdb_signal_from_host (signo);
      if (target_signo < pass_signals.size () && pass_signals[target_signo])
        sigaddset (&pass_mask, signo);
    }
}

\f

/* Prototypes for local functions.  */
static int stop_wait_callback (struct lwp_info *lp);
static int resume_stopped_resumed_lwps (struct lwp_info *lp, const ptid_t wait_ptid);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);

\f

/* Destroy and free LP.  */

lwp_info::~lwp_info ()
{
  /* Let the arch specific bits release arch_lwp_info.  */
  linux_target->low_delete_thread (this->arch_private);
}

/* Traversal function for purge_lwp_list.  */

static int
lwp_lwpid_htab_remove_pid (void **slot, void *info)
{
  struct lwp_info *lp = (struct lwp_info *) *slot;
  int pid = *(int *) info;

  if (lp->ptid.pid () == pid)
    {
      htab_clear_slot (lwp_lwpid_htab, slot);
      lwp_list_remove (lp);
      delete lp;
    }

  return 1;
}

/* Remove all LWPs belonging to PID from the lwp list.  */

static void
purge_lwp_list (int pid)
{
  htab_traverse_noresize (lwp_lwpid_htab, lwp_lwpid_htab_remove_pid, &pid);
}

/* Add the LWP specified by PTID to the list.  PTID is the first LWP
   in the process.  Return a pointer to the structure describing the
   new LWP.

   This differs from add_lwp in that we don't let the arch specific
   bits know about this new thread.  Current clients of this callback
   take the opportunity to install watchpoints in the new thread, and
   we shouldn't do that for the first thread.  If we're spawning a
   child ("run"), the thread executes the shell wrapper first, and we
   shouldn't touch it until it execs the program we want to debug.
   For "attach", it'd be okay to call the callback, but it's not
   necessary, because watchpoints can't yet have been inserted into
   the inferior.  */

static struct lwp_info *
add_initial_lwp (ptid_t ptid)
{
  gdb_assert (ptid.lwp_p ());

  lwp_info *lp = new lwp_info (ptid);


  /* Add to sorted-by-reverse-creation-order list.  */
  lwp_list_add (lp);

  /* Add to keyed-by-pid htab.  */
  lwp_lwpid_htab_add_lwp (lp);

  return lp;
}

/* Add the LWP specified by PID to the list.  Return a pointer to the
   structure describing the new LWP.  The LWP should already be
   stopped.  */

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  lp = add_initial_lwp (ptid);

  /* Let the arch specific bits know about this new thread.  Current
     clients of this callback take the opportunity to install
     watchpoints in the new thread.  We don't do this for the first
     thread though.  See add_initial_lwp.  */
  linux_target->low_new_thread (lp);

  return lp;
}

/* Remove the LWP specified by PID from the list.  */

static void
delete_lwp (ptid_t ptid)
{
  lwp_info dummy (ptid);

  void **slot = htab_find_slot (lwp_lwpid_htab, &dummy, NO_INSERT);
  if (slot == NULL)
    return;

  lwp_info *lp = *(struct lwp_info **) slot;
  gdb_assert (lp != NULL);

  htab_clear_slot (lwp_lwpid_htab, slot);

  /* Remove from sorted-by-creation-order list.  */
  lwp_list_remove (lp);

  /* Release.  */
  delete lp;
}

/* Return a pointer to the structure describing the LWP corresponding
   to PID.  If no corresponding LWP could be found, return NULL.  */

static struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  int lwp;

  if (ptid.lwp_p ())
    lwp = ptid.lwp ();
  else
    lwp = ptid.pid ();

  lwp_info dummy (ptid_t (0, lwp));
  return (struct lwp_info *) htab_find (lwp_lwpid_htab, &dummy);
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
                   gdb::function_view<iterate_over_lwps_ftype> callback)
{
  for (lwp_info *lp : all_lwps_safe ())
    {
      if (lp->ptid.matches (filter))
        {
          if (callback (lp) != 0)
            return lp;
        }
    }

  return NULL;
}

/* Update our internal state when changing from one checkpoint to
   another indicated by NEW_PTID.  We can only switch single-threaded
   applications, so we only create one new LWP, and the previous list
   is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  purge_lwp_list (inferior_ptid.pid ());

  lp = add_lwp (new_ptid);
  lp->stopped = 1;

  /* This changes the thread's ptid while preserving the gdb thread
     num.  Also changes the inferior pid, while preserving the
     inferior num.  */
  thread_change_ptid (linux_target, inferior_ptid, new_ptid);

  /* We've just told GDB core that the thread changed target id, but,
     in fact, it really is a different thread, with different register
     contents.  */
  registers_changed ();
}

/* Handle the exit of a single thread LP.  If DEL_THREAD is true,
   delete the thread_info associated to LP, if it exists.  */

static void
exit_lwp (struct lwp_info *lp, bool del_thread = true)
{
  struct thread_info *th = linux_target->find_thread (lp->ptid);

  if (th != nullptr && del_thread)
    delete_thread (th);

  delete_lwp (lp->ptid);
}

/* Wait for the LWP specified by LP, which we have just attached to.
   Returns a wait status for that LWP, to cache.  */

static int
linux_nat_post_attach_wait (ptid_t ptid, int *signalled)
{
  pid_t new_pid, pid = ptid.lwp ();
  int status;

  if (linux_proc_pid_is_stopped (pid))
    {
      linux_nat_debug_printf ("Attaching to a stopped process");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
         is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the SIGSTOP
         (or a higher priority signal, just like normal PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, __WALL);
  gdb_assert (pid == new_pid);

  if (!WIFSTOPPED (status))
    {
      /* The pid we tried to attach has apparently just exited.  */
      linux_nat_debug_printf ("Failed to stop %d: %s", pid,
                              status_to_str (status).c_str ());
      return status;
    }

  if (WSTOPSIG (status) != SIGSTOP)
    {
      *signalled = 1;
      linux_nat_debug_printf ("Received %s after attaching",
                              status_to_str (status).c_str ());
    }

  return status;
}

void
linux_nat_target::create_inferior (const char *exec_file,
                                   const std::string &allargs,
                                   char **env, int from_tty)
{
  maybe_disable_address_space_randomization restore_personality
    (disable_randomization);

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

  /* Make sure we report all signals during startup.  */
  pass_signals ({});

  inf_ptrace_target::create_inferior (exec_file, allargs, env, from_tty);

  open_proc_mem_file (inferior_ptid);
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  struct lwp_info *lp;

  /* Ignore LWPs we're already attached to.  */
  lp = find_lwp_pid (ptid);
  if (lp == NULL)
    {
      int lwpid = ptid.lwp ();

      if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
        {
          int err = errno;

          /* Be quiet if we simply raced with the thread exiting.
             EPERM is returned if the thread's task still exists, and
             is marked as exited or zombie, as well as other
             conditions, so in that case, confirm the status in
             /proc/PID/status.  */
          if (err == ESRCH
              || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
            {
              linux_nat_debug_printf
                ("Cannot attach to lwp %d: thread is gone (%d: %s)",
                 lwpid, err, safe_strerror (err));

            }
          else
            {
              std::string reason
                = linux_ptrace_attach_fail_reason_string (ptid, err);

              warning (_("Cannot attach to lwp %d: %s"),
                       lwpid, reason.c_str ());
            }
        }
      else
        {
          linux_nat_debug_printf ("PTRACE_ATTACH %s, 0, 0 (OK)",
                                  ptid.to_string ().c_str ());

          lp = add_lwp (ptid);

          /* The next time we wait for this LWP we'll see a SIGSTOP as
             PTRACE_ATTACH brings it to a halt.  */
          lp->signalled = 1;

          /* We need to wait for a stop before being able to make the
             next ptrace call on this LWP.  */
          lp->must_set_ptrace_flags = 1;

          /* So that wait collects the SIGSTOP.  */
          lp->resumed = 1;

          /* Also add the LWP to gdb's thread list, in case a
             matching libthread_db is not found (or the process uses
             raw clone).  */
          add_thread (linux_target, lp->ptid);
          set_running (linux_target, lp->ptid, true);
          set_executing (linux_target, lp->ptid, true);
        }

      return 1;
    }
  return 0;
}

void
linux_nat_target::attach (const char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;
  ptid_t ptid;

  /* Make sure we report all signals during attach.  */
  pass_signals ({});

  try
    {
      inf_ptrace_target::attach (args, from_tty);
    }
  catch (const gdb_exception_error &ex)
    {
      pid_t pid = parse_pid_to_attach (args);
      std::string reason = linux_ptrace_attach_fail_reason (pid);

      if (!reason.empty ())
        throw_error (ex.error, "warning: %s\n%s", reason.c_str (),
                     ex.what ());
      else
        throw_error (ex.error, "%s", ex.what ());
    }

  /* The ptrace base target adds the main thread with (pid,0,0)
     format.  Decorate it with lwp info.  */
  ptid = ptid_t (inferior_ptid.pid (),
                 inferior_ptid.pid ());
  thread_change_ptid (linux_target, inferior_ptid, ptid);

  /* Add the initial process as the first LWP to the list.  */
  lp = add_initial_lwp (ptid);

  status = linux_nat_post_attach_wait (lp->ptid, &lp->signalled);
  if (!WIFSTOPPED (status))
    {
      if (WIFEXITED (status))
        {
          int exit_code = WEXITSTATUS (status);

          target_terminal::ours ();
          target_mourn_inferior (inferior_ptid);
          if (exit_code == 0)
            error (_("Unable to attach: program exited normally."));
          else
            error (_("Unable to attach: program exited with code %d."),
                   exit_code);
        }
      else if (WIFSIGNALED (status))
        {
          enum gdb_signal signo;

          target_terminal::ours ();
          target_mourn_inferior (inferior_ptid);

          signo = gdb_signal_from_host (WTERMSIG (status));
          error (_("Unable to attach: program terminated with signal "
                   "%s, %s."),
                 gdb_signal_to_name (signo),
                 gdb_signal_to_string (signo));
        }

      internal_error (_("unexpected status %d for PID %ld"),
                      status, (long) ptid.lwp ());
    }

  lp->stopped = 1;

  open_proc_mem_file (lp->ptid);

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  linux_nat_debug_printf ("waitpid %ld, saving status %s",
                          (long) lp->ptid.pid (),
                          status_to_str (status).c_str ());

  lp->status = status;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  The inferior may be using raw clone instead of
     using pthreads.  But even if it is using pthreads, thread_db
     walks structures in the inferior's address space to find the list
     of threads/LWPs, and those structures may well be corrupted.
     Note that once thread_db is loaded, we'll still use it to list
     threads and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (lp->ptid.pid (),
                                  attach_proc_task_lwp_callback);
}

/* Ptrace-detach the thread with pid PID.  */

static void
detach_one_pid (int pid, int signo)
{
  if (ptrace (PTRACE_DETACH, pid, 0, signo) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
         zombie.  This can happen if one of the already-detached
         threads exits the whole thread group.  In that case we're
         still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
        {
          int ret, status;

          ret = my_waitpid (pid, &status, __WALL);
          if (ret == -1)
            {
              warning (_("Couldn't reap LWP %d while detaching: %s"),
                       pid, safe_strerror (errno));
            }
          else if (!WIFEXITED (status) && !WIFSIGNALED (status))
            {
              warning (_("Reaping LWP %d while detaching "
                         "returned unexpected status 0x%x"),
                       pid, status);
            }
        }
      else
        error (_("Can't detach %d: %s"),
               pid, safe_strerror (save_errno));
    }
  else
    linux_nat_debug_printf ("PTRACE_DETACH (%d, %s, 0) (OK)",
                            pid, strsignal (signo));
}

/* Get pending signal of THREAD as a host signal number, for detaching
   purposes.  This is the signal the thread last stopped for, which we
   need to deliver to the thread when detaching, otherwise, it'd be
   suppressed/lost.  */

static int
get_detach_signal (struct lwp_info *lp)
{
  enum gdb_signal signo = GDB_SIGNAL_0;

  /* If we paused threads momentarily, we may have stored pending
     events in lp->status or lp->waitstatus (see stop_wait_callback),
     and GDB core hasn't seen any signal for those threads.
     Otherwise, the last signal reported to the core is found in the
     thread object's stop_signal.

     There's a corner case that isn't handled here at present.  Only
     if the thread stopped with a TARGET_WAITKIND_STOPPED does
     stop_signal make sense as a real signal to pass to the inferior.
     Some catchpoint related events, like
     TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
     to GDB_SIGNAL_SIGTRAP when the catchpoint triggers.  But,
     those traps are debug API (ptrace in our case) related and
     induced; the inferior wouldn't see them if it wasn't being
     traced.  Hence, we should never pass them to the inferior, even
     when set to pass state.  Since this corner case isn't handled by
     infrun.c when proceeding with a signal, for consistency, neither
     do we handle it here (or elsewhere in the file we check for
     signal pass state).  Normally SIGTRAP isn't set to pass state, so
     this is really a corner case.  */

  if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
    signo = GDB_SIGNAL_0;	/* a pending ptrace event, not a real signal.  */
  else if (lp->status)
    signo = gdb_signal_from_host (WSTOPSIG (lp->status));
  else
    {
      thread_info *tp = linux_target->find_thread (lp->ptid);

      if (target_is_non_stop_p () && !tp->executing ())
        {
          if (tp->has_pending_waitstatus ())
            {
              /* If the thread has a pending event, and it was stopped with a
                 signal, use that signal to resume it.  If it has a pending
                 event of another kind, it was not stopped with a signal, so
                 resume it without a signal.  */
              if (tp->pending_waitstatus ().kind () == TARGET_WAITKIND_STOPPED)
                signo = tp->pending_waitstatus ().sig ();
              else
                signo = GDB_SIGNAL_0;
            }
          else
            signo = tp->stop_signal ();
        }
      else if (!target_is_non_stop_p ())
        {
          ptid_t last_ptid;
          process_stratum_target *last_target;

          get_last_target_status (&last_target, &last_ptid, nullptr);

          if (last_target == linux_target
              && lp->ptid.lwp () == last_ptid.lwp ())
            signo = tp->stop_signal ();
        }
    }

  if (signo == GDB_SIGNAL_0)
    {
      linux_nat_debug_printf ("lwp %s has no pending signal",
                              lp->ptid.to_string ().c_str ());
    }
  else if (!signal_pass_state (signo))
    {
      linux_nat_debug_printf
        ("lwp %s had signal %s but it is in no pass state",
         lp->ptid.to_string ().c_str (), gdb_signal_to_string (signo));
    }
  else
    {
      linux_nat_debug_printf ("lwp %s has pending signal %s",
                              lp->ptid.to_string ().c_str (),
                              gdb_signal_to_string (signo));

      return gdb_signal_to_host (signo);
    }

  return 0;
}

/* If LP has a pending fork/vfork/clone status, return it.  */

static std::optional<target_waitstatus>
get_pending_child_status (lwp_info *lp)
{
  LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;

  linux_nat_debug_printf ("lwp %s (stopped = %d)",
                          lp->ptid.to_string ().c_str (), lp->stopped);

  /* Check in lwp_info::status.  */
  if (WIFSTOPPED (lp->status) && linux_is_extended_waitstatus (lp->status))
    {
      int event = linux_ptrace_get_extended_event (lp->status);

      if (event == PTRACE_EVENT_FORK
          || event == PTRACE_EVENT_VFORK
          || event == PTRACE_EVENT_CLONE)
        {
          unsigned long child_pid;
          int ret = ptrace (PTRACE_GETEVENTMSG, lp->ptid.lwp (), 0, &child_pid);
          if (ret == 0)
            {
              target_waitstatus ws;

              if (event == PTRACE_EVENT_FORK)
                ws.set_forked (ptid_t (child_pid, child_pid));
              else if (event == PTRACE_EVENT_VFORK)
                ws.set_vforked (ptid_t (child_pid, child_pid));
              else if (event == PTRACE_EVENT_CLONE)
                ws.set_thread_cloned (ptid_t (lp->ptid.pid (), child_pid));
              else
                gdb_assert_not_reached ("unhandled");

              return ws;
            }
          else
            {
              perror_warning_with_name (_("Failed to retrieve event msg"));
              return {};
            }
        }
    }

  /* Check in lwp_info::waitstatus.  */
  if (is_new_child_status (lp->waitstatus.kind ()))
    return lp->waitstatus;

  thread_info *tp = linux_target->find_thread (lp->ptid);

  /* Check in thread_info::pending_waitstatus.  */
  if (tp->has_pending_waitstatus ()
      && is_new_child_status (tp->pending_waitstatus ().kind ()))
    return tp->pending_waitstatus ();

  /* Check in thread_info::pending_follow.  */
  if (is_new_child_status (tp->pending_follow.kind ()))
    return tp->pending_follow;

  return {};
}

/* Detach from LP.  If SIGNO_P is non-NULL, then it points to the
   signal number that should be passed to the LWP when detaching.
   Otherwise pass any pending signal the LWP may have, if any.  */

static void
detach_one_lwp (struct lwp_info *lp, int *signo_p)
{
  int lwpid = lp->ptid.lwp ();
  int signo;

  /* If the lwp/thread we are about to detach has a pending fork/clone
     event, there is a process/thread GDB is attached to that the core
     of GDB doesn't know about.  Detach from it.  */

  std::optional<target_waitstatus> ws = get_pending_child_status (lp);
  if (ws.has_value ())
    detach_one_pid (ws->child_ptid ().lwp (), 0);

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lp->signalled)
    {
      linux_nat_debug_printf ("Sending SIGCONT to %s",
                              lp->ptid.to_string ().c_str ());

      kill_lwp (lwpid, SIGCONT);
      lp->signalled = 0;
    }

  if (signo_p == NULL)
    {
      /* Pass on any pending signal for this LWP.  */
      signo = get_detach_signal (lp);
    }
  else
    signo = *signo_p;

  linux_nat_debug_printf ("preparing to resume lwp %s (stopped = %d)",
                          lp->ptid.to_string ().c_str (),
                          lp->stopped);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      linux_target->low_prepare_to_resume (lp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lp))
        throw;
    }

  detach_one_pid (lwpid, signo);

  delete_lwp (lp->ptid);
}

static int
detach_callback (struct lwp_info *lp)
{
  /* We don't actually detach from the thread group leader just yet.
     If the thread group exits, we must reap the zombie clone lwps
     before we're able to reap the leader.  */
  if (lp->ptid.lwp () != lp->ptid.pid ())
    detach_one_lwp (lp, NULL);
  return 0;
}

void
linux_nat_target::detach (inferior *inf, int from_tty)
{
  LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;

  struct lwp_info *main_lwp;
  int pid = inf->pid;

  /* Don't unregister from the event loop, as there may be other
     inferiors running.  */

  /* Stop all threads before detaching.  ptrace requires that the
     thread is stopped to successfully detach.  */
  iterate_over_lwps (ptid_t (pid), stop_callback);
  /* ... and wait until all of them have reported back that
     they're no longer running.  */
  iterate_over_lwps (ptid_t (pid), stop_wait_callback);

  /* We can now safely remove breakpoints.  We don't do this earlier
     in common code because this target doesn't currently support
     writing memory while the inferior is running.  */
  remove_breakpoints_inf (current_inferior ());

  iterate_over_lwps (ptid_t (pid), detach_callback);

  /* We have detached from everything except the main thread now, so
     should only have one thread left.  However, in non-stop mode the
     main thread might have exited, in which case we'll have no threads
     left.  */
  gdb_assert (num_lwps (pid) == 1
              || (target_is_non_stop_p () && num_lwps (pid) == 0));

  if (forks_exist_p ())
    {
      /* Multi-fork case.  The current inferior_ptid is being detached
         from, but there are other viable forks to debug.  Detach from
         the current fork, and context-switch to the first
         available.  */
      linux_fork_detach (from_tty);
    }
  else
    {
      target_announce_detach (from_tty);

      /* In non-stop mode it is possible that the main thread has exited,
         in which case we don't try to detach.  */
      main_lwp = find_lwp_pid (ptid_t (pid));
      if (main_lwp != nullptr)
        {
          /* Pass on any pending signal for the last LWP.  */
          int signo = get_detach_signal (main_lwp);

          detach_one_lwp (main_lwp, &signo);
        }
      else
        gdb_assert (target_is_non_stop_p ());

      detach_success (inf);
    }

  close_proc_mem_file (pid);
}

/* Resume execution of the inferior process.  If STEP is nonzero,
   single-step it.  If SIGNAL is nonzero, give it that signal.  */

static void
linux_resume_one_lwp_throw (struct lwp_info *lp, int step,
                            enum gdb_signal signo)
{
  lp->step = step;

  /* stop_pc doubles as the PC the LWP had when it was last resumed.
     We only presently need that if the LWP is stepped though (to
     handle the case of stepping a breakpoint instruction).  */
  if (step)
    {
      struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);

      lp->stop_pc = regcache_read_pc (regcache);
    }
  else
    lp->stop_pc = 0;

  linux_target->low_prepare_to_resume (lp);
  linux_target->low_resume (lp->ptid, step, signo);

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lp->stopped = 0;
  lp->core = -1;
  lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
  registers_changed_ptid (linux_target, lp->ptid);
}

/* Called when we try to resume a stopped LWP and that errors out.  If
   the LWP is no longer in ptrace-stopped state (meaning it's zombie,
   or about to become), discard the error, clear any pending status
   the LWP may have, and return true (we'll collect the exit status
   soon enough).  Otherwise, return false.  */

static int
check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
{
  /* If we get an error after resuming the LWP successfully, we'd
     confuse !T state for the LWP being gone.  */
  gdb_assert (lp->stopped);

  /* We can't just check whether the LWP is in 'Z (Zombie)' state,
     because even if ptrace failed with ESRCH, the tracee may be "not
     yet fully dead", but already refusing ptrace requests.  In that
     case the tracee has 'R (Running)' state for a little bit
     (observed in Linux 3.18).  See also the note on ESRCH in the
     ptrace(2) man page.  Instead, check whether the LWP has any state
     other than ptrace-stopped.  */

  /* Don't assume anything if /proc/PID/status can't be read.  */
  if (linux_proc_pid_is_trace_stopped_nowarn (lp->ptid.lwp ()) == 0)
    {
      lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
      lp->status = 0;
      lp->waitstatus.set_ignore ();
      return 1;
    }
  return 0;
}

/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
   disappears while we try to resume it.  */

static void
linux_resume_one_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
{
  try
    {
      linux_resume_one_lwp_throw (lp, step, signo);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lp))
        throw;
    }
}

/* Resume LP.  */

static void
resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
{
  if (lp->stopped)
    {
      struct inferior *inf = find_inferior_ptid (linux_target, lp->ptid);

      if (inf->vfork_child != NULL)
        {
          linux_nat_debug_printf ("Not resuming sibling %s (vfork parent)",
                                  lp->ptid.to_string ().c_str ());
        }
      else if (!lwp_status_pending_p (lp))
        {
          linux_nat_debug_printf ("Resuming sibling %s, %s, %s",
                                  lp->ptid.to_string ().c_str (),
                                  (signo != GDB_SIGNAL_0
                                   ? strsignal (gdb_signal_to_host (signo))
                                   : "0"),
                                  step ? "step" : "resume");

          linux_resume_one_lwp (lp, step, signo);
        }
      else
        {
          linux_nat_debug_printf ("Not resuming sibling %s (has pending)",
                                  lp->ptid.to_string ().c_str ());
        }
    }
  else
    linux_nat_debug_printf ("Not resuming sibling %s (not stopped)",
                            lp->ptid.to_string ().c_str ());
}

/* Callback for iterate_over_lwps.  If LWP is EXCEPT, do nothing.
   Resume LWP with the last stop signal, if it is in pass state.  */

static int
linux_nat_resume_callback (struct lwp_info *lp, struct lwp_info *except)
{
  enum gdb_signal signo = GDB_SIGNAL_0;

  if (lp == except)
    return 0;

  if (lp->stopped)
    {
      struct thread_info *thread;

      thread = linux_target->find_thread (lp->ptid);
      if (thread != NULL)
        {
          signo = thread->stop_signal ();
          thread->set_stop_signal (GDB_SIGNAL_0);
        }
    }

  resume_lwp (lp, 0, signo);
  return 0;
}

static int
resume_clear_callback (struct lwp_info *lp)
{
  lp->resumed = 0;
  lp->last_resume_kind = resume_stop;
  return 0;
}

static int
resume_set_callback (struct lwp_info *lp)
{
  lp->resumed = 1;
  lp->last_resume_kind = resume_continue;
  return 0;
}

void
linux_nat_target::resume (ptid_t scope_ptid, int step, enum gdb_signal signo)
{
  struct lwp_info *lp;

  linux_nat_debug_printf ("Preparing to %s %s, %s, inferior_ptid %s",
                          step ? "step" : "resume",
                          scope_ptid.to_string ().c_str (),
                          (signo != GDB_SIGNAL_0
                           ? strsignal (gdb_signal_to_host (signo)) : "0"),
                          inferior_ptid.to_string ().c_str ());

  /* Mark the lwps we're resuming as resumed and update their
     last_resume_kind to resume_continue.  */
  iterate_over_lwps (scope_ptid, resume_set_callback);

  lp = find_lwp_pid (inferior_ptid);
  gdb_assert (lp != NULL);

  /* Remember if we're stepping.  */
  lp->last_resume_kind = step ? resume_step : resume_continue;

  /* If we have a pending wait status for this thread, there is no
     point in resuming the process.  But first make sure that
     linux_nat_wait won't preemptively handle the event - we
     should never take this short-circuit if we are going to
     leave LP running, since we have skipped resuming all the
     other threads.  This bit of code needs to be synchronized
     with linux_nat_wait.  */

  if (lp->status && WIFSTOPPED (lp->status))
    {
      if (!lp->step
          && WSTOPSIG (lp->status)
          && sigismember (&pass_mask, WSTOPSIG (lp->status)))
        {
          linux_nat_debug_printf
            ("Not short circuiting for ignored status 0x%x", lp->status);

          /* FIXME: What should we do if we are supposed to continue
             this thread with a signal?  */
          gdb_assert (signo == GDB_SIGNAL_0);
          signo = gdb_signal_from_host (WSTOPSIG (lp->status));
          lp->status = 0;
        }
    }

  if (lwp_status_pending_p (lp))
    {
      /* FIXME: What should we do if we are supposed to continue
         this thread with a signal?  */
      gdb_assert (signo == GDB_SIGNAL_0);

      linux_nat_debug_printf ("Short circuiting for status %s",
                              pending_status_str (lp).c_str ());

      if (target_can_async_p ())
        {
          target_async (true);
          /* Tell the event loop we have something to process.  */
          async_file_mark ();
        }
      return;
    }

  /* No use iterating unless we're resuming other threads.  */
  if (scope_ptid != lp->ptid)
    iterate_over_lwps (scope_ptid, [=] (struct lwp_info *info)
      {
        return linux_nat_resume_callback (info, lp);
      });

  linux_nat_debug_printf ("%s %s, %s (resume event thread)",
                          step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
                          lp->ptid.to_string ().c_str (),
                          (signo != GDB_SIGNAL_0
                           ? strsignal (gdb_signal_to_host (signo)) : "0"));

  linux_resume_one_lwp (lp, step, signo);
}

/* Send a signal to an LWP.  */

static int
kill_lwp (int lwpid, int signo)
{
  int ret;

  errno = 0;
  ret = syscall (__NR_tkill, lwpid, signo);
  if (errno == ENOSYS)
    {
      /* If tkill fails, then we are not using nptl threads, a
         configuration we no longer support.  */
      perror_with_name (("tkill"));
    }
  return ret;
}

/* Handle a GNU/Linux syscall trap wait response.  If we see a syscall
   event, check if the core is interested in it: if not, ignore the
1754 event, check if the core is interested in it: if not, ignore the
1755 event, and keep waiting; otherwise, we need to toggle the LWP's
1756 syscall entry/exit status, since the ptrace event itself doesn't
1757 indicate it, and report the trap to higher layers. */
1758
1759static int
1760linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
1761{
1762 struct target_waitstatus *ourstatus = &lp->waitstatus;
1763 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
9213a6d7 1764 thread_info *thread = linux_target->find_thread (lp->ptid);
00431a78 1765 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, thread);
ca2163eb
PA
1766
1767 if (stopping)
1768 {
1769 /* If we're stopping threads, there's a SIGSTOP pending, which
1770 makes it so that the LWP reports an immediate syscall return,
1771 followed by the SIGSTOP. Skip seeing that "return" using
1772 PTRACE_CONT directly, and let stop_wait_callback collect the
 1773 SIGSTOP. Later, when the thread is resumed, a new syscall
 1774 entry event is reported. If we didn't do this (and returned 0), we'd
1775 leave a syscall entry pending, and our caller, by using
1776 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
1777 itself. Later, when the user re-resumes this LWP, we'd see
1778 another syscall entry event and we'd mistake it for a return.
1779
1780 If stop_wait_callback didn't force the SIGSTOP out of the LWP
1781 (leaving immediately with LWP->signalled set, without issuing
1782 a PTRACE_CONT), it would still be problematic to leave this
1783 syscall enter pending, as later when the thread is resumed,
1784 it would then see the same syscall exit mentioned above,
1785 followed by the delayed SIGSTOP, while the syscall didn't
1786 actually get to execute. It seems it would be even more
1787 confusing to the user. */
1788
9327494e
SM
1789 linux_nat_debug_printf
1790 ("ignoring syscall %d for LWP %ld (stopping threads), resuming with "
1791 "PTRACE_CONT for SIGSTOP", syscall_number, lp->ptid.lwp ());
ca2163eb
PA
1792
1793 lp->syscall_state = TARGET_WAITKIND_IGNORE;
e38504b3 1794 ptrace (PTRACE_CONT, lp->ptid.lwp (), 0, 0);
8817a6f2 1795 lp->stopped = 0;
ca2163eb
PA
1796 return 1;
1797 }
1798
bfd09d20
JS
1799 /* Always update the entry/return state, even if this particular
1800 syscall isn't interesting to the core now. In async mode,
1801 the user could install a new catchpoint for this syscall
1802 between syscall enter/return, and we'll need to know to
1803 report a syscall return if that happens. */
1804 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1805 ? TARGET_WAITKIND_SYSCALL_RETURN
1806 : TARGET_WAITKIND_SYSCALL_ENTRY);
1807
ca2163eb
PA
1808 if (catch_syscall_enabled ())
1809 {
ca2163eb
PA
1810 if (catching_syscall_number (syscall_number))
1811 {
1812 /* Alright, an event to report. */
183be222
SM
1813 if (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
1814 ourstatus->set_syscall_entry (syscall_number);
1815 else if (lp->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
1816 ourstatus->set_syscall_return (syscall_number);
1817 else
1818 gdb_assert_not_reached ("unexpected syscall state");
ca2163eb 1819
9327494e
SM
1820 linux_nat_debug_printf
1821 ("stopping for %s of syscall %d for LWP %ld",
1822 (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1823 ? "entry" : "return"), syscall_number, lp->ptid.lwp ());
1824
ca2163eb
PA
1825 return 0;
1826 }
1827
9327494e
SM
1828 linux_nat_debug_printf
1829 ("ignoring %s of syscall %d for LWP %ld",
1830 (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1831 ? "entry" : "return"), syscall_number, lp->ptid.lwp ());
ca2163eb
PA
1832 }
1833 else
1834 {
1835 /* If we had been syscall tracing, and hence used PT_SYSCALL
1836 before on this LWP, it could happen that the user removes all
1837 syscall catchpoints before we get to process this event.
1838 There are two noteworthy issues here:
1839
1840 - When stopped at a syscall entry event, resuming with
1841 PT_STEP still resumes executing the syscall and reports a
1842 syscall return.
1843
 1844 - Only PT_SYSCALL catches syscall enters. If we last
 1845 single-stepped this thread, then this event can't be a
 1846 syscall enter; it has to be a syscall exit.
1848
1849 The points above mean that the next resume, be it PT_STEP or
 1850 PT_CONTINUE, cannot trigger a syscall trace event. */
9327494e
SM
1851 linux_nat_debug_printf
1852 ("caught syscall event with no syscall catchpoints. %d for LWP %ld, "
1853 "ignoring", syscall_number, lp->ptid.lwp ());
ca2163eb
PA
1854 lp->syscall_state = TARGET_WAITKIND_IGNORE;
1855 }
1856
1857 /* The core isn't interested in this event. For efficiency, avoid
1858 stopping all threads only to have the core resume them all again.
1859 Since we're not stopping threads, if we're still syscall tracing
1860 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
1861 subsequent syscall. Simply resume using the inf-ptrace layer,
1862 which knows when to use PT_SYSCALL or PT_CONTINUE. */
1863
8a99810d 1864 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
ca2163eb
PA
1865 return 1;
1866}
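
/* A minimal, self-contained sketch (not GDB code) of the
   PTRACE_O_TRACESYSGOOD convention that the syscall handling above
   relies on: with that option set, syscall stops report
   WSTOPSIG (status) == (SIGTRAP | 0x80), which is how a tracer tells
   them apart from ordinary SIGTRAPs.  Whether a given stop is an
   entry or an exit is not encoded in the event and must be tracked by
   the tracer, as linux_handle_syscall_trap does.  The child program
   ("/bin/true") and the missing error handling are simplifications
   for illustration only.  */

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int
main (void)
{
  pid_t child = fork ();

  if (child == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);
      raise (SIGSTOP);		/* Let the parent set options first.  */
      execl ("/bin/true", "true", (char *) 0);
      _exit (127);
    }

  int status;
  waitpid (child, &status, 0);	/* Initial SIGSTOP stop.  */
  ptrace (PTRACE_SETOPTIONS, child, 0, PTRACE_O_TRACESYSGOOD);

  for (;;)
    {
      ptrace (PTRACE_SYSCALL, child, 0, 0);	/* Run to the next syscall stop.  */
      waitpid (child, &status, 0);
      if (WIFEXITED (status))
	break;
      if (WIFSTOPPED (status) && WSTOPSIG (status) == (SIGTRAP | 0x80))
	printf ("syscall entry or exit stop\n");
    }

  return 0;
}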
1867
0d36baa9
PA
1868/* See target.h. */
1869
1870void
1871linux_nat_target::follow_clone (ptid_t child_ptid)
1872{
1873 lwp_info *new_lp = add_lwp (child_ptid);
1874 new_lp->stopped = 1;
1875
1876 /* If the thread_db layer is active, let it record the user
1877 level thread id and status, and add the thread to GDB's
1878 list. */
1879 if (!thread_db_notice_clone (inferior_ptid, new_lp->ptid))
1880 {
1881 /* The process is not using thread_db. Add the LWP to
1882 GDB's list. */
1883 add_thread (linux_target, new_lp->ptid);
1884 }
1885
1886 /* We just created NEW_LP so it cannot yet contain STATUS. */
1887 gdb_assert (new_lp->status == 0);
1888
1889 if (!pull_pid_from_list (&stopped_pids, child_ptid.lwp (), &new_lp->status))
1890 internal_error (_("no saved status for clone lwp"));
1891
1892 if (WSTOPSIG (new_lp->status) != SIGSTOP)
1893 {
 1894 /* This can happen if someone starts sending signals with a
 1895 lower number than SIGSTOP (e.g. SIGUSR1) to the new thread
 1896 before it gets a chance to run.
1897 This is an unlikely case, and harder to handle for
1898 fork / vfork than for clone, so we do not try - but
1899 we handle it for clone events here. */
1900
1901 new_lp->signalled = 1;
1902
1903 /* Save the wait status to report later. */
1904 linux_nat_debug_printf
1905 ("waitpid of new LWP %ld, saving status %s",
1906 (long) new_lp->ptid.lwp (), status_to_str (new_lp->status).c_str ());
1907 }
1908 else
1909 {
1910 new_lp->status = 0;
1911
1912 if (report_thread_events)
1913 new_lp->waitstatus.set_thread_created ();
1914 }
1915}
1916
3d799a95
DJ
1917/* Handle a GNU/Linux extended wait response. If we see a clone
1918 event, we need to add the new LWP to our list (and not report the
1919 trap to higher layers). This function returns non-zero if the
1920 event should be ignored and we should wait again. If STOPPING is
1921 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
1922
1923static int
4dd63d48 1924linux_handle_extended_wait (struct lwp_info *lp, int status)
d6b0e80f 1925{
e38504b3 1926 int pid = lp->ptid.lwp ();
3d799a95 1927 struct target_waitstatus *ourstatus = &lp->waitstatus;
89a5711c 1928 int event = linux_ptrace_get_extended_event (status);
d6b0e80f 1929
bfd09d20
JS
1930 /* All extended events we currently use are mid-syscall. Only
1931 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
1932 you have to be using PTRACE_SEIZE to get that. */
1933 lp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
1934
3d799a95
DJ
1935 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1936 || event == PTRACE_EVENT_CLONE)
d6b0e80f 1937 {
3d799a95
DJ
1938 unsigned long new_pid;
1939 int ret;
1940
1941 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 1942
3d799a95
DJ
1943 /* If we haven't already seen the new PID stop, wait for it now. */
1944 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1945 {
1946 /* The new child has a pending SIGSTOP. We can't affect it until it
1947 hits the SIGSTOP, but we're already attached. */
4a6ed09b 1948 ret = my_waitpid (new_pid, &status, __WALL);
3d799a95
DJ
1949 if (ret == -1)
1950 perror_with_name (_("waiting for new child"));
1951 else if (ret != new_pid)
f34652de 1952 internal_error (_("wait returned unexpected PID %d"), ret);
3d799a95 1953 else if (!WIFSTOPPED (status))
f34652de 1954 internal_error (_("wait returned unexpected status 0x%x"), status);
3d799a95
DJ
1955 }
1956
26cb8b7c
PA
1957 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
1958 {
0d36baa9 1959 open_proc_mem_file (ptid_t (new_pid, new_pid));
8a89ddbd 1960
26cb8b7c
PA
1961 /* The arch-specific native code may need to know about new
1962 forks even if those end up never mapped to an
1963 inferior. */
135340af 1964 linux_target->low_new_fork (lp, new_pid);
26cb8b7c 1965 }
1310c1b0
PFC
1966 else if (event == PTRACE_EVENT_CLONE)
1967 {
1968 linux_target->low_new_clone (lp, new_pid);
1969 }
26cb8b7c 1970
2277426b 1971 if (event == PTRACE_EVENT_FORK
e99b03dc 1972 && linux_fork_checkpointing_p (lp->ptid.pid ()))
2277426b 1973 {
2277426b
PA
1974 /* Handle checkpointing by linux-fork.c here as a special
1975 case. We don't want the follow-fork-mode or 'catch fork'
1976 to interfere with this. */
1977
1978 /* This won't actually modify the breakpoint list, but will
1979 physically remove the breakpoints from the child. */
184ea2f7 1980 detach_breakpoints (ptid_t (new_pid, new_pid));
2277426b
PA
1981
1982 /* Retain child fork in ptrace (stopped) state. */
14571dad
MS
1983 if (!find_fork_pid (new_pid))
1984 add_fork (new_pid);
2277426b
PA
1985
1986 /* Report as spurious, so that infrun doesn't want to follow
1987 this fork. We're actually doing an infcall in
1988 linux-fork.c. */
183be222 1989 ourstatus->set_spurious ();
2277426b
PA
1990
1991 /* Report the stop to the core. */
1992 return 0;
1993 }
1994
3d799a95 1995 if (event == PTRACE_EVENT_FORK)
0d36baa9 1996 ourstatus->set_forked (ptid_t (new_pid, new_pid));
3d799a95 1997 else if (event == PTRACE_EVENT_VFORK)
0d36baa9 1998 ourstatus->set_vforked (ptid_t (new_pid, new_pid));
4dd63d48 1999 else if (event == PTRACE_EVENT_CLONE)
3d799a95 2000 {
9327494e
SM
2001 linux_nat_debug_printf
2002 ("Got clone event from LWP %d, new child is LWP %ld", pid, new_pid);
3c4d7e12 2003
0d36baa9
PA
2004 /* Save the status again, we'll use it in follow_clone. */
2005 add_to_pid_list (&stopped_pids, new_pid, status);
4dd63d48 2006
0d36baa9 2007 ourstatus->set_thread_cloned (ptid_t (lp->ptid.pid (), new_pid));
3d799a95
DJ
2008 }
2009
2010 return 0;
d6b0e80f
AC
2011 }
2012
3d799a95
DJ
2013 if (event == PTRACE_EVENT_EXEC)
2014 {
9327494e 2015 linux_nat_debug_printf ("Got exec event from LWP %ld", lp->ptid.lwp ());
a75724bc 2016
8a89ddbd
PA
2017 /* Close the previous /proc/PID/mem file for this inferior,
2018 which was using the address space which is now gone.
2019 Reading/writing from this file would return 0/EOF. */
2020 close_proc_mem_file (lp->ptid.pid ());
2021
2022 /* Open a new file for the new address space. */
2023 open_proc_mem_file (lp->ptid);
05c06f31 2024
183be222
SM
2025 ourstatus->set_execd
2026 (make_unique_xstrdup (linux_proc_pid_to_exec_file (pid)));
3d799a95 2027
8af756ef
PA
2028 /* The thread that execed must have been resumed, but, when a
2029 thread execs, it changes its tid to the tgid, and the old
 2030 tgid thread might not have been resumed. */
2031 lp->resumed = 1;
6a534f85
PA
2032
2033 /* All other LWPs are gone now. We'll have received a thread
 2034 exit notification for all threads other than the execing one.
2035 That one, if it wasn't the leader, just silently changes its
2036 tid to the tgid, and the previous leader vanishes. Since
2037 Linux 3.0, the former thread ID can be retrieved with
2038 PTRACE_GETEVENTMSG, but since we support older kernels, don't
2039 bother with it, and just walk the LWP list. Even with
2040 PTRACE_GETEVENTMSG, we'd still need to lookup the
2041 corresponding LWP object, and it would be an extra ptrace
2042 syscall, so this way may even be more efficient. */
2043 for (lwp_info *other_lp : all_lwps_safe ())
2044 if (other_lp != lp && other_lp->ptid.pid () == lp->ptid.pid ())
2045 exit_lwp (other_lp);
2046
6c95b8df
PA
2047 return 0;
2048 }
2049
2050 if (event == PTRACE_EVENT_VFORK_DONE)
2051 {
9327494e 2052 linux_nat_debug_printf
5a0c4a06
SM
2053 ("Got PTRACE_EVENT_VFORK_DONE from LWP %ld",
2054 lp->ptid.lwp ());
2055 ourstatus->set_vfork_done ();
2056 return 0;
3d799a95
DJ
2057 }
2058
f34652de 2059 internal_error (_("unknown ptrace event %d"), event);
d6b0e80f
AC
2060}
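
/* A short sketch (not GDB code) of how the fork/vfork/clone events
   handled above are encoded in a waitpid status once the
   PTRACE_O_TRACEFORK / PTRACE_O_TRACEVFORK / PTRACE_O_TRACECLONE
   options are set: the event code lives in bits 16..23 of the status,
   and the new child's LWP id is fetched separately with
   PTRACE_GETEVENTMSG.  Error handling is omitted, and the helper name
   is made up for the example.  */

#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

/* Decode STATUS for a stop reported by PID.  Return the new child's
   id for fork/vfork/clone events, or 0 if this is not one of them.  */

pid_t
extended_event_child (pid_t pid, int status)
{
  if (!WIFSTOPPED (status) || WSTOPSIG (status) != SIGTRAP)
    return 0;

  int event = (status >> 16) & 0xff;

  if (event == PTRACE_EVENT_FORK
      || event == PTRACE_EVENT_VFORK
      || event == PTRACE_EVENT_CLONE)
    {
      unsigned long msg = 0;

      ptrace (PTRACE_GETEVENTMSG, pid, 0, &msg);
      return (pid_t) msg;
    }

  return 0;
}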
2061
9c3a5d93
PA
2062/* Suspend waiting for a signal. We're mostly interested in
2063 SIGCHLD/SIGINT. */
2064
2065static void
2066wait_for_signal ()
2067{
9327494e 2068 linux_nat_debug_printf ("about to sigsuspend");
9c3a5d93
PA
2069 sigsuspend (&suspend_mask);
2070
2071 /* If the quit flag is set, it means that the user pressed Ctrl-C
2072 and we're debugging a process that is running on a separate
2073 terminal, so we must forward the Ctrl-C to the inferior. (If the
2074 inferior is sharing GDB's terminal, then the Ctrl-C reaches the
2075 inferior directly.) We must do this here because functions that
2076 need to block waiting for a signal loop forever until there's an
 2077 event to report, before returning to the event loop. */
2078 if (!target_terminal::is_ours ())
2079 {
2080 if (check_quit_flag ())
2081 target_pass_ctrlc ();
2082 }
2083}
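
/* A generic POSIX sketch (not GDB's block_child_signals/suspend_mask
   machinery) of the pattern wait_for_signal above depends on: SIGCHLD
   is blocked before checking for work, and sigsuspend atomically
   unblocks it while sleeping, so a SIGCHLD arriving between the check
   and the sleep cannot be lost.  The got_sigchld flag and the child
   that simply exits are assumptions made for the example.  */

#include <signal.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t got_sigchld;

static void
sigchld_handler (int sig)
{
  (void) sig;
  got_sigchld = 1;
}

int
main (void)
{
  struct sigaction sa;
  sigset_t block, prev;

  memset (&sa, 0, sizeof sa);
  sa.sa_handler = sigchld_handler;
  sigaction (SIGCHLD, &sa, NULL);

  sigemptyset (&block);
  sigaddset (&block, SIGCHLD);
  sigprocmask (SIG_BLOCK, &block, &prev);	/* Block before creating work.  */

  if (fork () == 0)
    _exit (0);					/* Child exits; parent gets SIGCHLD.  */

  while (!got_sigchld)
    sigsuspend (&prev);				/* Atomically unblock and sleep.  */

  return 0;
}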
2084
d6b0e80f
AC
2085/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2086 exited. */
2087
2088static int
2089wait_lwp (struct lwp_info *lp)
2090{
2091 pid_t pid;
432b4d03 2092 int status = 0;
d6b0e80f 2093 int thread_dead = 0;
432b4d03 2094 sigset_t prev_mask;
d6b0e80f
AC
2095
2096 gdb_assert (!lp->stopped);
2097 gdb_assert (lp->status == 0);
2098
432b4d03
JK
2099 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2100 block_child_signals (&prev_mask);
2101
2102 for (;;)
d6b0e80f 2103 {
e38504b3 2104 pid = my_waitpid (lp->ptid.lwp (), &status, __WALL | WNOHANG);
a9f4bb21
PA
2105 if (pid == -1 && errno == ECHILD)
2106 {
2107 /* The thread has previously exited. We need to delete it
4a6ed09b
PA
2108 now because if this was a non-leader thread execing, we
2109 won't get an exit event. See comments on exec events at
2110 the top of the file. */
a9f4bb21 2111 thread_dead = 1;
9327494e 2112 linux_nat_debug_printf ("%s vanished.",
e53c95d4 2113 lp->ptid.to_string ().c_str ());
a9f4bb21 2114 }
432b4d03
JK
2115 if (pid != 0)
2116 break;
2117
2118 /* Bugs 10970, 12702.
2119 Thread group leader may have exited in which case we'll lock up in
2120 waitpid if there are other threads, even if they are all zombies too.
2121 Basically, we're not supposed to use waitpid this way.
4a6ed09b
PA
2122 tkill(pid,0) cannot be used here as it gets ESRCH for both
 2123 zombie and running processes.
432b4d03
JK
2124
2125 As a workaround, check if we're waiting for the thread group leader and
2126 if it's a zombie, and avoid calling waitpid if it is.
2127
 2128 This is racy: what if the thread group leader becomes a zombie right after we check?
 2129 Therefore always use WNOHANG with sigsuspend - it is equivalent to
5f572dec 2130 a blocking waitpid, but linux_proc_pid_is_zombie is safe this way. */
432b4d03 2131
e38504b3
TT
2132 if (lp->ptid.pid () == lp->ptid.lwp ()
2133 && linux_proc_pid_is_zombie (lp->ptid.lwp ()))
d6b0e80f 2134 {
d6b0e80f 2135 thread_dead = 1;
9327494e 2136 linux_nat_debug_printf ("Thread group leader %s vanished.",
e53c95d4 2137 lp->ptid.to_string ().c_str ());
432b4d03 2138 break;
d6b0e80f 2139 }
432b4d03
JK
2140
2141 /* Wait for next SIGCHLD and try again. This may let SIGCHLD handlers
 2142 get invoked even though our caller intentionally blocked them with
 2143 block_child_signals. This only matters for the loop in
 2144 linux_nat_wait_1, and there, if we get called, my_waitpid gets called
 2145 again before we reach sigsuspend, so we can safely let the handlers
 2146 run here. */
9c3a5d93 2147 wait_for_signal ();
432b4d03
JK
2148 }
2149
2150 restore_child_signals_mask (&prev_mask);
2151
d6b0e80f
AC
2152 if (!thread_dead)
2153 {
e38504b3 2154 gdb_assert (pid == lp->ptid.lwp ());
d6b0e80f 2155
9327494e 2156 linux_nat_debug_printf ("waitpid %s received %s",
e53c95d4 2157 lp->ptid.to_string ().c_str (),
8d06918f 2158 status_to_str (status).c_str ());
d6b0e80f 2159
a9f4bb21
PA
2160 /* Check if the thread has exited. */
2161 if (WIFEXITED (status) || WIFSIGNALED (status))
2162 {
a51e14ef 2163 if (report_exit_events_for (lp) || is_leader (lp))
69dde7dc 2164 {
9327494e 2165 linux_nat_debug_printf ("LWP %d exited.", lp->ptid.pid ());
69dde7dc 2166
aa01bd36 2167 /* If this is the leader exiting, it means the whole
69dde7dc
PA
2168 process is gone. Store the status to report to the
2169 core. Store it in lp->waitstatus, because lp->status
2170 would be ambiguous (W_EXITCODE(0,0) == 0). */
7509b829 2171 lp->waitstatus = host_status_to_waitstatus (status);
69dde7dc
PA
2172 return 0;
2173 }
2174
a9f4bb21 2175 thread_dead = 1;
9327494e 2176 linux_nat_debug_printf ("%s exited.",
e53c95d4 2177 lp->ptid.to_string ().c_str ());
a9f4bb21 2178 }
d6b0e80f
AC
2179 }
2180
2181 if (thread_dead)
2182 {
e26af52f 2183 exit_lwp (lp);
d6b0e80f
AC
2184 return 0;
2185 }
2186
2187 gdb_assert (WIFSTOPPED (status));
8817a6f2 2188 lp->stopped = 1;
d6b0e80f 2189
8784d563
PA
2190 if (lp->must_set_ptrace_flags)
2191 {
5b6d1e4f 2192 inferior *inf = find_inferior_pid (linux_target, lp->ptid.pid ());
de0d863e 2193 int options = linux_nat_ptrace_options (inf->attach_flag);
8784d563 2194
e38504b3 2195 linux_enable_event_reporting (lp->ptid.lwp (), options);
8784d563
PA
2196 lp->must_set_ptrace_flags = 0;
2197 }
2198
ca2163eb
PA
2199 /* Handle GNU/Linux's syscall SIGTRAPs. */
2200 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2201 {
2202 /* No longer need the sysgood bit. The ptrace event ends up
2203 recorded in lp->waitstatus if we care for it. We can carry
2204 on handling the event like a regular SIGTRAP from here
2205 on. */
2206 status = W_STOPCODE (SIGTRAP);
2207 if (linux_handle_syscall_trap (lp, 1))
2208 return wait_lwp (lp);
2209 }
bfd09d20
JS
2210 else
2211 {
2212 /* Almost all other ptrace-stops are known to be outside of system
2213 calls, with further exceptions in linux_handle_extended_wait. */
2214 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2215 }
ca2163eb 2216
d6b0e80f 2217 /* Handle GNU/Linux's extended waitstatus for trace events. */
89a5711c
DB
2218 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2219 && linux_is_extended_waitstatus (status))
d6b0e80f 2220 {
9327494e 2221 linux_nat_debug_printf ("Handling extended status 0x%06x", status);
4dd63d48 2222 linux_handle_extended_wait (lp, status);
20ba1ce6 2223 return 0;
d6b0e80f
AC
2224 }
2225
2226 return status;
2227}
2228
2229/* Send a SIGSTOP to LP. */
2230
2231static int
d3a70e03 2232stop_callback (struct lwp_info *lp)
d6b0e80f
AC
2233{
2234 if (!lp->stopped && !lp->signalled)
2235 {
2236 int ret;
2237
9327494e 2238 linux_nat_debug_printf ("kill %s **<SIGSTOP>**",
e53c95d4 2239 lp->ptid.to_string ().c_str ());
9327494e 2240
d6b0e80f 2241 errno = 0;
e38504b3 2242 ret = kill_lwp (lp->ptid.lwp (), SIGSTOP);
9327494e 2243 linux_nat_debug_printf ("lwp kill %d %s", ret,
d6b0e80f 2244 errno ? safe_strerror (errno) : "ERRNO-OK");
d6b0e80f
AC
2245
2246 lp->signalled = 1;
2247 gdb_assert (lp->status == 0);
2248 }
2249
2250 return 0;
2251}
2252
7b50312a
PA
2253/* Request a stop on LWP. */
2254
2255void
2256linux_stop_lwp (struct lwp_info *lwp)
2257{
d3a70e03 2258 stop_callback (lwp);
7b50312a
PA
2259}
2260
2db9a427
PA
2261/* See linux-nat.h */
2262
2263void
2264linux_stop_and_wait_all_lwps (void)
2265{
2266 /* Stop all LWP's ... */
d3a70e03 2267 iterate_over_lwps (minus_one_ptid, stop_callback);
2db9a427
PA
2268
2269 /* ... and wait until all of them have reported back that
2270 they're no longer running. */
d3a70e03 2271 iterate_over_lwps (minus_one_ptid, stop_wait_callback);
2db9a427
PA
2272}
2273
2274/* See linux-nat.h */
2275
2276void
2277linux_unstop_all_lwps (void)
2278{
2279 iterate_over_lwps (minus_one_ptid,
d3a70e03
TT
2280 [] (struct lwp_info *info)
2281 {
2282 return resume_stopped_resumed_lwps (info, minus_one_ptid);
2283 });
2db9a427
PA
2284}
2285
57380f4e 2286/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2287
2288static int
57380f4e
DJ
2289linux_nat_has_pending_sigint (int pid)
2290{
2291 sigset_t pending, blocked, ignored;
57380f4e
DJ
2292
2293 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2294
2295 if (sigismember (&pending, SIGINT)
2296 && !sigismember (&ignored, SIGINT))
2297 return 1;
2298
2299 return 0;
2300}
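
/* A rough standalone sketch of where the pending/ignored masks used
   above come from.  This is not the linux_proc_pending_signals
   implementation; it only illustrates that /proc/PID/status exposes
   the per-thread and shared pending sets and the ignored set as
   hexadecimal masks in its SigPnd/ShdPnd/SigIgn lines, with bit N-1
   standing for signal N (SIGINT is 2 on Linux).  */

#include <stdio.h>
#include <string.h>

/* Return 1 if /proc/PID/status reports SIGINT pending (thread or
   shared queue) and not ignored, 0 otherwise.  */

int
proc_sigint_pending (int pid)
{
  char path[64], line[128];
  unsigned long long pending = 0, shared = 0, ignored = 0;
  FILE *f;

  snprintf (path, sizeof path, "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;

  while (fgets (line, sizeof line, f) != NULL)
    {
      if (strncmp (line, "SigPnd:", 7) == 0)
	sscanf (line + 7, "%llx", &pending);
      else if (strncmp (line, "ShdPnd:", 7) == 0)
	sscanf (line + 7, "%llx", &shared);
      else if (strncmp (line, "SigIgn:", 7) == 0)
	sscanf (line + 7, "%llx", &ignored);
    }
  fclose (f);

  unsigned long long sigint_bit = 1ULL << (2 - 1);	/* SIGINT == 2.  */
  return ((pending | shared) & sigint_bit) != 0
	 && (ignored & sigint_bit) == 0;
}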
2301
2302/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2303
2304static int
d3a70e03 2305set_ignore_sigint (struct lwp_info *lp)
d6b0e80f 2306{
57380f4e
DJ
2307 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2308 flag to consume the next one. */
2309 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2310 && WSTOPSIG (lp->status) == SIGINT)
2311 lp->status = 0;
2312 else
2313 lp->ignore_sigint = 1;
2314
2315 return 0;
2316}
2317
2318/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2319 This function is called after we know the LWP has stopped; if the LWP
2320 stopped before the expected SIGINT was delivered, then it will never have
2321 arrived. Also, if the signal was delivered to a shared queue and consumed
2322 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2323
57380f4e
DJ
2324static void
2325maybe_clear_ignore_sigint (struct lwp_info *lp)
2326{
2327 if (!lp->ignore_sigint)
2328 return;
2329
e38504b3 2330 if (!linux_nat_has_pending_sigint (lp->ptid.lwp ()))
57380f4e 2331 {
9327494e 2332 linux_nat_debug_printf ("Clearing bogus flag for %s",
e53c95d4 2333 lp->ptid.to_string ().c_str ());
57380f4e
DJ
2334 lp->ignore_sigint = 0;
2335 }
2336}
2337
ebec9a0f
PA
2338/* Fetch the possible triggered data watchpoint info and store it in
2339 LP.
2340
2341 On some archs, like x86, that use debug registers to set
2342 watchpoints, it's possible that the way to know which watched
2343 address trapped, is to check the register that is used to select
2344 which address to watch. Problem is, between setting the watchpoint
2345 and reading back which data address trapped, the user may change
2346 the set of watchpoints, and, as a consequence, GDB changes the
2347 debug registers in the inferior. To avoid reading back a stale
2348 stopped-data-address when that happens, we cache in LP the fact
2349 that a watchpoint trapped, and the corresponding data address, as
2350 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2351 registers meanwhile, we have the cached data we can rely on. */
2352
9c02b525
PA
2353static int
2354check_stopped_by_watchpoint (struct lwp_info *lp)
ebec9a0f 2355{
2989a365 2356 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
ebec9a0f
PA
2357 inferior_ptid = lp->ptid;
2358
f6ac5f3d 2359 if (linux_target->low_stopped_by_watchpoint ())
ebec9a0f 2360 {
15c66dd6 2361 lp->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
f6ac5f3d
PA
2362 lp->stopped_data_address_p
2363 = linux_target->low_stopped_data_address (&lp->stopped_data_address);
ebec9a0f
PA
2364 }
2365
15c66dd6 2366 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
9c02b525
PA
2367}
2368
9c02b525 2369/* Returns true if the LWP had stopped for a watchpoint. */
ebec9a0f 2370
57810aa7 2371bool
f6ac5f3d 2372linux_nat_target::stopped_by_watchpoint ()
ebec9a0f
PA
2373{
2374 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2375
2376 gdb_assert (lp != NULL);
2377
15c66dd6 2378 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
ebec9a0f
PA
2379}
2380
57810aa7 2381bool
f6ac5f3d 2382linux_nat_target::stopped_data_address (CORE_ADDR *addr_p)
ebec9a0f
PA
2383{
2384 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2385
2386 gdb_assert (lp != NULL);
2387
2388 *addr_p = lp->stopped_data_address;
2389
2390 return lp->stopped_data_address_p;
2391}
2392
26ab7092
JK
 2393/* Commonly, any breakpoint / watchpoint generates only SIGTRAP. */
2394
135340af
PA
2395bool
2396linux_nat_target::low_status_is_event (int status)
26ab7092
JK
2397{
2398 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2399}
2400
57380f4e
DJ
2401/* Wait until LP is stopped. */
2402
2403static int
d3a70e03 2404stop_wait_callback (struct lwp_info *lp)
57380f4e 2405{
5b6d1e4f 2406 inferior *inf = find_inferior_ptid (linux_target, lp->ptid);
6c95b8df
PA
2407
 2408 /* If this is a vfork parent, bail out; it is not going to report
 2409 any SIGSTOP until the vfork is done. */
2410 if (inf->vfork_child != NULL)
2411 return 0;
2412
d6b0e80f
AC
2413 if (!lp->stopped)
2414 {
2415 int status;
2416
2417 status = wait_lwp (lp);
2418 if (status == 0)
2419 return 0;
2420
57380f4e
DJ
2421 if (lp->ignore_sigint && WIFSTOPPED (status)
2422 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2423 {
57380f4e 2424 lp->ignore_sigint = 0;
d6b0e80f
AC
2425
2426 errno = 0;
e38504b3 2427 ptrace (PTRACE_CONT, lp->ptid.lwp (), 0, 0);
8817a6f2 2428 lp->stopped = 0;
9327494e
SM
2429 linux_nat_debug_printf
2430 ("PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)",
e53c95d4 2431 lp->ptid.to_string ().c_str (),
9327494e 2432 errno ? safe_strerror (errno) : "OK");
d6b0e80f 2433
d3a70e03 2434 return stop_wait_callback (lp);
d6b0e80f
AC
2435 }
2436
57380f4e
DJ
2437 maybe_clear_ignore_sigint (lp);
2438
d6b0e80f
AC
2439 if (WSTOPSIG (status) != SIGSTOP)
2440 {
e5ef252a 2441 /* The thread was stopped with a signal other than SIGSTOP. */
7feb7d06 2442
9327494e 2443 linux_nat_debug_printf ("Pending event %s in %s",
8d06918f 2444 status_to_str ((int) status).c_str (),
e53c95d4 2445 lp->ptid.to_string ().c_str ());
e5ef252a
PA
2446
2447 /* Save the sigtrap event. */
2448 lp->status = status;
e5ef252a 2449 gdb_assert (lp->signalled);
e7ad2f14 2450 save_stop_reason (lp);
d6b0e80f
AC
2451 }
2452 else
2453 {
7010835a 2454 /* We caught the SIGSTOP that we intended to catch. */
e5ef252a 2455
9327494e 2456 linux_nat_debug_printf ("Expected SIGSTOP caught for %s.",
e53c95d4 2457 lp->ptid.to_string ().c_str ());
e5ef252a 2458
d6b0e80f 2459 lp->signalled = 0;
7010835a
AB
2460
2461 /* If we are waiting for this stop so we can report the thread
2462 stopped then we need to record this status. Otherwise, we can
2463 now discard this stop event. */
2464 if (lp->last_resume_kind == resume_stop)
2465 {
2466 lp->status = status;
2467 save_stop_reason (lp);
2468 }
d6b0e80f
AC
2469 }
2470 }
2471
2472 return 0;
2473}
2474
74387712
SM
2475/* Get the inferior associated to LWP. Must be called with an LWP that has
2476 an associated inferior. Always return non-nullptr. */
2477
2478static inferior *
2479lwp_inferior (const lwp_info *lwp)
2480{
2481 inferior *inf = find_inferior_ptid (linux_target, lwp->ptid);
2482 gdb_assert (inf != nullptr);
2483 return inf;
2484}
2485
9c02b525
PA
2486/* Return non-zero if LP has a wait status pending. Discard the
2487 pending event and resume the LWP if the event that originally
2488 caused the stop became uninteresting. */
d6b0e80f
AC
2489
2490static int
d3a70e03 2491status_callback (struct lwp_info *lp)
d6b0e80f
AC
2492{
2493 /* Only report a pending wait status if we pretend that this has
2494 indeed been resumed. */
ca2163eb
PA
2495 if (!lp->resumed)
2496 return 0;
2497
eb54c8bf
PA
2498 if (!lwp_status_pending_p (lp))
2499 return 0;
2500
15c66dd6
PA
2501 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
2502 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
9c02b525 2503 {
5b6d1e4f 2504 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
9c02b525
PA
2505 CORE_ADDR pc;
2506 int discard = 0;
2507
9c02b525
PA
2508 pc = regcache_read_pc (regcache);
2509
2510 if (pc != lp->stop_pc)
2511 {
9327494e 2512 linux_nat_debug_printf ("PC of %s changed. was=%s, now=%s",
e53c95d4 2513 lp->ptid.to_string ().c_str (),
99d9c3b9
SM
2514 paddress (current_inferior ()->arch (),
2515 lp->stop_pc),
2516 paddress (current_inferior ()->arch (), pc));
9c02b525
PA
2517 discard = 1;
2518 }
faf09f01
PA
2519
2520#if !USE_SIGTRAP_SIGINFO
74387712 2521 else if (!breakpoint_inserted_here_p (lwp_inferior (lp)->aspace, pc))
9c02b525 2522 {
9327494e 2523 linux_nat_debug_printf ("previous breakpoint of %s, at %s gone",
e53c95d4 2524 lp->ptid.to_string ().c_str (),
99d9c3b9
SM
2525 paddress (current_inferior ()->arch (),
2526 lp->stop_pc));
9c02b525
PA
2527
2528 discard = 1;
2529 }
faf09f01 2530#endif
9c02b525
PA
2531
2532 if (discard)
2533 {
9327494e 2534 linux_nat_debug_printf ("pending event of %s cancelled.",
e53c95d4 2535 lp->ptid.to_string ().c_str ());
9c02b525
PA
2536
2537 lp->status = 0;
2538 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
2539 return 0;
2540 }
9c02b525
PA
2541 }
2542
eb54c8bf 2543 return 1;
d6b0e80f
AC
2544}
2545
d6b0e80f
AC
2546/* Count the LWP's that have had events. */
2547
2548static int
d3a70e03 2549count_events_callback (struct lwp_info *lp, int *count)
d6b0e80f 2550{
d6b0e80f
AC
2551 gdb_assert (count != NULL);
2552
9c02b525
PA
2553 /* Select only resumed LWPs that have an event pending. */
2554 if (lp->resumed && lwp_status_pending_p (lp))
d6b0e80f
AC
2555 (*count)++;
2556
2557 return 0;
2558}
2559
2560/* Select the LWP (if any) that is currently being single-stepped. */
2561
2562static int
d3a70e03 2563select_singlestep_lwp_callback (struct lwp_info *lp)
d6b0e80f 2564{
25289eb2
PA
2565 if (lp->last_resume_kind == resume_step
2566 && lp->status != 0)
d6b0e80f
AC
2567 return 1;
2568 else
2569 return 0;
2570}
2571
8a99810d
PA
2572/* Returns true if LP has a status pending. */
2573
2574static int
2575lwp_status_pending_p (struct lwp_info *lp)
2576{
2577 /* We check for lp->waitstatus in addition to lp->status, because we
2578 can have pending process exits recorded in lp->status and
2579 W_EXITCODE(0,0) happens to be 0. */
183be222 2580 return lp->status != 0 || lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE;
8a99810d
PA
2581}
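
/* A tiny worked example (not GDB code) of the ambiguity the comment
   above refers to: with the usual wait-status encoding, a normal exit
   with code 0 encodes to 0, the same value as "no status pending",
   which is why process exits are stored in lp->waitstatus rather than
   lp->status.  The macros are defined locally in case <sys/wait.h>
   doesn't provide them.  */

#include <assert.h>
#include <signal.h>

#ifndef W_EXITCODE
#define W_EXITCODE(ret, sig) (((ret) << 8) | (sig))
#endif
#ifndef W_STOPCODE
#define W_STOPCODE(sig) (((sig) << 8) | 0x7f)
#endif

int
main (void)
{
  /* exit (0) encodes to 0, indistinguishable from "nothing pending"...  */
  assert (W_EXITCODE (0, 0) == 0);

  /* ...while, e.g., a SIGINT stop encodes to a non-zero value.  */
  assert (W_STOPCODE (SIGINT) != 0);

  return 0;
}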
2582
b90fc188 2583/* Select the Nth LWP that has had an event. */
d6b0e80f
AC
2584
2585static int
d3a70e03 2586select_event_lwp_callback (struct lwp_info *lp, int *selector)
d6b0e80f 2587{
d6b0e80f
AC
2588 gdb_assert (selector != NULL);
2589
9c02b525
PA
2590 /* Select only resumed LWPs that have an event pending. */
2591 if (lp->resumed && lwp_status_pending_p (lp))
d6b0e80f
AC
2592 if ((*selector)-- == 0)
2593 return 1;
2594
2595 return 0;
2596}
2597
e7ad2f14
PA
2598/* Called when the LWP stopped for a signal/trap. If it stopped for a
2599 trap check what caused it (breakpoint, watchpoint, trace, etc.),
2600 and save the result in the LWP's stop_reason field. If it stopped
2601 for a breakpoint, decrement the PC if necessary on the lwp's
2602 architecture. */
9c02b525 2603
e7ad2f14
PA
2604static void
2605save_stop_reason (struct lwp_info *lp)
710151dd 2606{
e7ad2f14
PA
2607 struct regcache *regcache;
2608 struct gdbarch *gdbarch;
515630c5 2609 CORE_ADDR pc;
9c02b525 2610 CORE_ADDR sw_bp_pc;
faf09f01
PA
2611#if USE_SIGTRAP_SIGINFO
2612 siginfo_t siginfo;
2613#endif
9c02b525 2614
e7ad2f14
PA
2615 gdb_assert (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON);
2616 gdb_assert (lp->status != 0);
2617
135340af 2618 if (!linux_target->low_status_is_event (lp->status))
e7ad2f14
PA
2619 return;
2620
74387712 2621 inferior *inf = lwp_inferior (lp);
a9deee17
PA
2622 if (inf->starting_up)
2623 return;
2624
5b6d1e4f 2625 regcache = get_thread_regcache (linux_target, lp->ptid);
ac7936df 2626 gdbarch = regcache->arch ();
e7ad2f14 2627
9c02b525 2628 pc = regcache_read_pc (regcache);
527a273a 2629 sw_bp_pc = pc - gdbarch_decr_pc_after_break (gdbarch);
515630c5 2630
faf09f01
PA
2631#if USE_SIGTRAP_SIGINFO
2632 if (linux_nat_get_siginfo (lp->ptid, &siginfo))
2633 {
2634 if (siginfo.si_signo == SIGTRAP)
2635 {
e7ad2f14
PA
2636 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
2637 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
faf09f01 2638 {
e7ad2f14
PA
2639 /* The si_code is ambiguous on this arch -- check debug
2640 registers. */
2641 if (!check_stopped_by_watchpoint (lp))
2642 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
2643 }
2644 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
2645 {
2646 /* If we determine the LWP stopped for a SW breakpoint,
2647 trust it. Particularly don't check watchpoint
7da6a5b9 2648 registers, because, at least on s390, we'd find
e7ad2f14
PA
2649 stopped-by-watchpoint as long as there's a watchpoint
2650 set. */
faf09f01 2651 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
faf09f01 2652 }
e7ad2f14 2653 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
faf09f01 2654 {
e7ad2f14
PA
2655 /* This can indicate either a hardware breakpoint or
2656 hardware watchpoint. Check debug registers. */
2657 if (!check_stopped_by_watchpoint (lp))
2658 lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
faf09f01 2659 }
2bf6fb9d
PA
2660 else if (siginfo.si_code == TRAP_TRACE)
2661 {
9327494e 2662 linux_nat_debug_printf ("%s stopped by trace",
e53c95d4 2663 lp->ptid.to_string ().c_str ());
e7ad2f14
PA
2664
2665 /* We may have single stepped an instruction that
2666 triggered a watchpoint. In that case, on some
2667 architectures (such as x86), instead of TRAP_HWBKPT,
2668 si_code indicates TRAP_TRACE, and we need to check
2669 the debug registers separately. */
2670 check_stopped_by_watchpoint (lp);
2bf6fb9d 2671 }
faf09f01
PA
2672 }
2673 }
2674#else
9c02b525 2675 if ((!lp->step || lp->stop_pc == sw_bp_pc)
74387712 2676 && software_breakpoint_inserted_here_p (inf->aspace, sw_bp_pc))
710151dd 2677 {
9c02b525
PA
2678 /* The LWP was either continued, or stepped a software
2679 breakpoint instruction. */
e7ad2f14
PA
2680 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
2681 }
2682
74387712 2683 if (hardware_breakpoint_inserted_here_p (inf->aspace, pc))
e7ad2f14
PA
2684 lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
2685
2686 if (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
2687 check_stopped_by_watchpoint (lp);
2688#endif
2689
2690 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2691 {
9327494e 2692 linux_nat_debug_printf ("%s stopped by software breakpoint",
e53c95d4 2693 lp->ptid.to_string ().c_str ());
710151dd
PA
2694
2695 /* Back up the PC if necessary. */
9c02b525
PA
2696 if (pc != sw_bp_pc)
2697 regcache_write_pc (regcache, sw_bp_pc);
515630c5 2698
e7ad2f14
PA
2699 /* Update this so we record the correct stop PC below. */
2700 pc = sw_bp_pc;
710151dd 2701 }
e7ad2f14 2702 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
9c02b525 2703 {
9327494e 2704 linux_nat_debug_printf ("%s stopped by hardware breakpoint",
e53c95d4 2705 lp->ptid.to_string ().c_str ());
e7ad2f14
PA
2706 }
2707 else if (lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
2708 {
9327494e 2709 linux_nat_debug_printf ("%s stopped by hardware watchpoint",
e53c95d4 2710 lp->ptid.to_string ().c_str ());
9c02b525 2711 }
d6b0e80f 2712
e7ad2f14 2713 lp->stop_pc = pc;
d6b0e80f
AC
2714}
2715
faf09f01
PA
2716
2717/* Returns true if the LWP had stopped for a software breakpoint. */
2718
57810aa7 2719bool
f6ac5f3d 2720linux_nat_target::stopped_by_sw_breakpoint ()
faf09f01
PA
2721{
2722 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2723
2724 gdb_assert (lp != NULL);
2725
2726 return lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2727}
2728
2729/* Implement the supports_stopped_by_sw_breakpoint method. */
2730
57810aa7 2731bool
f6ac5f3d 2732linux_nat_target::supports_stopped_by_sw_breakpoint ()
faf09f01
PA
2733{
2734 return USE_SIGTRAP_SIGINFO;
2735}
2736
2737/* Returns true if the LWP had stopped for a hardware
2738 breakpoint/watchpoint. */
2739
57810aa7 2740bool
f6ac5f3d 2741linux_nat_target::stopped_by_hw_breakpoint ()
faf09f01
PA
2742{
2743 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2744
2745 gdb_assert (lp != NULL);
2746
2747 return lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2748}
2749
2750/* Implement the supports_stopped_by_hw_breakpoint method. */
2751
57810aa7 2752bool
f6ac5f3d 2753linux_nat_target::supports_stopped_by_hw_breakpoint ()
faf09f01
PA
2754{
2755 return USE_SIGTRAP_SIGINFO;
2756}
2757
d6b0e80f
AC
2758/* Select one LWP out of those that have events pending. */
2759
2760static void
d90e17a7 2761select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
d6b0e80f
AC
2762{
2763 int num_events = 0;
2764 int random_selector;
9c02b525 2765 struct lwp_info *event_lp = NULL;
d6b0e80f 2766
ac264b3b 2767 /* Record the wait status for the original LWP. */
d6b0e80f
AC
2768 (*orig_lp)->status = *status;
2769
9c02b525
PA
2770 /* In all-stop, give preference to the LWP that is being
2771 single-stepped. There will be at most one, and it will be the
2772 LWP that the core is most interested in. If we didn't do this,
2773 then we'd have to handle pending step SIGTRAPs somehow in case
2774 the core later continues the previously-stepped thread, as
2775 otherwise we'd report the pending SIGTRAP then, and the core, not
2776 having stepped the thread, wouldn't understand what the trap was
2777 for, and therefore would report it to the user as a random
2778 signal. */
fbea99ea 2779 if (!target_is_non_stop_p ())
d6b0e80f 2780 {
d3a70e03 2781 event_lp = iterate_over_lwps (filter, select_singlestep_lwp_callback);
9c02b525
PA
2782 if (event_lp != NULL)
2783 {
9327494e 2784 linux_nat_debug_printf ("Select single-step %s",
e53c95d4 2785 event_lp->ptid.to_string ().c_str ());
9c02b525 2786 }
d6b0e80f 2787 }
9c02b525
PA
2788
2789 if (event_lp == NULL)
d6b0e80f 2790 {
9c02b525 2791 /* Pick one at random, out of those which have had events. */
d6b0e80f 2792
9c02b525 2793 /* First see how many events we have. */
d3a70e03
TT
2794 iterate_over_lwps (filter,
2795 [&] (struct lwp_info *info)
2796 {
2797 return count_events_callback (info, &num_events);
2798 });
8bf3b159 2799 gdb_assert (num_events > 0);
d6b0e80f 2800
9c02b525
PA
2801 /* Now randomly pick a LWP out of those that have had
2802 events. */
d6b0e80f
AC
2803 random_selector = (int)
2804 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2805
9327494e
SM
2806 if (num_events > 1)
2807 linux_nat_debug_printf ("Found %d events, selecting #%d",
2808 num_events, random_selector);
d6b0e80f 2809
d3a70e03
TT
2810 event_lp
2811 = (iterate_over_lwps
2812 (filter,
2813 [&] (struct lwp_info *info)
2814 {
2815 return select_event_lwp_callback (info,
2816 &random_selector);
2817 }));
d6b0e80f
AC
2818 }
2819
2820 if (event_lp != NULL)
2821 {
2822 /* Switch the event LWP. */
2823 *orig_lp = event_lp;
2824 *status = event_lp->status;
2825 }
2826
2827 /* Flush the wait status for the event LWP. */
2828 (*orig_lp)->status = 0;
2829}
2830
2831/* Return non-zero if LP has been resumed. */
2832
2833static int
d3a70e03 2834resumed_callback (struct lwp_info *lp)
d6b0e80f
AC
2835{
2836 return lp->resumed;
2837}
2838
02f3fc28 2839/* Check if we should go on and pass this event to common code.
12d9289a 2840
897608ed
SM
 2841 If so, save the status to the lwp_info structure associated with LWPID. */
2842
2843static void
9c02b525 2844linux_nat_filter_event (int lwpid, int status)
02f3fc28
PA
2845{
2846 struct lwp_info *lp;
89a5711c 2847 int event = linux_ptrace_get_extended_event (status);
02f3fc28 2848
f2907e49 2849 lp = find_lwp_pid (ptid_t (lwpid));
02f3fc28 2850
1abeb1e9
PA
2851 /* Check for events reported by anything not in our LWP list. */
2852 if (lp == nullptr)
0e5bf2a8 2853 {
1abeb1e9
PA
2854 if (WIFSTOPPED (status))
2855 {
2856 if (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC)
2857 {
2858 /* A non-leader thread exec'ed after we've seen the
2859 leader zombie, and removed it from our lists (in
2860 check_zombie_leaders). The non-leader thread changes
2861 its tid to the tgid. */
2862 linux_nat_debug_printf
2863 ("Re-adding thread group leader LWP %d after exec.",
2864 lwpid);
0e5bf2a8 2865
1abeb1e9
PA
2866 lp = add_lwp (ptid_t (lwpid, lwpid));
2867 lp->stopped = 1;
2868 lp->resumed = 1;
2869 add_thread (linux_target, lp->ptid);
2870 }
2871 else
2872 {
2873 /* A process we are controlling has forked and the new
2874 child's stop was reported to us by the kernel. Save
2875 its PID and go back to waiting for the fork event to
2876 be reported - the stopped process might be returned
2877 from waitpid before or after the fork event is. */
2878 linux_nat_debug_printf
2879 ("Saving LWP %d status %s in stopped_pids list",
2880 lwpid, status_to_str (status).c_str ());
2881 add_to_pid_list (&stopped_pids, lwpid, status);
2882 }
2883 }
2884 else
2885 {
2886 /* Don't report an event for the exit of an LWP not in our
2887 list, i.e. not part of any inferior we're debugging.
2888 This can happen if we detach from a program we originally
6cf20c46
PA
2889 forked and then it exits. However, note that we may have
2890 earlier deleted a leader of an inferior we're debugging,
2891 in check_zombie_leaders. Re-add it back here if so. */
2892 for (inferior *inf : all_inferiors (linux_target))
2893 {
2894 if (inf->pid == lwpid)
2895 {
2896 linux_nat_debug_printf
2897 ("Re-adding thread group leader LWP %d after exit.",
2898 lwpid);
2899
2900 lp = add_lwp (ptid_t (lwpid, lwpid));
2901 lp->resumed = 1;
2902 add_thread (linux_target, lp->ptid);
2903 break;
2904 }
2905 }
1abeb1e9 2906 }
0e5bf2a8 2907
1abeb1e9
PA
2908 if (lp == nullptr)
2909 return;
02f3fc28
PA
2910 }
2911
8817a6f2
PA
2912 /* This LWP is stopped now. (And if dead, this prevents it from
2913 ever being continued.) */
2914 lp->stopped = 1;
2915
8784d563
PA
2916 if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
2917 {
5b6d1e4f 2918 inferior *inf = find_inferior_pid (linux_target, lp->ptid.pid ());
de0d863e 2919 int options = linux_nat_ptrace_options (inf->attach_flag);
8784d563 2920
e38504b3 2921 linux_enable_event_reporting (lp->ptid.lwp (), options);
8784d563
PA
2922 lp->must_set_ptrace_flags = 0;
2923 }
2924
ca2163eb
PA
2925 /* Handle GNU/Linux's syscall SIGTRAPs. */
2926 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2927 {
2928 /* No longer need the sysgood bit. The ptrace event ends up
2929 recorded in lp->waitstatus if we care for it. We can carry
2930 on handling the event like a regular SIGTRAP from here
2931 on. */
2932 status = W_STOPCODE (SIGTRAP);
2933 if (linux_handle_syscall_trap (lp, 0))
897608ed 2934 return;
ca2163eb 2935 }
bfd09d20
JS
2936 else
2937 {
2938 /* Almost all other ptrace-stops are known to be outside of system
2939 calls, with further exceptions in linux_handle_extended_wait. */
2940 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2941 }
02f3fc28 2942
ca2163eb 2943 /* Handle GNU/Linux's extended waitstatus for trace events. */
89a5711c
DB
2944 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2945 && linux_is_extended_waitstatus (status))
02f3fc28 2946 {
9327494e
SM
2947 linux_nat_debug_printf ("Handling extended status 0x%06x", status);
2948
4dd63d48 2949 if (linux_handle_extended_wait (lp, status))
897608ed 2950 return;
02f3fc28
PA
2951 }
2952
2953 /* Check if the thread has exited. */
9c02b525
PA
2954 if (WIFEXITED (status) || WIFSIGNALED (status))
2955 {
a51e14ef 2956 if (!report_exit_events_for (lp) && !is_leader (lp))
02f3fc28 2957 {
9327494e 2958 linux_nat_debug_printf ("%s exited.",
e53c95d4 2959 lp->ptid.to_string ().c_str ());
9c02b525 2960
6cf20c46 2961 /* If this was not the leader exiting, then the exit signal
4a6ed09b
PA
2962 was not the end of the debugged application and should be
2963 ignored. */
2964 exit_lwp (lp);
897608ed 2965 return;
02f3fc28
PA
2966 }
2967
77598427
PA
2968 /* Note that even if the leader was ptrace-stopped, it can still
2969 exit, if e.g., some other thread brings down the whole
2970 process (calls `exit'). So don't assert that the lwp is
2971 resumed. */
9327494e
SM
2972 linux_nat_debug_printf ("LWP %ld exited (resumed=%d)",
2973 lp->ptid.lwp (), lp->resumed);
02f3fc28 2974
9c02b525
PA
 2975 /* Dead LWPs aren't expected to report a pending SIGSTOP. */
2976 lp->signalled = 0;
2977
2978 /* Store the pending event in the waitstatus, because
2979 W_EXITCODE(0,0) == 0. */
7509b829 2980 lp->waitstatus = host_status_to_waitstatus (status);
897608ed 2981 return;
02f3fc28
PA
2982 }
2983
02f3fc28
PA
2984 /* Make sure we don't report a SIGSTOP that we sent ourselves in
2985 an attempt to stop an LWP. */
2986 if (lp->signalled
2987 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2988 {
02f3fc28
PA
2989 lp->signalled = 0;
2990
2bf6fb9d 2991 if (lp->last_resume_kind == resume_stop)
25289eb2 2992 {
9327494e 2993 linux_nat_debug_printf ("resume_stop SIGSTOP caught for %s.",
e53c95d4 2994 lp->ptid.to_string ().c_str ());
2bf6fb9d
PA
2995 }
2996 else
2997 {
2998 /* This is a delayed SIGSTOP. Filter out the event. */
02f3fc28 2999
9327494e
SM
3000 linux_nat_debug_printf
3001 ("%s %s, 0, 0 (discard delayed SIGSTOP)",
3002 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
e53c95d4 3003 lp->ptid.to_string ().c_str ());
02f3fc28 3004
2bf6fb9d 3005 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
25289eb2 3006 gdb_assert (lp->resumed);
897608ed 3007 return;
25289eb2 3008 }
02f3fc28
PA
3009 }
3010
57380f4e
DJ
3011 /* Make sure we don't report a SIGINT that we have already displayed
3012 for another thread. */
3013 if (lp->ignore_sigint
3014 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3015 {
9327494e 3016 linux_nat_debug_printf ("Delayed SIGINT caught for %s.",
e53c95d4 3017 lp->ptid.to_string ().c_str ());
57380f4e
DJ
3018
3019 /* This is a delayed SIGINT. */
3020 lp->ignore_sigint = 0;
3021
8a99810d 3022 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
9327494e
SM
3023 linux_nat_debug_printf ("%s %s, 0, 0 (discard SIGINT)",
3024 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
e53c95d4 3025 lp->ptid.to_string ().c_str ());
57380f4e
DJ
3026 gdb_assert (lp->resumed);
3027
3028 /* Discard the event. */
897608ed 3029 return;
57380f4e
DJ
3030 }
3031
9c02b525
PA
3032 /* Don't report signals that GDB isn't interested in, such as
3033 signals that are neither printed nor stopped upon. Stopping all
7da6a5b9 3034 threads can be a bit time-consuming, so if we want decent
9c02b525
PA
3035 performance with heavily multi-threaded programs, especially when
3036 they're using a high frequency timer, we'd better avoid it if we
3037 can. */
3038 if (WIFSTOPPED (status))
3039 {
3040 enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
3041
fbea99ea 3042 if (!target_is_non_stop_p ())
9c02b525
PA
3043 {
3044 /* Only do the below in all-stop, as we currently use SIGSTOP
3045 to implement target_stop (see linux_nat_stop) in
3046 non-stop. */
3047 if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
3048 {
3049 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3050 forwarded to the entire process group, that is, all LWPs
3051 will receive it - unless they're using CLONE_THREAD to
3052 share signals. Since we only want to report it once, we
3053 mark it as ignored for all LWPs except this one. */
d3a70e03 3054 iterate_over_lwps (ptid_t (lp->ptid.pid ()), set_ignore_sigint);
9c02b525
PA
3055 lp->ignore_sigint = 0;
3056 }
3057 else
3058 maybe_clear_ignore_sigint (lp);
3059 }
3060
3061 /* When using hardware single-step, we need to report every signal.
c9587f88 3062 Otherwise, signals in pass_mask may be short-circuited,
d8c06f22
AB
 3063 except for signals that might be caused by a breakpoint, or for
 3064 SIGSTOP if we sent the SIGSTOP and are waiting for it to arrive. */
9c02b525 3065 if (!lp->step
c9587f88 3066 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status))
d8c06f22 3067 && (WSTOPSIG (status) != SIGSTOP
9213a6d7 3068 || !linux_target->find_thread (lp->ptid)->stop_requested)
c9587f88 3069 && !linux_wstatus_maybe_breakpoint (status))
9c02b525
PA
3070 {
3071 linux_resume_one_lwp (lp, lp->step, signo);
9327494e
SM
3072 linux_nat_debug_printf
3073 ("%s %s, %s (preempt 'handle')",
3074 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
e53c95d4 3075 lp->ptid.to_string ().c_str (),
9327494e
SM
3076 (signo != GDB_SIGNAL_0
3077 ? strsignal (gdb_signal_to_host (signo)) : "0"));
897608ed 3078 return;
9c02b525
PA
3079 }
3080 }
3081
02f3fc28
PA
3082 /* An interesting event. */
3083 gdb_assert (lp);
ca2163eb 3084 lp->status = status;
e7ad2f14 3085 save_stop_reason (lp);
02f3fc28
PA
3086}
3087
0e5bf2a8
PA
3088/* Detect zombie thread group leaders, and "exit" them. We can't reap
3089 their exits until all other threads in the group have exited. */
3090
3091static void
3092check_zombie_leaders (void)
3093{
08036331 3094 for (inferior *inf : all_inferiors ())
0e5bf2a8
PA
3095 {
3096 struct lwp_info *leader_lp;
3097
3098 if (inf->pid == 0)
3099 continue;
3100
f2907e49 3101 leader_lp = find_lwp_pid (ptid_t (inf->pid));
0e5bf2a8
PA
3102 if (leader_lp != NULL
3103 /* Check if there are other threads in the group, as we may
6cf20c46
PA
3104 have raced with the inferior simply exiting. Note this
3105 isn't a watertight check. If the inferior is
3106 multi-threaded and is exiting, it may be we see the
3107 leader as zombie before we reap all the non-leader
3108 threads. See comments below. */
0e5bf2a8 3109 && num_lwps (inf->pid) > 1
5f572dec 3110 && linux_proc_pid_is_zombie (inf->pid))
0e5bf2a8 3111 {
6cf20c46
PA
3112 /* A zombie leader in a multi-threaded program can mean one
3113 of three things:
3114
3115 #1 - Only the leader exited, not the whole program, e.g.,
3116 with pthread_exit. Since we can't reap the leader's exit
3117 status until all other threads are gone and reaped too,
3118 we want to delete the zombie leader right away, as it
3119 can't be debugged, we can't read its registers, etc.
3120 This is the main reason we check for zombie leaders
3121 disappearing.
3122
3123 #2 - The whole thread-group/process exited (a group exit,
 3124 via e.g. exit(3)), and there is (or will be shortly) an
3125 exit reported for each thread in the process, and then
3126 finally an exit for the leader once the non-leaders are
3127 reaped.
3128
3129 #3 - There are 3 or more threads in the group, and a
3130 thread other than the leader exec'd. See comments on
3131 exec events at the top of the file.
3132
3133 Ideally we would never delete the leader for case #2.
3134 Instead, we want to collect the exit status of each
3135 non-leader thread, and then finally collect the exit
3136 status of the leader as normal and use its exit code as
3137 whole-process exit code. Unfortunately, there's no
3138 race-free way to distinguish cases #1 and #2. We can't
 3139 assume the exit events for the non-leader threads are
3140 already pending in the kernel, nor can we assume the
3141 non-leader threads are in zombie state already. Between
3142 the leader becoming zombie and the non-leaders exiting
3143 and becoming zombie themselves, there's a small time
3144 window, so such a check would be racy. Temporarily
3145 pausing all threads and checking to see if all threads
3146 exit or not before re-resuming them would work in the
3147 case that all threads are running right now, but it
3148 wouldn't work if some thread is currently already
3149 ptrace-stopped, e.g., due to scheduler-locking.
3150
3151 So what we do is we delete the leader anyhow, and then
3152 later on when we see its exit status, we re-add it back.
3153 We also make sure that we only report a whole-process
3154 exit when we see the leader exiting, as opposed to when
3155 the last LWP in the LWP list exits, which can be a
3156 non-leader if we deleted the leader here. */
9327494e 3157 linux_nat_debug_printf ("Thread group leader %d zombie "
6cf20c46
PA
3158 "(it exited, or another thread execd), "
3159 "deleting it.",
9327494e 3160 inf->pid);
0e5bf2a8
PA
3161 exit_lwp (leader_lp);
3162 }
3163 }
3164}
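
/* A simplified sketch of the zombie check the code above and wait_lwp
   rely on.  This is not the linux_proc_pid_is_zombie implementation;
   it only illustrates that /proc/PID/status reports the task state in
   its "State:" line, where a zombie shows up as "Z (zombie)".  */

#include <stdio.h>
#include <string.h>

int
proc_pid_is_zombie (int pid)
{
  char path[64], line[128];
  int zombie = 0;
  FILE *f;

  snprintf (path, sizeof path, "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;

  while (fgets (line, sizeof line, f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	const char *p = line + 6;

	while (*p == ' ' || *p == '\t')
	  p++;
	zombie = (*p == 'Z');
	break;
      }

  fclose (f);
  return zombie;
}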
3165
a51e14ef
PA
3166/* Convenience function that is called when we're about to return an
3167 event to the core. If the event is an exit or signalled event,
3168 then this decides whether to report it as process-wide event, as a
3169 thread exit event, or to suppress it. All other event kinds are
3170 passed through unmodified. */
aa01bd36
PA
3171
3172static ptid_t
3173filter_exit_event (struct lwp_info *event_child,
3174 struct target_waitstatus *ourstatus)
3175{
3176 ptid_t ptid = event_child->ptid;
3177
a51e14ef
PA
3178 /* Note we must filter TARGET_WAITKIND_SIGNALLED as well, otherwise
3179 if a non-leader thread exits with a signal, we'd report it to the
3180 core which would interpret it as the whole-process exiting.
3181 There is no TARGET_WAITKIND_THREAD_SIGNALLED event kind. */
3182 if (ourstatus->kind () != TARGET_WAITKIND_EXITED
3183 && ourstatus->kind () != TARGET_WAITKIND_SIGNALLED)
3184 return ptid;
3185
6cf20c46 3186 if (!is_leader (event_child))
aa01bd36 3187 {
a51e14ef 3188 if (report_exit_events_for (event_child))
7730e5c6
PA
3189 {
3190 ourstatus->set_thread_exited (0);
3191 /* Delete lwp, but not thread_info, infrun will need it to
3192 process the event. */
3193 exit_lwp (event_child, false);
3194 }
aa01bd36 3195 else
7730e5c6
PA
3196 {
3197 ourstatus->set_ignore ();
3198 exit_lwp (event_child);
3199 }
aa01bd36
PA
3200 }
3201
3202 return ptid;
3203}
3204
d6b0e80f 3205static ptid_t
f6ac5f3d 3206linux_nat_wait_1 (ptid_t ptid, struct target_waitstatus *ourstatus,
b60cea74 3207 target_wait_flags target_options)
d6b0e80f 3208{
b26b06dd
AB
3209 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
3210
fc9b8e47 3211 sigset_t prev_mask;
4b60df3d 3212 enum resume_kind last_resume_kind;
12d9289a 3213 struct lwp_info *lp;
12d9289a 3214 int status;
d6b0e80f 3215
f973ed9c
DJ
3216 /* The first time we get here after starting a new inferior, we may
3217 not have added it to the LWP list yet - this is the earliest
3218 moment at which we know its PID. */
677c92fe 3219 if (ptid.is_pid () && find_lwp_pid (ptid) == nullptr)
f973ed9c 3220 {
677c92fe 3221 ptid_t lwp_ptid (ptid.pid (), ptid.pid ());
27c9d204 3222
677c92fe
SM
3223 /* Upgrade the main thread's ptid. */
3224 thread_change_ptid (linux_target, ptid, lwp_ptid);
3225 lp = add_initial_lwp (lwp_ptid);
f973ed9c
DJ
3226 lp->resumed = 1;
3227 }
3228
12696c10 3229 /* Make sure SIGCHLD is blocked until the sigsuspend below. */
7feb7d06 3230 block_child_signals (&prev_mask);
d6b0e80f 3231
d6b0e80f 3232 /* First check if there is a LWP with a wait status pending. */
d3a70e03 3233 lp = iterate_over_lwps (ptid, status_callback);
8a99810d 3234 if (lp != NULL)
d6b0e80f 3235 {
9327494e 3236 linux_nat_debug_printf ("Using pending wait status %s for %s.",
57573e54 3237 pending_status_str (lp).c_str (),
e53c95d4 3238 lp->ptid.to_string ().c_str ());
d6b0e80f
AC
3239 }
3240
9c02b525
PA
3241 /* But if we don't find a pending event, we'll have to wait. Always
3242 pull all events out of the kernel. We'll randomly select an
3243 event LWP out of all that have events, to prevent starvation. */
7feb7d06 3244
d90e17a7 3245 while (lp == NULL)
d6b0e80f
AC
3246 {
3247 pid_t lwpid;
3248
0e5bf2a8
PA
3249 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
3250 quirks:
3251
3252 - If the thread group leader exits while other threads in the
3253 thread group still exist, waitpid(TGID, ...) hangs. That
3254 waitpid won't return an exit status until the other threads
85102364 3255 in the group are reaped.
0e5bf2a8
PA
3256
3257 - When a non-leader thread execs, that thread just vanishes
3258 without reporting an exit (so we'd hang if we waited for it
3259 explicitly in that case). The exec event is reported to
3260 the TGID pid. */
3261
3262 errno = 0;
4a6ed09b 3263 lwpid = my_waitpid (-1, &status, __WALL | WNOHANG);
0e5bf2a8 3264
9327494e
SM
3265 linux_nat_debug_printf ("waitpid(-1, ...) returned %d, %s",
3266 lwpid,
3267 errno ? safe_strerror (errno) : "ERRNO-OK");
b84876c2 3268
d6b0e80f
AC
3269 if (lwpid > 0)
3270 {
9327494e 3271 linux_nat_debug_printf ("waitpid %ld received %s",
8d06918f
SM
3272 (long) lwpid,
3273 status_to_str (status).c_str ());
d6b0e80f 3274
9c02b525 3275 linux_nat_filter_event (lwpid, status);
0e5bf2a8
PA
3276 /* Retry until nothing comes out of waitpid. A single
3277 SIGCHLD can indicate more than one child stopped. */
3278 continue;
d6b0e80f
AC
3279 }
3280
20ba1ce6
PA
3281 /* Now that we've pulled all events out of the kernel, resume
3282 LWPs that don't have an interesting event to report. */
3283 iterate_over_lwps (minus_one_ptid,
d3a70e03
TT
3284 [] (struct lwp_info *info)
3285 {
3286 return resume_stopped_resumed_lwps (info, minus_one_ptid);
3287 });
20ba1ce6
PA
3288
3289 /* ... and find an LWP with a status to report to the core, if
3290 any. */
d3a70e03 3291 lp = iterate_over_lwps (ptid, status_callback);
9c02b525
PA
3292 if (lp != NULL)
3293 break;
3294
0e5bf2a8
PA
3295 /* Check for zombie thread group leaders. Those can't be reaped
3296 until all other threads in the thread group are. */
3297 check_zombie_leaders ();
d6b0e80f 3298
0e5bf2a8
PA
3299 /* If there are no resumed children left, bail. We'd be stuck
3300 forever in the sigsuspend call below otherwise. */
d3a70e03 3301 if (iterate_over_lwps (ptid, resumed_callback) == NULL)
0e5bf2a8 3302 {
9327494e 3303 linux_nat_debug_printf ("exit (no resumed LWP)");
b84876c2 3304
183be222 3305 ourstatus->set_no_resumed ();
b84876c2 3306
0e5bf2a8
PA
3307 restore_child_signals_mask (&prev_mask);
3308 return minus_one_ptid;
d6b0e80f 3309 }
28736962 3310
0e5bf2a8
PA
3311 /* No interesting event to report to the core. */
3312
3313 if (target_options & TARGET_WNOHANG)
3314 {
b26b06dd 3315 linux_nat_debug_printf ("no interesting events found");
28736962 3316
183be222 3317 ourstatus->set_ignore ();
28736962
PA
3318 restore_child_signals_mask (&prev_mask);
3319 return minus_one_ptid;
3320 }
d6b0e80f
AC
3321
3322 /* We shouldn't end up here unless we want to try again. */
d90e17a7 3323 gdb_assert (lp == NULL);
0e5bf2a8
PA
3324
3325 /* Block until we get an event reported with SIGCHLD. */
9c3a5d93 3326 wait_for_signal ();
d6b0e80f
AC
3327 }
3328
d6b0e80f
AC
3329 gdb_assert (lp);
3330
ca2163eb
PA
3331 status = lp->status;
3332 lp->status = 0;
3333
fbea99ea 3334 if (!target_is_non_stop_p ())
4c28f408
PA
3335 {
3336 /* Now stop all other LWP's ... */
d3a70e03 3337 iterate_over_lwps (minus_one_ptid, stop_callback);
4c28f408
PA
3338
3339 /* ... and wait until all of them have reported back that
3340 they're no longer running. */
d3a70e03 3341 iterate_over_lwps (minus_one_ptid, stop_wait_callback);
9c02b525
PA
3342 }
3343
3344 /* If we're not waiting for a specific LWP, choose an event LWP from
3345 among those that have had events. Giving equal priority to all
3346 LWPs that have had events helps prevent starvation. */
d7e15655 3347 if (ptid == minus_one_ptid || ptid.is_pid ())
9c02b525
PA
3348 select_event_lwp (ptid, &lp, &status);
3349
3350 gdb_assert (lp != NULL);
3351
3352 /* Now that we've selected our final event LWP, un-adjust its PC if
faf09f01
PA
3353 it was a software breakpoint, and we can't reliably support the
3354 "stopped by software breakpoint" stop reason. */
3355 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3356 && !USE_SIGTRAP_SIGINFO)
9c02b525 3357 {
5b6d1e4f 3358 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
ac7936df 3359 struct gdbarch *gdbarch = regcache->arch ();
527a273a 3360 int decr_pc = gdbarch_decr_pc_after_break (gdbarch);
4c28f408 3361
9c02b525
PA
3362 if (decr_pc != 0)
3363 {
3364 CORE_ADDR pc;
d6b0e80f 3365
9c02b525
PA
3366 pc = regcache_read_pc (regcache);
3367 regcache_write_pc (regcache, pc + decr_pc);
3368 }
3369 }
e3e9f5a2 3370
9c02b525
PA
3371 /* We'll need this to determine whether to report a SIGSTOP as
3372 GDB_SIGNAL_0. Need to take a copy because resume_clear_callback
3373 clears it. */
3374 last_resume_kind = lp->last_resume_kind;
4b60df3d 3375
fbea99ea 3376 if (!target_is_non_stop_p ())
9c02b525 3377 {
e3e9f5a2
PA
3378 /* In all-stop, from the core's perspective, all LWPs are now
3379 stopped until a new resume action is sent over. */
d3a70e03 3380 iterate_over_lwps (minus_one_ptid, resume_clear_callback);
e3e9f5a2
PA
3381 }
3382 else
25289eb2 3383 {
d3a70e03 3384 resume_clear_callback (lp);
25289eb2 3385 }
d6b0e80f 3386
135340af 3387 if (linux_target->low_status_is_event (status))
d6b0e80f 3388 {
9327494e 3389 linux_nat_debug_printf ("trap ptid is %s.",
e53c95d4 3390 lp->ptid.to_string ().c_str ());
d6b0e80f 3391 }
d6b0e80f 3392
183be222 3393 if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
d6b0e80f
AC
3394 {
3395 *ourstatus = lp->waitstatus;
183be222 3396 lp->waitstatus.set_ignore ();
d6b0e80f
AC
3397 }
3398 else
7509b829 3399 *ourstatus = host_status_to_waitstatus (status);
d6b0e80f 3400
b26b06dd 3401 linux_nat_debug_printf ("event found");
b84876c2 3402
7feb7d06 3403 restore_child_signals_mask (&prev_mask);
1e225492 3404
4b60df3d 3405 if (last_resume_kind == resume_stop
183be222 3406 && ourstatus->kind () == TARGET_WAITKIND_STOPPED
25289eb2
PA
3407 && WSTOPSIG (status) == SIGSTOP)
3408 {
3409 /* A thread that has been requested to stop by GDB with
3410 target_stop, and it stopped cleanly, so report as SIG0. The
3411 use of SIGSTOP is an implementation detail. */
183be222 3412 ourstatus->set_stopped (GDB_SIGNAL_0);
25289eb2
PA
3413 }
3414
183be222
SM
3415 if (ourstatus->kind () == TARGET_WAITKIND_EXITED
3416 || ourstatus->kind () == TARGET_WAITKIND_SIGNALLED)
1e225492
JK
3417 lp->core = -1;
3418 else
2e794194 3419 lp->core = linux_common_core_of_thread (lp->ptid);
1e225492 3420
a51e14ef 3421 return filter_exit_event (lp, ourstatus);
d6b0e80f
AC
3422}
3423
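/* Illustrative sketch, not part of gdb/linux-nat.c: the bare POSIX
   shape of the wait loop above.  Drain waitpid (-1, ..., WNOHANG)
   without blocking; when nothing is pending, sleep in sigsuspend until
   the SIGCHLD handler runs.  SIGCHLD must be blocked everywhere except
   inside the sigsuspend, otherwise an event arriving between the
   waitpid and the sigsuspend would be missed.  Unlike the real code,
   which drains every event and then picks one at random to avoid
   starvation, this sketch simply returns the first event.  The
   function name and SLEEP_MASK parameter are hypothetical.  */

static pid_t
example_wait_for_any_stop (const sigset_t *sleep_mask, int *statusp)
{
  for (;;)
    {
      pid_t lwpid = waitpid (-1, statusp, __WALL | WNOHANG);

      if (lwpid > 0)
	return lwpid;		/* Got an event; the caller filters it.  */

      if (lwpid == -1 && errno == ECHILD)
	return -1;		/* No children left at all.  */

      /* Nothing pending: atomically install SLEEP_MASK (with SIGCHLD
	 unblocked) and sleep until a signal arrives.  */
      sigsuspend (sleep_mask);
    }
}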
e3e9f5a2
PA
3424/* Resume LWPs that are currently stopped without any pending status
3425 to report, but are resumed from the core's perspective. */
3426
3427static int
d3a70e03 3428resume_stopped_resumed_lwps (struct lwp_info *lp, const ptid_t wait_ptid)
e3e9f5a2 3429{
74387712 3430 inferior *inf = lwp_inferior (lp);
14ec4172 3431
8a9da63e 3432 if (!lp->stopped)
4dd63d48 3433 {
9327494e 3434 linux_nat_debug_printf ("NOT resuming LWP %s, not stopped",
e53c95d4 3435 lp->ptid.to_string ().c_str ());
4dd63d48
PA
3436 }
3437 else if (!lp->resumed)
3438 {
9327494e 3439 linux_nat_debug_printf ("NOT resuming LWP %s, not resumed",
e53c95d4 3440 lp->ptid.to_string ().c_str ());
4dd63d48
PA
3441 }
3442 else if (lwp_status_pending_p (lp))
3443 {
9327494e 3444 linux_nat_debug_printf ("NOT resuming LWP %s, has pending status",
e53c95d4 3445 lp->ptid.to_string ().c_str ());
4dd63d48 3446 }
8a9da63e
AB
3447 else if (inf->vfork_child != nullptr)
3448 {
3449 linux_nat_debug_printf ("NOT resuming LWP %s (vfork parent)",
3450 lp->ptid.to_string ().c_str ());
3451 }
4dd63d48 3452 else
e3e9f5a2 3453 {
5b6d1e4f 3454 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
ac7936df 3455 struct gdbarch *gdbarch = regcache->arch ();
336060f3 3456
a70b8144 3457 try
e3e9f5a2 3458 {
23f238d3
PA
3459 CORE_ADDR pc = regcache_read_pc (regcache);
3460 int leave_stopped = 0;
e3e9f5a2 3461
23f238d3
PA
3462 /* Don't bother if there's a breakpoint at PC that we'd hit
3463 immediately, and we're not waiting for this LWP. */
d3a70e03 3464 if (!lp->ptid.matches (wait_ptid))
23f238d3 3465 {
f9582a22 3466 if (breakpoint_inserted_here_p (inf->aspace.get (), pc))
23f238d3
PA
3467 leave_stopped = 1;
3468 }
e3e9f5a2 3469
23f238d3
PA
3470 if (!leave_stopped)
3471 {
9327494e
SM
3472 linux_nat_debug_printf
3473 ("resuming stopped-resumed LWP %s at %s: step=%d",
e53c95d4 3474 lp->ptid.to_string ().c_str (), paddress (gdbarch, pc),
9327494e 3475 lp->step);
23f238d3
PA
3476
3477 linux_resume_one_lwp_throw (lp, lp->step, GDB_SIGNAL_0);
3478 }
3479 }
230d2906 3480 catch (const gdb_exception_error &ex)
23f238d3
PA
3481 {
3482 if (!check_ptrace_stopped_lwp_gone (lp))
eedc3f4f 3483 throw;
23f238d3 3484 }
e3e9f5a2
PA
3485 }
3486
3487 return 0;
3488}
3489
f6ac5f3d
PA
3490ptid_t
3491linux_nat_target::wait (ptid_t ptid, struct target_waitstatus *ourstatus,
b60cea74 3492 target_wait_flags target_options)
7feb7d06 3493{
b26b06dd
AB
3494 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
3495
7feb7d06
PA
3496 ptid_t event_ptid;
3497
e53c95d4 3498 linux_nat_debug_printf ("[%s], [%s]", ptid.to_string ().c_str (),
9327494e 3499 target_options_to_string (target_options).c_str ());
7feb7d06
PA
3500
3501 /* Flush the async file first. */
d9d41e78 3502 if (target_is_async_p ())
7feb7d06
PA
3503 async_file_flush ();
3504
e3e9f5a2
PA
3505 /* Resume LWPs that are currently stopped without any pending status
3506 to report, but are resumed from the core's perspective. LWPs get
3507 in this state if we find them stopping at a time we're not
3508 interested in reporting the event (target_wait on a
3509 specific_process, for example, see linux_nat_wait_1), and
3510 meanwhile the event became uninteresting. Don't bother resuming
3511 LWPs we're not going to wait for if they'd stop immediately. */
fbea99ea 3512 if (target_is_non_stop_p ())
d3a70e03
TT
3513 iterate_over_lwps (minus_one_ptid,
3514 [=] (struct lwp_info *info)
3515 {
3516 return resume_stopped_resumed_lwps (info, ptid);
3517 });
e3e9f5a2 3518
f6ac5f3d 3519 event_ptid = linux_nat_wait_1 (ptid, ourstatus, target_options);
7feb7d06
PA
3520
3521 /* If we requested any event, and something came out, assume there
3522 may be more. If we requested a specific lwp or process, also
3523 assume there may be more. */
d9d41e78 3524 if (target_is_async_p ()
183be222
SM
3525 && ((ourstatus->kind () != TARGET_WAITKIND_IGNORE
3526 && ourstatus->kind () != TARGET_WAITKIND_NO_RESUMED)
d7e15655 3527 || ptid != minus_one_ptid))
7feb7d06
PA
3528 async_file_mark ();
3529
7feb7d06
PA
3530 return event_ptid;
3531}
3532
1d2736d4
PA
3533/* Kill one LWP. */
3534
3535static void
3536kill_one_lwp (pid_t pid)
d6b0e80f 3537{
ed731959
JK
3538 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3539
3540 errno = 0;
1d2736d4 3541 kill_lwp (pid, SIGKILL);
9327494e 3542
ed731959 3543 if (debug_linux_nat)
57745c90
PA
3544 {
3545 int save_errno = errno;
3546
9327494e
SM
3547 linux_nat_debug_printf
3548 ("kill (SIGKILL) %ld, 0, 0 (%s)", (long) pid,
3549 save_errno != 0 ? safe_strerror (save_errno) : "OK");
57745c90 3550 }
ed731959
JK
3551
3552 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3553
d6b0e80f 3554 errno = 0;
1d2736d4 3555 ptrace (PTRACE_KILL, pid, 0, 0);
d6b0e80f 3556 if (debug_linux_nat)
57745c90
PA
3557 {
3558 int save_errno = errno;
3559
9327494e
SM
3560 linux_nat_debug_printf
3561 ("PTRACE_KILL %ld, 0, 0 (%s)", (long) pid,
3562 save_errno ? safe_strerror (save_errno) : "OK");
57745c90 3563 }
d6b0e80f
AC
3564}
3565
1d2736d4
PA
3566/* Wait for an LWP to die. */
3567
3568static void
3569kill_wait_one_lwp (pid_t pid)
d6b0e80f 3570{
1d2736d4 3571 pid_t res;
d6b0e80f
AC
3572
3573 /* We must make sure that there are no pending events (delayed
3574 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
3575 program doesn't interfere with any following debugging session. */
3576
d6b0e80f
AC
3577 do
3578 {
1d2736d4
PA
3579 res = my_waitpid (pid, NULL, __WALL);
3580 if (res != (pid_t) -1)
d6b0e80f 3581 {
9327494e
SM
3582 linux_nat_debug_printf ("wait %ld received unknown.", (long) pid);
3583
4a6ed09b
PA
3584 /* The Linux kernel sometimes fails to kill a thread
3585 completely after PTRACE_KILL; that goes from the stop
3586 point in do_fork out to the one in get_signal_to_deliver
3587 and waits again. So kill it again. */
1d2736d4 3588 kill_one_lwp (pid);
d6b0e80f
AC
3589 }
3590 }
1d2736d4
PA
3591 while (res == pid);
3592
3593 gdb_assert (res == -1 && errno == ECHILD);
3594}
3595
3596/* Callback for iterate_over_lwps. */
d6b0e80f 3597
1d2736d4 3598static int
d3a70e03 3599kill_callback (struct lwp_info *lp)
1d2736d4 3600{
e38504b3 3601 kill_one_lwp (lp->ptid.lwp ());
d6b0e80f
AC
3602 return 0;
3603}
3604
1d2736d4
PA
3605/* Callback for iterate_over_lwps. */
3606
3607static int
d3a70e03 3608kill_wait_callback (struct lwp_info *lp)
1d2736d4 3609{
e38504b3 3610 kill_wait_one_lwp (lp->ptid.lwp ());
1d2736d4
PA
3611 return 0;
3612}
3613
0d36baa9 3614/* Kill the fork/clone child of LP if it has an unfollowed child. */
1d2736d4 3615
0d36baa9
PA
3616static int
3617kill_unfollowed_child_callback (lwp_info *lp)
1d2736d4 3618{
6b09f134 3619 std::optional<target_waitstatus> ws = get_pending_child_status (lp);
0d36baa9 3620 if (ws.has_value ())
08036331 3621 {
0d36baa9
PA
3622 ptid_t child_ptid = ws->child_ptid ();
3623 int child_pid = child_ptid.pid ();
3624 int child_lwp = child_ptid.lwp ();
08036331 3625
0d36baa9
PA
3626 kill_one_lwp (child_lwp);
3627 kill_wait_one_lwp (child_lwp);
08036331 3628
0d36baa9
PA
3629 /* Let the arch-specific native code know this process is
3630 gone. */
3631 if (ws->kind () != TARGET_WAITKIND_THREAD_CLONED)
3632 linux_target->low_forget_process (child_pid);
08036331 3633 }
0d36baa9
PA
3634
3635 return 0;
1d2736d4
PA
3636}
3637
f6ac5f3d
PA
3638void
3639linux_nat_target::kill ()
d6b0e80f 3640{
0d36baa9
PA
3641 ptid_t pid_ptid (inferior_ptid.pid ());
3642
3643 /* If we're stopped while forking/cloning and we haven't followed
3644 yet, kill the child task. We need to do this first because the
f973ed9c 3645 parent will be sleeping if this is a vfork. */
0d36baa9 3646 iterate_over_lwps (pid_ptid, kill_unfollowed_child_callback);
f973ed9c
DJ
3647
3648 if (forks_exist_p ())
7feb7d06 3649 linux_fork_killall ();
f973ed9c
DJ
3650 else
3651 {
4c28f408 3652 /* Stop all threads before killing them, since ptrace requires
30baf67b 3653 that the thread be stopped for PTRACE_KILL to succeed. */
0d36baa9 3654 iterate_over_lwps (pid_ptid, stop_callback);
4c28f408
PA
3655 /* ... and wait until all of them have reported back that
3656 they're no longer running. */
0d36baa9 3657 iterate_over_lwps (pid_ptid, stop_wait_callback);
4c28f408 3658
f973ed9c 3659 /* Kill all LWP's ... */
0d36baa9 3660 iterate_over_lwps (pid_ptid, kill_callback);
f973ed9c
DJ
3661
3662 /* ... and wait until we've flushed all events. */
0d36baa9 3663 iterate_over_lwps (pid_ptid, kill_wait_callback);
f973ed9c
DJ
3664 }
3665
bc1e6c81 3666 target_mourn_inferior (inferior_ptid);
d6b0e80f
AC
3667}
3668
f6ac5f3d
PA
3669void
3670linux_nat_target::mourn_inferior ()
d6b0e80f 3671{
b26b06dd
AB
3672 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
3673
e99b03dc 3674 int pid = inferior_ptid.pid ();
26cb8b7c
PA
3675
3676 purge_lwp_list (pid);
d6b0e80f 3677
8a89ddbd 3678 close_proc_mem_file (pid);
05c06f31 3679
f973ed9c 3680 if (! forks_exist_p ())
d90e17a7 3681 /* Normal case, no other forks available. */
f6ac5f3d 3682 inf_ptrace_target::mourn_inferior ();
f973ed9c
DJ
3683 else
3684 /* Multi-fork case. The current inferior_ptid has exited, but
3685 there are other viable forks to debug. Delete the exiting
3686 one and context-switch to the first available. */
3687 linux_fork_mourn_inferior ();
26cb8b7c
PA
3688
3689 /* Let the arch-specific native code know this process is gone. */
135340af 3690 linux_target->low_forget_process (pid);
d6b0e80f
AC
3691}
3692
5b009018
PA
3693/* Convert a native/host siginfo object, into/from the siginfo in the
3694 layout of the inferior's architecture. If DIRECTION is 1, copy from
 INF_SIGINFO into SIGINFO; if DIRECTION is 0, copy the other way. */
3695
3696static void
a5362b9a 3697siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
5b009018 3698{
135340af
PA
3699 /* If the low target didn't do anything, then just do a straight
3700 memcpy. */
3701 if (!linux_target->low_siginfo_fixup (siginfo, inf_siginfo, direction))
5b009018
PA
3702 {
3703 if (direction == 1)
a5362b9a 3704 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5b009018 3705 else
a5362b9a 3706 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5b009018
PA
3707 }
3708}
3709
9b409511 3710static enum target_xfer_status
7154e786 3711linux_xfer_siginfo (ptid_t ptid, enum target_object object,
dda83cd7 3712 const char *annex, gdb_byte *readbuf,
9b409511
YQ
3713 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3714 ULONGEST *xfered_len)
4aa995e1 3715{
a5362b9a
TS
3716 siginfo_t siginfo;
3717 gdb_byte inf_siginfo[sizeof (siginfo_t)];
4aa995e1
PA
3718
3719 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
3720 gdb_assert (readbuf || writebuf);
3721
4aa995e1 3722 if (offset > sizeof (siginfo))
2ed4b548 3723 return TARGET_XFER_E_IO;
4aa995e1 3724
7154e786 3725 if (!linux_nat_get_siginfo (ptid, &siginfo))
2ed4b548 3726 return TARGET_XFER_E_IO;
4aa995e1 3727
5b009018
PA
3728 /* When GDB is built as a 64-bit application, ptrace writes into
3729 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3730 inferior with a 64-bit GDB should look the same as debugging it
3731 with a 32-bit GDB, we need to convert it. GDB core always sees
3732 the converted layout, so any read/write will have to be done
3733 post-conversion. */
3734 siginfo_fixup (&siginfo, inf_siginfo, 0);
3735
4aa995e1
PA
3736 if (offset + len > sizeof (siginfo))
3737 len = sizeof (siginfo) - offset;
3738
3739 if (readbuf != NULL)
5b009018 3740 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
3741 else
3742 {
5b009018
PA
3743 memcpy (inf_siginfo + offset, writebuf, len);
3744
3745 /* Convert back to ptrace layout before flushing it out. */
3746 siginfo_fixup (&siginfo, inf_siginfo, 1);
3747
7154e786 3748 int pid = get_ptrace_pid (ptid);
4aa995e1
PA
3749 errno = 0;
3750 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3751 if (errno != 0)
2ed4b548 3752 return TARGET_XFER_E_IO;
4aa995e1
PA
3753 }
3754
9b409511
YQ
3755 *xfered_len = len;
3756 return TARGET_XFER_OK;
4aa995e1
PA
3757}
3758
9b409511 3759static enum target_xfer_status
f6ac5f3d
PA
3760linux_nat_xfer_osdata (enum target_object object,
3761 const char *annex, gdb_byte *readbuf,
3762 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3763 ULONGEST *xfered_len);
3764
f6ac5f3d 3765static enum target_xfer_status
f9f593dd
SM
3766linux_proc_xfer_memory_partial (int pid, gdb_byte *readbuf,
3767 const gdb_byte *writebuf, ULONGEST offset,
3768 LONGEST len, ULONGEST *xfered_len);
f6ac5f3d
PA
3769
3770enum target_xfer_status
3771linux_nat_target::xfer_partial (enum target_object object,
3772 const char *annex, gdb_byte *readbuf,
3773 const gdb_byte *writebuf,
3774 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
d6b0e80f 3775{
4aa995e1 3776 if (object == TARGET_OBJECT_SIGNAL_INFO)
7154e786 3777 return linux_xfer_siginfo (inferior_ptid, object, annex, readbuf, writebuf,
9b409511 3778 offset, len, xfered_len);
4aa995e1 3779
c35b1492
PA
3780 /* The target is connected but no live inferior is selected. Pass
3781 this request down to a lower stratum (e.g., the executable
3782 file). */
d7e15655 3783 if (object == TARGET_OBJECT_MEMORY && inferior_ptid == null_ptid)
9b409511 3784 return TARGET_XFER_EOF;
c35b1492 3785
f6ac5f3d
PA
3786 if (object == TARGET_OBJECT_AUXV)
3787 return memory_xfer_auxv (this, object, annex, readbuf, writebuf,
3788 offset, len, xfered_len);
3789
3790 if (object == TARGET_OBJECT_OSDATA)
3791 return linux_nat_xfer_osdata (object, annex, readbuf, writebuf,
3792 offset, len, xfered_len);
d6b0e80f 3793
f6ac5f3d
PA
3794 if (object == TARGET_OBJECT_MEMORY)
3795 {
05c06f31
PA
3796 /* GDB calculates all addresses in the largest possible address
3797 width. The address width must be masked before its final use
3798 by linux_proc_xfer_partial.
3799
3800 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
99d9c3b9 3801 int addr_bit = gdbarch_addr_bit (current_inferior ()->arch ());
f6ac5f3d
PA
3802
3803 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
3804 offset &= ((ULONGEST) 1 << addr_bit) - 1;
f6ac5f3d 3805
dd09fe0d
KS
3806 /* If /proc/pid/mem is writable, don't fallback to ptrace. If
3807 the write via /proc/pid/mem fails because the inferior execed
3808 (and we haven't seen the exec event yet), a subsequent ptrace
3809 poke would incorrectly write memory to the post-exec address
3810 space, while the core was trying to write to the pre-exec
3811 address space. */
3812 if (proc_mem_file_is_writable ())
f9f593dd
SM
3813 return linux_proc_xfer_memory_partial (inferior_ptid.pid (), readbuf,
3814 writebuf, offset, len,
3815 xfered_len);
05c06f31 3816 }
f6ac5f3d
PA
3817
3818 return inf_ptrace_target::xfer_partial (object, annex, readbuf, writebuf,
3819 offset, len, xfered_len);
d6b0e80f
AC
3820}
3821
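/* Illustrative sketch, not part of gdb/linux-nat.c: the effect of the
   address-width masking done in xfer_partial above.  For a 32-bit
   inferior (ADDR_BIT == 32), a sign-extended address such as
   0xffffffff80001000 must be reduced to 0x80001000 before being used
   as a /proc/PID/mem offset.  The function name and the values are
   made up for the example.  */

static ULONGEST
example_mask_address (ULONGEST offset, int addr_bit)
{
  if (addr_bit < (int) (sizeof (ULONGEST) * HOST_CHAR_BIT))
    offset &= ((ULONGEST) 1 << addr_bit) - 1;

  return offset;
}

/* example_mask_address (0xffffffff80001000, 32) yields 0x80001000.  */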
57810aa7 3822bool
f6ac5f3d 3823linux_nat_target::thread_alive (ptid_t ptid)
28439f5e 3824{
4a6ed09b
PA
3825 /* As long as a PTID is in lwp list, consider it alive. */
3826 return find_lwp_pid (ptid) != NULL;
28439f5e
PA
3827}
3828
8a06aea7
PA
3829/* Implement the to_update_thread_list target method for this
3830 target. */
3831
f6ac5f3d
PA
3832void
3833linux_nat_target::update_thread_list ()
8a06aea7 3834{
4a6ed09b
PA
3835 /* We add/delete threads from the list as clone/exit events are
3836 processed, so just try deleting exited threads still in the
3837 thread list. */
3838 delete_exited_threads ();
a6904d5a
PA
3839
3840 /* Update the processor core that each lwp/thread was last seen
3841 running on. */
901b9821 3842 for (lwp_info *lwp : all_lwps ())
1ad3de98
PA
3843 {
3844 /* Avoid accessing /proc if the thread hasn't run since the last
3845 time we fetched the thread's core. Accessing /proc becomes
3846 noticeably expensive when we have thousands of LWPs. */
3847 if (lwp->core == -1)
3848 lwp->core = linux_common_core_of_thread (lwp->ptid);
3849 }
8a06aea7
PA
3850}
3851
a068643d 3852std::string
f6ac5f3d 3853linux_nat_target::pid_to_str (ptid_t ptid)
d6b0e80f 3854{
15a9e13e 3855 if (ptid.lwp_p ()
e38504b3 3856 && (ptid.pid () != ptid.lwp ()
e99b03dc 3857 || num_lwps (ptid.pid ()) > 1))
a068643d 3858 return string_printf ("LWP %ld", ptid.lwp ());
d6b0e80f
AC
3859
3860 return normal_pid_to_str (ptid);
3861}
3862
f6ac5f3d
PA
3863const char *
3864linux_nat_target::thread_name (struct thread_info *thr)
4694da01 3865{
79efa585 3866 return linux_proc_tid_get_name (thr->ptid);
4694da01
TT
3867}
3868
dba24537
AC
3869/* Accepts an integer PID; Returns a string representing a file that
3870 can be opened to get the symbols for the child process. */
3871
0e90c441 3872const char *
f6ac5f3d 3873linux_nat_target::pid_to_exec_file (int pid)
dba24537 3874{
e0d86d2c 3875 return linux_proc_pid_to_exec_file (pid);
dba24537
AC
3876}
3877
8a89ddbd
PA
3878/* Object representing an open /proc/PID/mem file. We keep one such
3879 file open per inferior.
3880
3881 It might be tempting to think about only ever opening one file at
3882 most for all inferiors, closing/reopening the file as we access
3883 memory of different inferiors, to minimize the number of file
3884 descriptors open, which can otherwise run into resource limits.
3885 However, that does not work correctly -- if the inferior execs and
3886 we haven't processed the exec event yet, and we have opened a
3887 /proc/PID/mem file, we will get a mem file accessing the post-exec
3888 address space, thinking we're opening it for the pre-exec address
3889 space. That is dangerous as we can poke memory (e.g. clearing
3890 breakpoints) in the post-exec memory by mistake, corrupting the
3891 inferior. For that reason, we open the mem file as early as
3892 possible, right after spawning, forking or attaching to the
3893 inferior, when the inferior is stopped and thus before it has a
3894 chance of execing.
3895
3896 Note that after opening the file, even if the thread we opened it
3897 for subsequently exits, the open file is still usable for accessing
3898 memory. It's only when the whole process exits or execs that the
3899 file becomes invalid, at which point reads/writes return EOF. */
3900
3901class proc_mem_file
3902{
3903public:
3904 proc_mem_file (ptid_t ptid, int fd)
3905 : m_ptid (ptid), m_fd (fd)
3906 {
3907 gdb_assert (m_fd != -1);
3908 }
05c06f31 3909
8a89ddbd 3910 ~proc_mem_file ()
05c06f31 3911 {
89662f69 3912 linux_nat_debug_printf ("closing fd %d for /proc/%d/task/%ld/mem",
8a89ddbd
PA
3913 m_fd, m_ptid.pid (), m_ptid.lwp ());
3914 close (m_fd);
05c06f31 3915 }
05c06f31 3916
8a89ddbd
PA
3917 DISABLE_COPY_AND_ASSIGN (proc_mem_file);
3918
3919 int fd ()
3920 {
3921 return m_fd;
3922 }
3923
3924private:
3925 /* The LWP this file was opened for. Just for debugging
3926 purposes. */
3927 ptid_t m_ptid;
3928
3929 /* The file descriptor. */
3930 int m_fd = -1;
3931};
3932
3933/* The map between an inferior process id, and the open /proc/PID/mem
3934 file. This is stored in a map instead of in a per-inferior
3935 structure because we need to be able to access memory of processes
3936 which don't have a corresponding struct inferior object. E.g.,
3937 with "detach-on-fork on" (the default), and "follow-fork parent"
3938 (also default), we don't create an inferior for the fork child, but
3939 we still need to remove breakpoints from the fork child's
3940 memory. */
3941static std::unordered_map<int, proc_mem_file> proc_mem_file_map;
3942
3943/* Close the /proc/PID/mem file for PID. */
05c06f31
PA
3944
3945static void
8a89ddbd 3946close_proc_mem_file (pid_t pid)
dba24537 3947{
8a89ddbd 3948 proc_mem_file_map.erase (pid);
05c06f31 3949}
dba24537 3950
8a89ddbd
PA
3951/* Open the /proc/PID/mem file for the process (thread group) of PTID.
3952 We actually open /proc/PID/task/LWP/mem, as that's the LWP we know
3953 exists and is stopped right now. We prefer the
3954 /proc/PID/task/LWP/mem form over /proc/LWP/mem to avoid tid-reuse
3955 races, just in case this is ever called on an already-waited
3956 LWP. */
dba24537 3957
8a89ddbd
PA
3958static void
3959open_proc_mem_file (ptid_t ptid)
05c06f31 3960{
8a89ddbd
PA
3961 auto iter = proc_mem_file_map.find (ptid.pid ());
3962 gdb_assert (iter == proc_mem_file_map.end ());
dba24537 3963
8a89ddbd
PA
3964 char filename[64];
3965 xsnprintf (filename, sizeof filename,
3966 "/proc/%d/task/%ld/mem", ptid.pid (), ptid.lwp ());
3967
3968 int fd = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();
05c06f31 3969
8a89ddbd
PA
3970 if (fd == -1)
3971 {
3972 warning (_("opening /proc/PID/mem file for lwp %d.%ld failed: %s (%d)"),
3973 ptid.pid (), ptid.lwp (),
3974 safe_strerror (errno), errno);
3975 return;
05c06f31
PA
3976 }
3977
8a89ddbd
PA
3978 proc_mem_file_map.emplace (std::piecewise_construct,
3979 std::forward_as_tuple (ptid.pid ()),
3980 std::forward_as_tuple (ptid, fd));
3981
9221923c 3982 linux_nat_debug_printf ("opened fd %d for lwp %d.%ld",
8a89ddbd
PA
3983 fd, ptid.pid (), ptid.lwp ());
3984}
3985
1bcb0708
PA
3986/* Helper for linux_proc_xfer_memory_partial and
3987 proc_mem_file_is_writable. FD is the already opened /proc/pid/mem
3988 file, and PID is the pid of the corresponding process. The rest of
3989 the arguments are like linux_proc_xfer_memory_partial's. */
8a89ddbd
PA
3990
3991static enum target_xfer_status
1bcb0708
PA
3992linux_proc_xfer_memory_partial_fd (int fd, int pid,
3993 gdb_byte *readbuf, const gdb_byte *writebuf,
3994 ULONGEST offset, LONGEST len,
3995 ULONGEST *xfered_len)
8a89ddbd
PA
3996{
3997 ssize_t ret;
3998
8a89ddbd 3999 gdb_assert (fd != -1);
dba24537 4000
31a56a22
PA
4001 /* Use pread64/pwrite64 if available, since they save a syscall and
4002 can handle 64-bit offsets even on 32-bit platforms (for instance,
4003 SPARC debugging a SPARC64 application). But only use them if the
4004 offset isn't so high that when cast to off_t it'd be negative, as
4005 seen on SPARC64. pread64/pwrite64 outright reject such offsets.
4006 lseek does not. */
dba24537 4007#ifdef HAVE_PREAD64
31a56a22
PA
4008 if ((off_t) offset >= 0)
4009 ret = (readbuf != nullptr
4010 ? pread64 (fd, readbuf, len, offset)
4011 : pwrite64 (fd, writebuf, len, offset));
4012 else
dba24537 4013#endif
31a56a22
PA
4014 {
4015 ret = lseek (fd, offset, SEEK_SET);
4016 if (ret != -1)
4017 ret = (readbuf != nullptr
4018 ? read (fd, readbuf, len)
4019 : write (fd, writebuf, len));
4020 }
dba24537 4021
05c06f31
PA
4022 if (ret == -1)
4023 {
9221923c 4024 linux_nat_debug_printf ("accessing fd %d for pid %d failed: %s (%d)",
1bcb0708 4025 fd, pid, safe_strerror (errno), errno);
284b6bb5 4026 return TARGET_XFER_E_IO;
05c06f31
PA
4027 }
4028 else if (ret == 0)
4029 {
8a89ddbd
PA
4030 /* EOF means the address space is gone, the whole process exited
4031 or execed. */
9221923c 4032 linux_nat_debug_printf ("accessing fd %d for pid %d got EOF",
1bcb0708 4033 fd, pid);
05c06f31
PA
4034 return TARGET_XFER_EOF;
4035 }
9b409511
YQ
4036 else
4037 {
8a89ddbd 4038 *xfered_len = ret;
9b409511
YQ
4039 return TARGET_XFER_OK;
4040 }
05c06f31 4041}
efcbbd14 4042
1bcb0708
PA
4043/* Implement the to_xfer_partial target method using /proc/PID/mem.
4044 Because we can use a single read/write call, this can be much more
4045 efficient than banging away at PTRACE_PEEKTEXT. Also, unlike
4046 PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running
4047 threads. */
4048
4049static enum target_xfer_status
f9f593dd
SM
4050linux_proc_xfer_memory_partial (int pid, gdb_byte *readbuf,
4051 const gdb_byte *writebuf, ULONGEST offset,
4052 LONGEST len, ULONGEST *xfered_len)
1bcb0708 4053{
1bcb0708
PA
4054 auto iter = proc_mem_file_map.find (pid);
4055 if (iter == proc_mem_file_map.end ())
4056 return TARGET_XFER_EOF;
4057
4058 int fd = iter->second.fd ();
4059
4060 return linux_proc_xfer_memory_partial_fd (fd, pid, readbuf, writebuf, offset,
4061 len, xfered_len);
4062}
4063
4064/* Check whether /proc/pid/mem is writable in the current kernel, and
4065 return true if so. It wasn't writable before Linux 2.6.39, but
4066 there's no way to know whether the feature was backported to older
4067 kernels. So we check to see if it works. The result is cached,
3bfdcabb 4068 and this is guaranteed to be called once early during inferior
9dff6a5d
PA
4069 startup, so that any warning is printed out consistently between
4070 GDB invocations. Note we don't call it during GDB startup instead
4071 though, because then we might warn with e.g. just "gdb --version"
4072 on sandboxed systems. See PR gdb/29907. */
1bcb0708
PA
4073
4074static bool
4075proc_mem_file_is_writable ()
4076{
6b09f134 4077 static std::optional<bool> writable;
1bcb0708
PA
4078
4079 if (writable.has_value ())
4080 return *writable;
4081
4082 writable.emplace (false);
4083
4084 /* We check whether /proc/pid/mem is writable by trying to write to
4085 one of our variables via /proc/self/mem. */
4086
4087 int fd = gdb_open_cloexec ("/proc/self/mem", O_RDWR | O_LARGEFILE, 0).release ();
4088
4089 if (fd == -1)
4090 {
4091 warning (_("opening /proc/self/mem file failed: %s (%d)"),
4092 safe_strerror (errno), errno);
4093 return *writable;
4094 }
4095
4096 SCOPE_EXIT { close (fd); };
4097
4098 /* This is the variable we try to write to. Note OFFSET below. */
4099 volatile gdb_byte test_var = 0;
4100
4101 gdb_byte writebuf[] = {0x55};
4102 ULONGEST offset = (uintptr_t) &test_var;
4103 ULONGEST xfered_len;
4104
4105 enum target_xfer_status res
4106 = linux_proc_xfer_memory_partial_fd (fd, getpid (), nullptr, writebuf,
4107 offset, 1, &xfered_len);
4108
4109 if (res == TARGET_XFER_OK)
4110 {
4111 gdb_assert (xfered_len == 1);
4112 gdb_assert (test_var == 0x55);
4113 /* Success. */
4114 *writable = true;
4115 }
4116
4117 return *writable;
4118}
4119
dba24537
AC
4120/* Parse LINE as a signal set and add its set bits to SIGS. */
4121
4122static void
4123add_line_to_sigset (const char *line, sigset_t *sigs)
4124{
4125 int len = strlen (line) - 1;
4126 const char *p;
4127 int signum;
4128
4129 if (line[len] != '\n')
8a3fe4f8 4130 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4131
4132 p = line;
4133 signum = len * 4;
4134 while (len-- > 0)
4135 {
4136 int digit;
4137
4138 if (*p >= '0' && *p <= '9')
4139 digit = *p - '0';
4140 else if (*p >= 'a' && *p <= 'f')
4141 digit = *p - 'a' + 10;
4142 else
8a3fe4f8 4143 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4144
4145 signum -= 4;
4146
4147 if (digit & 1)
4148 sigaddset (sigs, signum + 1);
4149 if (digit & 2)
4150 sigaddset (sigs, signum + 2);
4151 if (digit & 4)
4152 sigaddset (sigs, signum + 3);
4153 if (digit & 8)
4154 sigaddset (sigs, signum + 4);
4155
4156 p++;
4157 }
4158}
4159
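/* Illustrative sketch, not part of gdb/linux-nat.c: how the nibble
   decoding above maps a /proc status mask onto signal numbers.  Bit
   N-1 of the hex value corresponds to signal N, so a mask of
   0000000000000200 (only bit 9 set) yields signal 10, which is SIGUSR1
   on most Linux targets.  The function name is hypothetical.  */

static void
example_decode_sigpnd_line (void)
{
  sigset_t set;

  sigemptyset (&set);
  add_line_to_sigset ("0000000000000200\n", &set);

  /* Bit 9 set => signal 10 is now a member of the set.  */
  gdb_assert (sigismember (&set, 10));
}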
4160/* Find process PID's pending signals from /proc/pid/status and set
4161 SIGS to match. */
4162
4163void
3e43a32a
MS
4164linux_proc_pending_signals (int pid, sigset_t *pending,
4165 sigset_t *blocked, sigset_t *ignored)
dba24537 4166{
d8d2a3ee 4167 char buffer[PATH_MAX], fname[PATH_MAX];
dba24537
AC
4168
4169 sigemptyset (pending);
4170 sigemptyset (blocked);
4171 sigemptyset (ignored);
cde33bf1 4172 xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
d419f42d 4173 gdb_file_up procfile = gdb_fopen_cloexec (fname, "r");
dba24537 4174 if (procfile == NULL)
8a3fe4f8 4175 error (_("Could not open %s"), fname);
dba24537 4176
d419f42d 4177 while (fgets (buffer, PATH_MAX, procfile.get ()) != NULL)
dba24537
AC
4178 {
4179 /* Normal queued signals are on the SigPnd line in the status
4180 file. However, 2.6 kernels also have a "shared" pending
4181 queue for delivering signals to a thread group, so check for
4182 a ShdPnd line also.
4183
4184 Unfortunately some Red Hat kernels include the shared pending
4185 queue but not the ShdPnd status field. */
4186
61012eef 4187 if (startswith (buffer, "SigPnd:\t"))
dba24537 4188 add_line_to_sigset (buffer + 8, pending);
61012eef 4189 else if (startswith (buffer, "ShdPnd:\t"))
dba24537 4190 add_line_to_sigset (buffer + 8, pending);
61012eef 4191 else if (startswith (buffer, "SigBlk:\t"))
dba24537 4192 add_line_to_sigset (buffer + 8, blocked);
61012eef 4193 else if (startswith (buffer, "SigIgn:\t"))
dba24537
AC
4194 add_line_to_sigset (buffer + 8, ignored);
4195 }
dba24537
AC
4196}
4197
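/* Illustrative usage sketch, not part of gdb/linux-nat.c: print the
   signals a process has pending, using the helper above.  The function
   name is hypothetical and error handling is omitted; strsignal and
   NSIG come from the C library.  */

static void
example_show_pending_signals (int pid)
{
  sigset_t pending, blocked, ignored;

  linux_proc_pending_signals (pid, &pending, &blocked, &ignored);

  for (int sig = 1; sig < NSIG; sig++)
    if (sigismember (&pending, sig))
      gdb_printf ("signal %d (%s) is pending\n", sig, strsignal (sig));
}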
9b409511 4198static enum target_xfer_status
f6ac5f3d 4199linux_nat_xfer_osdata (enum target_object object,
e0881a8e 4200 const char *annex, gdb_byte *readbuf,
9b409511
YQ
4201 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4202 ULONGEST *xfered_len)
07e059b5 4203{
07e059b5
VP
4204 gdb_assert (object == TARGET_OBJECT_OSDATA);
4205
9b409511
YQ
4206 *xfered_len = linux_common_xfer_osdata (annex, readbuf, offset, len);
4207 if (*xfered_len == 0)
4208 return TARGET_XFER_EOF;
4209 else
4210 return TARGET_XFER_OK;
07e059b5
VP
4211}
4212
f6ac5f3d
PA
4213std::vector<static_tracepoint_marker>
4214linux_nat_target::static_tracepoint_markers_by_strid (const char *strid)
5808517f
YQ
4215{
4216 char s[IPA_CMD_BUF_SIZE];
e99b03dc 4217 int pid = inferior_ptid.pid ();
5d9310c4 4218 std::vector<static_tracepoint_marker> markers;
256642e8 4219 const char *p = s;
184ea2f7 4220 ptid_t ptid = ptid_t (pid, 0);
5d9310c4 4221 static_tracepoint_marker marker;
5808517f
YQ
4222
4223 /* Pause all */
4224 target_stop (ptid);
4225
81aa19c3 4226 strcpy (s, "qTfSTM");
42476b70 4227 agent_run_command (pid, s, strlen (s) + 1);
5808517f 4228
1db93f14
TT
4229 /* Unpause all. */
4230 SCOPE_EXIT { target_continue_no_signal (ptid); };
5808517f
YQ
4231
4232 while (*p++ == 'm')
4233 {
5808517f
YQ
4234 do
4235 {
5d9310c4 4236 parse_static_tracepoint_marker_definition (p, &p, &marker);
5808517f 4237
5d9310c4
SM
4238 if (strid == NULL || marker.str_id == strid)
4239 markers.push_back (std::move (marker));
5808517f
YQ
4240 }
4241 while (*p++ == ','); /* comma-separated list */
4242
81aa19c3 4243 strcpy (s, "qTsSTM");
42476b70 4244 agent_run_command (pid, s, strlen (s) + 1);
5808517f
YQ
4245 p = s;
4246 }
4247
5808517f
YQ
4248 return markers;
4249}
4250
b84876c2
PA
4251/* target_can_async_p implementation. */
4252
57810aa7 4253bool
f6ac5f3d 4254linux_nat_target::can_async_p ()
b84876c2 4255{
fce6cd34
AB
4256 /* This flag should be checked in the common target.c code. */
4257 gdb_assert (target_async_permitted);
4258
4259 /* Otherwise, this target is always able to support async mode. */
4260 return true;
b84876c2
PA
4261}
4262
57810aa7 4263bool
f6ac5f3d 4264linux_nat_target::supports_non_stop ()
9908b566 4265{
f80c8ec4 4266 return true;
9908b566
VP
4267}
4268
fbea99ea
PA
4269/* to_always_non_stop_p implementation. */
4270
57810aa7 4271bool
f6ac5f3d 4272linux_nat_target::always_non_stop_p ()
fbea99ea 4273{
f80c8ec4 4274 return true;
fbea99ea
PA
4275}
4276
57810aa7 4277bool
f6ac5f3d 4278linux_nat_target::supports_multi_process ()
d90e17a7 4279{
aee91db3 4280 return true;
d90e17a7
PA
4281}
4282
57810aa7 4283bool
f6ac5f3d 4284linux_nat_target::supports_disable_randomization ()
03583c20 4285{
f80c8ec4 4286 return true;
03583c20
UW
4287}
4288
7feb7d06
PA
4289/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4290 it lets us notice when any child changes state and notify the
4291 event loop; and it allows us to use sigsuspend in linux_nat_wait_1
4292 above to wait for the arrival of a SIGCHLD. */
4293
b84876c2 4294static void
7feb7d06 4295sigchld_handler (int signo)
b84876c2 4296{
7feb7d06
PA
4297 int old_errno = errno;
4298
01124a23 4299 if (debug_linux_nat)
da5bd37e 4300 gdb_stdlog->write_async_safe ("sigchld\n", sizeof ("sigchld\n") - 1);
7feb7d06 4301
b146ba14
JB
4302 if (signo == SIGCHLD)
4303 {
4304 /* Let the event loop know that there are events to handle. */
4305 linux_nat_target::async_file_mark_if_open ();
4306 }
7feb7d06
PA
4307
4308 errno = old_errno;
4309}
4310
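/* Illustrative sketch, not part of gdb/linux-nat.c: conceptually,
   async_file_mark_if_open above is the classic self-pipe idiom shown
   here in plain POSIX.  The signal handler does a single
   async-signal-safe write to a pipe; the event loop polls the pipe's
   read end and wakes up.  The pipe and handler names below are
   hypothetical.  */

static int example_event_pipe[2];

static void
example_sigchld_handler (int signo)
{
  int old_errno = errno;
  char c = '+';

  /* write is async-signal-safe; this wakes up whoever is polling the
     read end of example_event_pipe.  */
  ssize_t n = write (example_event_pipe[1], &c, 1);
  (void) n;

  errno = old_errno;
}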
4311/* Callback registered with the target events file descriptor. */
4312
4313static void
4314handle_target_event (int error, gdb_client_data client_data)
4315{
b1a35af2 4316 inferior_event_handler (INF_REG_EVENT);
7feb7d06
PA
4317}
4318
b84876c2
PA
4319/* target_async implementation. */
4320
f6ac5f3d 4321void
4a570176 4322linux_nat_target::async (bool enable)
b84876c2 4323{
4a570176 4324 if (enable == is_async_p ())
b146ba14
JB
4325 return;
4326
4327 /* Block child signals while we create/destroy the pipe, as their
4328 handler writes to it. */
4329 gdb::block_signals blocker;
4330
6a3753b3 4331 if (enable)
b84876c2 4332 {
b146ba14 4333 if (!async_file_open ())
f34652de 4334 internal_error ("creating event pipe failed.");
b146ba14
JB
4335
4336 add_file_handler (async_wait_fd (), handle_target_event, NULL,
4337 "linux-nat");
4338
4339 /* There may be pending events to handle. Tell the event loop
4340 to poll them. */
4341 async_file_mark ();
b84876c2
PA
4342 }
4343 else
4344 {
b146ba14
JB
4345 delete_file_handler (async_wait_fd ());
4346 async_file_close ();
b84876c2 4347 }
b84876c2
PA
4348}
4349
a493e3e2 4350/* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
252fbfc8
PA
4351 event came out. */
4352
4c28f408 4353static int
d3a70e03 4354linux_nat_stop_lwp (struct lwp_info *lwp)
4c28f408 4355{
d90e17a7 4356 if (!lwp->stopped)
252fbfc8 4357 {
9327494e 4358 linux_nat_debug_printf ("running -> suspending %s",
e53c95d4 4359 lwp->ptid.to_string ().c_str ());
252fbfc8 4360
252fbfc8 4361
25289eb2
PA
4362 if (lwp->last_resume_kind == resume_stop)
4363 {
9327494e
SM
4364 linux_nat_debug_printf ("already stopping LWP %ld at GDB's request",
4365 lwp->ptid.lwp ());
25289eb2
PA
4366 return 0;
4367 }
252fbfc8 4368
d3a70e03 4369 stop_callback (lwp);
25289eb2 4370 lwp->last_resume_kind = resume_stop;
d90e17a7
PA
4371 }
4372 else
4373 {
4374 /* Already known to be stopped; do nothing. */
252fbfc8 4375
d90e17a7
PA
4376 if (debug_linux_nat)
4377 {
9213a6d7 4378 if (linux_target->find_thread (lwp->ptid)->stop_requested)
9327494e 4379 linux_nat_debug_printf ("already stopped/stop_requested %s",
e53c95d4 4380 lwp->ptid.to_string ().c_str ());
d90e17a7 4381 else
9327494e 4382 linux_nat_debug_printf ("already stopped/no stop_requested yet %s",
e53c95d4 4383 lwp->ptid.to_string ().c_str ());
252fbfc8
PA
4384 }
4385 }
4c28f408
PA
4386 return 0;
4387}
4388
f6ac5f3d
PA
4389void
4390linux_nat_target::stop (ptid_t ptid)
4c28f408 4391{
b6e52a0b 4392 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
d3a70e03 4393 iterate_over_lwps (ptid, linux_nat_stop_lwp);
bfedc46a
PA
4394}
4395
dc146f7c
VP
4396/* Return the cached value of the processor core for thread PTID. */
4397
f6ac5f3d
PA
4398int
4399linux_nat_target::core_of_thread (ptid_t ptid)
dc146f7c
VP
4400{
4401 struct lwp_info *info = find_lwp_pid (ptid);
e0881a8e 4402
dc146f7c
VP
4403 if (info)
4404 return info->core;
4405 return -1;
4406}
4407
7a6a1731
GB
4408/* Implementation of to_filesystem_is_local. */
4409
57810aa7 4410bool
f6ac5f3d 4411linux_nat_target::filesystem_is_local ()
7a6a1731
GB
4412{
4413 struct inferior *inf = current_inferior ();
4414
4415 if (inf->fake_pid_p || inf->pid == 0)
57810aa7 4416 return true;
7a6a1731
GB
4417
4418 return linux_ns_same (inf->pid, LINUX_NS_MNT);
4419}
4420
4421/* Convert the INF argument passed to a to_fileio_* method
4422 to a process ID suitable for passing to its corresponding
4423 linux_mntns_* function. If INF is non-NULL then the
4424 caller is requesting the filesystem seen by INF. If INF
4425 is NULL then the caller is requesting the filesystem seen
4426 by the GDB. We fall back to GDB's filesystem in the case
4427 that INF is non-NULL but its PID is unknown. */
4428
4429static pid_t
4430linux_nat_fileio_pid_of (struct inferior *inf)
4431{
4432 if (inf == NULL || inf->fake_pid_p || inf->pid == 0)
4433 return getpid ();
4434 else
4435 return inf->pid;
4436}
4437
4438/* Implementation of to_fileio_open. */
4439
f6ac5f3d
PA
4440int
4441linux_nat_target::fileio_open (struct inferior *inf, const char *filename,
4442 int flags, int mode, int warn_if_slow,
b872057a 4443 fileio_error *target_errno)
7a6a1731
GB
4444{
4445 int nat_flags;
4446 mode_t nat_mode;
4447 int fd;
4448
4449 if (fileio_to_host_openflags (flags, &nat_flags) == -1
4450 || fileio_to_host_mode (mode, &nat_mode) == -1)
4451 {
4452 *target_errno = FILEIO_EINVAL;
4453 return -1;
4454 }
4455
4456 fd = linux_mntns_open_cloexec (linux_nat_fileio_pid_of (inf),
4457 filename, nat_flags, nat_mode);
4458 if (fd == -1)
4459 *target_errno = host_to_fileio_error (errno);
4460
4461 return fd;
4462}
4463
4464/* Implementation of to_fileio_readlink. */
4465
6b09f134 4466std::optional<std::string>
f6ac5f3d 4467linux_nat_target::fileio_readlink (struct inferior *inf, const char *filename,
b872057a 4468 fileio_error *target_errno)
7a6a1731
GB
4469{
4470 char buf[PATH_MAX];
4471 int len;
7a6a1731
GB
4472
4473 len = linux_mntns_readlink (linux_nat_fileio_pid_of (inf),
4474 filename, buf, sizeof (buf));
4475 if (len < 0)
4476 {
4477 *target_errno = host_to_fileio_error (errno);
e0d3522b 4478 return {};
7a6a1731
GB
4479 }
4480
e0d3522b 4481 return std::string (buf, len);
7a6a1731
GB
4482}
4483
4484/* Implementation of to_fileio_unlink. */
4485
f6ac5f3d
PA
4486int
4487linux_nat_target::fileio_unlink (struct inferior *inf, const char *filename,
b872057a 4488 fileio_error *target_errno)
7a6a1731
GB
4489{
4490 int ret;
4491
4492 ret = linux_mntns_unlink (linux_nat_fileio_pid_of (inf),
4493 filename);
4494 if (ret == -1)
4495 *target_errno = host_to_fileio_error (errno);
4496
4497 return ret;
4498}
4499
aa01bd36
PA
4500/* Implementation of the to_thread_events method. */
4501
f6ac5f3d
PA
4502void
4503linux_nat_target::thread_events (int enable)
aa01bd36
PA
4504{
4505 report_thread_events = enable;
4506}
4507
25b16bc9
PA
4508bool
4509linux_nat_target::supports_set_thread_options (gdb_thread_options options)
4510{
a51e14ef
PA
4511 constexpr gdb_thread_options supported_options
4512 = GDB_THREAD_OPTION_CLONE | GDB_THREAD_OPTION_EXIT;
25b16bc9
PA
4513 return ((options & supported_options) == options);
4514}
4515
f6ac5f3d
PA
4516linux_nat_target::linux_nat_target ()
4517{
f973ed9c
DJ
4518 /* We don't change the stratum; this target will sit at
4519 process_stratum and thread_db will sit at thread_stratum. This
4520 is a little strange, since this is a multi-threaded-capable
4521 target, but we want to be on the stack below thread_db, and we
4522 also want to be used for single-threaded processes. */
f973ed9c
DJ
4523}
4524
f865ee35
JK
4525/* See linux-nat.h. */
4526
ef632b4b 4527bool
f865ee35 4528linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
9f0bdab8 4529{
0acd1110 4530 int pid = get_ptrace_pid (ptid);
7cc662bc 4531 return ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo) == 0;
9f0bdab8
DJ
4532}
4533
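/* Illustrative usage sketch, not part of gdb/linux-nat.c: fetch the
   siginfo of an LWP with the helper above and, for a SIGSEGV, print
   the faulting address.  The function name is hypothetical and error
   handling is minimal.  */

static void
example_print_fault_address (ptid_t ptid)
{
  siginfo_t si;

  if (linux_nat_get_siginfo (ptid, &si) && si.si_signo == SIGSEGV)
    gdb_printf ("SIGSEGV at address %s\n",
		host_address_to_string (si.si_addr));
}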
7b669087
GB
4534/* See nat/linux-nat.h. */
4535
4536ptid_t
4537current_lwp_ptid (void)
4538{
15a9e13e 4539 gdb_assert (inferior_ptid.lwp_p ());
7b669087
GB
4540 return inferior_ptid;
4541}
4542
0ae5b8fa
AB
4543/* Implement 'maintenance info linux-lwps'. Displays some basic
4544 information about all the current lwp_info objects. */
4545
4546static void
4547maintenance_info_lwps (const char *arg, int from_tty)
4548{
4549 if (all_lwps ().size () == 0)
4550 {
4551 gdb_printf ("No Linux LWPs\n");
4552 return;
4553 }
4554
4555 /* Start the width at 8 to match the column heading below, then
4556 figure out the widest ptid string. We'll use this to build our
4557 output table below. */
4558 size_t ptid_width = 8;
4559 for (lwp_info *lp : all_lwps ())
4560 ptid_width = std::max (ptid_width, lp->ptid.to_string ().size ());
4561
4562 /* Setup the table headers. */
4563 struct ui_out *uiout = current_uiout;
4564 ui_out_emit_table table_emitter (uiout, 2, -1, "linux-lwps");
4565 uiout->table_header (ptid_width, ui_left, "lwp-ptid", _("LWP Ptid"));
4566 uiout->table_header (9, ui_left, "thread-info", _("Thread ID"));
4567 uiout->table_body ();
4568
4569 /* Display one table row for each lwp_info. */
4570 for (lwp_info *lp : all_lwps ())
4571 {
4572 ui_out_emit_tuple tuple_emitter (uiout, "lwp-entry");
4573
4574 thread_info *th = linux_target->find_thread (lp->ptid);
4575
4576 uiout->field_string ("lwp-ptid", lp->ptid.to_string ().c_str ());
4577 if (th == nullptr)
4578 uiout->field_string ("thread-info", "None");
4579 else
4580 uiout->field_string ("thread-info", print_full_thread_id (th));
4581
4582 uiout->message ("\n");
4583 }
4584}
4585
6c265988 4586void _initialize_linux_nat ();
d6b0e80f 4587void
6c265988 4588_initialize_linux_nat ()
d6b0e80f 4589{
8864ef42 4590 add_setshow_boolean_cmd ("linux-nat", class_maintenance,
b6e52a0b
AB
4591 &debug_linux_nat, _("\
4592Set debugging of GNU/Linux native target."), _(" \
4593Show debugging of GNU/Linux native target."), _(" \
4594When on, print debug messages relating to the GNU/Linux native target."),
4595 nullptr,
4596 show_debug_linux_nat,
4597 &setdebuglist, &showdebuglist);
b84876c2 4598
7a6a1731
GB
4599 add_setshow_boolean_cmd ("linux-namespaces", class_maintenance,
4600 &debug_linux_namespaces, _("\
4601Set debugging of GNU/Linux namespaces module."), _("\
4602Show debugging of GNU/Linux namespaces module."), _("\
4603Enables printf debugging output."),
4604 NULL,
4605 NULL,
4606 &setdebuglist, &showdebuglist);
4607
7feb7d06
PA
4608 /* Install a SIGCHLD handler. */
4609 sigchld_action.sa_handler = sigchld_handler;
4610 sigemptyset (&sigchld_action.sa_mask);
4611 sigchld_action.sa_flags = SA_RESTART;
b84876c2
PA
4612
4613 /* Make it the default. */
7feb7d06 4614 sigaction (SIGCHLD, &sigchld_action, NULL);
d6b0e80f
AC
4615
4616 /* Make sure we don't block SIGCHLD during a sigsuspend. */
21987b9c 4617 gdb_sigmask (SIG_SETMASK, NULL, &suspend_mask);
d6b0e80f
AC
4618 sigdelset (&suspend_mask, SIGCHLD);
4619
7feb7d06 4620 sigemptyset (&blocked_mask);
774113b0
PA
4621
4622 lwp_lwpid_htab_create ();
0ae5b8fa
AB
4623
4624 add_cmd ("linux-lwps", class_maintenance, maintenance_info_lwps,
4625 _("List the Linux LWPS."), &maintenanceinfolist);
d6b0e80f
AC
4626}
4627\f
4628
4629/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4630 the GNU/Linux Threads library and therefore doesn't really belong
4631 here. */
4632
089436f7
TV
4633/* NPTL reserves the first two RT signals, but does not provide any
4634 way for the debugger to query the signal numbers - fortunately
4635 they don't change. */
4636static int lin_thread_signals[] = { __SIGRTMIN, __SIGRTMIN + 1 };
d6b0e80f 4637
089436f7
TV
4638/* See linux-nat.h. */
4639
4640unsigned int
4641lin_thread_get_thread_signal_num (void)
d6b0e80f 4642{
089436f7
TV
4643 return sizeof (lin_thread_signals) / sizeof (lin_thread_signals[0]);
4644}
d6b0e80f 4645
089436f7
TV
4646/* See linux-nat.h. */
4647
4648int
4649lin_thread_get_thread_signal (unsigned int i)
4650{
4651 gdb_assert (i < lin_thread_get_thread_signal_num ());
4652 return lin_thread_signals[i];
d6b0e80f 4653}