/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001-2023 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "defs.h"
#include "inferior.h"
#include "infrun.h"
#include "target.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdbsupport/gdb_wait.h"
#include <unistd.h>
#include <sys/syscall.h>
#include "nat/gdb_ptrace.h"
#include "linux-nat.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "gdbcmd.h"
#include "regcache.h"
#include "regset.h"
#include "inf-child.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/procfs.h>
#include "elf-bfd.h"
#include "gregset.h"
#include "gdbcore.h"
#include <ctype.h>
#include <sys/stat.h>
#include <fcntl.h>
#include "inf-loop.h"
#include "gdbsupport/event-loop.h"
#include "event-top.h"
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include "xml-support.h"
#include <sys/vfs.h>
#include "solib.h"
#include "nat/linux-osdata.h"
#include "linux-tdep.h"
#include "symfile.h"
#include "gdbsupport/agent.h"
#include "tracepoint.h"
#include "target-descriptions.h"
#include "gdbsupport/filestuff.h"
#include "objfiles.h"
#include "nat/linux-namespaces.h"
#include "gdbsupport/block-signals.h"
#include "gdbsupport/fileio.h"
#include "gdbsupport/scope-exit.h"
#include "gdbsupport/gdb-sigmask.h"
#include "gdbsupport/common-debug.h"
#include <unordered_map>
/* This comment documents the high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid,
passing the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid is not quite good:

- If the thread group leader exits while other threads in the thread
  group still exist, waitpid(TGID, ...) hangs.  That waitpid won't
  return an exit status until the other threads in the group are
  reaped.

- When a non-leader thread execs, that thread just vanishes without
  reporting an exit (so we'd hang if we waited for it explicitly in
  that case).  The exec event is instead reported to the TGID pid.

The solution is to always use -1 and WNOHANG, together with
sigsuspend.

First, we use non-blocking waitpid to check for events.  If nothing is
found, we use sigsuspend to wait for SIGCHLD.  When SIGCHLD arrives,
it means something happened to a child process.  As soon as we know
there's an event, we get back to calling nonblocking waitpid.

Note that SIGCHLD should be blocked between waitpid and sigsuspend
calls, so that we don't miss a signal.  If SIGCHLD arrives in between,
while it's blocked, the signal becomes pending and sigsuspend
immediately notices it and returns.

Waiting for events in async mode (TARGET_WNOHANG)
=================================================

In async mode, GDB should always be ready to handle both user input
and target events, so neither blocking waitpid nor sigsuspend are
viable options.  Instead, we should asynchronously notify the GDB main
event loop whenever there's an unprocessed event from the target.  We
detect asynchronous target events by handling SIGCHLD signals.  To
notify the event loop about target events, an event pipe is used
--- the pipe is registered as a waitable event source in the event loop,
the event loop select/poll's on the read end of this pipe (as well as on
other event sources, e.g., stdin), and the SIGCHLD handler marks the
event pipe to raise an event.  This is more portable than relying on
pselect/ppoll, since on kernels that lack those syscalls, libc
emulates them with select/poll+sigprocmask, and that is racy
(a.k.a. plain broken).

Obviously, if we fail to notify the event loop when there's a target
event, it's bad.  OTOH, if we notify the event loop when there's no
event from the target, linux_nat_wait will detect that there's no real
event to report, and return an event of type TARGET_WAITKIND_IGNORE.
This is mostly harmless, but it will waste time and is better avoided.

The main design point is that every time GDB is outside linux-nat.c,
we have a SIGCHLD handler installed that is called when something
happens to the target and notifies the GDB event loop.  Whenever GDB
core decides to handle the event, and calls into linux-nat.c, we
process things as in sync mode, except that we never block in
sigsuspend.

While processing an event, we may end up momentarily blocked in
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
return quickly.  E.g., in all-stop mode, before reporting to the core
that an LWP hit a breakpoint, all LWPs are stopped by sending them
SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
Note that this is different from blocking indefinitely waiting for the
next event --- here, we're already handling an event.

Use of signals
==============

We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of another
signal is not entirely significant; we just need a signal to be delivered,
so that we can intercept it.  SIGSTOP's advantage is that it can not be
blocked.  A disadvantage is that it is not a real-time signal, so it can only
be queued once; we do not keep track of other sources of SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL.  But we can't
use them, because they have special behavior when the signal is generated -
not when it is delivered.  SIGCONT resumes the entire thread group and SIGKILL
kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the thread we
tkill'd.  But we never let the SIGSTOP be delivered; we always intercept and
cancel it (by PTRACE_CONT without passing SIGSTOP).

We could use a real-time signal instead.  This would solve those problems; we
could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
generates it, and there are races with trying to find a signal that is not
blocked.

Exec events
===========

The case of a thread group (process) with 3 or more threads, where a
thread other than the leader execs, is worth detailing:

On an exec, the Linux kernel destroys all threads except the execing
one in the thread group, and resets the execing thread's tid to the
tgid.  No exit notification is sent for the execing thread -- from the
ptracer's perspective, it appears as though the execing thread just
vanishes.  Until we reap all other threads except the leader and the
execing thread, the leader will be zombie, and the execing thread will
be in `D (disc sleep)' state.  As soon as all other threads are
reaped, the execing thread changes its tid to the tgid, and the
previous (zombie) leader vanishes, giving place to the "new"
leader.  */
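/* Illustrative sketch (not part of the original file): the sync-mode
   pattern described above, reduced to its core.  It assumes SIGCHLD is
   already blocked (as block_child_signals below arranges) and that
   SUSPEND_MASK is, roughly, the caller's signal mask with SIGCHLD
   unblocked; handle_event is a hypothetical stand-in for the real event
   processing done by linux_nat_wait.  */
#if 0
static void
example_sync_wait_loop (const sigset_t *suspend_mask)
{
  for (;;)
    {
      int status;

      /* Non-blocking check for an event from any child.  */
      pid_t pid = waitpid (-1, &status, WNOHANG | __WALL);

      if (pid > 0)
	{
	  handle_event (pid, status);	/* hypothetical */
	  continue;
	}

      /* Nothing pending: atomically unblock SIGCHLD and sleep.  If a
	 SIGCHLD arrived while it was blocked, it is already pending and
	 sigsuspend returns immediately, so no event is lost.  */
      sigsuspend (suspend_mask);
    }
}
#endif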

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

struct linux_nat_target *linux_target;

/* Does the current host support PTRACE_GETREGSET?  */
enum tribool have_ptrace_getregset = TRIBOOL_UNKNOWN;

/* When true, print debug messages relating to the linux native target.  */

static bool debug_linux_nat;

/* Implement 'show debug linux-nat'.  */

static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Debugging of GNU/Linux native targets is %s.\n"),
	      value);
}

/* Print a linux-nat debug statement.  */

#define linux_nat_debug_printf(fmt, ...) \
  debug_prefixed_printf_cond (debug_linux_nat, "linux-nat", fmt, ##__VA_ARGS__)

/* Print "linux-nat" enter/exit debug statements.  */

#define LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT \
  scoped_debug_enter_exit (debug_linux_nat, "linux-nat")

struct simple_pid_list
{
  int pid;
  int status;
  struct simple_pid_list *next;
};
static struct simple_pid_list *stopped_pids;

/* Whether target_thread_events is in effect.  */
static int report_thread_events;

static int kill_lwp (int lwpid, int signo);

static int stop_callback (struct lwp_info *lp);

static void block_child_signals (sigset_t *prev_mask);
static void restore_child_signals_mask (sigset_t *prev_mask);

struct lwp_info;
static struct lwp_info *add_lwp (ptid_t ptid);
static void purge_lwp_list (int pid);
static void delete_lwp (ptid_t ptid);
static struct lwp_info *find_lwp_pid (ptid_t ptid);

static int lwp_status_pending_p (struct lwp_info *lp);

static void save_stop_reason (struct lwp_info *lp);

static bool proc_mem_file_is_writable ();
static void close_proc_mem_file (pid_t pid);
static void open_proc_mem_file (ptid_t ptid);

/* Return TRUE if LWP is the leader thread of the process.  */

static bool
is_leader (lwp_info *lp)
{
  return lp->ptid.pid () == lp->ptid.lwp ();
}

/* Convert an LWP's pending status to a std::string.  */

static std::string
pending_status_str (lwp_info *lp)
{
  gdb_assert (lwp_status_pending_p (lp));

  if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
    return lp->waitstatus.to_string ();
  else
    return status_to_str (lp->status);
}

\f
/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return lwp->ptid;
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->step;
}

\f
/* Trivial list manipulation functions to keep track of a list of
   new stopped processes.  */
static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}

/* Return the ptrace options that we want to try to enable.  */

static int
linux_nat_ptrace_options (int attached)
{
  int options = 0;

  if (!attached)
    options |= PTRACE_O_EXITKILL;

  options |= (PTRACE_O_TRACESYSGOOD
	      | PTRACE_O_TRACEVFORKDONE
	      | PTRACE_O_TRACEVFORK
	      | PTRACE_O_TRACEFORK
	      | PTRACE_O_TRACEEXEC);

  return options;
}
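/* Illustrative sketch (not part of the original file): the options
   computed above are installed on the tracee by
   linux_enable_event_reporting (called from linux_init_ptrace_procfs
   below), which essentially boils down to a PTRACE_SETOPTIONS request
   along these lines.  */
#if 0
static void
example_set_ptrace_options (pid_t pid, int options)
{
  if (ptrace (PTRACE_SETOPTIONS, pid, 0, options) < 0)
    perror_with_name (("ptrace"));
}
#endif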
/* Initialize ptrace and procfs warnings and check for supported
   ptrace features given PID.

   ATTACHED should be nonzero iff we attached to the inferior.  */

static void
linux_init_ptrace_procfs (pid_t pid, int attached)
{
  int options = linux_nat_ptrace_options (attached);

  linux_enable_event_reporting (pid, options);
  linux_ptrace_init_warnings ();
  linux_proc_init_warnings ();
  proc_mem_file_is_writable ();
}

linux_nat_target::~linux_nat_target ()
{}

void
linux_nat_target::post_attach (int pid)
{
  linux_init_ptrace_procfs (pid, 1);
}

/* Implement the virtual inf_ptrace_target::post_startup_inferior method.  */

void
linux_nat_target::post_startup_inferior (ptid_t ptid)
{
  linux_init_ptrace_procfs (ptid.pid (), 0);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;

  for (const lwp_info *lp ATTRIBUTE_UNUSED : all_lwps ())
    if (lp->ptid.pid () == pid)
      count++;

  return count;
}

/* Deleter for lwp_info unique_ptr specialisation.  */

struct lwp_deleter
{
  void operator() (struct lwp_info *lwp) const
  {
    delete_lwp (lwp->ptid);
  }
};

/* A unique_ptr specialisation for lwp_info.  */

typedef std::unique_ptr<struct lwp_info, lwp_deleter> lwp_info_up;

/* Target hook for follow_fork.  */

void
linux_nat_target::follow_fork (inferior *child_inf, ptid_t child_ptid,
			       target_waitkind fork_kind, bool follow_child,
			       bool detach_fork)
{
  inf_ptrace_target::follow_fork (child_inf, child_ptid, fork_kind,
				  follow_child, detach_fork);

  if (!follow_child)
    {
      bool has_vforked = fork_kind == TARGET_WAITKIND_VFORKED;
      ptid_t parent_ptid = inferior_ptid;
      int parent_pid = parent_ptid.lwp ();
      int child_pid = child_ptid.lwp ();

      /* We're already attached to the parent, by default.  */
      lwp_info *child_lp = add_lwp (child_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;

      /* Detach new forked process?  */
      if (detach_fork)
	{
	  int child_stop_signal = 0;
	  bool detach_child = true;

	  /* Move CHILD_LP into a unique_ptr and clear the source pointer
	     to prevent us doing anything stupid with it.  */
	  lwp_info_up child_lp_ptr (child_lp);
	  child_lp = nullptr;

	  linux_target->low_prepare_to_resume (child_lp_ptr.get ());

	  /* When debugging an inferior in an architecture that supports
	     hardware single stepping on a kernel without commit
	     6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
	     process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
	     set if the parent process had them set.
	     To work around this, single step the child process
	     once before detaching to clear the flags.  */

	  /* Note that we consult the parent's architecture instead of
	     the child's because there's no inferior for the child at
	     this point.  */
	  if (!gdbarch_software_single_step_p (target_thread_architecture
					       (parent_ptid)))
	    {
	      int status;

	      linux_disable_event_reporting (child_pid);
	      if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
		perror_with_name (_("Couldn't do single step"));
	      if (my_waitpid (child_pid, &status, 0) < 0)
		perror_with_name (_("Couldn't wait vfork process"));
	      else
		{
		  detach_child = WIFSTOPPED (status);
		  child_stop_signal = WSTOPSIG (status);
		}
	    }

	  if (detach_child)
	    {
	      int signo = child_stop_signal;

	      if (signo != 0
		  && !signal_pass_state (gdb_signal_from_host (signo)))
		signo = 0;
	      ptrace (PTRACE_DETACH, child_pid, 0, signo);

	      close_proc_mem_file (child_pid);
	    }
	}

      if (has_vforked)
	{
	  lwp_info *parent_lp = find_lwp_pid (parent_ptid);
	  linux_nat_debug_printf ("waiting for VFORK_DONE on %d", parent_pid);
	  parent_lp->stopped = 1;

	  /* We'll handle the VFORK_DONE event like any other
	     event, in target_wait.  */
	}
    }
  else
    {
      struct lwp_info *child_lp;

      child_lp = add_lwp (child_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;
    }
}

\f
int
linux_nat_target::insert_fork_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::remove_fork_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::insert_vfork_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::remove_vfork_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::insert_exec_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::remove_exec_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::set_syscall_catchpoint (int pid, bool needed, int any_count,
					  gdb::array_view<const int> syscall_counts)
{
  /* On GNU/Linux, we ignore the arguments.  It means that we only
     enable the syscall catchpoints, but do not disable them.

     Also, we do not use the `syscall_counts' information because we do not
     filter system calls here.  We let GDB do the logic for us.  */
  return 0;
}

/* List of known LWPs, keyed by LWP PID.  This speeds up the common
   case of mapping a PID returned from the kernel to our corresponding
   lwp_info data structure.  */
static htab_t lwp_lwpid_htab;

/* Calculate a hash from a lwp_info's LWP PID.  */

static hashval_t
lwp_info_hash (const void *ap)
{
  const struct lwp_info *lp = (struct lwp_info *) ap;
  pid_t pid = lp->ptid.lwp ();

  return iterative_hash_object (pid, 0);
}

/* Equality function for the lwp_info hash table.  Compares the LWP's
   PID.  */

static int
lwp_lwpid_htab_eq (const void *a, const void *b)
{
  const struct lwp_info *entry = (const struct lwp_info *) a;
  const struct lwp_info *element = (const struct lwp_info *) b;

  return entry->ptid.lwp () == element->ptid.lwp ();
}

/* Create the lwp_lwpid_htab hash table.  */

static void
lwp_lwpid_htab_create (void)
{
  lwp_lwpid_htab = htab_create (100, lwp_info_hash, lwp_lwpid_htab_eq, NULL);
}

/* Add LP to the hash table.  */

static void
lwp_lwpid_htab_add_lwp (struct lwp_info *lp)
{
  void **slot;

  slot = htab_find_slot (lwp_lwpid_htab, lp, INSERT);
  gdb_assert (slot != NULL && *slot == NULL);
  *slot = lp;
}

/* Head of doubly-linked list of known LWPs.  Sorted by reverse
   creation order.  This order is assumed in some cases.  E.g.,
   reaping status after killing all lwps of a process: the leader LWP
   must be reaped last.  */

static intrusive_list<lwp_info> lwp_list;

/* See linux-nat.h.  */

lwp_info_range
all_lwps ()
{
  return lwp_info_range (lwp_list.begin ());
}

/* See linux-nat.h.  */

lwp_info_safe_range
all_lwps_safe ()
{
  return lwp_info_safe_range (lwp_list.begin ());
}

/* Add LP to sorted-by-reverse-creation-order doubly-linked list.  */

static void
lwp_list_add (struct lwp_info *lp)
{
  lwp_list.push_front (*lp);
}

/* Remove LP from sorted-by-reverse-creation-order doubly-linked
   list.  */

static void
lwp_list_remove (struct lwp_info *lp)
{
  /* Remove from sorted-by-creation-order list.  */
  lwp_list.erase (lwp_list.iterator_to (*lp));
}
\f

/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
   _initialize_linux_nat.  */
static sigset_t suspend_mask;

/* Signals to block to make sigsuspend work.  */
static sigset_t blocked_mask;

/* SIGCHLD action.  */
static struct sigaction sigchld_action;

/* Block child signals (SIGCHLD and linux threads signals), and store
   the previous mask in PREV_MASK.  */

static void
block_child_signals (sigset_t *prev_mask)
{
  /* Make sure SIGCHLD is blocked.  */
  if (!sigismember (&blocked_mask, SIGCHLD))
    sigaddset (&blocked_mask, SIGCHLD);

  gdb_sigmask (SIG_BLOCK, &blocked_mask, prev_mask);
}

/* Restore child signals mask, previously returned by
   block_child_signals.  */

static void
restore_child_signals_mask (sigset_t *prev_mask)
{
  gdb_sigmask (SIG_SETMASK, prev_mask, NULL);
}

/* Mask of signals to pass directly to the inferior.  */
static sigset_t pass_mask;

/* Update signals to pass to the inferior.  */
void
linux_nat_target::pass_signals
  (gdb::array_view<const unsigned char> pass_signals)
{
  int signo;

  sigemptyset (&pass_mask);

  for (signo = 1; signo < NSIG; signo++)
    {
      int target_signo = gdb_signal_from_host (signo);
      if (target_signo < pass_signals.size () && pass_signals[target_signo])
	sigaddset (&pass_mask, signo);
    }
}

\f

/* Prototypes for local functions.  */
static int stop_wait_callback (struct lwp_info *lp);
static int resume_stopped_resumed_lwps (struct lwp_info *lp, const ptid_t wait_ptid);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);

\f

/* Destroy and free LP.  */

lwp_info::~lwp_info ()
{
  /* Let the arch specific bits release arch_lwp_info.  */
  linux_target->low_delete_thread (this->arch_private);
}

/* Traversal function for purge_lwp_list.  */

static int
lwp_lwpid_htab_remove_pid (void **slot, void *info)
{
  struct lwp_info *lp = (struct lwp_info *) *slot;
  int pid = *(int *) info;

  if (lp->ptid.pid () == pid)
    {
      htab_clear_slot (lwp_lwpid_htab, slot);
      lwp_list_remove (lp);
      delete lp;
    }

  return 1;
}

/* Remove all LWPs belonging to PID from the lwp list.  */

static void
purge_lwp_list (int pid)
{
  htab_traverse_noresize (lwp_lwpid_htab, lwp_lwpid_htab_remove_pid, &pid);
}

/* Add the LWP specified by PTID to the list.  PTID is the first LWP
   in the process.  Return a pointer to the structure describing the
   new LWP.

   This differs from add_lwp in that we don't let the arch specific
   bits know about this new thread.  Current clients of this callback
   take the opportunity to install watchpoints in the new thread, and
   we shouldn't do that for the first thread.  If we're spawning a
   child ("run"), the thread executes the shell wrapper first, and we
   shouldn't touch it until it execs the program we want to debug.
   For "attach", it'd be okay to call the callback, but it's not
   necessary, because watchpoints can't yet have been inserted into
   the inferior.  */

static struct lwp_info *
add_initial_lwp (ptid_t ptid)
{
  gdb_assert (ptid.lwp_p ());

  lwp_info *lp = new lwp_info (ptid);


  /* Add to sorted-by-reverse-creation-order list.  */
  lwp_list_add (lp);

  /* Add to keyed-by-pid htab.  */
  lwp_lwpid_htab_add_lwp (lp);

  return lp;
}

/* Add the LWP specified by PID to the list.  Return a pointer to the
   structure describing the new LWP.  The LWP should already be
   stopped.  */

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  lp = add_initial_lwp (ptid);

  /* Let the arch specific bits know about this new thread.  Current
     clients of this callback take the opportunity to install
     watchpoints in the new thread.  We don't do this for the first
     thread though.  See add_initial_lwp.  */
  linux_target->low_new_thread (lp);

  return lp;
}

/* Remove the LWP specified by PID from the list.  */

static void
delete_lwp (ptid_t ptid)
{
  lwp_info dummy (ptid);

  void **slot = htab_find_slot (lwp_lwpid_htab, &dummy, NO_INSERT);
  if (slot == NULL)
    return;

  lwp_info *lp = *(struct lwp_info **) slot;
  gdb_assert (lp != NULL);

  htab_clear_slot (lwp_lwpid_htab, slot);

  /* Remove from sorted-by-creation-order list.  */
  lwp_list_remove (lp);

  /* Release.  */
  delete lp;
}

/* Return a pointer to the structure describing the LWP corresponding
   to PID.  If no corresponding LWP could be found, return NULL.  */

static struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  int lwp;

  if (ptid.lwp_p ())
    lwp = ptid.lwp ();
  else
    lwp = ptid.pid ();

  lwp_info dummy (ptid_t (0, lwp));
  return (struct lwp_info *) htab_find (lwp_lwpid_htab, &dummy);
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   gdb::function_view<iterate_over_lwps_ftype> callback)
{
  for (lwp_info *lp : all_lwps_safe ())
    {
      if (lp->ptid.matches (filter))
	{
	  if (callback (lp) != 0)
	    return lp;
	}
    }

  return NULL;
}

/* Update our internal state when changing from one checkpoint to
   another indicated by NEW_PTID.  We can only switch single-threaded
   applications, so we only create one new LWP, and the previous list
   is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  purge_lwp_list (inferior_ptid.pid ());

  lp = add_lwp (new_ptid);
  lp->stopped = 1;

  /* This changes the thread's ptid while preserving the gdb thread
     num.  Also changes the inferior pid, while preserving the
     inferior num.  */
  thread_change_ptid (linux_target, inferior_ptid, new_ptid);

  /* We've just told GDB core that the thread changed target id, but,
     in fact, it really is a different thread, with different register
     contents.  */
  registers_changed ();
}

/* Handle the exit of a single thread LP.  */

static void
exit_lwp (struct lwp_info *lp)
{
  struct thread_info *th = linux_target->find_thread (lp->ptid);

  if (th)
    delete_thread (th);

  delete_lwp (lp->ptid);
}
/* Wait for the LWP specified by LP, which we have just attached to.
   Returns a wait status for that LWP, to cache.  */

static int
linux_nat_post_attach_wait (ptid_t ptid, int *signalled)
{
  pid_t new_pid, pid = ptid.lwp ();
  int status;

  if (linux_proc_pid_is_stopped (pid))
    {
      linux_nat_debug_printf ("Attaching to a stopped process");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
	 is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the SIGSTOP
	 (or a higher priority signal, just like normal PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, __WALL);
  gdb_assert (pid == new_pid);

  if (!WIFSTOPPED (status))
    {
      /* The pid we tried to attach has apparently just exited.  */
      linux_nat_debug_printf ("Failed to stop %d: %s", pid,
			      status_to_str (status).c_str ());
      return status;
    }

  if (WSTOPSIG (status) != SIGSTOP)
    {
      *signalled = 1;
      linux_nat_debug_printf ("Received %s after attaching",
			      status_to_str (status).c_str ());
    }

  return status;
}

void
linux_nat_target::create_inferior (const char *exec_file,
				   const std::string &allargs,
				   char **env, int from_tty)
{
  maybe_disable_address_space_randomization restore_personality
    (disable_randomization);

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

  /* Make sure we report all signals during startup.  */
  pass_signals ({});

  inf_ptrace_target::create_inferior (exec_file, allargs, env, from_tty);

  open_proc_mem_file (inferior_ptid);
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  struct lwp_info *lp;

  /* Ignore LWPs we're already attached to.  */
  lp = find_lwp_pid (ptid);
  if (lp == NULL)
    {
      int lwpid = ptid.lwp ();

      if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
	{
	  int err = errno;

	  /* Be quiet if we simply raced with the thread exiting.
	     EPERM is returned if the thread's task still exists, and
	     is marked as exited or zombie, as well as other
	     conditions, so in that case, confirm the status in
	     /proc/PID/status.  */
	  if (err == ESRCH
	      || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	    {
	      linux_nat_debug_printf
		("Cannot attach to lwp %d: thread is gone (%d: %s)",
		 lwpid, err, safe_strerror (err));

	    }
	  else
	    {
	      std::string reason
		= linux_ptrace_attach_fail_reason_string (ptid, err);

	      warning (_("Cannot attach to lwp %d: %s"),
		       lwpid, reason.c_str ());
	    }
	}
      else
	{
	  linux_nat_debug_printf ("PTRACE_ATTACH %s, 0, 0 (OK)",
				  ptid.to_string ().c_str ());

	  lp = add_lwp (ptid);

	  /* The next time we wait for this LWP we'll see a SIGSTOP as
	     PTRACE_ATTACH brings it to a halt.  */
	  lp->signalled = 1;

	  /* We need to wait for a stop before being able to make the
	     next ptrace call on this LWP.  */
	  lp->must_set_ptrace_flags = 1;

	  /* So that wait collects the SIGSTOP.  */
	  lp->resumed = 1;

	  /* Also add the LWP to gdb's thread list, in case a
	     matching libthread_db is not found (or the process uses
	     raw clone).  */
	  add_thread (linux_target, lp->ptid);
	  set_running (linux_target, lp->ptid, true);
	  set_executing (linux_target, lp->ptid, true);
	}

      return 1;
    }
  return 0;
}

void
linux_nat_target::attach (const char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;
  ptid_t ptid;

  /* Make sure we report all signals during attach.  */
  pass_signals ({});

  try
    {
      inf_ptrace_target::attach (args, from_tty);
    }
  catch (const gdb_exception_error &ex)
    {
      pid_t pid = parse_pid_to_attach (args);
      std::string reason = linux_ptrace_attach_fail_reason (pid);

      if (!reason.empty ())
	throw_error (ex.error, "warning: %s\n%s", reason.c_str (),
		     ex.what ());
      else
	throw_error (ex.error, "%s", ex.what ());
    }

  /* The ptrace base target adds the main thread with (pid,0,0)
     format.  Decorate it with lwp info.  */
  ptid = ptid_t (inferior_ptid.pid (),
		 inferior_ptid.pid ());
  thread_change_ptid (linux_target, inferior_ptid, ptid);

  /* Add the initial process as the first LWP to the list.  */
  lp = add_initial_lwp (ptid);

  status = linux_nat_post_attach_wait (lp->ptid, &lp->signalled);
  if (!WIFSTOPPED (status))
    {
      if (WIFEXITED (status))
	{
	  int exit_code = WEXITSTATUS (status);

	  target_terminal::ours ();
	  target_mourn_inferior (inferior_ptid);
	  if (exit_code == 0)
	    error (_("Unable to attach: program exited normally."));
	  else
	    error (_("Unable to attach: program exited with code %d."),
		   exit_code);
	}
      else if (WIFSIGNALED (status))
	{
	  enum gdb_signal signo;

	  target_terminal::ours ();
	  target_mourn_inferior (inferior_ptid);

	  signo = gdb_signal_from_host (WTERMSIG (status));
	  error (_("Unable to attach: program terminated with signal "
		   "%s, %s."),
		 gdb_signal_to_name (signo),
		 gdb_signal_to_string (signo));
	}

      internal_error (_("unexpected status %d for PID %ld"),
		      status, (long) ptid.lwp ());
    }

  lp->stopped = 1;

  open_proc_mem_file (lp->ptid);

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  linux_nat_debug_printf ("waitpid %ld, saving status %s",
			  (long) lp->ptid.pid (),
			  status_to_str (status).c_str ());

  lp->status = status;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  The inferior may be using raw clone instead of
     using pthreads.  But even if it is using pthreads, thread_db
     walks structures in the inferior's address space to find the list
     of threads/LWPs, and those structures may well be corrupted.
     Note that once thread_db is loaded, we'll still use it to list
     threads and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (lp->ptid.pid (),
				  attach_proc_task_lwp_callback);
}
4a3ee32a
SM
1153/* Ptrace-detach the thread with pid PID. */
1154
1155static void
1156detach_one_pid (int pid, int signo)
1157{
1158 if (ptrace (PTRACE_DETACH, pid, 0, signo) < 0)
1159 {
1160 int save_errno = errno;
1161
1162 /* We know the thread exists, so ESRCH must mean the lwp is
1163 zombie. This can happen if one of the already-detached
1164 threads exits the whole thread group. In that case we're
1165 still attached, and must reap the lwp. */
1166 if (save_errno == ESRCH)
1167 {
1168 int ret, status;
1169
1170 ret = my_waitpid (pid, &status, __WALL);
1171 if (ret == -1)
1172 {
1173 warning (_("Couldn't reap LWP %d while detaching: %s"),
1174 pid, safe_strerror (errno));
1175 }
1176 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1177 {
1178 warning (_("Reaping LWP %d while detaching "
1179 "returned unexpected status 0x%x"),
1180 pid, status);
1181 }
1182 }
1183 else
1184 error (_("Can't detach %d: %s"),
1185 pid, safe_strerror (save_errno));
1186 }
1187 else
1188 linux_nat_debug_printf ("PTRACE_DETACH (%d, %s, 0) (OK)",
1189 pid, strsignal (signo));
1190}
1191
ced2dffb
PA
1192/* Get pending signal of THREAD as a host signal number, for detaching
1193 purposes. This is the signal the thread last stopped for, which we
1194 need to deliver to the thread when detaching, otherwise, it'd be
1195 suppressed/lost. */
1196
a0ef4274 1197static int
ced2dffb 1198get_detach_signal (struct lwp_info *lp)
a0ef4274 1199{
a493e3e2 1200 enum gdb_signal signo = GDB_SIGNAL_0;
ca2163eb
PA
1201
1202 /* If we paused threads momentarily, we may have stored pending
1203 events in lp->status or lp->waitstatus (see stop_wait_callback),
1204 and GDB core hasn't seen any signal for those threads.
1205 Otherwise, the last signal reported to the core is found in the
1206 thread object's stop_signal.
1207
1208 There's a corner case that isn't handled here at present. Only
1209 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1210 stop_signal make sense as a real signal to pass to the inferior.
1211 Some catchpoint related events, like
1212 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
a493e3e2 1213 to GDB_SIGNAL_SIGTRAP when the catchpoint triggers. But,
ca2163eb
PA
1214 those traps are debug API (ptrace in our case) related and
1215 induced; the inferior wouldn't see them if it wasn't being
1216 traced. Hence, we should never pass them to the inferior, even
1217 when set to pass state. Since this corner case isn't handled by
1218 infrun.c when proceeding with a signal, for consistency, neither
1219 do we handle it here (or elsewhere in the file we check for
1220 signal pass state). Normally SIGTRAP isn't set to pass state, so
1221 this is really a corner case. */
1222
183be222 1223 if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
a493e3e2 1224 signo = GDB_SIGNAL_0; /* a pending ptrace event, not a real signal. */
ca2163eb 1225 else if (lp->status)
2ea28649 1226 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
00431a78 1227 else
ca2163eb 1228 {
9213a6d7 1229 thread_info *tp = linux_target->find_thread (lp->ptid);
e0881a8e 1230
611841bb 1231 if (target_is_non_stop_p () && !tp->executing ())
ca2163eb 1232 {
1edb66d8 1233 if (tp->has_pending_waitstatus ())
df5ad102
SM
1234 {
1235 /* If the thread has a pending event, and it was stopped with a
287de656 1236 signal, use that signal to resume it. If it has a pending
df5ad102
SM
1237 event of another kind, it was not stopped with a signal, so
1238 resume it without a signal. */
1239 if (tp->pending_waitstatus ().kind () == TARGET_WAITKIND_STOPPED)
1240 signo = tp->pending_waitstatus ().sig ();
1241 else
1242 signo = GDB_SIGNAL_0;
1243 }
00431a78 1244 else
1edb66d8 1245 signo = tp->stop_signal ();
00431a78
PA
1246 }
1247 else if (!target_is_non_stop_p ())
1248 {
00431a78 1249 ptid_t last_ptid;
5b6d1e4f 1250 process_stratum_target *last_target;
00431a78 1251
5b6d1e4f 1252 get_last_target_status (&last_target, &last_ptid, nullptr);
e0881a8e 1253
5b6d1e4f
PA
1254 if (last_target == linux_target
1255 && lp->ptid.lwp () == last_ptid.lwp ())
1edb66d8 1256 signo = tp->stop_signal ();
4c28f408 1257 }
ca2163eb 1258 }
4c28f408 1259
a493e3e2 1260 if (signo == GDB_SIGNAL_0)
ca2163eb 1261 {
9327494e 1262 linux_nat_debug_printf ("lwp %s has no pending signal",
e53c95d4 1263 lp->ptid.to_string ().c_str ());
ca2163eb
PA
1264 }
1265 else if (!signal_pass_state (signo))
1266 {
9327494e
SM
1267 linux_nat_debug_printf
1268 ("lwp %s had signal %s but it is in no pass state",
e53c95d4 1269 lp->ptid.to_string ().c_str (), gdb_signal_to_string (signo));
a0ef4274 1270 }
a0ef4274 1271 else
4c28f408 1272 {
9327494e 1273 linux_nat_debug_printf ("lwp %s has pending signal %s",
e53c95d4 1274 lp->ptid.to_string ().c_str (),
9327494e 1275 gdb_signal_to_string (signo));
ced2dffb
PA
1276
1277 return gdb_signal_to_host (signo);
4c28f408 1278 }
a0ef4274
DJ
1279
1280 return 0;
1281}
1282
ced2dffb
PA
1283/* Detach from LP. If SIGNO_P is non-NULL, then it points to the
1284 signal number that should be passed to the LWP when detaching.
1285 Otherwise pass any pending signal the LWP may have, if any. */
1286
1287static void
1288detach_one_lwp (struct lwp_info *lp, int *signo_p)
d6b0e80f 1289{
b26b06dd
AB
1290 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
1291
1292 linux_nat_debug_printf ("lwp %s (stopped = %d)",
1293 lp->ptid.to_string ().c_str (), lp->stopped);
1294
e38504b3 1295 int lwpid = lp->ptid.lwp ();
ced2dffb
PA
1296 int signo;
1297
d6b0e80f
AC
1298 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1299
df5ad102
SM
1300 /* If the lwp/thread we are about to detach has a pending fork event,
1301 there is a process GDB is attached to that the core of GDB doesn't know
1302 about. Detach from it. */
1303
1304 /* Check in lwp_info::status. */
1305 if (WIFSTOPPED (lp->status) && linux_is_extended_waitstatus (lp->status))
1306 {
1307 int event = linux_ptrace_get_extended_event (lp->status);
1308
1309 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
1310 {
1311 unsigned long child_pid;
1312 int ret = ptrace (PTRACE_GETEVENTMSG, lp->ptid.lwp (), 0, &child_pid);
1313 if (ret == 0)
1314 detach_one_pid (child_pid, 0);
1315 else
1316 perror_warning_with_name (_("Failed to detach fork child"));
1317 }
1318 }
1319
1320 /* Check in lwp_info::waitstatus. */
1321 if (lp->waitstatus.kind () == TARGET_WAITKIND_VFORKED
1322 || lp->waitstatus.kind () == TARGET_WAITKIND_FORKED)
1323 detach_one_pid (lp->waitstatus.child_ptid ().pid (), 0);
1324
1325
1326 /* Check in thread_info::pending_waitstatus. */
9213a6d7 1327 thread_info *tp = linux_target->find_thread (lp->ptid);
df5ad102
SM
1328 if (tp->has_pending_waitstatus ())
1329 {
1330 const target_waitstatus &ws = tp->pending_waitstatus ();
1331
1332 if (ws.kind () == TARGET_WAITKIND_VFORKED
1333 || ws.kind () == TARGET_WAITKIND_FORKED)
1334 detach_one_pid (ws.child_ptid ().pid (), 0);
1335 }
1336
1337 /* Check in thread_info::pending_follow. */
1338 if (tp->pending_follow.kind () == TARGET_WAITKIND_VFORKED
1339 || tp->pending_follow.kind () == TARGET_WAITKIND_FORKED)
1340 detach_one_pid (tp->pending_follow.child_ptid ().pid (), 0);
1341
9327494e
SM
1342 if (lp->status != 0)
1343 linux_nat_debug_printf ("Pending %s for %s on detach.",
1344 strsignal (WSTOPSIG (lp->status)),
e53c95d4 1345 lp->ptid.to_string ().c_str ());
d6b0e80f 1346
a0ef4274
DJ
1347 /* If there is a pending SIGSTOP, get rid of it. */
1348 if (lp->signalled)
d6b0e80f 1349 {
9327494e 1350 linux_nat_debug_printf ("Sending SIGCONT to %s",
e53c95d4 1351 lp->ptid.to_string ().c_str ());
d6b0e80f 1352
ced2dffb 1353 kill_lwp (lwpid, SIGCONT);
d6b0e80f 1354 lp->signalled = 0;
d6b0e80f
AC
1355 }
1356
ced2dffb 1357 if (signo_p == NULL)
d6b0e80f 1358 {
a0ef4274 1359 /* Pass on any pending signal for this LWP. */
ced2dffb
PA
1360 signo = get_detach_signal (lp);
1361 }
1362 else
1363 signo = *signo_p;
a0ef4274 1364
b26b06dd
AB
1365 linux_nat_debug_printf ("preparing to resume lwp %s (stopped = %d)",
1366 lp->ptid.to_string ().c_str (),
1367 lp->stopped);
1368
ced2dffb
PA
1369 /* Preparing to resume may try to write registers, and fail if the
1370 lwp is zombie. If that happens, ignore the error. We'll handle
1371 it below, when detach fails with ESRCH. */
a70b8144 1372 try
ced2dffb 1373 {
135340af 1374 linux_target->low_prepare_to_resume (lp);
ced2dffb 1375 }
230d2906 1376 catch (const gdb_exception_error &ex)
ced2dffb
PA
1377 {
1378 if (!check_ptrace_stopped_lwp_gone (lp))
eedc3f4f 1379 throw;
ced2dffb 1380 }
d6b0e80f 1381
4a3ee32a 1382 detach_one_pid (lwpid, signo);
ced2dffb
PA
1383
1384 delete_lwp (lp->ptid);
1385}
d6b0e80f 1386
ced2dffb 1387static int
d3a70e03 1388detach_callback (struct lwp_info *lp)
ced2dffb
PA
1389{
1390 /* We don't actually detach from the thread group leader just yet.
1391 If the thread group exits, we must reap the zombie clone lwps
1392 before we're able to reap the leader. */
e38504b3 1393 if (lp->ptid.lwp () != lp->ptid.pid ())
ced2dffb 1394 detach_one_lwp (lp, NULL);
d6b0e80f
AC
1395 return 0;
1396}
1397
f6ac5f3d
PA
1398void
1399linux_nat_target::detach (inferior *inf, int from_tty)
d6b0e80f 1400{
b26b06dd
AB
1401 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
1402
d90e17a7 1403 struct lwp_info *main_lwp;
bc09b0c1 1404 int pid = inf->pid;
a0ef4274 1405
ae5e0686
MK
1406 /* Don't unregister from the event loop, as there may be other
1407 inferiors running. */
b84876c2 1408
4c28f408 1409 /* Stop all threads before detaching. ptrace requires that the
30baf67b 1410 thread is stopped to successfully detach. */
d3a70e03 1411 iterate_over_lwps (ptid_t (pid), stop_callback);
4c28f408
PA
1412 /* ... and wait until all of them have reported back that
1413 they're no longer running. */
d3a70e03 1414 iterate_over_lwps (ptid_t (pid), stop_wait_callback);
4c28f408 1415
e87f0fe8
PA
1416 /* We can now safely remove breakpoints. We don't this in earlier
1417 in common code because this target doesn't currently support
1418 writing memory while the inferior is running. */
1419 remove_breakpoints_inf (current_inferior ());
1420
d3a70e03 1421 iterate_over_lwps (ptid_t (pid), detach_callback);
d6b0e80f
AC
1422
1423 /* Only the initial process should be left right now. */
bc09b0c1 1424 gdb_assert (num_lwps (pid) == 1);
d90e17a7 1425
f2907e49 1426 main_lwp = find_lwp_pid (ptid_t (pid));
d6b0e80f 1427
7a7d3353
PA
1428 if (forks_exist_p ())
1429 {
1430 /* Multi-fork case. The current inferior_ptid is being detached
1431 from, but there are other viable forks to debug. Detach from
1432 the current fork, and context-switch to the first
1433 available. */
6bd6f3b6 1434 linux_fork_detach (from_tty);
7a7d3353
PA
1435 }
1436 else
ced2dffb 1437 {
ced2dffb
PA
1438 target_announce_detach (from_tty);
1439
6bd6f3b6
SM
1440 /* Pass on any pending signal for the last LWP. */
1441 int signo = get_detach_signal (main_lwp);
ced2dffb
PA
1442
1443 detach_one_lwp (main_lwp, &signo);
1444
f6ac5f3d 1445 detach_success (inf);
ced2dffb 1446 }
05c06f31 1447
8a89ddbd 1448 close_proc_mem_file (pid);
d6b0e80f
AC
1449}
1450
8a99810d
PA
1451/* Resume execution of the inferior process. If STEP is nonzero,
1452 single-step it. If SIGNAL is nonzero, give it that signal. */
1453
1454static void
23f238d3
PA
1455linux_resume_one_lwp_throw (struct lwp_info *lp, int step,
1456 enum gdb_signal signo)
8a99810d 1457{
8a99810d 1458 lp->step = step;
9c02b525
PA
1459
1460 /* stop_pc doubles as the PC the LWP had when it was last resumed.
1461 We only presently need that if the LWP is stepped though (to
1462 handle the case of stepping a breakpoint instruction). */
1463 if (step)
1464 {
5b6d1e4f 1465 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
9c02b525
PA
1466
1467 lp->stop_pc = regcache_read_pc (regcache);
1468 }
1469 else
1470 lp->stop_pc = 0;
1471
135340af 1472 linux_target->low_prepare_to_resume (lp);
f6ac5f3d 1473 linux_target->low_resume (lp->ptid, step, signo);
23f238d3
PA
1474
1475 /* Successfully resumed. Clear state that no longer makes sense,
1476 and mark the LWP as running. Must not do this before resuming
1477 otherwise if that fails other code will be confused. E.g., we'd
1478 later try to stop the LWP and hang forever waiting for a stop
1479 status. Note that we must not throw after this is cleared,
1480 otherwise handle_zombie_lwp_error would get confused. */
8a99810d 1481 lp->stopped = 0;
1ad3de98 1482 lp->core = -1;
23f238d3 1483 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
5b6d1e4f 1484 registers_changed_ptid (linux_target, lp->ptid);
8a99810d
PA
1485}
1486
23f238d3
PA
1487/* Called when we try to resume a stopped LWP and that errors out. If
1488 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
1489 or about to become), discard the error, clear any pending status
1490 the LWP may have, and return true (we'll collect the exit status
1491 soon enough). Otherwise, return false. */
1492
1493static int
1494check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
1495{
1496 /* If we get an error after resuming the LWP successfully, we'd
1497 confuse !T state for the LWP being gone. */
1498 gdb_assert (lp->stopped);
1499
1500 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
1501 because even if ptrace failed with ESRCH, the tracee may be "not
1502 yet fully dead", but already refusing ptrace requests. In that
1503 case the tracee has 'R (Running)' state for a little bit
1504 (observed in Linux 3.18). See also the note on ESRCH in the
1505 ptrace(2) man page. Instead, check whether the LWP has any state
1506 other than ptrace-stopped. */
1507
1508 /* Don't assume anything if /proc/PID/status can't be read. */
e38504b3 1509 if (linux_proc_pid_is_trace_stopped_nowarn (lp->ptid.lwp ()) == 0)
23f238d3
PA
1510 {
1511 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
1512 lp->status = 0;
183be222 1513 lp->waitstatus.set_ignore ();
23f238d3
PA
1514 return 1;
1515 }
1516 return 0;
1517}
1518
1519/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
1520 disappears while we try to resume it. */
1521
1522static void
1523linux_resume_one_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1524{
a70b8144 1525 try
23f238d3
PA
1526 {
1527 linux_resume_one_lwp_throw (lp, step, signo);
1528 }
230d2906 1529 catch (const gdb_exception_error &ex)
23f238d3
PA
1530 {
1531 if (!check_ptrace_stopped_lwp_gone (lp))
eedc3f4f 1532 throw;
23f238d3 1533 }
23f238d3
PA
1534}
1535
d6b0e80f
AC
1536/* Resume LP. */
1537
25289eb2 1538static void
e5ef252a 1539resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
d6b0e80f 1540{
25289eb2 1541 if (lp->stopped)
6c95b8df 1542 {
5b6d1e4f 1543 struct inferior *inf = find_inferior_ptid (linux_target, lp->ptid);
25289eb2
PA
1544
1545 if (inf->vfork_child != NULL)
1546 {
8a9da63e 1547 linux_nat_debug_printf ("Not resuming sibling %s (vfork parent)",
e53c95d4 1548 lp->ptid.to_string ().c_str ());
25289eb2 1549 }
8a99810d 1550 else if (!lwp_status_pending_p (lp))
25289eb2 1551 {
9327494e 1552 linux_nat_debug_printf ("Resuming sibling %s, %s, %s",
e53c95d4 1553 lp->ptid.to_string ().c_str (),
9327494e
SM
1554 (signo != GDB_SIGNAL_0
1555 ? strsignal (gdb_signal_to_host (signo))
1556 : "0"),
1557 step ? "step" : "resume");
25289eb2 1558
8a99810d 1559 linux_resume_one_lwp (lp, step, signo);
25289eb2
PA
1560 }
1561 else
1562 {
9327494e 1563 linux_nat_debug_printf ("Not resuming sibling %s (has pending)",
e53c95d4 1564 lp->ptid.to_string ().c_str ());
25289eb2 1565 }
6c95b8df 1566 }
25289eb2 1567 else
9327494e 1568 linux_nat_debug_printf ("Not resuming sibling %s (not stopped)",
e53c95d4 1569 lp->ptid.to_string ().c_str ());
25289eb2 1570}
d6b0e80f 1571
8817a6f2
PA
1572/* Callback for iterate_over_lwps. If LWP is EXCEPT, do nothing.
1573 Resume LWP with the last stop signal, if it is in pass state. */
e5ef252a 1574
25289eb2 1575static int
d3a70e03 1576linux_nat_resume_callback (struct lwp_info *lp, struct lwp_info *except)
25289eb2 1577{
e5ef252a
PA
1578 enum gdb_signal signo = GDB_SIGNAL_0;
1579
8817a6f2
PA
1580 if (lp == except)
1581 return 0;
1582
e5ef252a
PA
1583 if (lp->stopped)
1584 {
1585 struct thread_info *thread;
1586
9213a6d7 1587 thread = linux_target->find_thread (lp->ptid);
e5ef252a
PA
1588 if (thread != NULL)
1589 {
1edb66d8
SM
1590 signo = thread->stop_signal ();
1591 thread->set_stop_signal (GDB_SIGNAL_0);
e5ef252a
PA
1592 }
1593 }
1594
1595 resume_lwp (lp, 0, signo);
d6b0e80f
AC
1596 return 0;
1597}
1598
1599static int
d3a70e03 1600resume_clear_callback (struct lwp_info *lp)
d6b0e80f
AC
1601{
1602 lp->resumed = 0;
25289eb2 1603 lp->last_resume_kind = resume_stop;
d6b0e80f
AC
1604 return 0;
1605}
1606
1607static int
d3a70e03 1608resume_set_callback (struct lwp_info *lp)
d6b0e80f
AC
1609{
1610 lp->resumed = 1;
25289eb2 1611 lp->last_resume_kind = resume_continue;
d6b0e80f
AC
1612 return 0;
1613}
1614
f6ac5f3d 1615void
d51926f0 1616linux_nat_target::resume (ptid_t scope_ptid, int step, enum gdb_signal signo)
d6b0e80f
AC
1617{
1618 struct lwp_info *lp;
d6b0e80f 1619
9327494e
SM
1620 linux_nat_debug_printf ("Preparing to %s %s, %s, inferior_ptid %s",
1621 step ? "step" : "resume",
d51926f0 1622 scope_ptid.to_string ().c_str (),
9327494e
SM
1623 (signo != GDB_SIGNAL_0
1624 ? strsignal (gdb_signal_to_host (signo)) : "0"),
e53c95d4 1625 inferior_ptid.to_string ().c_str ());
76f50ad1 1626
7da6a5b9
LM
1627 /* Mark the lwps we're resuming as resumed and update their
1628 last_resume_kind to resume_continue. */
d51926f0 1629 iterate_over_lwps (scope_ptid, resume_set_callback);
d6b0e80f 1630
d51926f0 1631 lp = find_lwp_pid (inferior_ptid);
9f0bdab8 1632 gdb_assert (lp != NULL);
d6b0e80f 1633
9f0bdab8 1634 /* Remember if we're stepping. */
25289eb2 1635 lp->last_resume_kind = step ? resume_step : resume_continue;
d6b0e80f 1636
9f0bdab8
DJ
1637 /* If we have a pending wait status for this thread, there is no
1638 point in resuming the process. But first make sure that
1639 linux_nat_wait won't preemptively handle the event - we
1640 should never take this short-circuit if we are going to
1641 leave LP running, since we have skipped resuming all the
1642 other threads. This bit of code needs to be synchronized
1643 with linux_nat_wait. */
76f50ad1 1644
9f0bdab8
DJ
1645 if (lp->status && WIFSTOPPED (lp->status))
1646 {
2455069d
UW
1647 if (!lp->step
1648 && WSTOPSIG (lp->status)
1649 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
d6b0e80f 1650 {
9327494e
SM
1651 linux_nat_debug_printf
1652 ("Not short circuiting for ignored status 0x%x", lp->status);
9f0bdab8 1653
d6b0e80f
AC
1654 /* FIXME: What should we do if we are supposed to continue
1655 this thread with a signal? */
a493e3e2 1656 gdb_assert (signo == GDB_SIGNAL_0);
2ea28649 1657 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
9f0bdab8
DJ
1658 lp->status = 0;
1659 }
1660 }
76f50ad1 1661
8a99810d 1662 if (lwp_status_pending_p (lp))
9f0bdab8
DJ
1663 {
1664 /* FIXME: What should we do if we are supposed to continue
1665 this thread with a signal? */
a493e3e2 1666 gdb_assert (signo == GDB_SIGNAL_0);
76f50ad1 1667
57573e54
PA
1668 linux_nat_debug_printf ("Short circuiting for status %s",
1669 pending_status_str (lp).c_str ());
d6b0e80f 1670
7feb7d06
PA
1671 if (target_can_async_p ())
1672 {
4a570176 1673 target_async (true);
7feb7d06
PA
1674 /* Tell the event loop we have something to process. */
1675 async_file_mark ();
1676 }
9f0bdab8 1677 return;
d6b0e80f
AC
1678 }
1679
d51926f0
PA
1680 /* No use iterating unless we're resuming other threads. */
1681 if (scope_ptid != lp->ptid)
1682 iterate_over_lwps (scope_ptid, [=] (struct lwp_info *info)
1683 {
1684 return linux_nat_resume_callback (info, lp);
1685 });
d90e17a7 1686
9327494e
SM
1687 linux_nat_debug_printf ("%s %s, %s (resume event thread)",
1688 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
e53c95d4 1689 lp->ptid.to_string ().c_str (),
9327494e
SM
1690 (signo != GDB_SIGNAL_0
1691 ? strsignal (gdb_signal_to_host (signo)) : "0"));
b84876c2 1692
2bf6fb9d 1693 linux_resume_one_lwp (lp, step, signo);
d6b0e80f
AC
1694}
1695
c5f62d5f 1696/* Send a signal to an LWP. */
d6b0e80f
AC
1697
1698static int
1699kill_lwp (int lwpid, int signo)
1700{
4a6ed09b 1701 int ret;
d6b0e80f 1702
4a6ed09b
PA
1703 errno = 0;
1704 ret = syscall (__NR_tkill, lwpid, signo);
1705 if (errno == ENOSYS)
1706 {
1707 /* If tkill fails, then we are not using nptl threads, a
1708 configuration we no longer support. */
1709 perror_with_name (("tkill"));
1710 }
1711 return ret;
d6b0e80f
AC
1712}
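
/* Illustrative sketch, not part of GDB: kill_lwp directs a signal at a
   single thread via the tkill syscall, whereas kill(2) sends a
   process-directed signal that any thread in the group may receive.
   Hypothetical usage, for a made-up LWP id:

       errno = 0;
       if (kill_lwp (some_lwpid, SIGSTOP) == -1)
	 perror ("tkill");
*/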
1713
ca2163eb
PA
1714/* Handle a GNU/Linux syscall trap wait response. If we see a syscall
1715 event, check if the core is interested in it: if not, ignore the
1716 event, and keep waiting; otherwise, we need to toggle the LWP's
1717 syscall entry/exit status, since the ptrace event itself doesn't
1718 indicate it, and report the trap to higher layers. */
1719
1720static int
1721linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
1722{
1723 struct target_waitstatus *ourstatus = &lp->waitstatus;
1724 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
9213a6d7 1725 thread_info *thread = linux_target->find_thread (lp->ptid);
00431a78 1726 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, thread);
ca2163eb
PA
1727
1728 if (stopping)
1729 {
1730 /* If we're stopping threads, there's a SIGSTOP pending, which
1731 makes it so that the LWP reports an immediate syscall return,
1732 followed by the SIGSTOP. Skip seeing that "return" using
1733 PTRACE_CONT directly, and let stop_wait_callback collect the
 1734	 SIGSTOP.  Later, when the thread is resumed, a new syscall entry
 1735	 event will be reported.  If we didn't do this (and returned 0), we'd
1736 leave a syscall entry pending, and our caller, by using
1737 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
1738 itself. Later, when the user re-resumes this LWP, we'd see
1739 another syscall entry event and we'd mistake it for a return.
1740
1741 If stop_wait_callback didn't force the SIGSTOP out of the LWP
1742 (leaving immediately with LWP->signalled set, without issuing
1743 a PTRACE_CONT), it would still be problematic to leave this
1744 syscall enter pending, as later when the thread is resumed,
1745 it would then see the same syscall exit mentioned above,
1746 followed by the delayed SIGSTOP, while the syscall didn't
1747 actually get to execute. It seems it would be even more
1748 confusing to the user. */
1749
9327494e
SM
1750 linux_nat_debug_printf
1751 ("ignoring syscall %d for LWP %ld (stopping threads), resuming with "
1752 "PTRACE_CONT for SIGSTOP", syscall_number, lp->ptid.lwp ());
ca2163eb
PA
1753
1754 lp->syscall_state = TARGET_WAITKIND_IGNORE;
e38504b3 1755 ptrace (PTRACE_CONT, lp->ptid.lwp (), 0, 0);
8817a6f2 1756 lp->stopped = 0;
ca2163eb
PA
1757 return 1;
1758 }
1759
bfd09d20
JS
1760 /* Always update the entry/return state, even if this particular
1761 syscall isn't interesting to the core now. In async mode,
1762 the user could install a new catchpoint for this syscall
1763 between syscall enter/return, and we'll need to know to
1764 report a syscall return if that happens. */
1765 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1766 ? TARGET_WAITKIND_SYSCALL_RETURN
1767 : TARGET_WAITKIND_SYSCALL_ENTRY);
1768
ca2163eb
PA
1769 if (catch_syscall_enabled ())
1770 {
ca2163eb
PA
1771 if (catching_syscall_number (syscall_number))
1772 {
1773 /* Alright, an event to report. */
183be222
SM
1774 if (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
1775 ourstatus->set_syscall_entry (syscall_number);
1776 else if (lp->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
1777 ourstatus->set_syscall_return (syscall_number);
1778 else
1779 gdb_assert_not_reached ("unexpected syscall state");
ca2163eb 1780
9327494e
SM
1781 linux_nat_debug_printf
1782 ("stopping for %s of syscall %d for LWP %ld",
1783 (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1784 ? "entry" : "return"), syscall_number, lp->ptid.lwp ());
1785
ca2163eb
PA
1786 return 0;
1787 }
1788
9327494e
SM
1789 linux_nat_debug_printf
1790 ("ignoring %s of syscall %d for LWP %ld",
1791 (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1792 ? "entry" : "return"), syscall_number, lp->ptid.lwp ());
ca2163eb
PA
1793 }
1794 else
1795 {
1796 /* If we had been syscall tracing, and hence used PT_SYSCALL
1797 before on this LWP, it could happen that the user removes all
1798 syscall catchpoints before we get to process this event.
1799 There are two noteworthy issues here:
1800
1801 - When stopped at a syscall entry event, resuming with
1802 PT_STEP still resumes executing the syscall and reports a
1803 syscall return.
1804
 1805	 - Only PT_SYSCALL catches syscall enters.  If we last
 1806	   single-stepped this thread, then this event can't be a
 1807	   syscall enter; it has to be a syscall exit.
1809
1810 The points above mean that the next resume, be it PT_STEP or
1811 PT_CONTINUE, can not trigger a syscall trace event. */
9327494e
SM
1812 linux_nat_debug_printf
1813 ("caught syscall event with no syscall catchpoints. %d for LWP %ld, "
1814 "ignoring", syscall_number, lp->ptid.lwp ());
ca2163eb
PA
1815 lp->syscall_state = TARGET_WAITKIND_IGNORE;
1816 }
1817
1818 /* The core isn't interested in this event. For efficiency, avoid
1819 stopping all threads only to have the core resume them all again.
1820 Since we're not stopping threads, if we're still syscall tracing
1821 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
1822 subsequent syscall. Simply resume using the inf-ptrace layer,
1823 which knows when to use PT_SYSCALL or PT_CONTINUE. */
1824
8a99810d 1825 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
ca2163eb
PA
1826 return 1;
1827}
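
/* Side note (illustrative, not code used here): syscall stops reach this
   function as SIGTRAP with the kernel's 0x80 "sysgood" bit set, which is
   the convention when PTRACE_O_TRACESYSGOOD is enabled; SYSCALL_SIGTRAP
   is this file's name for that value.  Roughly:

       if (WIFSTOPPED (status) && WSTOPSIG (status) == (SIGTRAP | 0x80))
	 handle_syscall_stop ();   // hypothetical handler

   ptrace itself does not say whether such a stop is an entry or an exit,
   which is why the entry/return state is toggled on each event above.  */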
1828
3d799a95
DJ
1829/* Handle a GNU/Linux extended wait response. If we see a clone
1830 event, we need to add the new LWP to our list (and not report the
1831 trap to higher layers). This function returns non-zero if the
1832 event should be ignored and we should wait again. If STOPPING is
1833 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
1834
1835static int
4dd63d48 1836linux_handle_extended_wait (struct lwp_info *lp, int status)
d6b0e80f 1837{
e38504b3 1838 int pid = lp->ptid.lwp ();
3d799a95 1839 struct target_waitstatus *ourstatus = &lp->waitstatus;
89a5711c 1840 int event = linux_ptrace_get_extended_event (status);
d6b0e80f 1841
bfd09d20
JS
1842 /* All extended events we currently use are mid-syscall. Only
1843 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
1844 you have to be using PTRACE_SEIZE to get that. */
1845 lp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
1846
3d799a95
DJ
1847 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1848 || event == PTRACE_EVENT_CLONE)
d6b0e80f 1849 {
3d799a95
DJ
1850 unsigned long new_pid;
1851 int ret;
1852
1853 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 1854
3d799a95
DJ
1855 /* If we haven't already seen the new PID stop, wait for it now. */
1856 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1857 {
1858 /* The new child has a pending SIGSTOP. We can't affect it until it
1859 hits the SIGSTOP, but we're already attached. */
4a6ed09b 1860 ret = my_waitpid (new_pid, &status, __WALL);
3d799a95
DJ
1861 if (ret == -1)
1862 perror_with_name (_("waiting for new child"));
1863 else if (ret != new_pid)
f34652de 1864 internal_error (_("wait returned unexpected PID %d"), ret);
3d799a95 1865 else if (!WIFSTOPPED (status))
f34652de 1866 internal_error (_("wait returned unexpected status 0x%x"), status);
3d799a95
DJ
1867 }
1868
183be222 1869 ptid_t child_ptid (new_pid, new_pid);
3d799a95 1870
26cb8b7c
PA
1871 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
1872 {
8a89ddbd
PA
1873 open_proc_mem_file (child_ptid);
1874
26cb8b7c
PA
1875 /* The arch-specific native code may need to know about new
1876 forks even if those end up never mapped to an
1877 inferior. */
135340af 1878 linux_target->low_new_fork (lp, new_pid);
26cb8b7c 1879 }
1310c1b0
PFC
1880 else if (event == PTRACE_EVENT_CLONE)
1881 {
1882 linux_target->low_new_clone (lp, new_pid);
1883 }
26cb8b7c 1884
2277426b 1885 if (event == PTRACE_EVENT_FORK
e99b03dc 1886 && linux_fork_checkpointing_p (lp->ptid.pid ()))
2277426b 1887 {
2277426b
PA
1888 /* Handle checkpointing by linux-fork.c here as a special
1889 case. We don't want the follow-fork-mode or 'catch fork'
1890 to interfere with this. */
1891
1892 /* This won't actually modify the breakpoint list, but will
1893 physically remove the breakpoints from the child. */
184ea2f7 1894 detach_breakpoints (ptid_t (new_pid, new_pid));
2277426b
PA
1895
1896 /* Retain child fork in ptrace (stopped) state. */
14571dad
MS
1897 if (!find_fork_pid (new_pid))
1898 add_fork (new_pid);
2277426b
PA
1899
1900 /* Report as spurious, so that infrun doesn't want to follow
1901 this fork. We're actually doing an infcall in
1902 linux-fork.c. */
183be222 1903 ourstatus->set_spurious ();
2277426b
PA
1904
1905 /* Report the stop to the core. */
1906 return 0;
1907 }
1908
3d799a95 1909 if (event == PTRACE_EVENT_FORK)
183be222 1910 ourstatus->set_forked (child_ptid);
3d799a95 1911 else if (event == PTRACE_EVENT_VFORK)
183be222 1912 ourstatus->set_vforked (child_ptid);
4dd63d48 1913 else if (event == PTRACE_EVENT_CLONE)
3d799a95 1914 {
78768c4a
JK
1915 struct lwp_info *new_lp;
1916
183be222 1917 ourstatus->set_ignore ();
78768c4a 1918
9327494e
SM
1919 linux_nat_debug_printf
1920 ("Got clone event from LWP %d, new child is LWP %ld", pid, new_pid);
3c4d7e12 1921
184ea2f7 1922 new_lp = add_lwp (ptid_t (lp->ptid.pid (), new_pid));
4c28f408 1923 new_lp->stopped = 1;
4dd63d48 1924 new_lp->resumed = 1;
d6b0e80f 1925
2db9a427
PA
1926 /* If the thread_db layer is active, let it record the user
1927 level thread id and status, and add the thread to GDB's
1928 list. */
1929 if (!thread_db_notice_clone (lp->ptid, new_lp->ptid))
3d799a95 1930 {
2db9a427
PA
1931 /* The process is not using thread_db. Add the LWP to
1932 GDB's list. */
5b6d1e4f 1933 add_thread (linux_target, new_lp->ptid);
2db9a427 1934 }
4c28f408 1935
2ee52aa4 1936 /* Even if we're stopping the thread for some reason
4dd63d48
PA
1937 internal to this module, from the perspective of infrun
1938 and the user/frontend, this new thread is running until
1939 it next reports a stop. */
719546c4
SM
1940 set_running (linux_target, new_lp->ptid, true);
1941 set_executing (linux_target, new_lp->ptid, true);
4c28f408 1942
4dd63d48 1943 if (WSTOPSIG (status) != SIGSTOP)
79395f92 1944 {
4dd63d48
PA
 1945	      /* This can happen if someone starts sending the new thread
 1946		 signals with a lower number than SIGSTOP (e.g. SIGUSR1)
 1947		 before it gets a chance to run.
1948 This is an unlikely case, and harder to handle for
1949 fork / vfork than for clone, so we do not try - but
1950 we handle it for clone events here. */
1951
1952 new_lp->signalled = 1;
1953
79395f92
PA
1954 /* We created NEW_LP so it cannot yet contain STATUS. */
1955 gdb_assert (new_lp->status == 0);
1956
1957 /* Save the wait status to report later. */
9327494e
SM
1958 linux_nat_debug_printf
1959 ("waitpid of new LWP %ld, saving status %s",
8d06918f 1960 (long) new_lp->ptid.lwp (), status_to_str (status).c_str ());
79395f92
PA
1961 new_lp->status = status;
1962 }
aa01bd36
PA
1963 else if (report_thread_events)
1964 {
183be222 1965 new_lp->waitstatus.set_thread_created ();
aa01bd36
PA
1966 new_lp->status = status;
1967 }
79395f92 1968
3d799a95
DJ
1969 return 1;
1970 }
1971
1972 return 0;
d6b0e80f
AC
1973 }
1974
3d799a95
DJ
1975 if (event == PTRACE_EVENT_EXEC)
1976 {
9327494e 1977 linux_nat_debug_printf ("Got exec event from LWP %ld", lp->ptid.lwp ());
a75724bc 1978
8a89ddbd
PA
1979 /* Close the previous /proc/PID/mem file for this inferior,
 1980	 which was using the address space that is now gone.
1981 Reading/writing from this file would return 0/EOF. */
1982 close_proc_mem_file (lp->ptid.pid ());
1983
1984 /* Open a new file for the new address space. */
1985 open_proc_mem_file (lp->ptid);
05c06f31 1986
183be222
SM
1987 ourstatus->set_execd
1988 (make_unique_xstrdup (linux_proc_pid_to_exec_file (pid)));
3d799a95 1989
8af756ef
PA
1990 /* The thread that execed must have been resumed, but, when a
1991 thread execs, it changes its tid to the tgid, and the old
 1992	 tgid thread might not have been resumed.  */
1993 lp->resumed = 1;
6c95b8df
PA
1994 return 0;
1995 }
1996
1997 if (event == PTRACE_EVENT_VFORK_DONE)
1998 {
9327494e 1999 linux_nat_debug_printf
5a0c4a06
SM
2000 ("Got PTRACE_EVENT_VFORK_DONE from LWP %ld",
2001 lp->ptid.lwp ());
2002 ourstatus->set_vfork_done ();
2003 return 0;
3d799a95
DJ
2004 }
2005
f34652de 2006 internal_error (_("unknown ptrace event %d"), event);
d6b0e80f
AC
2007}
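
/* Side note (a sketch, not code used here): the extended events handled
   above are only delivered if the corresponding ptrace options were set
   on the LWP beforehand, along the lines of:

       ptrace (PTRACE_SETOPTIONS, lwpid, 0,
	       (void *) (PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
			 | PTRACE_O_TRACECLONE | PTRACE_O_TRACEEXEC
			 | PTRACE_O_TRACEVFORKDONE | PTRACE_O_TRACESYSGOOD));

   In this file that is done by linux_enable_event_reporting, with the
   option set computed by linux_nat_ptrace_options.  */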
2008
9c3a5d93
PA
2009/* Suspend waiting for a signal. We're mostly interested in
2010 SIGCHLD/SIGINT. */
2011
2012static void
2013wait_for_signal ()
2014{
9327494e 2015 linux_nat_debug_printf ("about to sigsuspend");
9c3a5d93
PA
2016 sigsuspend (&suspend_mask);
2017
2018 /* If the quit flag is set, it means that the user pressed Ctrl-C
2019 and we're debugging a process that is running on a separate
2020 terminal, so we must forward the Ctrl-C to the inferior. (If the
2021 inferior is sharing GDB's terminal, then the Ctrl-C reaches the
2022 inferior directly.) We must do this here because functions that
2023 need to block waiting for a signal loop forever until there's an
2024 event to report before returning back to the event loop. */
2025 if (!target_terminal::is_ours ())
2026 {
2027 if (check_quit_flag ())
2028 target_pass_ctrlc ();
2029 }
2030}
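
/* A minimal sketch (not GDB code) of the POSIX pattern wait_for_signal
   relies on: SIGCHLD stays blocked while we test for events, and
   sigsuspend atomically swaps in a mask with SIGCHLD unblocked only
   while we sleep, so a SIGCHLD delivered in between cannot be lost:

       sigset_t block_mask, suspend_mask_example;
       sigemptyset (&block_mask);
       sigaddset (&block_mask, SIGCHLD);
       sigprocmask (SIG_BLOCK, &block_mask, &suspend_mask_example);
       sigdelset (&suspend_mask_example, SIGCHLD);
       while (!event_pending ())             // hypothetical predicate
	 sigsuspend (&suspend_mask_example); // returns after a handler runs

   The file-level suspend_mask used above plays a similar role.  */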
2031
d6b0e80f
AC
2032/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2033 exited. */
2034
2035static int
2036wait_lwp (struct lwp_info *lp)
2037{
2038 pid_t pid;
432b4d03 2039 int status = 0;
d6b0e80f 2040 int thread_dead = 0;
432b4d03 2041 sigset_t prev_mask;
d6b0e80f
AC
2042
2043 gdb_assert (!lp->stopped);
2044 gdb_assert (lp->status == 0);
2045
432b4d03
JK
2046 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2047 block_child_signals (&prev_mask);
2048
2049 for (;;)
d6b0e80f 2050 {
e38504b3 2051 pid = my_waitpid (lp->ptid.lwp (), &status, __WALL | WNOHANG);
a9f4bb21
PA
2052 if (pid == -1 && errno == ECHILD)
2053 {
2054 /* The thread has previously exited. We need to delete it
4a6ed09b
PA
2055 now because if this was a non-leader thread execing, we
2056 won't get an exit event. See comments on exec events at
2057 the top of the file. */
a9f4bb21 2058 thread_dead = 1;
9327494e 2059 linux_nat_debug_printf ("%s vanished.",
e53c95d4 2060 lp->ptid.to_string ().c_str ());
a9f4bb21 2061 }
432b4d03
JK
2062 if (pid != 0)
2063 break;
2064
2065 /* Bugs 10970, 12702.
2066 Thread group leader may have exited in which case we'll lock up in
2067 waitpid if there are other threads, even if they are all zombies too.
2068 Basically, we're not supposed to use waitpid this way.
4a6ed09b
PA
2069 tkill(pid,0) cannot be used here as it gets ESRCH for both
 2070	 zombie and running processes.
432b4d03
JK
2071
2072 As a workaround, check if we're waiting for the thread group leader and
2073 if it's a zombie, and avoid calling waitpid if it is.
2074
2075 This is racy, what if the tgl becomes a zombie right after we check?
2076 Therefore always use WNOHANG with sigsuspend - it is equivalent to
5f572dec 2077	 a blocking waitpid, but linux_proc_pid_is_zombie is safe this way.  */
432b4d03 2078
e38504b3
TT
2079 if (lp->ptid.pid () == lp->ptid.lwp ()
2080 && linux_proc_pid_is_zombie (lp->ptid.lwp ()))
d6b0e80f 2081 {
d6b0e80f 2082 thread_dead = 1;
9327494e 2083 linux_nat_debug_printf ("Thread group leader %s vanished.",
e53c95d4 2084 lp->ptid.to_string ().c_str ());
432b4d03 2085 break;
d6b0e80f 2086 }
432b4d03
JK
2087
 2088	 /* Wait for the next SIGCHLD and try again.  This may let SIGCHLD
 2089	    handlers get invoked even though our caller had them intentionally
 2090	    blocked by block_child_signals.  Only the loop in linux_nat_wait_1
 2091	    is sensitive to this, and there, if we get called, my_waitpid runs
 2092	    again before the loop reaches sigsuspend, so we can safely let the
 2093	    handlers get executed here.  */
9c3a5d93 2094 wait_for_signal ();
432b4d03
JK
2095 }
2096
2097 restore_child_signals_mask (&prev_mask);
2098
d6b0e80f
AC
2099 if (!thread_dead)
2100 {
e38504b3 2101 gdb_assert (pid == lp->ptid.lwp ());
d6b0e80f 2102
9327494e 2103 linux_nat_debug_printf ("waitpid %s received %s",
e53c95d4 2104 lp->ptid.to_string ().c_str (),
8d06918f 2105 status_to_str (status).c_str ());
d6b0e80f 2106
a9f4bb21
PA
2107 /* Check if the thread has exited. */
2108 if (WIFEXITED (status) || WIFSIGNALED (status))
2109 {
aa01bd36 2110 if (report_thread_events
e38504b3 2111 || lp->ptid.pid () == lp->ptid.lwp ())
69dde7dc 2112 {
9327494e 2113 linux_nat_debug_printf ("LWP %d exited.", lp->ptid.pid ());
69dde7dc 2114
aa01bd36 2115 /* If this is the leader exiting, it means the whole
69dde7dc
PA
2116 process is gone. Store the status to report to the
2117 core. Store it in lp->waitstatus, because lp->status
2118 would be ambiguous (W_EXITCODE(0,0) == 0). */
7509b829 2119 lp->waitstatus = host_status_to_waitstatus (status);
69dde7dc
PA
2120 return 0;
2121 }
2122
a9f4bb21 2123 thread_dead = 1;
9327494e 2124 linux_nat_debug_printf ("%s exited.",
e53c95d4 2125 lp->ptid.to_string ().c_str ());
a9f4bb21 2126 }
d6b0e80f
AC
2127 }
2128
2129 if (thread_dead)
2130 {
e26af52f 2131 exit_lwp (lp);
d6b0e80f
AC
2132 return 0;
2133 }
2134
2135 gdb_assert (WIFSTOPPED (status));
8817a6f2 2136 lp->stopped = 1;
d6b0e80f 2137
8784d563
PA
2138 if (lp->must_set_ptrace_flags)
2139 {
5b6d1e4f 2140 inferior *inf = find_inferior_pid (linux_target, lp->ptid.pid ());
de0d863e 2141 int options = linux_nat_ptrace_options (inf->attach_flag);
8784d563 2142
e38504b3 2143 linux_enable_event_reporting (lp->ptid.lwp (), options);
8784d563
PA
2144 lp->must_set_ptrace_flags = 0;
2145 }
2146
ca2163eb
PA
2147 /* Handle GNU/Linux's syscall SIGTRAPs. */
2148 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2149 {
2150 /* No longer need the sysgood bit. The ptrace event ends up
2151 recorded in lp->waitstatus if we care for it. We can carry
2152 on handling the event like a regular SIGTRAP from here
2153 on. */
2154 status = W_STOPCODE (SIGTRAP);
2155 if (linux_handle_syscall_trap (lp, 1))
2156 return wait_lwp (lp);
2157 }
bfd09d20
JS
2158 else
2159 {
2160 /* Almost all other ptrace-stops are known to be outside of system
2161 calls, with further exceptions in linux_handle_extended_wait. */
2162 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2163 }
ca2163eb 2164
d6b0e80f 2165 /* Handle GNU/Linux's extended waitstatus for trace events. */
89a5711c
DB
2166 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2167 && linux_is_extended_waitstatus (status))
d6b0e80f 2168 {
9327494e 2169 linux_nat_debug_printf ("Handling extended status 0x%06x", status);
4dd63d48 2170 linux_handle_extended_wait (lp, status);
20ba1ce6 2171 return 0;
d6b0e80f
AC
2172 }
2173
2174 return status;
2175}
2176
2177/* Send a SIGSTOP to LP. */
2178
2179static int
d3a70e03 2180stop_callback (struct lwp_info *lp)
d6b0e80f
AC
2181{
2182 if (!lp->stopped && !lp->signalled)
2183 {
2184 int ret;
2185
9327494e 2186 linux_nat_debug_printf ("kill %s **<SIGSTOP>**",
e53c95d4 2187 lp->ptid.to_string ().c_str ());
9327494e 2188
d6b0e80f 2189 errno = 0;
e38504b3 2190 ret = kill_lwp (lp->ptid.lwp (), SIGSTOP);
9327494e 2191 linux_nat_debug_printf ("lwp kill %d %s", ret,
d6b0e80f 2192 errno ? safe_strerror (errno) : "ERRNO-OK");
d6b0e80f
AC
2193
2194 lp->signalled = 1;
2195 gdb_assert (lp->status == 0);
2196 }
2197
2198 return 0;
2199}
2200
7b50312a
PA
2201/* Request a stop on LWP. */
2202
2203void
2204linux_stop_lwp (struct lwp_info *lwp)
2205{
d3a70e03 2206 stop_callback (lwp);
7b50312a
PA
2207}
2208
2db9a427
PA
2209/* See linux-nat.h */
2210
2211void
2212linux_stop_and_wait_all_lwps (void)
2213{
2214 /* Stop all LWP's ... */
d3a70e03 2215 iterate_over_lwps (minus_one_ptid, stop_callback);
2db9a427
PA
2216
2217 /* ... and wait until all of them have reported back that
2218 they're no longer running. */
d3a70e03 2219 iterate_over_lwps (minus_one_ptid, stop_wait_callback);
2db9a427
PA
2220}
2221
2222/* See linux-nat.h */
2223
2224void
2225linux_unstop_all_lwps (void)
2226{
2227 iterate_over_lwps (minus_one_ptid,
d3a70e03
TT
2228 [] (struct lwp_info *info)
2229 {
2230 return resume_stopped_resumed_lwps (info, minus_one_ptid);
2231 });
2db9a427
PA
2232}
2233
57380f4e 2234/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2235
2236static int
57380f4e
DJ
2237linux_nat_has_pending_sigint (int pid)
2238{
2239 sigset_t pending, blocked, ignored;
57380f4e
DJ
2240
2241 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2242
2243 if (sigismember (&pending, SIGINT)
2244 && !sigismember (&ignored, SIGINT))
2245 return 1;
2246
2247 return 0;
2248}
2249
2250/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2251
2252static int
d3a70e03 2253set_ignore_sigint (struct lwp_info *lp)
d6b0e80f 2254{
57380f4e
DJ
2255 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2256 flag to consume the next one. */
2257 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2258 && WSTOPSIG (lp->status) == SIGINT)
2259 lp->status = 0;
2260 else
2261 lp->ignore_sigint = 1;
2262
2263 return 0;
2264}
2265
2266/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2267 This function is called after we know the LWP has stopped; if the LWP
2268 stopped before the expected SIGINT was delivered, then it will never have
2269 arrived. Also, if the signal was delivered to a shared queue and consumed
2270 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2271
57380f4e
DJ
2272static void
2273maybe_clear_ignore_sigint (struct lwp_info *lp)
2274{
2275 if (!lp->ignore_sigint)
2276 return;
2277
e38504b3 2278 if (!linux_nat_has_pending_sigint (lp->ptid.lwp ()))
57380f4e 2279 {
9327494e 2280 linux_nat_debug_printf ("Clearing bogus flag for %s",
e53c95d4 2281 lp->ptid.to_string ().c_str ());
57380f4e
DJ
2282 lp->ignore_sigint = 0;
2283 }
2284}
2285
ebec9a0f
PA
2286/* Fetch the possible triggered data watchpoint info and store it in
2287 LP.
2288
2289 On some archs, like x86, that use debug registers to set
2290 watchpoints, it's possible that the way to know which watched
 2291	 address trapped is to check the register that is used to select
2292 which address to watch. Problem is, between setting the watchpoint
2293 and reading back which data address trapped, the user may change
2294 the set of watchpoints, and, as a consequence, GDB changes the
2295 debug registers in the inferior. To avoid reading back a stale
2296 stopped-data-address when that happens, we cache in LP the fact
2297 that a watchpoint trapped, and the corresponding data address, as
2298 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2299 registers meanwhile, we have the cached data we can rely on. */
2300
9c02b525
PA
2301static int
2302check_stopped_by_watchpoint (struct lwp_info *lp)
ebec9a0f 2303{
2989a365 2304 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
ebec9a0f
PA
2305 inferior_ptid = lp->ptid;
2306
f6ac5f3d 2307 if (linux_target->low_stopped_by_watchpoint ())
ebec9a0f 2308 {
15c66dd6 2309 lp->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
f6ac5f3d
PA
2310 lp->stopped_data_address_p
2311 = linux_target->low_stopped_data_address (&lp->stopped_data_address);
ebec9a0f
PA
2312 }
2313
15c66dd6 2314 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
9c02b525
PA
2315}
2316
9c02b525 2317/* Returns true if the LWP had stopped for a watchpoint. */
ebec9a0f 2318
57810aa7 2319bool
f6ac5f3d 2320linux_nat_target::stopped_by_watchpoint ()
ebec9a0f
PA
2321{
2322 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2323
2324 gdb_assert (lp != NULL);
2325
15c66dd6 2326 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
ebec9a0f
PA
2327}
2328
57810aa7 2329bool
f6ac5f3d 2330linux_nat_target::stopped_data_address (CORE_ADDR *addr_p)
ebec9a0f
PA
2331{
2332 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2333
2334 gdb_assert (lp != NULL);
2335
2336 *addr_p = lp->stopped_data_address;
2337
2338 return lp->stopped_data_address_p;
2339}
2340
26ab7092
JK
2341/* Commonly, any breakpoint / watchpoint generates only SIGTRAP.  */
2342
135340af
PA
2343bool
2344linux_nat_target::low_status_is_event (int status)
26ab7092
JK
2345{
2346 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2347}
2348
57380f4e
DJ
2349/* Wait until LP is stopped. */
2350
2351static int
d3a70e03 2352stop_wait_callback (struct lwp_info *lp)
57380f4e 2353{
5b6d1e4f 2354 inferior *inf = find_inferior_ptid (linux_target, lp->ptid);
6c95b8df
PA
2355
 2356	  /* If this is a vfork parent, bail out; it is not going to report
 2357	     any SIGSTOP until the vfork is done.  */
2358 if (inf->vfork_child != NULL)
2359 return 0;
2360
d6b0e80f
AC
2361 if (!lp->stopped)
2362 {
2363 int status;
2364
2365 status = wait_lwp (lp);
2366 if (status == 0)
2367 return 0;
2368
57380f4e
DJ
2369 if (lp->ignore_sigint && WIFSTOPPED (status)
2370 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2371 {
57380f4e 2372 lp->ignore_sigint = 0;
d6b0e80f
AC
2373
2374 errno = 0;
e38504b3 2375 ptrace (PTRACE_CONT, lp->ptid.lwp (), 0, 0);
8817a6f2 2376 lp->stopped = 0;
9327494e
SM
2377 linux_nat_debug_printf
2378 ("PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)",
e53c95d4 2379 lp->ptid.to_string ().c_str (),
9327494e 2380 errno ? safe_strerror (errno) : "OK");
d6b0e80f 2381
d3a70e03 2382 return stop_wait_callback (lp);
d6b0e80f
AC
2383 }
2384
57380f4e
DJ
2385 maybe_clear_ignore_sigint (lp);
2386
d6b0e80f
AC
2387 if (WSTOPSIG (status) != SIGSTOP)
2388 {
e5ef252a 2389 /* The thread was stopped with a signal other than SIGSTOP. */
7feb7d06 2390
9327494e 2391 linux_nat_debug_printf ("Pending event %s in %s",
8d06918f 2392 status_to_str ((int) status).c_str (),
e53c95d4 2393 lp->ptid.to_string ().c_str ());
e5ef252a
PA
2394
2395 /* Save the sigtrap event. */
2396 lp->status = status;
e5ef252a 2397 gdb_assert (lp->signalled);
e7ad2f14 2398 save_stop_reason (lp);
d6b0e80f
AC
2399 }
2400 else
2401 {
7010835a 2402 /* We caught the SIGSTOP that we intended to catch. */
e5ef252a 2403
9327494e 2404 linux_nat_debug_printf ("Expected SIGSTOP caught for %s.",
e53c95d4 2405 lp->ptid.to_string ().c_str ());
e5ef252a 2406
d6b0e80f 2407 lp->signalled = 0;
7010835a
AB
2408
2409 /* If we are waiting for this stop so we can report the thread
2410 stopped then we need to record this status. Otherwise, we can
2411 now discard this stop event. */
2412 if (lp->last_resume_kind == resume_stop)
2413 {
2414 lp->status = status;
2415 save_stop_reason (lp);
2416 }
d6b0e80f
AC
2417 }
2418 }
2419
2420 return 0;
2421}
2422
9c02b525
PA
2423/* Return non-zero if LP has a wait status pending. Discard the
2424 pending event and resume the LWP if the event that originally
2425 caused the stop became uninteresting. */
d6b0e80f
AC
2426
2427static int
d3a70e03 2428status_callback (struct lwp_info *lp)
d6b0e80f
AC
2429{
2430 /* Only report a pending wait status if we pretend that this has
2431 indeed been resumed. */
ca2163eb
PA
2432 if (!lp->resumed)
2433 return 0;
2434
eb54c8bf
PA
2435 if (!lwp_status_pending_p (lp))
2436 return 0;
2437
15c66dd6
PA
2438 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
2439 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
9c02b525 2440 {
5b6d1e4f 2441 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
9c02b525
PA
2442 CORE_ADDR pc;
2443 int discard = 0;
2444
9c02b525
PA
2445 pc = regcache_read_pc (regcache);
2446
2447 if (pc != lp->stop_pc)
2448 {
9327494e 2449 linux_nat_debug_printf ("PC of %s changed. was=%s, now=%s",
e53c95d4 2450 lp->ptid.to_string ().c_str (),
99d9c3b9
SM
2451 paddress (current_inferior ()->arch (),
2452 lp->stop_pc),
2453 paddress (current_inferior ()->arch (), pc));
9c02b525
PA
2454 discard = 1;
2455 }
faf09f01
PA
2456
2457#if !USE_SIGTRAP_SIGINFO
a01bda52 2458 else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
9c02b525 2459 {
9327494e 2460 linux_nat_debug_printf ("previous breakpoint of %s, at %s gone",
e53c95d4 2461 lp->ptid.to_string ().c_str (),
99d9c3b9
SM
2462 paddress (current_inferior ()->arch (),
2463 lp->stop_pc));
9c02b525
PA
2464
2465 discard = 1;
2466 }
faf09f01 2467#endif
9c02b525
PA
2468
2469 if (discard)
2470 {
9327494e 2471 linux_nat_debug_printf ("pending event of %s cancelled.",
e53c95d4 2472 lp->ptid.to_string ().c_str ());
9c02b525
PA
2473
2474 lp->status = 0;
2475 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
2476 return 0;
2477 }
9c02b525
PA
2478 }
2479
eb54c8bf 2480 return 1;
d6b0e80f
AC
2481}
2482
d6b0e80f
AC
2483/* Count the LWP's that have had events. */
2484
2485static int
d3a70e03 2486count_events_callback (struct lwp_info *lp, int *count)
d6b0e80f 2487{
d6b0e80f
AC
2488 gdb_assert (count != NULL);
2489
9c02b525
PA
2490 /* Select only resumed LWPs that have an event pending. */
2491 if (lp->resumed && lwp_status_pending_p (lp))
d6b0e80f
AC
2492 (*count)++;
2493
2494 return 0;
2495}
2496
2497/* Select the LWP (if any) that is currently being single-stepped. */
2498
2499static int
d3a70e03 2500select_singlestep_lwp_callback (struct lwp_info *lp)
d6b0e80f 2501{
25289eb2
PA
2502 if (lp->last_resume_kind == resume_step
2503 && lp->status != 0)
d6b0e80f
AC
2504 return 1;
2505 else
2506 return 0;
2507}
2508
8a99810d
PA
2509/* Returns true if LP has a status pending. */
2510
2511static int
2512lwp_status_pending_p (struct lwp_info *lp)
2513{
2514 /* We check for lp->waitstatus in addition to lp->status, because we
2515 can have pending process exits recorded in lp->status and
2516 W_EXITCODE(0,0) happens to be 0. */
183be222 2517 return lp->status != 0 || lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE;
8a99810d
PA
2518}
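
/* Worked example of the ambiguity mentioned above: on GNU/Linux,
   W_EXITCODE(0,0) expands to 0, so an "exited with code 0" event stored
   only in lp->status would look exactly like "no event pending":

       int st = W_EXITCODE (0, 0);
       gdb_assert (st == 0 && WIFEXITED (st) && WEXITSTATUS (st) == 0);

   which is why pending process exits are recorded in lp->waitstatus.  */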
2519
b90fc188 2520/* Select the Nth LWP that has had an event. */
d6b0e80f
AC
2521
2522static int
d3a70e03 2523select_event_lwp_callback (struct lwp_info *lp, int *selector)
d6b0e80f 2524{
d6b0e80f
AC
2525 gdb_assert (selector != NULL);
2526
9c02b525
PA
2527 /* Select only resumed LWPs that have an event pending. */
2528 if (lp->resumed && lwp_status_pending_p (lp))
d6b0e80f
AC
2529 if ((*selector)-- == 0)
2530 return 1;
2531
2532 return 0;
2533}
2534
e7ad2f14
PA
2535/* Called when the LWP stopped for a signal/trap. If it stopped for a
 2536	 trap, check what caused it (breakpoint, watchpoint, trace, etc.),
2537 and save the result in the LWP's stop_reason field. If it stopped
2538 for a breakpoint, decrement the PC if necessary on the lwp's
2539 architecture. */
9c02b525 2540
e7ad2f14
PA
2541static void
2542save_stop_reason (struct lwp_info *lp)
710151dd 2543{
e7ad2f14
PA
2544 struct regcache *regcache;
2545 struct gdbarch *gdbarch;
515630c5 2546 CORE_ADDR pc;
9c02b525 2547 CORE_ADDR sw_bp_pc;
faf09f01
PA
2548#if USE_SIGTRAP_SIGINFO
2549 siginfo_t siginfo;
2550#endif
9c02b525 2551
e7ad2f14
PA
2552 gdb_assert (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON);
2553 gdb_assert (lp->status != 0);
2554
135340af 2555 if (!linux_target->low_status_is_event (lp->status))
e7ad2f14
PA
2556 return;
2557
a9deee17
PA
2558 inferior *inf = find_inferior_ptid (linux_target, lp->ptid);
2559 if (inf->starting_up)
2560 return;
2561
5b6d1e4f 2562 regcache = get_thread_regcache (linux_target, lp->ptid);
ac7936df 2563 gdbarch = regcache->arch ();
e7ad2f14 2564
9c02b525 2565 pc = regcache_read_pc (regcache);
527a273a 2566 sw_bp_pc = pc - gdbarch_decr_pc_after_break (gdbarch);
515630c5 2567
faf09f01
PA
2568#if USE_SIGTRAP_SIGINFO
2569 if (linux_nat_get_siginfo (lp->ptid, &siginfo))
2570 {
2571 if (siginfo.si_signo == SIGTRAP)
2572 {
e7ad2f14
PA
2573 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
2574 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
faf09f01 2575 {
e7ad2f14
PA
2576 /* The si_code is ambiguous on this arch -- check debug
2577 registers. */
2578 if (!check_stopped_by_watchpoint (lp))
2579 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
2580 }
2581 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
2582 {
2583 /* If we determine the LWP stopped for a SW breakpoint,
2584 trust it. Particularly don't check watchpoint
7da6a5b9 2585 registers, because, at least on s390, we'd find
e7ad2f14
PA
2586 stopped-by-watchpoint as long as there's a watchpoint
2587 set. */
faf09f01 2588 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
faf09f01 2589 }
e7ad2f14 2590 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
faf09f01 2591 {
e7ad2f14
PA
2592 /* This can indicate either a hardware breakpoint or
2593 hardware watchpoint. Check debug registers. */
2594 if (!check_stopped_by_watchpoint (lp))
2595 lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
faf09f01 2596 }
2bf6fb9d
PA
2597 else if (siginfo.si_code == TRAP_TRACE)
2598 {
9327494e 2599 linux_nat_debug_printf ("%s stopped by trace",
e53c95d4 2600 lp->ptid.to_string ().c_str ());
e7ad2f14
PA
2601
2602 /* We may have single stepped an instruction that
2603 triggered a watchpoint. In that case, on some
2604 architectures (such as x86), instead of TRAP_HWBKPT,
2605 si_code indicates TRAP_TRACE, and we need to check
2606 the debug registers separately. */
2607 check_stopped_by_watchpoint (lp);
2bf6fb9d 2608 }
faf09f01
PA
2609 }
2610 }
2611#else
9c02b525 2612 if ((!lp->step || lp->stop_pc == sw_bp_pc)
a01bda52 2613 && software_breakpoint_inserted_here_p (regcache->aspace (),
9c02b525 2614 sw_bp_pc))
710151dd 2615 {
9c02b525
PA
2616 /* The LWP was either continued, or stepped a software
2617 breakpoint instruction. */
e7ad2f14
PA
2618 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
2619 }
2620
a01bda52 2621 if (hardware_breakpoint_inserted_here_p (regcache->aspace (), pc))
e7ad2f14
PA
2622 lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
2623
2624 if (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
2625 check_stopped_by_watchpoint (lp);
2626#endif
2627
2628 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2629 {
9327494e 2630 linux_nat_debug_printf ("%s stopped by software breakpoint",
e53c95d4 2631 lp->ptid.to_string ().c_str ());
710151dd
PA
2632
2633 /* Back up the PC if necessary. */
9c02b525
PA
2634 if (pc != sw_bp_pc)
2635 regcache_write_pc (regcache, sw_bp_pc);
515630c5 2636
e7ad2f14
PA
2637 /* Update this so we record the correct stop PC below. */
2638 pc = sw_bp_pc;
710151dd 2639 }
e7ad2f14 2640 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
9c02b525 2641 {
9327494e 2642 linux_nat_debug_printf ("%s stopped by hardware breakpoint",
e53c95d4 2643 lp->ptid.to_string ().c_str ());
e7ad2f14
PA
2644 }
2645 else if (lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
2646 {
9327494e 2647 linux_nat_debug_printf ("%s stopped by hardware watchpoint",
e53c95d4 2648 lp->ptid.to_string ().c_str ());
9c02b525 2649 }
d6b0e80f 2650
e7ad2f14 2651 lp->stop_pc = pc;
d6b0e80f
AC
2652}
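
/* A rough sketch (not GDB code) of the si_code inspection done above
   when USE_SIGTRAP_SIGINFO is available; TID is a hypothetical LWP id:

       siginfo_t si;
       if (ptrace (PTRACE_GETSIGINFO, tid, 0, &si) == 0
	   && si.si_signo == SIGTRAP)
	 {
	   if (si.si_code == TRAP_BRKPT)        // software breakpoint
	     ...;
	   else if (si.si_code == TRAP_HWBKPT)  // hw breakpoint or watchpoint
	     ...;
	   else if (si.si_code == TRAP_TRACE)   // hardware single-step
	     ...;
	 }

   The GDB_ARCH_IS_TRAP_* macros used above wrap these checks, since some
   architectures report additional or overlapping si_code values.  */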
2653
faf09f01
PA
2654
2655/* Returns true if the LWP had stopped for a software breakpoint. */
2656
57810aa7 2657bool
f6ac5f3d 2658linux_nat_target::stopped_by_sw_breakpoint ()
faf09f01
PA
2659{
2660 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2661
2662 gdb_assert (lp != NULL);
2663
2664 return lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2665}
2666
2667/* Implement the supports_stopped_by_sw_breakpoint method. */
2668
57810aa7 2669bool
f6ac5f3d 2670linux_nat_target::supports_stopped_by_sw_breakpoint ()
faf09f01
PA
2671{
2672 return USE_SIGTRAP_SIGINFO;
2673}
2674
2675/* Returns true if the LWP had stopped for a hardware
2676 breakpoint/watchpoint. */
2677
57810aa7 2678bool
f6ac5f3d 2679linux_nat_target::stopped_by_hw_breakpoint ()
faf09f01
PA
2680{
2681 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2682
2683 gdb_assert (lp != NULL);
2684
2685 return lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2686}
2687
2688/* Implement the supports_stopped_by_hw_breakpoint method. */
2689
57810aa7 2690bool
f6ac5f3d 2691linux_nat_target::supports_stopped_by_hw_breakpoint ()
faf09f01
PA
2692{
2693 return USE_SIGTRAP_SIGINFO;
2694}
2695
d6b0e80f
AC
2696/* Select one LWP out of those that have events pending. */
2697
2698static void
d90e17a7 2699select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
d6b0e80f
AC
2700{
2701 int num_events = 0;
2702 int random_selector;
9c02b525 2703 struct lwp_info *event_lp = NULL;
d6b0e80f 2704
ac264b3b 2705 /* Record the wait status for the original LWP. */
d6b0e80f
AC
2706 (*orig_lp)->status = *status;
2707
9c02b525
PA
2708 /* In all-stop, give preference to the LWP that is being
2709 single-stepped. There will be at most one, and it will be the
2710 LWP that the core is most interested in. If we didn't do this,
2711 then we'd have to handle pending step SIGTRAPs somehow in case
2712 the core later continues the previously-stepped thread, as
2713 otherwise we'd report the pending SIGTRAP then, and the core, not
2714 having stepped the thread, wouldn't understand what the trap was
2715 for, and therefore would report it to the user as a random
2716 signal. */
fbea99ea 2717 if (!target_is_non_stop_p ())
d6b0e80f 2718 {
d3a70e03 2719 event_lp = iterate_over_lwps (filter, select_singlestep_lwp_callback);
9c02b525
PA
2720 if (event_lp != NULL)
2721 {
9327494e 2722 linux_nat_debug_printf ("Select single-step %s",
e53c95d4 2723 event_lp->ptid.to_string ().c_str ());
9c02b525 2724 }
d6b0e80f 2725 }
9c02b525
PA
2726
2727 if (event_lp == NULL)
d6b0e80f 2728 {
9c02b525 2729 /* Pick one at random, out of those which have had events. */
d6b0e80f 2730
9c02b525 2731 /* First see how many events we have. */
d3a70e03
TT
2732 iterate_over_lwps (filter,
2733 [&] (struct lwp_info *info)
2734 {
2735 return count_events_callback (info, &num_events);
2736 });
8bf3b159 2737 gdb_assert (num_events > 0);
d6b0e80f 2738
9c02b525
PA
2739 /* Now randomly pick a LWP out of those that have had
2740 events. */
d6b0e80f
AC
2741 random_selector = (int)
2742 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2743
9327494e
SM
2744 if (num_events > 1)
2745 linux_nat_debug_printf ("Found %d events, selecting #%d",
2746 num_events, random_selector);
d6b0e80f 2747
d3a70e03
TT
2748 event_lp
2749 = (iterate_over_lwps
2750 (filter,
2751 [&] (struct lwp_info *info)
2752 {
2753 return select_event_lwp_callback (info,
2754 &random_selector);
2755 }));
d6b0e80f
AC
2756 }
2757
2758 if (event_lp != NULL)
2759 {
2760 /* Switch the event LWP. */
2761 *orig_lp = event_lp;
2762 *status = event_lp->status;
2763 }
2764
2765 /* Flush the wait status for the event LWP. */
2766 (*orig_lp)->status = 0;
2767}
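
/* Worked example of the selection above: with num_events == 3,
   (3 * rand ()) / (RAND_MAX + 1.0) is uniform over [0, 3), so
   random_selector becomes 0, 1 or 2 with equal probability, and
   select_event_lwp_callback then returns the LWP at that index among
   those with pending events.  */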
2768
2769/* Return non-zero if LP has been resumed. */
2770
2771static int
d3a70e03 2772resumed_callback (struct lwp_info *lp)
d6b0e80f
AC
2773{
2774 return lp->resumed;
2775}
2776
02f3fc28 2777/* Check if we should go on and pass this event to common code.
12d9289a 2778
897608ed
SM
 2779	 If so, save the status in the lwp_info structure associated with LWPID.  */
2780
2781static void
9c02b525 2782linux_nat_filter_event (int lwpid, int status)
02f3fc28
PA
2783{
2784 struct lwp_info *lp;
89a5711c 2785 int event = linux_ptrace_get_extended_event (status);
02f3fc28 2786
f2907e49 2787 lp = find_lwp_pid (ptid_t (lwpid));
02f3fc28 2788
1abeb1e9
PA
2789 /* Check for events reported by anything not in our LWP list. */
2790 if (lp == nullptr)
0e5bf2a8 2791 {
1abeb1e9
PA
2792 if (WIFSTOPPED (status))
2793 {
2794 if (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC)
2795 {
2796 /* A non-leader thread exec'ed after we've seen the
2797 leader zombie, and removed it from our lists (in
2798 check_zombie_leaders). The non-leader thread changes
2799 its tid to the tgid. */
2800 linux_nat_debug_printf
2801 ("Re-adding thread group leader LWP %d after exec.",
2802 lwpid);
0e5bf2a8 2803
1abeb1e9
PA
2804 lp = add_lwp (ptid_t (lwpid, lwpid));
2805 lp->stopped = 1;
2806 lp->resumed = 1;
2807 add_thread (linux_target, lp->ptid);
2808 }
2809 else
2810 {
2811 /* A process we are controlling has forked and the new
2812 child's stop was reported to us by the kernel. Save
2813 its PID and go back to waiting for the fork event to
2814 be reported - the stopped process might be returned
2815 from waitpid before or after the fork event is. */
2816 linux_nat_debug_printf
2817 ("Saving LWP %d status %s in stopped_pids list",
2818 lwpid, status_to_str (status).c_str ());
2819 add_to_pid_list (&stopped_pids, lwpid, status);
2820 }
2821 }
2822 else
2823 {
2824 /* Don't report an event for the exit of an LWP not in our
2825 list, i.e. not part of any inferior we're debugging.
2826 This can happen if we detach from a program we originally
6cf20c46
PA
2827 forked and then it exits. However, note that we may have
2828 earlier deleted a leader of an inferior we're debugging,
2829 in check_zombie_leaders. Re-add it back here if so. */
2830 for (inferior *inf : all_inferiors (linux_target))
2831 {
2832 if (inf->pid == lwpid)
2833 {
2834 linux_nat_debug_printf
2835 ("Re-adding thread group leader LWP %d after exit.",
2836 lwpid);
2837
2838 lp = add_lwp (ptid_t (lwpid, lwpid));
2839 lp->resumed = 1;
2840 add_thread (linux_target, lp->ptid);
2841 break;
2842 }
2843 }
1abeb1e9 2844 }
0e5bf2a8 2845
1abeb1e9
PA
2846 if (lp == nullptr)
2847 return;
02f3fc28
PA
2848 }
2849
8817a6f2
PA
2850 /* This LWP is stopped now. (And if dead, this prevents it from
2851 ever being continued.) */
2852 lp->stopped = 1;
2853
8784d563
PA
2854 if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
2855 {
5b6d1e4f 2856 inferior *inf = find_inferior_pid (linux_target, lp->ptid.pid ());
de0d863e 2857 int options = linux_nat_ptrace_options (inf->attach_flag);
8784d563 2858
e38504b3 2859 linux_enable_event_reporting (lp->ptid.lwp (), options);
8784d563
PA
2860 lp->must_set_ptrace_flags = 0;
2861 }
2862
ca2163eb
PA
2863 /* Handle GNU/Linux's syscall SIGTRAPs. */
2864 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2865 {
2866 /* No longer need the sysgood bit. The ptrace event ends up
2867 recorded in lp->waitstatus if we care for it. We can carry
2868 on handling the event like a regular SIGTRAP from here
2869 on. */
2870 status = W_STOPCODE (SIGTRAP);
2871 if (linux_handle_syscall_trap (lp, 0))
897608ed 2872 return;
ca2163eb 2873 }
bfd09d20
JS
2874 else
2875 {
2876 /* Almost all other ptrace-stops are known to be outside of system
2877 calls, with further exceptions in linux_handle_extended_wait. */
2878 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2879 }
02f3fc28 2880
ca2163eb 2881 /* Handle GNU/Linux's extended waitstatus for trace events. */
89a5711c
DB
2882 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2883 && linux_is_extended_waitstatus (status))
02f3fc28 2884 {
9327494e
SM
2885 linux_nat_debug_printf ("Handling extended status 0x%06x", status);
2886
4dd63d48 2887 if (linux_handle_extended_wait (lp, status))
897608ed 2888 return;
02f3fc28
PA
2889 }
2890
2891 /* Check if the thread has exited. */
9c02b525
PA
2892 if (WIFEXITED (status) || WIFSIGNALED (status))
2893 {
6cf20c46 2894 if (!report_thread_events && !is_leader (lp))
02f3fc28 2895 {
9327494e 2896 linux_nat_debug_printf ("%s exited.",
e53c95d4 2897 lp->ptid.to_string ().c_str ());
9c02b525 2898
6cf20c46 2899 /* If this was not the leader exiting, then the exit signal
4a6ed09b
PA
2900 was not the end of the debugged application and should be
2901 ignored. */
2902 exit_lwp (lp);
897608ed 2903 return;
02f3fc28
PA
2904 }
2905
77598427
PA
2906 /* Note that even if the leader was ptrace-stopped, it can still
2907 exit, if e.g., some other thread brings down the whole
2908 process (calls `exit'). So don't assert that the lwp is
2909 resumed. */
9327494e
SM
2910 linux_nat_debug_printf ("LWP %ld exited (resumed=%d)",
2911 lp->ptid.lwp (), lp->resumed);
02f3fc28 2912
9c02b525
PA
 2913	 /* Dead LWPs aren't expected to report a pending SIGSTOP.  */
2914 lp->signalled = 0;
2915
2916 /* Store the pending event in the waitstatus, because
2917 W_EXITCODE(0,0) == 0. */
7509b829 2918 lp->waitstatus = host_status_to_waitstatus (status);
897608ed 2919 return;
02f3fc28
PA
2920 }
2921
02f3fc28
PA
2922 /* Make sure we don't report a SIGSTOP that we sent ourselves in
2923 an attempt to stop an LWP. */
2924 if (lp->signalled
2925 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2926 {
02f3fc28
PA
2927 lp->signalled = 0;
2928
2bf6fb9d 2929 if (lp->last_resume_kind == resume_stop)
25289eb2 2930 {
9327494e 2931 linux_nat_debug_printf ("resume_stop SIGSTOP caught for %s.",
e53c95d4 2932 lp->ptid.to_string ().c_str ());
2bf6fb9d
PA
2933 }
2934 else
2935 {
2936 /* This is a delayed SIGSTOP. Filter out the event. */
02f3fc28 2937
9327494e
SM
2938 linux_nat_debug_printf
2939 ("%s %s, 0, 0 (discard delayed SIGSTOP)",
2940 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
e53c95d4 2941 lp->ptid.to_string ().c_str ());
02f3fc28 2942
2bf6fb9d 2943 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
25289eb2 2944 gdb_assert (lp->resumed);
897608ed 2945 return;
25289eb2 2946 }
02f3fc28
PA
2947 }
2948
57380f4e
DJ
2949 /* Make sure we don't report a SIGINT that we have already displayed
2950 for another thread. */
2951 if (lp->ignore_sigint
2952 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
2953 {
9327494e 2954 linux_nat_debug_printf ("Delayed SIGINT caught for %s.",
e53c95d4 2955 lp->ptid.to_string ().c_str ());
57380f4e
DJ
2956
2957 /* This is a delayed SIGINT. */
2958 lp->ignore_sigint = 0;
2959
8a99810d 2960 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
9327494e
SM
2961 linux_nat_debug_printf ("%s %s, 0, 0 (discard SIGINT)",
2962 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
e53c95d4 2963 lp->ptid.to_string ().c_str ());
57380f4e
DJ
2964 gdb_assert (lp->resumed);
2965
2966 /* Discard the event. */
897608ed 2967 return;
57380f4e
DJ
2968 }
2969
9c02b525
PA
2970 /* Don't report signals that GDB isn't interested in, such as
2971 signals that are neither printed nor stopped upon. Stopping all
7da6a5b9 2972 threads can be a bit time-consuming, so if we want decent
9c02b525
PA
2973 performance with heavily multi-threaded programs, especially when
2974 they're using a high frequency timer, we'd better avoid it if we
2975 can. */
2976 if (WIFSTOPPED (status))
2977 {
2978 enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
2979
fbea99ea 2980 if (!target_is_non_stop_p ())
9c02b525
PA
2981 {
2982 /* Only do the below in all-stop, as we currently use SIGSTOP
2983 to implement target_stop (see linux_nat_stop) in
2984 non-stop. */
2985 if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
2986 {
2987 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
2988 forwarded to the entire process group, that is, all LWPs
2989 will receive it - unless they're using CLONE_THREAD to
2990 share signals. Since we only want to report it once, we
2991 mark it as ignored for all LWPs except this one. */
d3a70e03 2992 iterate_over_lwps (ptid_t (lp->ptid.pid ()), set_ignore_sigint);
9c02b525
PA
2993 lp->ignore_sigint = 0;
2994 }
2995 else
2996 maybe_clear_ignore_sigint (lp);
2997 }
2998
2999 /* When using hardware single-step, we need to report every signal.
c9587f88 3000 Otherwise, signals in pass_mask may be short-circuited
d8c06f22
AB
 3001	 except for signals that might be caused by a breakpoint, or for SIGSTOP
 3002	 if we sent the SIGSTOP ourselves and are waiting for it to arrive.  */
9c02b525 3003 if (!lp->step
c9587f88 3004 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status))
d8c06f22 3005 && (WSTOPSIG (status) != SIGSTOP
9213a6d7 3006 || !linux_target->find_thread (lp->ptid)->stop_requested)
c9587f88 3007 && !linux_wstatus_maybe_breakpoint (status))
9c02b525
PA
3008 {
3009 linux_resume_one_lwp (lp, lp->step, signo);
9327494e
SM
3010 linux_nat_debug_printf
3011 ("%s %s, %s (preempt 'handle')",
3012 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
e53c95d4 3013 lp->ptid.to_string ().c_str (),
9327494e
SM
3014 (signo != GDB_SIGNAL_0
3015 ? strsignal (gdb_signal_to_host (signo)) : "0"));
897608ed 3016 return;
9c02b525
PA
3017 }
3018 }
3019
02f3fc28
PA
3020 /* An interesting event. */
3021 gdb_assert (lp);
ca2163eb 3022 lp->status = status;
e7ad2f14 3023 save_stop_reason (lp);
02f3fc28
PA
3024}
3025
0e5bf2a8
PA
3026/* Detect zombie thread group leaders, and "exit" them. We can't reap
3027 their exits until all other threads in the group have exited. */
3028
3029static void
3030check_zombie_leaders (void)
3031{
08036331 3032 for (inferior *inf : all_inferiors ())
0e5bf2a8
PA
3033 {
3034 struct lwp_info *leader_lp;
3035
3036 if (inf->pid == 0)
3037 continue;
3038
f2907e49 3039 leader_lp = find_lwp_pid (ptid_t (inf->pid));
0e5bf2a8
PA
3040 if (leader_lp != NULL
3041 /* Check if there are other threads in the group, as we may
6cf20c46
PA
3042 have raced with the inferior simply exiting. Note this
3043 isn't a watertight check. If the inferior is
3044 multi-threaded and is exiting, it may be we see the
3045 leader as zombie before we reap all the non-leader
3046 threads. See comments below. */
0e5bf2a8 3047 && num_lwps (inf->pid) > 1
5f572dec 3048 && linux_proc_pid_is_zombie (inf->pid))
0e5bf2a8 3049 {
6cf20c46
PA
3050 /* A zombie leader in a multi-threaded program can mean one
3051 of three things:
3052
3053 #1 - Only the leader exited, not the whole program, e.g.,
3054 with pthread_exit. Since we can't reap the leader's exit
3055 status until all other threads are gone and reaped too,
3056 we want to delete the zombie leader right away, as it
3057 can't be debugged, we can't read its registers, etc.
3058 This is the main reason we check for zombie leaders
3059 disappearing.
3060
3061 #2 - The whole thread-group/process exited (a group exit,
 3062	 via e.g. exit(3)), and there is (or will be shortly) an
3063 exit reported for each thread in the process, and then
3064 finally an exit for the leader once the non-leaders are
3065 reaped.
3066
3067 #3 - There are 3 or more threads in the group, and a
3068 thread other than the leader exec'd. See comments on
3069 exec events at the top of the file.
3070
3071 Ideally we would never delete the leader for case #2.
3072 Instead, we want to collect the exit status of each
3073 non-leader thread, and then finally collect the exit
3074 status of the leader as normal and use its exit code as
3075 whole-process exit code. Unfortunately, there's no
3076 race-free way to distinguish cases #1 and #2. We can't
3077 assume the exit events for the non-leaders threads are
3078 already pending in the kernel, nor can we assume the
3079 non-leader threads are in zombie state already. Between
3080 the leader becoming zombie and the non-leaders exiting
3081 and becoming zombie themselves, there's a small time
3082 window, so such a check would be racy. Temporarily
3083 pausing all threads and checking to see if all threads
3084 exit or not before re-resuming them would work in the
3085 case that all threads are running right now, but it
3086 wouldn't work if some thread is currently already
3087 ptrace-stopped, e.g., due to scheduler-locking.
3088
3089 So what we do is we delete the leader anyhow, and then
3090 later on when we see its exit status, we re-add it back.
3091 We also make sure that we only report a whole-process
3092 exit when we see the leader exiting, as opposed to when
3093 the last LWP in the LWP list exits, which can be a
3094 non-leader if we deleted the leader here. */
9327494e 3095 linux_nat_debug_printf ("Thread group leader %d zombie "
6cf20c46
PA
3096 "(it exited, or another thread execd), "
3097 "deleting it.",
9327494e 3098 inf->pid);
0e5bf2a8
PA
3099 exit_lwp (leader_lp);
3100 }
3101 }
3102}
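
/* Hedged sketch (not GDB code) of how a zombie can be recognized from
   procfs, which is roughly what linux_proc_pid_is_zombie does; the PID
   below is hypothetical:

       char state = 0, line[128];
       FILE *f = fopen ("/proc/12345/status", "r");
       while (f != NULL && fgets (line, sizeof line, f) != NULL)
	 if (sscanf (line, "State: %c", &state) == 1)
	   break;
       if (f != NULL)
	 fclose (f);
       // state == 'Z' means the thread group leader is a zombie.
*/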
3103
aa01bd36
PA
3104/* Convenience function that is called when the kernel reports an exit
3105 event. This decides whether to report the event to GDB as a
3106 process exit event, a thread exit event, or to suppress the
3107 event. */
3108
3109static ptid_t
3110filter_exit_event (struct lwp_info *event_child,
3111 struct target_waitstatus *ourstatus)
3112{
3113 ptid_t ptid = event_child->ptid;
3114
6cf20c46 3115 if (!is_leader (event_child))
aa01bd36
PA
3116 {
3117 if (report_thread_events)
183be222 3118 ourstatus->set_thread_exited (0);
aa01bd36 3119 else
183be222 3120 ourstatus->set_ignore ();
aa01bd36
PA
3121
3122 exit_lwp (event_child);
3123 }
3124
3125 return ptid;
3126}
3127
d6b0e80f 3128static ptid_t
f6ac5f3d 3129linux_nat_wait_1 (ptid_t ptid, struct target_waitstatus *ourstatus,
b60cea74 3130 target_wait_flags target_options)
d6b0e80f 3131{
b26b06dd
AB
3132 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
3133
fc9b8e47 3134 sigset_t prev_mask;
4b60df3d 3135 enum resume_kind last_resume_kind;
12d9289a 3136 struct lwp_info *lp;
12d9289a 3137 int status;
d6b0e80f 3138
f973ed9c
DJ
3139 /* The first time we get here after starting a new inferior, we may
3140 not have added it to the LWP list yet - this is the earliest
3141 moment at which we know its PID. */
677c92fe 3142 if (ptid.is_pid () && find_lwp_pid (ptid) == nullptr)
f973ed9c 3143 {
677c92fe 3144 ptid_t lwp_ptid (ptid.pid (), ptid.pid ());
27c9d204 3145
677c92fe
SM
3146 /* Upgrade the main thread's ptid. */
3147 thread_change_ptid (linux_target, ptid, lwp_ptid);
3148 lp = add_initial_lwp (lwp_ptid);
f973ed9c
DJ
3149 lp->resumed = 1;
3150 }
3151
12696c10 3152 /* Make sure SIGCHLD is blocked until the sigsuspend below. */
7feb7d06 3153 block_child_signals (&prev_mask);
d6b0e80f 3154
d6b0e80f 3155 /* First check if there is a LWP with a wait status pending. */
d3a70e03 3156 lp = iterate_over_lwps (ptid, status_callback);
8a99810d 3157 if (lp != NULL)
d6b0e80f 3158 {
9327494e 3159 linux_nat_debug_printf ("Using pending wait status %s for %s.",
57573e54 3160 pending_status_str (lp).c_str (),
e53c95d4 3161 lp->ptid.to_string ().c_str ());
d6b0e80f
AC
3162 }
3163
9c02b525
PA
3164 /* But if we don't find a pending event, we'll have to wait. Always
3165 pull all events out of the kernel. We'll randomly select an
3166 event LWP out of all that have events, to prevent starvation. */
7feb7d06 3167
d90e17a7 3168 while (lp == NULL)
d6b0e80f
AC
3169 {
3170 pid_t lwpid;
3171
0e5bf2a8
PA
 3172	 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
3173 quirks:
3174
3175 - If the thread group leader exits while other threads in the
3176 thread group still exist, waitpid(TGID, ...) hangs. That
3177 waitpid won't return an exit status until the other threads
85102364 3178 in the group are reaped.
0e5bf2a8
PA
3179
3180 - When a non-leader thread execs, that thread just vanishes
3181 without reporting an exit (so we'd hang if we waited for it
3182 explicitly in that case). The exec event is reported to
3183 the TGID pid. */
3184
3185 errno = 0;
4a6ed09b 3186 lwpid = my_waitpid (-1, &status, __WALL | WNOHANG);
0e5bf2a8 3187
9327494e
SM
3188 linux_nat_debug_printf ("waitpid(-1, ...) returned %d, %s",
3189 lwpid,
3190 errno ? safe_strerror (errno) : "ERRNO-OK");
b84876c2 3191
d6b0e80f
AC
3192 if (lwpid > 0)
3193 {
9327494e 3194 linux_nat_debug_printf ("waitpid %ld received %s",
8d06918f
SM
3195 (long) lwpid,
3196 status_to_str (status).c_str ());
d6b0e80f 3197
9c02b525 3198 linux_nat_filter_event (lwpid, status);
0e5bf2a8
PA
3199 /* Retry until nothing comes out of waitpid. A single
3200 SIGCHLD can indicate more than one child stopped. */
3201 continue;
d6b0e80f
AC
3202 }
3203
20ba1ce6
PA
3204 /* Now that we've pulled all events out of the kernel, resume
3205 LWPs that don't have an interesting event to report. */
3206 iterate_over_lwps (minus_one_ptid,
d3a70e03
TT
3207 [] (struct lwp_info *info)
3208 {
3209 return resume_stopped_resumed_lwps (info, minus_one_ptid);
3210 });
20ba1ce6
PA
3211
3212 /* ... and find an LWP with a status to report to the core, if
3213 any. */
d3a70e03 3214 lp = iterate_over_lwps (ptid, status_callback);
9c02b525
PA
3215 if (lp != NULL)
3216 break;
3217
0e5bf2a8
PA
3218 /* Check for zombie thread group leaders. Those can't be reaped
3219 until all other threads in the thread group are. */
3220 check_zombie_leaders ();
d6b0e80f 3221
0e5bf2a8
PA
3222 /* If there are no resumed children left, bail. We'd be stuck
3223 forever in the sigsuspend call below otherwise. */
d3a70e03 3224 if (iterate_over_lwps (ptid, resumed_callback) == NULL)
0e5bf2a8 3225 {
9327494e 3226 linux_nat_debug_printf ("exit (no resumed LWP)");
b84876c2 3227
183be222 3228 ourstatus->set_no_resumed ();
b84876c2 3229
0e5bf2a8
PA
3230 restore_child_signals_mask (&prev_mask);
3231 return minus_one_ptid;
d6b0e80f 3232 }
28736962 3233
0e5bf2a8
PA
3234 /* No interesting event to report to the core. */
3235
3236 if (target_options & TARGET_WNOHANG)
3237 {
b26b06dd 3238 linux_nat_debug_printf ("no interesting events found");
28736962 3239
183be222 3240 ourstatus->set_ignore ();
28736962
PA
3241 restore_child_signals_mask (&prev_mask);
3242 return minus_one_ptid;
3243 }
d6b0e80f
AC
3244
3245 /* We shouldn't end up here unless we want to try again. */
d90e17a7 3246 gdb_assert (lp == NULL);
0e5bf2a8
PA
3247
3248 /* Block until we get an event reported with SIGCHLD. */
9c3a5d93 3249 wait_for_signal ();
d6b0e80f
AC
3250 }
3251
d6b0e80f
AC
3252 gdb_assert (lp);
3253
ca2163eb
PA
3254 status = lp->status;
3255 lp->status = 0;
3256
fbea99ea 3257 if (!target_is_non_stop_p ())
4c28f408
PA
3258 {
3259 /* Now stop all other LWP's ... */
d3a70e03 3260 iterate_over_lwps (minus_one_ptid, stop_callback);
4c28f408
PA
3261
3262 /* ... and wait until all of them have reported back that
3263 they're no longer running. */
d3a70e03 3264 iterate_over_lwps (minus_one_ptid, stop_wait_callback);
9c02b525
PA
3265 }
3266
3267 /* If we're not waiting for a specific LWP, choose an event LWP from
3268 among those that have had events. Giving equal priority to all
3269 LWPs that have had events helps prevent starvation. */
d7e15655 3270 if (ptid == minus_one_ptid || ptid.is_pid ())
9c02b525
PA
3271 select_event_lwp (ptid, &lp, &status);
3272
3273 gdb_assert (lp != NULL);
3274
3275 /* Now that we've selected our final event LWP, un-adjust its PC if
faf09f01
PA
3276 it was a software breakpoint, and we can't reliably support the
3277 "stopped by software breakpoint" stop reason. */
3278 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3279 && !USE_SIGTRAP_SIGINFO)
9c02b525 3280 {
5b6d1e4f 3281 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
ac7936df 3282 struct gdbarch *gdbarch = regcache->arch ();
527a273a 3283 int decr_pc = gdbarch_decr_pc_after_break (gdbarch);
4c28f408 3284
9c02b525
PA
3285 if (decr_pc != 0)
3286 {
3287 CORE_ADDR pc;
d6b0e80f 3288
9c02b525
PA
3289 pc = regcache_read_pc (regcache);
3290 regcache_write_pc (regcache, pc + decr_pc);
3291 }
3292 }
e3e9f5a2 3293
9c02b525
PA
3294 /* We'll need this to determine whether to report a SIGSTOP as
3295 GDB_SIGNAL_0. Need to take a copy because resume_clear_callback
3296 clears it. */
3297 last_resume_kind = lp->last_resume_kind;
4b60df3d 3298
fbea99ea 3299 if (!target_is_non_stop_p ())
9c02b525 3300 {
e3e9f5a2
PA
3301 /* In all-stop, from the core's perspective, all LWPs are now
3302 stopped until a new resume action is sent over. */
d3a70e03 3303 iterate_over_lwps (minus_one_ptid, resume_clear_callback);
e3e9f5a2
PA
3304 }
3305 else
25289eb2 3306 {
d3a70e03 3307 resume_clear_callback (lp);
25289eb2 3308 }
d6b0e80f 3309
135340af 3310 if (linux_target->low_status_is_event (status))
d6b0e80f 3311 {
9327494e 3312 linux_nat_debug_printf ("trap ptid is %s.",
e53c95d4 3313 lp->ptid.to_string ().c_str ());
d6b0e80f 3314 }
d6b0e80f 3315
183be222 3316 if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
d6b0e80f
AC
3317 {
3318 *ourstatus = lp->waitstatus;
183be222 3319 lp->waitstatus.set_ignore ();
d6b0e80f
AC
3320 }
3321 else
7509b829 3322 *ourstatus = host_status_to_waitstatus (status);
d6b0e80f 3323
b26b06dd 3324 linux_nat_debug_printf ("event found");
b84876c2 3325
7feb7d06 3326 restore_child_signals_mask (&prev_mask);
1e225492 3327
4b60df3d 3328 if (last_resume_kind == resume_stop
183be222 3329 && ourstatus->kind () == TARGET_WAITKIND_STOPPED
25289eb2
PA
3330 && WSTOPSIG (status) == SIGSTOP)
3331 {
3332 /* This thread was requested to stop by GDB with target_stop,
3333 and it stopped cleanly, so report it as SIG0. The
3334 use of SIGSTOP is an implementation detail. */
183be222 3335 ourstatus->set_stopped (GDB_SIGNAL_0);
25289eb2
PA
3336 }
3337
183be222
SM
3338 if (ourstatus->kind () == TARGET_WAITKIND_EXITED
3339 || ourstatus->kind () == TARGET_WAITKIND_SIGNALLED)
1e225492
JK
3340 lp->core = -1;
3341 else
2e794194 3342 lp->core = linux_common_core_of_thread (lp->ptid);
1e225492 3343
183be222 3344 if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
aa01bd36
PA
3345 return filter_exit_event (lp, ourstatus);
3346
f973ed9c 3347 return lp->ptid;
d6b0e80f
AC
3348}
3349
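/* Illustrative sketch, not part of GDB: how a raw waitpid status of
   the kind converted by host_status_to_waitstatus above decodes with
   the standard <sys/wait.h> macros.  The function name is made up for
   the example, and the snippet is intentionally kept out of the build.  */
#if 0
/* Returns the stopping/terminating signal number, or the negated exit
   code for a normal exit, just to show the decoding.  */
static int
example_decode_wait_status (int status)
{
  if (WIFSTOPPED (status))
    return WSTOPSIG (status);		/* ptrace-stop, e.g. SIGTRAP.  */
  else if (WIFSIGNALED (status))
    return WTERMSIG (status);		/* killed by a signal.  */
  else if (WIFEXITED (status))
    return -WEXITSTATUS (status);	/* normal exit, exit code.  */
  return 0;
}
#endif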
e3e9f5a2
PA
3350/* Resume LWPs that are currently stopped without any pending status
3351 to report, but are resumed from the core's perspective. */
3352
3353static int
d3a70e03 3354resume_stopped_resumed_lwps (struct lwp_info *lp, const ptid_t wait_ptid)
e3e9f5a2 3355{
14ec4172
AB
3356 inferior *inf = find_inferior_ptid (linux_target, lp->ptid);
3357
8a9da63e 3358 if (!lp->stopped)
4dd63d48 3359 {
9327494e 3360 linux_nat_debug_printf ("NOT resuming LWP %s, not stopped",
e53c95d4 3361 lp->ptid.to_string ().c_str ());
4dd63d48
PA
3362 }
3363 else if (!lp->resumed)
3364 {
9327494e 3365 linux_nat_debug_printf ("NOT resuming LWP %s, not resumed",
e53c95d4 3366 lp->ptid.to_string ().c_str ());
4dd63d48
PA
3367 }
3368 else if (lwp_status_pending_p (lp))
3369 {
9327494e 3370 linux_nat_debug_printf ("NOT resuming LWP %s, has pending status",
e53c95d4 3371 lp->ptid.to_string ().c_str ());
4dd63d48 3372 }
8a9da63e
AB
3373 else if (inf->vfork_child != nullptr)
3374 {
3375 linux_nat_debug_printf ("NOT resuming LWP %s (vfork parent)",
3376 lp->ptid.to_string ().c_str ());
3377 }
4dd63d48 3378 else
e3e9f5a2 3379 {
5b6d1e4f 3380 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
ac7936df 3381 struct gdbarch *gdbarch = regcache->arch ();
336060f3 3382
a70b8144 3383 try
e3e9f5a2 3384 {
23f238d3
PA
3385 CORE_ADDR pc = regcache_read_pc (regcache);
3386 int leave_stopped = 0;
e3e9f5a2 3387
23f238d3
PA
3388 /* Don't bother if there's a breakpoint at PC that we'd hit
3389 immediately, and we're not waiting for this LWP. */
d3a70e03 3390 if (!lp->ptid.matches (wait_ptid))
23f238d3 3391 {
a01bda52 3392 if (breakpoint_inserted_here_p (regcache->aspace (), pc))
23f238d3
PA
3393 leave_stopped = 1;
3394 }
e3e9f5a2 3395
23f238d3
PA
3396 if (!leave_stopped)
3397 {
9327494e
SM
3398 linux_nat_debug_printf
3399 ("resuming stopped-resumed LWP %s at %s: step=%d",
e53c95d4 3400 lp->ptid.to_string ().c_str (), paddress (gdbarch, pc),
9327494e 3401 lp->step);
23f238d3
PA
3402
3403 linux_resume_one_lwp_throw (lp, lp->step, GDB_SIGNAL_0);
3404 }
3405 }
230d2906 3406 catch (const gdb_exception_error &ex)
23f238d3
PA
3407 {
3408 if (!check_ptrace_stopped_lwp_gone (lp))
eedc3f4f 3409 throw;
23f238d3 3410 }
e3e9f5a2
PA
3411 }
3412
3413 return 0;
3414}
3415
f6ac5f3d
PA
3416ptid_t
3417linux_nat_target::wait (ptid_t ptid, struct target_waitstatus *ourstatus,
b60cea74 3418 target_wait_flags target_options)
7feb7d06 3419{
b26b06dd
AB
3420 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
3421
7feb7d06
PA
3422 ptid_t event_ptid;
3423
e53c95d4 3424 linux_nat_debug_printf ("[%s], [%s]", ptid.to_string ().c_str (),
9327494e 3425 target_options_to_string (target_options).c_str ());
7feb7d06
PA
3426
3427 /* Flush the async file first. */
d9d41e78 3428 if (target_is_async_p ())
7feb7d06
PA
3429 async_file_flush ();
3430
e3e9f5a2
PA
3431 /* Resume LWPs that are currently stopped without any pending status
3432 to report, but are resumed from the core's perspective. LWPs get
3433 in this state if we find them stopping at a time we're not
3434 interested in reporting the event (target_wait on a
3435 specific process, for example, see linux_nat_wait_1), and
3436 meanwhile the event became uninteresting. Don't bother resuming
3437 LWPs we're not going to wait for if they'd stop immediately. */
fbea99ea 3438 if (target_is_non_stop_p ())
d3a70e03
TT
3439 iterate_over_lwps (minus_one_ptid,
3440 [=] (struct lwp_info *info)
3441 {
3442 return resume_stopped_resumed_lwps (info, ptid);
3443 });
e3e9f5a2 3444
f6ac5f3d 3445 event_ptid = linux_nat_wait_1 (ptid, ourstatus, target_options);
7feb7d06
PA
3446
3447 /* If we requested any event, and something came out, assume there
3448 may be more. If we requested a specific lwp or process, also
3449 assume there may be more. */
d9d41e78 3450 if (target_is_async_p ()
183be222
SM
3451 && ((ourstatus->kind () != TARGET_WAITKIND_IGNORE
3452 && ourstatus->kind () != TARGET_WAITKIND_NO_RESUMED)
d7e15655 3453 || ptid != minus_one_ptid))
7feb7d06
PA
3454 async_file_mark ();
3455
7feb7d06
PA
3456 return event_ptid;
3457}
3458
1d2736d4
PA
3459/* Kill one LWP. */
3460
3461static void
3462kill_one_lwp (pid_t pid)
d6b0e80f 3463{
ed731959
JK
3464 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3465
3466 errno = 0;
1d2736d4 3467 kill_lwp (pid, SIGKILL);
9327494e 3468
ed731959 3469 if (debug_linux_nat)
57745c90
PA
3470 {
3471 int save_errno = errno;
3472
9327494e
SM
3473 linux_nat_debug_printf
3474 ("kill (SIGKILL) %ld, 0, 0 (%s)", (long) pid,
3475 save_errno != 0 ? safe_strerror (save_errno) : "OK");
57745c90 3476 }
ed731959
JK
3477
3478 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3479
d6b0e80f 3480 errno = 0;
1d2736d4 3481 ptrace (PTRACE_KILL, pid, 0, 0);
d6b0e80f 3482 if (debug_linux_nat)
57745c90
PA
3483 {
3484 int save_errno = errno;
3485
9327494e
SM
3486 linux_nat_debug_printf
3487 ("PTRACE_KILL %ld, 0, 0 (%s)", (long) pid,
3488 save_errno ? safe_strerror (save_errno) : "OK");
57745c90 3489 }
d6b0e80f
AC
3490}
3491
1d2736d4
PA
3492/* Wait for an LWP to die. */
3493
3494static void
3495kill_wait_one_lwp (pid_t pid)
d6b0e80f 3496{
1d2736d4 3497 pid_t res;
d6b0e80f
AC
3498
3499 /* We must make sure that there are no pending events (delayed
3500 SIGSTOPs, pending SIGTRAPs, etc.), so that the current
3501 program doesn't interfere with any following debugging session. */
3502
d6b0e80f
AC
3503 do
3504 {
1d2736d4
PA
3505 res = my_waitpid (pid, NULL, __WALL);
3506 if (res != (pid_t) -1)
d6b0e80f 3507 {
9327494e
SM
3508 linux_nat_debug_printf ("wait %ld received unknown.", (long) pid);
3509
4a6ed09b
PA
3510 /* The Linux kernel sometimes fails to kill a thread
3511 completely after PTRACE_KILL; that goes from the stop
3512 point in do_fork out to the one in get_signal_to_deliver
3513 and waits again. So kill it again. */
1d2736d4 3514 kill_one_lwp (pid);
d6b0e80f
AC
3515 }
3516 }
1d2736d4
PA
3517 while (res == pid);
3518
3519 gdb_assert (res == -1 && errno == ECHILD);
3520}
3521
3522/* Callback for iterate_over_lwps. */
d6b0e80f 3523
1d2736d4 3524static int
d3a70e03 3525kill_callback (struct lwp_info *lp)
1d2736d4 3526{
e38504b3 3527 kill_one_lwp (lp->ptid.lwp ());
d6b0e80f
AC
3528 return 0;
3529}
3530
1d2736d4
PA
3531/* Callback for iterate_over_lwps. */
3532
3533static int
d3a70e03 3534kill_wait_callback (struct lwp_info *lp)
1d2736d4 3535{
e38504b3 3536 kill_wait_one_lwp (lp->ptid.lwp ());
1d2736d4
PA
3537 return 0;
3538}
3539
3540/* Kill the fork children of any threads of inferior INF that are
3541 stopped at a fork event. */
3542
3543static void
3544kill_unfollowed_fork_children (struct inferior *inf)
3545{
08036331
PA
3546 for (thread_info *thread : inf->non_exited_threads ())
3547 {
3548 struct target_waitstatus *ws = &thread->pending_follow;
1d2736d4 3549
183be222
SM
3550 if (ws->kind () == TARGET_WAITKIND_FORKED
3551 || ws->kind () == TARGET_WAITKIND_VFORKED)
08036331 3552 {
183be222 3553 ptid_t child_ptid = ws->child_ptid ();
08036331
PA
3554 int child_pid = child_ptid.pid ();
3555 int child_lwp = child_ptid.lwp ();
3556
3557 kill_one_lwp (child_lwp);
3558 kill_wait_one_lwp (child_lwp);
3559
3560 /* Let the arch-specific native code know this process is
3561 gone. */
3562 linux_target->low_forget_process (child_pid);
3563 }
3564 }
1d2736d4
PA
3565}
3566
f6ac5f3d
PA
3567void
3568linux_nat_target::kill ()
d6b0e80f 3569{
f973ed9c
DJ
3570 /* If we're stopped while forking and we haven't followed yet,
3571 kill the other task. We need to do this first because the
3572 parent will be sleeping if this is a vfork. */
1d2736d4 3573 kill_unfollowed_fork_children (current_inferior ());
f973ed9c
DJ
3574
3575 if (forks_exist_p ())
7feb7d06 3576 linux_fork_killall ();
f973ed9c
DJ
3577 else
3578 {
e99b03dc 3579 ptid_t ptid = ptid_t (inferior_ptid.pid ());
e0881a8e 3580
4c28f408 3581 /* Stop all threads before killing them, since ptrace requires
30baf67b 3582 that the thread is stopped to successfully PTRACE_KILL. */
d3a70e03 3583 iterate_over_lwps (ptid, stop_callback);
4c28f408
PA
3584 /* ... and wait until all of them have reported back that
3585 they're no longer running. */
d3a70e03 3586 iterate_over_lwps (ptid, stop_wait_callback);
4c28f408 3587
f973ed9c 3588 /* Kill all LWP's ... */
d3a70e03 3589 iterate_over_lwps (ptid, kill_callback);
f973ed9c
DJ
3590
3591 /* ... and wait until we've flushed all events. */
d3a70e03 3592 iterate_over_lwps (ptid, kill_wait_callback);
f973ed9c
DJ
3593 }
3594
bc1e6c81 3595 target_mourn_inferior (inferior_ptid);
d6b0e80f
AC
3596}
3597
f6ac5f3d
PA
3598void
3599linux_nat_target::mourn_inferior ()
d6b0e80f 3600{
b26b06dd
AB
3601 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
3602
e99b03dc 3603 int pid = inferior_ptid.pid ();
26cb8b7c
PA
3604
3605 purge_lwp_list (pid);
d6b0e80f 3606
8a89ddbd 3607 close_proc_mem_file (pid);
05c06f31 3608
f973ed9c 3609 if (! forks_exist_p ())
d90e17a7 3610 /* Normal case, no other forks available. */
f6ac5f3d 3611 inf_ptrace_target::mourn_inferior ();
f973ed9c
DJ
3612 else
3613 /* Multi-fork case. The current inferior_ptid has exited, but
3614 there are other viable forks to debug. Delete the exiting
3615 one and context-switch to the first available. */
3616 linux_fork_mourn_inferior ();
26cb8b7c
PA
3617
3618 /* Let the arch-specific native code know this process is gone. */
135340af 3619 linux_target->low_forget_process (pid);
d6b0e80f
AC
3620}
3621
5b009018
PA
3622/* Convert a native/host siginfo object, into/from the siginfo in the
3623 layout of the inferiors' architecture. DIRECTION is 0 to convert from the native layout to the inferior's layout, and 1 for the reverse. */
3624
3625static void
a5362b9a 3626siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
5b009018 3627{
135340af
PA
3628 /* If the low target didn't do anything, then just do a straight
3629 memcpy. */
3630 if (!linux_target->low_siginfo_fixup (siginfo, inf_siginfo, direction))
5b009018
PA
3631 {
3632 if (direction == 1)
a5362b9a 3633 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5b009018 3634 else
a5362b9a 3635 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5b009018
PA
3636 }
3637}
3638
9b409511 3639static enum target_xfer_status
7154e786 3640linux_xfer_siginfo (ptid_t ptid, enum target_object object,
dda83cd7 3641 const char *annex, gdb_byte *readbuf,
9b409511
YQ
3642 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3643 ULONGEST *xfered_len)
4aa995e1 3644{
a5362b9a
TS
3645 siginfo_t siginfo;
3646 gdb_byte inf_siginfo[sizeof (siginfo_t)];
4aa995e1
PA
3647
3648 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
3649 gdb_assert (readbuf || writebuf);
3650
4aa995e1 3651 if (offset > sizeof (siginfo))
2ed4b548 3652 return TARGET_XFER_E_IO;
4aa995e1 3653
7154e786 3654 if (!linux_nat_get_siginfo (ptid, &siginfo))
2ed4b548 3655 return TARGET_XFER_E_IO;
4aa995e1 3656
5b009018
PA
3657 /* When GDB is built as a 64-bit application, ptrace writes into
3658 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3659 inferior with a 64-bit GDB should look the same as debugging it
3660 with a 32-bit GDB, we need to convert it. GDB core always sees
3661 the converted layout, so any read/write will have to be done
3662 post-conversion. */
3663 siginfo_fixup (&siginfo, inf_siginfo, 0);
3664
4aa995e1
PA
3665 if (offset + len > sizeof (siginfo))
3666 len = sizeof (siginfo) - offset;
3667
3668 if (readbuf != NULL)
5b009018 3669 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
3670 else
3671 {
5b009018
PA
3672 memcpy (inf_siginfo + offset, writebuf, len);
3673
3674 /* Convert back to ptrace layout before flushing it out. */
3675 siginfo_fixup (&siginfo, inf_siginfo, 1);
3676
7154e786 3677 int pid = get_ptrace_pid (ptid);
4aa995e1
PA
3678 errno = 0;
3679 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3680 if (errno != 0)
2ed4b548 3681 return TARGET_XFER_E_IO;
4aa995e1
PA
3682 }
3683
9b409511
YQ
3684 *xfered_len = len;
3685 return TARGET_XFER_OK;
4aa995e1
PA
3686}
3687
9b409511 3688static enum target_xfer_status
f6ac5f3d
PA
3689linux_nat_xfer_osdata (enum target_object object,
3690 const char *annex, gdb_byte *readbuf,
3691 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3692 ULONGEST *xfered_len);
3693
f6ac5f3d 3694static enum target_xfer_status
f9f593dd
SM
3695linux_proc_xfer_memory_partial (int pid, gdb_byte *readbuf,
3696 const gdb_byte *writebuf, ULONGEST offset,
3697 LONGEST len, ULONGEST *xfered_len);
f6ac5f3d
PA
3698
3699enum target_xfer_status
3700linux_nat_target::xfer_partial (enum target_object object,
3701 const char *annex, gdb_byte *readbuf,
3702 const gdb_byte *writebuf,
3703 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
d6b0e80f 3704{
4aa995e1 3705 if (object == TARGET_OBJECT_SIGNAL_INFO)
7154e786 3706 return linux_xfer_siginfo (inferior_ptid, object, annex, readbuf, writebuf,
9b409511 3707 offset, len, xfered_len);
4aa995e1 3708
c35b1492
PA
3709 /* The target is connected but no live inferior is selected. Pass
3710 this request down to a lower stratum (e.g., the executable
3711 file). */
d7e15655 3712 if (object == TARGET_OBJECT_MEMORY && inferior_ptid == null_ptid)
9b409511 3713 return TARGET_XFER_EOF;
c35b1492 3714
f6ac5f3d
PA
3715 if (object == TARGET_OBJECT_AUXV)
3716 return memory_xfer_auxv (this, object, annex, readbuf, writebuf,
3717 offset, len, xfered_len);
3718
3719 if (object == TARGET_OBJECT_OSDATA)
3720 return linux_nat_xfer_osdata (object, annex, readbuf, writebuf,
3721 offset, len, xfered_len);
d6b0e80f 3722
f6ac5f3d
PA
3723 if (object == TARGET_OBJECT_MEMORY)
3724 {
05c06f31
PA
3725 /* GDB calculates all addresses in the largest possible address
3726 width. The address width must be masked before its final use
3727 by linux_proc_xfer_memory_partial.
3728
3729 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
99d9c3b9 3730 int addr_bit = gdbarch_addr_bit (current_inferior ()->arch ());
f6ac5f3d
PA
3731
3732 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
3733 offset &= ((ULONGEST) 1 << addr_bit) - 1;
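	  /* For example (illustrative): for a 32-bit inferior addr_bit is
	     32, the mask is 0xffffffff, and a sign-extended address such
	     as 0xffffffff80001000 is reduced to 0x80001000 here.  */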
f6ac5f3d 3734
dd09fe0d
KS
3735 /* If /proc/pid/mem is writable, don't fall back to ptrace. If
3736 the write via /proc/pid/mem fails because the inferior execed
3737 (and we haven't seen the exec event yet), a subsequent ptrace
3738 poke would incorrectly write memory to the post-exec address
3739 space, while the core was trying to write to the pre-exec
3740 address space. */
3741 if (proc_mem_file_is_writable ())
f9f593dd
SM
3742 return linux_proc_xfer_memory_partial (inferior_ptid.pid (), readbuf,
3743 writebuf, offset, len,
3744 xfered_len);
05c06f31 3745 }
f6ac5f3d
PA
3746
3747 return inf_ptrace_target::xfer_partial (object, annex, readbuf, writebuf,
3748 offset, len, xfered_len);
d6b0e80f
AC
3749}
3750
57810aa7 3751bool
f6ac5f3d 3752linux_nat_target::thread_alive (ptid_t ptid)
28439f5e 3753{
4a6ed09b
PA
3754 /* As long as a PTID is in the lwp list, consider it alive. */
3755 return find_lwp_pid (ptid) != NULL;
28439f5e
PA
3756}
3757
8a06aea7
PA
3758/* Implement the to_update_thread_list target method for this
3759 target. */
3760
f6ac5f3d
PA
3761void
3762linux_nat_target::update_thread_list ()
8a06aea7 3763{
4a6ed09b
PA
3764 /* We add/delete threads from the list as clone/exit events are
3765 processed, so just try deleting exited threads still in the
3766 thread list. */
3767 delete_exited_threads ();
a6904d5a
PA
3768
3769 /* Update the processor core that each lwp/thread was last seen
3770 running on. */
901b9821 3771 for (lwp_info *lwp : all_lwps ())
1ad3de98
PA
3772 {
3773 /* Avoid accessing /proc if the thread hasn't run since the last
3774 time we fetched the thread's core. Accessing /proc becomes
3775 noticeably expensive when we have thousands of LWPs. */
3776 if (lwp->core == -1)
3777 lwp->core = linux_common_core_of_thread (lwp->ptid);
3778 }
8a06aea7
PA
3779}
3780
a068643d 3781std::string
f6ac5f3d 3782linux_nat_target::pid_to_str (ptid_t ptid)
d6b0e80f 3783{
15a9e13e 3784 if (ptid.lwp_p ()
e38504b3 3785 && (ptid.pid () != ptid.lwp ()
e99b03dc 3786 || num_lwps (ptid.pid ()) > 1))
a068643d 3787 return string_printf ("LWP %ld", ptid.lwp ());
d6b0e80f
AC
3788
3789 return normal_pid_to_str (ptid);
3790}
3791
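/* Example for linux_nat_target::pid_to_str above (illustrative): in a
   multi-threaded inferior with pid 1234 and an additional lwp 1250,
   the threads print as "LWP 1234" and "LWP 1250"; a single-threaded
   inferior falls back to normal_pid_to_str, e.g. "process 1234".  */
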
f6ac5f3d
PA
3792const char *
3793linux_nat_target::thread_name (struct thread_info *thr)
4694da01 3794{
79efa585 3795 return linux_proc_tid_get_name (thr->ptid);
4694da01
TT
3796}
3797
dba24537
AC
3798/* Accepts an integer PID; Returns a string representing a file that
3799 can be opened to get the symbols for the child process. */
3800
0e90c441 3801const char *
f6ac5f3d 3802linux_nat_target::pid_to_exec_file (int pid)
dba24537 3803{
e0d86d2c 3804 return linux_proc_pid_to_exec_file (pid);
dba24537
AC
3805}
3806
8a89ddbd
PA
3807/* Object representing a /proc/PID/mem open file. We keep one such
3808 file open per inferior.
3809
3810 It might be tempting to think about only ever opening one file at
3811 most for all inferiors, closing/reopening the file as we access
3812 memory of different inferiors, to minimize number of file
3813 descriptors open, which can otherwise run into resource limits.
3814 However, that does not work correctly -- if the inferior execs and
3815 we haven't processed the exec event yet, and we opened a
3816 /proc/PID/mem file, we will get a mem file accessing the post-exec
3817 address space, thinking we're opening it for the pre-exec address
3818 space. That is dangerous as we can poke memory (e.g. clearing
3819 breakpoints) in the post-exec memory by mistake, corrupting the
3820 inferior. For that reason, we open the mem file as early as
3821 possible, right after spawning, forking or attaching to the
3822 inferior, when the inferior is stopped and thus before it has a
3823 chance of execing.
3824
3825 Note that after opening the file, even if the thread we opened it
3826 for subsequently exits, the open file is still usable for accessing
3827 memory. It's only when the whole process exits or execs that the
3828 file becomes invalid, at which point reads/writes return EOF. */
3829
3830class proc_mem_file
3831{
3832public:
3833 proc_mem_file (ptid_t ptid, int fd)
3834 : m_ptid (ptid), m_fd (fd)
3835 {
3836 gdb_assert (m_fd != -1);
3837 }
05c06f31 3838
8a89ddbd 3839 ~proc_mem_file ()
05c06f31 3840 {
89662f69 3841 linux_nat_debug_printf ("closing fd %d for /proc/%d/task/%ld/mem",
8a89ddbd
PA
3842 m_fd, m_ptid.pid (), m_ptid.lwp ());
3843 close (m_fd);
05c06f31 3844 }
05c06f31 3845
8a89ddbd
PA
3846 DISABLE_COPY_AND_ASSIGN (proc_mem_file);
3847
3848 int fd ()
3849 {
3850 return m_fd;
3851 }
3852
3853private:
3854 /* The LWP this file was opened for. Just for debugging
3855 purposes. */
3856 ptid_t m_ptid;
3857
3858 /* The file descriptor. */
3859 int m_fd = -1;
3860};
3861
3862/* The map between an inferior process id, and the open /proc/PID/mem
3863 file. This is stored in a map instead of in a per-inferior
3864 structure because we need to be able to access memory of processes
3865 which don't have a corresponding struct inferior object. E.g.,
3866 with "detach-on-fork on" (the default), and "follow-fork parent"
3867 (also default), we don't create an inferior for the fork child, but
3868 we still need to remove breakpoints from the fork child's
3869 memory. */
3870static std::unordered_map<int, proc_mem_file> proc_mem_file_map;
3871
3872/* Close the /proc/PID/mem file for PID. */
05c06f31
PA
3873
3874static void
8a89ddbd 3875close_proc_mem_file (pid_t pid)
dba24537 3876{
8a89ddbd 3877 proc_mem_file_map.erase (pid);
05c06f31 3878}
dba24537 3879
8a89ddbd
PA
3880/* Open the /proc/PID/mem file for the process (thread group) of PTID.
3881 We actually open /proc/PID/task/LWP/mem, as that's the LWP we know
3882 exists and is stopped right now. We prefer the
3883 /proc/PID/task/LWP/mem form over /proc/LWP/mem to avoid tid-reuse
3884 races, just in case this is ever called on an already-waited
3885 LWP. */
dba24537 3886
8a89ddbd
PA
3887static void
3888open_proc_mem_file (ptid_t ptid)
05c06f31 3889{
8a89ddbd
PA
3890 auto iter = proc_mem_file_map.find (ptid.pid ());
3891 gdb_assert (iter == proc_mem_file_map.end ());
dba24537 3892
8a89ddbd
PA
3893 char filename[64];
3894 xsnprintf (filename, sizeof filename,
3895 "/proc/%d/task/%ld/mem", ptid.pid (), ptid.lwp ());
3896
3897 int fd = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();
05c06f31 3898
8a89ddbd
PA
3899 if (fd == -1)
3900 {
3901 warning (_("opening /proc/PID/mem file for lwp %d.%ld failed: %s (%d)"),
3902 ptid.pid (), ptid.lwp (),
3903 safe_strerror (errno), errno);
3904 return;
05c06f31
PA
3905 }
3906
8a89ddbd
PA
3907 proc_mem_file_map.emplace (std::piecewise_construct,
3908 std::forward_as_tuple (ptid.pid ()),
3909 std::forward_as_tuple (ptid, fd));
3910
9221923c 3911 linux_nat_debug_printf ("opened fd %d for lwp %d.%ld",
8a89ddbd
PA
3912 fd, ptid.pid (), ptid.lwp ());
3913}
3914
1bcb0708
PA
3915/* Helper for linux_proc_xfer_memory_partial and
3916 proc_mem_file_is_writable. FD is the already opened /proc/pid/mem
3917 file, and PID is the pid of the corresponding process. The rest of
3918 the arguments are like linux_proc_xfer_memory_partial's. */
8a89ddbd
PA
3919
3920static enum target_xfer_status
1bcb0708
PA
3921linux_proc_xfer_memory_partial_fd (int fd, int pid,
3922 gdb_byte *readbuf, const gdb_byte *writebuf,
3923 ULONGEST offset, LONGEST len,
3924 ULONGEST *xfered_len)
8a89ddbd
PA
3925{
3926 ssize_t ret;
3927
8a89ddbd 3928 gdb_assert (fd != -1);
dba24537 3929
31a56a22
PA
3930 /* Use pread64/pwrite64 if available, since they save a syscall and
3931 can handle 64-bit offsets even on 32-bit platforms (for instance,
3932 SPARC debugging a SPARC64 application). But only use them if the
3933 offset isn't so high that when cast to off_t it'd be negative, as
3934 seen on SPARC64. pread64/pwrite64 outright reject such offsets.
3935 lseek does not. */
dba24537 3936#ifdef HAVE_PREAD64
31a56a22
PA
3937 if ((off_t) offset >= 0)
3938 ret = (readbuf != nullptr
3939 ? pread64 (fd, readbuf, len, offset)
3940 : pwrite64 (fd, writebuf, len, offset));
3941 else
dba24537 3942#endif
31a56a22
PA
3943 {
3944 ret = lseek (fd, offset, SEEK_SET);
3945 if (ret != -1)
3946 ret = (readbuf != nullptr
3947 ? read (fd, readbuf, len)
3948 : write (fd, writebuf, len));
3949 }
dba24537 3950
05c06f31
PA
3951 if (ret == -1)
3952 {
9221923c 3953 linux_nat_debug_printf ("accessing fd %d for pid %d failed: %s (%d)",
1bcb0708 3954 fd, pid, safe_strerror (errno), errno);
284b6bb5 3955 return TARGET_XFER_E_IO;
05c06f31
PA
3956 }
3957 else if (ret == 0)
3958 {
8a89ddbd
PA
3959 /* EOF means the address space is gone, the whole process exited
3960 or execed. */
9221923c 3961 linux_nat_debug_printf ("accessing fd %d for pid %d got EOF",
1bcb0708 3962 fd, pid);
05c06f31
PA
3963 return TARGET_XFER_EOF;
3964 }
9b409511
YQ
3965 else
3966 {
8a89ddbd 3967 *xfered_len = ret;
9b409511
YQ
3968 return TARGET_XFER_OK;
3969 }
05c06f31 3970}
efcbbd14 3971
1bcb0708
PA
3972/* Implement the to_xfer_partial target method using /proc/PID/mem.
3973 Because we can use a single read/write call, this can be much more
3974 efficient than banging away at PTRACE_PEEKTEXT. Also, unlike
3975 PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running
3976 threads. */
3977
3978static enum target_xfer_status
f9f593dd
SM
3979linux_proc_xfer_memory_partial (int pid, gdb_byte *readbuf,
3980 const gdb_byte *writebuf, ULONGEST offset,
3981 LONGEST len, ULONGEST *xfered_len)
1bcb0708 3982{
1bcb0708
PA
3983 auto iter = proc_mem_file_map.find (pid);
3984 if (iter == proc_mem_file_map.end ())
3985 return TARGET_XFER_EOF;
3986
3987 int fd = iter->second.fd ();
3988
3989 return linux_proc_xfer_memory_partial_fd (fd, pid, readbuf, writebuf, offset,
3990 len, xfered_len);
3991}
3992
3993/* Check whether /proc/pid/mem is writable in the current kernel, and
3994 return true if so. It wasn't writable before Linux 2.6.39, but
3995 there's no way to know whether the feature was backported to older
3996 kernels. So we check to see if it works. The result is cached,
3bfdcabb 3997 and this is guaranteed to be called once early during inferior
9dff6a5d
PA
3998 startup, so that any warning is printed out consistently between
3999 GDB invocations. Note we don't call it during GDB startup instead
4000 though, because then we might warn with e.g. just "gdb --version"
4001 on sandboxed systems. See PR gdb/29907. */
1bcb0708
PA
4002
4003static bool
4004proc_mem_file_is_writable ()
4005{
4006 static gdb::optional<bool> writable;
4007
4008 if (writable.has_value ())
4009 return *writable;
4010
4011 writable.emplace (false);
4012
4013 /* We check whether /proc/pid/mem is writable by trying to write to
4014 one of our variables via /proc/self/mem. */
4015
4016 int fd = gdb_open_cloexec ("/proc/self/mem", O_RDWR | O_LARGEFILE, 0).release ();
4017
4018 if (fd == -1)
4019 {
4020 warning (_("opening /proc/self/mem file failed: %s (%d)"),
4021 safe_strerror (errno), errno);
4022 return *writable;
4023 }
4024
4025 SCOPE_EXIT { close (fd); };
4026
4027 /* This is the variable we try to write to. Note OFFSET below. */
4028 volatile gdb_byte test_var = 0;
4029
4030 gdb_byte writebuf[] = {0x55};
4031 ULONGEST offset = (uintptr_t) &test_var;
4032 ULONGEST xfered_len;
4033
4034 enum target_xfer_status res
4035 = linux_proc_xfer_memory_partial_fd (fd, getpid (), nullptr, writebuf,
4036 offset, 1, &xfered_len);
4037
4038 if (res == TARGET_XFER_OK)
4039 {
4040 gdb_assert (xfered_len == 1);
4041 gdb_assert (test_var == 0x55);
4042 /* Success. */
4043 *writable = true;
4044 }
4045
4046 return *writable;
4047}
4048
dba24537
AC
4049/* Parse LINE as a signal set and add its set bits to SIGS. */
4050
4051static void
4052add_line_to_sigset (const char *line, sigset_t *sigs)
4053{
4054 int len = strlen (line) - 1;
4055 const char *p;
4056 int signum;
4057
4058 if (line[len] != '\n')
8a3fe4f8 4059 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4060
4061 p = line;
4062 signum = len * 4;
4063 while (len-- > 0)
4064 {
4065 int digit;
4066
4067 if (*p >= '0' && *p <= '9')
4068 digit = *p - '0';
4069 else if (*p >= 'a' && *p <= 'f')
4070 digit = *p - 'a' + 10;
4071 else
8a3fe4f8 4072 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4073
4074 signum -= 4;
4075
4076 if (digit & 1)
4077 sigaddset (sigs, signum + 1);
4078 if (digit & 2)
4079 sigaddset (sigs, signum + 2);
4080 if (digit & 4)
4081 sigaddset (sigs, signum + 3);
4082 if (digit & 8)
4083 sigaddset (sigs, signum + 4);
4084
4085 p++;
4086 }
4087}
4088
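/* Worked example for add_line_to_sigset above (illustrative): the
   /proc status lines are hexadecimal masks, read right to left, four
   signals per hex digit, with the rightmost digit covering signals
   1-4.  For a 64-bit mask of "0000000000004002", the trailing '2'
   sets signal 2 (SIGINT), and the '4' in the fourth digit from the
   right sets signal 15 (SIGTERM).  */
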
4089/* Find process PID's pending signals from /proc/pid/status and set
4090 SIGS to match. */
4091
4092void
3e43a32a
MS
4093linux_proc_pending_signals (int pid, sigset_t *pending,
4094 sigset_t *blocked, sigset_t *ignored)
dba24537 4095{
d8d2a3ee 4096 char buffer[PATH_MAX], fname[PATH_MAX];
dba24537
AC
4097
4098 sigemptyset (pending);
4099 sigemptyset (blocked);
4100 sigemptyset (ignored);
cde33bf1 4101 xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
d419f42d 4102 gdb_file_up procfile = gdb_fopen_cloexec (fname, "r");
dba24537 4103 if (procfile == NULL)
8a3fe4f8 4104 error (_("Could not open %s"), fname);
dba24537 4105
d419f42d 4106 while (fgets (buffer, PATH_MAX, procfile.get ()) != NULL)
dba24537
AC
4107 {
4108 /* Normal queued signals are on the SigPnd line in the status
4109 file. However, 2.6 kernels also have a "shared" pending
4110 queue for delivering signals to a thread group, so check for
4111 a ShdPnd line also.
4112
4113 Unfortunately some Red Hat kernels include the shared pending
4114 queue but not the ShdPnd status field. */
4115
61012eef 4116 if (startswith (buffer, "SigPnd:\t"))
dba24537 4117 add_line_to_sigset (buffer + 8, pending);
61012eef 4118 else if (startswith (buffer, "ShdPnd:\t"))
dba24537 4119 add_line_to_sigset (buffer + 8, pending);
61012eef 4120 else if (startswith (buffer, "SigBlk:\t"))
dba24537 4121 add_line_to_sigset (buffer + 8, blocked);
61012eef 4122 else if (startswith (buffer, "SigIgn:\t"))
dba24537
AC
4123 add_line_to_sigset (buffer + 8, ignored);
4124 }
dba24537
AC
4125}
4126
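/* Usage sketch for linux_proc_pending_signals above (illustrative,
   not part of GDB; the helper name is made up).  A caller passes
   three signal sets and then tests membership with sigismember.  */
#if 0
static int
example_sigint_pending_p (int pid)
{
  sigset_t pending, blocked, ignored;

  linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
  return sigismember (&pending, SIGINT);
}
#endif
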
9b409511 4127static enum target_xfer_status
f6ac5f3d 4128linux_nat_xfer_osdata (enum target_object object,
e0881a8e 4129 const char *annex, gdb_byte *readbuf,
9b409511
YQ
4130 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4131 ULONGEST *xfered_len)
07e059b5 4132{
07e059b5
VP
4133 gdb_assert (object == TARGET_OBJECT_OSDATA);
4134
9b409511
YQ
4135 *xfered_len = linux_common_xfer_osdata (annex, readbuf, offset, len);
4136 if (*xfered_len == 0)
4137 return TARGET_XFER_EOF;
4138 else
4139 return TARGET_XFER_OK;
07e059b5
VP
4140}
4141
f6ac5f3d
PA
4142std::vector<static_tracepoint_marker>
4143linux_nat_target::static_tracepoint_markers_by_strid (const char *strid)
5808517f
YQ
4144{
4145 char s[IPA_CMD_BUF_SIZE];
e99b03dc 4146 int pid = inferior_ptid.pid ();
5d9310c4 4147 std::vector<static_tracepoint_marker> markers;
256642e8 4148 const char *p = s;
184ea2f7 4149 ptid_t ptid = ptid_t (pid, 0);
5d9310c4 4150 static_tracepoint_marker marker;
5808517f
YQ
4151
4152 /* Pause all */
4153 target_stop (ptid);
4154
81aa19c3 4155 strcpy (s, "qTfSTM");
42476b70 4156 agent_run_command (pid, s, strlen (s) + 1);
5808517f 4157
1db93f14
TT
4158 /* Unpause all. */
4159 SCOPE_EXIT { target_continue_no_signal (ptid); };
5808517f
YQ
4160
4161 while (*p++ == 'm')
4162 {
5808517f
YQ
4163 do
4164 {
5d9310c4 4165 parse_static_tracepoint_marker_definition (p, &p, &marker);
5808517f 4166
5d9310c4
SM
4167 if (strid == NULL || marker.str_id == strid)
4168 markers.push_back (std::move (marker));
5808517f
YQ
4169 }
4170 while (*p++ == ','); /* comma-separated list */
4171
81aa19c3 4172 strcpy (s, "qTsSTM");
42476b70 4173 agent_run_command (pid, s, strlen (s) + 1);
5808517f
YQ
4174 p = s;
4175 }
4176
5808517f
YQ
4177 return markers;
4178}
4179
b84876c2
PA
4180/* target_can_async_p implementation. */
4181
57810aa7 4182bool
f6ac5f3d 4183linux_nat_target::can_async_p ()
b84876c2 4184{
fce6cd34
AB
4185 /* This flag should be checked in the common target.c code. */
4186 gdb_assert (target_async_permitted);
4187
4188 /* Otherwise, this target is always able to support async mode. */
4189 return true;
b84876c2
PA
4190}
4191
57810aa7 4192bool
f6ac5f3d 4193linux_nat_target::supports_non_stop ()
9908b566 4194{
f80c8ec4 4195 return true;
9908b566
VP
4196}
4197
fbea99ea
PA
4198/* to_always_non_stop_p implementation. */
4199
57810aa7 4200bool
f6ac5f3d 4201linux_nat_target::always_non_stop_p ()
fbea99ea 4202{
f80c8ec4 4203 return true;
fbea99ea
PA
4204}
4205
57810aa7 4206bool
f6ac5f3d 4207linux_nat_target::supports_multi_process ()
d90e17a7 4208{
aee91db3 4209 return true;
d90e17a7
PA
4210}
4211
57810aa7 4212bool
f6ac5f3d 4213linux_nat_target::supports_disable_randomization ()
03583c20 4214{
f80c8ec4 4215 return true;
03583c20
UW
4216}
4217
7feb7d06
PA
4218/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4219 it lets us notice when any child changes state and notify the
4220 event loop; and it allows us to use sigsuspend in linux_nat_wait_1
4221 above to wait for the arrival of a SIGCHLD. */
4222
b84876c2 4223static void
7feb7d06 4224sigchld_handler (int signo)
b84876c2 4225{
7feb7d06
PA
4226 int old_errno = errno;
4227
01124a23 4228 if (debug_linux_nat)
da5bd37e 4229 gdb_stdlog->write_async_safe ("sigchld\n", sizeof ("sigchld\n") - 1);
7feb7d06 4230
b146ba14
JB
4231 if (signo == SIGCHLD)
4232 {
4233 /* Let the event loop know that there are events to handle. */
4234 linux_nat_target::async_file_mark_if_open ();
4235 }
7feb7d06
PA
4236
4237 errno = old_errno;
4238}
4239
4240/* Callback registered with the target events file descriptor. */
4241
4242static void
4243handle_target_event (int error, gdb_client_data client_data)
4244{
b1a35af2 4245 inferior_event_handler (INF_REG_EVENT);
7feb7d06
PA
4246}
4247
b84876c2
PA
4248/* target_async implementation. */
4249
f6ac5f3d 4250void
4a570176 4251linux_nat_target::async (bool enable)
b84876c2 4252{
4a570176 4253 if (enable == is_async_p ())
b146ba14
JB
4254 return;
4255
4256 /* Block child signals while we create/destroy the pipe, as their
4257 handler writes to it. */
4258 gdb::block_signals blocker;
4259
6a3753b3 4260 if (enable)
b84876c2 4261 {
b146ba14 4262 if (!async_file_open ())
f34652de 4263 internal_error ("creating event pipe failed.");
b146ba14
JB
4264
4265 add_file_handler (async_wait_fd (), handle_target_event, NULL,
4266 "linux-nat");
4267
4268 /* There may be pending events to handle. Tell the event loop
4269 to poll them. */
4270 async_file_mark ();
b84876c2
PA
4271 }
4272 else
4273 {
b146ba14
JB
4274 delete_file_handler (async_wait_fd ());
4275 async_file_close ();
b84876c2 4276 }
b84876c2
PA
4277}
4278
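/* Background sketch (illustrative, not GDB's implementation): the
   async_file_* helpers used above boil down to the classic
   "self-pipe" pattern.  A minimal standalone version, assuming only
   POSIX pipe(2)/write(2), looks roughly like this; the event loop
   polls the read end, and the signal handler only performs an
   async-signal-safe write of a single byte.  */
#if 0
static int example_self_pipe[2];	/* [0] = read end, [1] = write end.  */

static void
example_sigchld_notify (int signo)
{
  int saved_errno = errno;
  char byte = 0;

  /* write is async-signal-safe; this wakes whoever polls the read end.  */
  (void) write (example_self_pipe[1], &byte, 1);
  errno = saved_errno;
}

static void
example_setup (void)
{
  pipe (example_self_pipe);	/* Error checking omitted.  */
  signal (SIGCHLD, example_sigchld_notify);
  /* ... register example_self_pipe[0] with the event loop ... */
}
#endif
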
a493e3e2 4279/* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
252fbfc8
PA
4280 event came out. */
4281
4c28f408 4282static int
d3a70e03 4283linux_nat_stop_lwp (struct lwp_info *lwp)
4c28f408 4284{
d90e17a7 4285 if (!lwp->stopped)
252fbfc8 4286 {
9327494e 4287 linux_nat_debug_printf ("running -> suspending %s",
e53c95d4 4288 lwp->ptid.to_string ().c_str ());
252fbfc8 4289
252fbfc8 4290
25289eb2
PA
4291 if (lwp->last_resume_kind == resume_stop)
4292 {
9327494e
SM
4293 linux_nat_debug_printf ("already stopping LWP %ld at GDB's request",
4294 lwp->ptid.lwp ());
25289eb2
PA
4295 return 0;
4296 }
252fbfc8 4297
d3a70e03 4298 stop_callback (lwp);
25289eb2 4299 lwp->last_resume_kind = resume_stop;
d90e17a7
PA
4300 }
4301 else
4302 {
4303 /* Already known to be stopped; do nothing. */
252fbfc8 4304
d90e17a7
PA
4305 if (debug_linux_nat)
4306 {
9213a6d7 4307 if (linux_target->find_thread (lwp->ptid)->stop_requested)
9327494e 4308 linux_nat_debug_printf ("already stopped/stop_requested %s",
e53c95d4 4309 lwp->ptid.to_string ().c_str ());
d90e17a7 4310 else
9327494e 4311 linux_nat_debug_printf ("already stopped/no stop_requested yet %s",
e53c95d4 4312 lwp->ptid.to_string ().c_str ());
252fbfc8
PA
4313 }
4314 }
4c28f408
PA
4315 return 0;
4316}
4317
f6ac5f3d
PA
4318void
4319linux_nat_target::stop (ptid_t ptid)
4c28f408 4320{
b6e52a0b 4321 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
d3a70e03 4322 iterate_over_lwps (ptid, linux_nat_stop_lwp);
bfedc46a
PA
4323}
4324
c0694254
PA
4325/* When requests are passed down from the linux-nat layer to the
4326 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
4327 used. The address space pointer is stored in the inferior object,
4328 but the common code that is passed such ptid can't tell whether
4329 lwpid is a "main" process id or not (it assumes so). We reverse
4330 look up the "main" process id from the lwp here. */
4331
f6ac5f3d
PA
4332struct address_space *
4333linux_nat_target::thread_address_space (ptid_t ptid)
c0694254
PA
4334{
4335 struct lwp_info *lwp;
4336 struct inferior *inf;
4337 int pid;
4338
e38504b3 4339 if (ptid.lwp () == 0)
c0694254
PA
4340 {
4341 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
4342 tgid. */
4343 lwp = find_lwp_pid (ptid);
e99b03dc 4344 pid = lwp->ptid.pid ();
c0694254
PA
4345 }
4346 else
4347 {
4348 /* A (pid,lwpid,0) ptid. */
e99b03dc 4349 pid = ptid.pid ();
c0694254
PA
4350 }
4351
5b6d1e4f 4352 inf = find_inferior_pid (this, pid);
c0694254
PA
4353 gdb_assert (inf != NULL);
4354 return inf->aspace;
4355}
4356
dc146f7c
VP
4357/* Return the cached value of the processor core for thread PTID. */
4358
f6ac5f3d
PA
4359int
4360linux_nat_target::core_of_thread (ptid_t ptid)
dc146f7c
VP
4361{
4362 struct lwp_info *info = find_lwp_pid (ptid);
e0881a8e 4363
dc146f7c
VP
4364 if (info)
4365 return info->core;
4366 return -1;
4367}
4368
7a6a1731
GB
4369/* Implementation of to_filesystem_is_local. */
4370
57810aa7 4371bool
f6ac5f3d 4372linux_nat_target::filesystem_is_local ()
7a6a1731
GB
4373{
4374 struct inferior *inf = current_inferior ();
4375
4376 if (inf->fake_pid_p || inf->pid == 0)
57810aa7 4377 return true;
7a6a1731
GB
4378
4379 return linux_ns_same (inf->pid, LINUX_NS_MNT);
4380}
4381
4382/* Convert the INF argument passed to a to_fileio_* method
4383 to a process ID suitable for passing to its corresponding
4384 linux_mntns_* function. If INF is non-NULL then the
4385 caller is requesting the filesystem seen by INF. If INF
4386 is NULL then the caller is requesting the filesystem seen
4387 by GDB itself. We fall back to GDB's filesystem in the case
4388 that INF is non-NULL but its PID is unknown. */
4389
4390static pid_t
4391linux_nat_fileio_pid_of (struct inferior *inf)
4392{
4393 if (inf == NULL || inf->fake_pid_p || inf->pid == 0)
4394 return getpid ();
4395 else
4396 return inf->pid;
4397}
4398
4399/* Implementation of to_fileio_open. */
4400
f6ac5f3d
PA
4401int
4402linux_nat_target::fileio_open (struct inferior *inf, const char *filename,
4403 int flags, int mode, int warn_if_slow,
b872057a 4404 fileio_error *target_errno)
7a6a1731
GB
4405{
4406 int nat_flags;
4407 mode_t nat_mode;
4408 int fd;
4409
4410 if (fileio_to_host_openflags (flags, &nat_flags) == -1
4411 || fileio_to_host_mode (mode, &nat_mode) == -1)
4412 {
4413 *target_errno = FILEIO_EINVAL;
4414 return -1;
4415 }
4416
4417 fd = linux_mntns_open_cloexec (linux_nat_fileio_pid_of (inf),
4418 filename, nat_flags, nat_mode);
4419 if (fd == -1)
4420 *target_errno = host_to_fileio_error (errno);
4421
4422 return fd;
4423}
4424
4425/* Implementation of to_fileio_readlink. */
4426
f6ac5f3d
PA
4427gdb::optional<std::string>
4428linux_nat_target::fileio_readlink (struct inferior *inf, const char *filename,
b872057a 4429 fileio_error *target_errno)
7a6a1731
GB
4430{
4431 char buf[PATH_MAX];
4432 int len;
7a6a1731
GB
4433
4434 len = linux_mntns_readlink (linux_nat_fileio_pid_of (inf),
4435 filename, buf, sizeof (buf));
4436 if (len < 0)
4437 {
4438 *target_errno = host_to_fileio_error (errno);
e0d3522b 4439 return {};
7a6a1731
GB
4440 }
4441
e0d3522b 4442 return std::string (buf, len);
7a6a1731
GB
4443}
4444
4445/* Implementation of to_fileio_unlink. */
4446
f6ac5f3d
PA
4447int
4448linux_nat_target::fileio_unlink (struct inferior *inf, const char *filename,
b872057a 4449 fileio_error *target_errno)
7a6a1731
GB
4450{
4451 int ret;
4452
4453 ret = linux_mntns_unlink (linux_nat_fileio_pid_of (inf),
4454 filename);
4455 if (ret == -1)
4456 *target_errno = host_to_fileio_error (errno);
4457
4458 return ret;
4459}
4460
aa01bd36
PA
4461/* Implementation of the to_thread_events method. */
4462
f6ac5f3d
PA
4463void
4464linux_nat_target::thread_events (int enable)
aa01bd36
PA
4465{
4466 report_thread_events = enable;
4467}
4468
f6ac5f3d
PA
4469linux_nat_target::linux_nat_target ()
4470{
f973ed9c
DJ
4471 /* We don't change the stratum; this target will sit at
4472 process_stratum and thread_db will sit at thread_stratum. This
4473 is a little strange, since this is a multi-threaded-capable
4474 target, but we want to be on the stack below thread_db, and we
4475 also want to be used for single-threaded processes. */
f973ed9c
DJ
4476}
4477
f865ee35
JK
4478/* See linux-nat.h. */
4479
ef632b4b 4480bool
f865ee35 4481linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
9f0bdab8 4482{
0acd1110 4483 int pid = get_ptrace_pid (ptid);
7cc662bc 4484 return ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo) == 0;
9f0bdab8
DJ
4485}
4486
7b669087
GB
4487/* See nat/linux-nat.h. */
4488
4489ptid_t
4490current_lwp_ptid (void)
4491{
15a9e13e 4492 gdb_assert (inferior_ptid.lwp_p ());
7b669087
GB
4493 return inferior_ptid;
4494}
4495
6c265988 4496void _initialize_linux_nat ();
d6b0e80f 4497void
6c265988 4498_initialize_linux_nat ()
d6b0e80f 4499{
8864ef42 4500 add_setshow_boolean_cmd ("linux-nat", class_maintenance,
b6e52a0b
AB
4501 &debug_linux_nat, _("\
4502Set debugging of GNU/Linux native target."), _(" \
4503Show debugging of GNU/Linux native target."), _(" \
4504When on, print debug messages relating to the GNU/Linux native target."),
4505 nullptr,
4506 show_debug_linux_nat,
4507 &setdebuglist, &showdebuglist);
b84876c2 4508
7a6a1731
GB
4509 add_setshow_boolean_cmd ("linux-namespaces", class_maintenance,
4510 &debug_linux_namespaces, _("\
4511Set debugging of GNU/Linux namespaces module."), _("\
4512Show debugging of GNU/Linux namespaces module."), _("\
4513Enables printf debugging output."),
4514 NULL,
4515 NULL,
4516 &setdebuglist, &showdebuglist);
4517
7feb7d06
PA
4518 /* Install a SIGCHLD handler. */
4519 sigchld_action.sa_handler = sigchld_handler;
4520 sigemptyset (&sigchld_action.sa_mask);
4521 sigchld_action.sa_flags = SA_RESTART;
b84876c2
PA
4522
4523 /* Make it the default. */
7feb7d06 4524 sigaction (SIGCHLD, &sigchld_action, NULL);
d6b0e80f
AC
4525
4526 /* Make sure we don't block SIGCHLD during a sigsuspend. */
21987b9c 4527 gdb_sigmask (SIG_SETMASK, NULL, &suspend_mask);
d6b0e80f
AC
4528 sigdelset (&suspend_mask, SIGCHLD);
4529
7feb7d06 4530 sigemptyset (&blocked_mask);
774113b0
PA
4531
4532 lwp_lwpid_htab_create ();
d6b0e80f
AC
4533}
4534\f
4535
4536/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4537 the GNU/Linux Threads library and therefore doesn't really belong
4538 here. */
4539
089436f7
TV
4540/* NPTL reserves the first two RT signals, but does not provide any
4541 way for the debugger to query the signal numbers - fortunately
4542 they don't change. */
4543static int lin_thread_signals[] = { __SIGRTMIN, __SIGRTMIN + 1 };
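/* Illustrative note: with glibc, __SIGRTMIN is typically 32, so the
   two reserved NPTL signals are normally 32 and 33 (the exact values
   are libc-specific).  */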
d6b0e80f 4544
089436f7
TV
4545/* See linux-nat.h. */
4546
4547unsigned int
4548lin_thread_get_thread_signal_num (void)
d6b0e80f 4549{
089436f7
TV
4550 return sizeof (lin_thread_signals) / sizeof (lin_thread_signals[0]);
4551}
d6b0e80f 4552
089436f7
TV
4553/* See linux-nat.h. */
4554
4555int
4556lin_thread_get_thread_signal (unsigned int i)
4557{
4558 gdb_assert (i < lin_thread_get_thread_signal_num ());
4559 return lin_thread_signals[i];
d6b0e80f 4560}