/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001-2022 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "inferior.h"
#include "infrun.h"
#include "target.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdbsupport/gdb_wait.h"
#include <unistd.h>
#include <sys/syscall.h>
#include "nat/gdb_ptrace.h"
#include "linux-nat.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "gdbcmd.h"
#include "regcache.h"
#include "regset.h"
#include "inf-child.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/procfs.h>		/* for elf_gregset etc.  */
#include "elf-bfd.h"		/* for elfcore_write_* */
#include "gregset.h"		/* for gregset */
#include "gdbcore.h"		/* for get_exec_file */
#include <ctype.h>		/* for isdigit */
#include <sys/stat.h>		/* for struct stat */
#include <fcntl.h>		/* for O_RDONLY */
#include "inf-loop.h"
#include "gdbsupport/event-loop.h"
#include "event-top.h"
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include "xml-support.h"
#include <sys/vfs.h>
#include "solib.h"
#include "nat/linux-osdata.h"
#include "linux-tdep.h"
#include "symfile.h"
#include "gdbsupport/agent.h"
#include "tracepoint.h"
#include "gdbsupport/buffer.h"
#include "target-descriptions.h"
#include "gdbsupport/filestuff.h"
#include "objfiles.h"
#include "nat/linux-namespaces.h"
#include "gdbsupport/block-signals.h"
#include "gdbsupport/fileio.h"
#include "gdbsupport/scope-exit.h"
#include "gdbsupport/gdb-sigmask.h"
#include "gdbsupport/common-debug.h"
#include <unordered_map>
/* This comment documents high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid,
passing the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid is not quite good:

- If the thread group leader exits while other threads in the thread
  group still exist, waitpid(TGID, ...) hangs.  That waitpid won't
  return an exit status until the other threads in the group are
  reaped.

- When a non-leader thread execs, that thread just vanishes without
  reporting an exit (so we'd hang if we waited for it explicitly in
  that case).  The exec event is instead reported to the TGID pid.

The solution is to always use -1 and WNOHANG, together with
sigsuspend.

First, we use non-blocking waitpid to check for events.  If nothing is
found, we use sigsuspend to wait for SIGCHLD.  When SIGCHLD arrives,
it means something happened to a child process.  As soon as we know
there's an event, we get back to calling nonblocking waitpid.

Note that SIGCHLD should be blocked between waitpid and sigsuspend
calls, so that we don't miss a signal.  If SIGCHLD arrives in between,
when it's blocked, the signal becomes pending and sigsuspend
immediately notices it and returns.

Waiting for events in async mode (TARGET_WNOHANG)
=================================================

In async mode, GDB should always be ready to handle both user input
and target events, so neither blocking waitpid nor sigsuspend are
viable options.  Instead, we should asynchronously notify the GDB main
event loop whenever there's an unprocessed event from the target.  We
detect asynchronous target events by handling SIGCHLD signals.  To
notify the event loop about target events, an event pipe is used
--- the pipe is registered as a waitable event source in the event loop,
the event loop select/poll's on the read end of this pipe (as well as on
other event sources, e.g., stdin), and the SIGCHLD handler marks the
event pipe to raise an event.  This is more portable than relying on
pselect/ppoll, since on kernels that lack those syscalls, libc
emulates them with select/poll+sigprocmask, and that is racy
(a.k.a. plain broken).

Obviously, if we fail to notify the event loop when there's a target
event, it's bad.  OTOH, if we notify the event loop when there's no
event from the target, linux_nat_wait will detect that there's no real
event to report, and return an event of type TARGET_WAITKIND_IGNORE.
This is mostly harmless, but it will waste time and is better avoided.

The main design point is that every time GDB is outside linux-nat.c,
we have a SIGCHLD handler installed that is called when something
happens to the target and notifies the GDB event loop.  Whenever GDB
core decides to handle the event, and calls into linux-nat.c, we
process things as in sync mode, except that we never block in
sigsuspend.

While processing an event, we may end up momentarily blocked in
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
return quickly.  E.g., in all-stop mode, before reporting to the core
that an LWP hit a breakpoint, all LWPs are stopped by sending them
SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
Note that this is different from blocking indefinitely waiting for the
next event --- here, we're already handling an event.

Use of signals
==============

We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of another
signal is not entirely significant; we just need a signal to be delivered,
so that we can intercept it.  SIGSTOP's advantage is that it can not be
blocked.  A disadvantage is that it is not a real-time signal, so it can only
be queued once; we do not keep track of other sources of SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL.  But we can't
use them, because they have special behavior when the signal is generated -
not when it is delivered.  SIGCONT resumes the entire thread group and SIGKILL
kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the thread we
tkill'd.  But we never let the SIGSTOP be delivered; we always intercept and
cancel it (by PTRACE_CONT without passing SIGSTOP).

We could use a real-time signal instead.  This would solve those problems; we
could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
generates it, and there are races with trying to find a signal that is not
blocked.

Exec events
===========

The case of a thread group (process) with 3 or more threads, and a
thread other than the leader execs is worth detailing:

On an exec, the Linux kernel destroys all threads except the execing
one in the thread group, and resets the execing thread's tid to the
tgid.  No exit notification is sent for the execing thread -- from the
ptracer's perspective, it appears as though the execing thread just
vanishes.  Until we reap all other threads except the leader and the
execing thread, the leader will be zombie, and the execing thread will
be in `D (disc sleep)' state.  As soon as all other threads are
reaped, the execing thread changes its tid to the tgid, and the
previous (zombie) leader vanishes, giving place to the "new"
leader.  */
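
/* Illustrative sketch (not part of the original file): the sync-mode
   scheme described above reduces to non-blocking waitpid plus
   sigsuspend, with SIGCHLD blocked everywhere except inside sigsuspend.
   The names used here (block_child_signals, restore_child_signals_mask,
   suspend_mask, __WALL) are the ones this file defines further down;
   the loop itself is a simplification of what linux_nat_wait does.

     sigset_t prev_mask;

     block_child_signals (&prev_mask);	// SIGCHLD now blocked.
     for (;;)
       {
	 int status;
	 pid_t pid = waitpid (-1, &status, __WALL | WNOHANG);

	 if (pid > 0)
	   break;	// Got an event; go handle it.

	 // Nothing pending: atomically unblock SIGCHLD and sleep until
	 // a signal arrives (or is already pending).
	 sigsuspend (&suspend_mask);
       }
     restore_child_signals_mask (&prev_mask);  */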

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

struct linux_nat_target *linux_target;

/* Does the current host support PTRACE_GETREGSET?  */
enum tribool have_ptrace_getregset = TRIBOOL_UNKNOWN;

/* When true, print debug messages relating to the linux native target.  */

static bool debug_linux_nat;

/* Implement 'show debug linux-nat'.  */

static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Debugging of GNU/Linux native targets is %s.\n"),
	      value);
}

/* Print a linux-nat debug statement.  */

#define linux_nat_debug_printf(fmt, ...) \
  debug_prefixed_printf_cond (debug_linux_nat, "linux-nat", fmt, ##__VA_ARGS__)

/* Print "linux-nat" enter/exit debug statements.  */

#define LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT \
  scoped_debug_enter_exit (debug_linux_nat, "linux-nat")

struct simple_pid_list
{
  int pid;
  int status;
  struct simple_pid_list *next;
};
static struct simple_pid_list *stopped_pids;

/* Whether target_thread_events is in effect.  */
static int report_thread_events;

static int kill_lwp (int lwpid, int signo);

static int stop_callback (struct lwp_info *lp);

static void block_child_signals (sigset_t *prev_mask);
static void restore_child_signals_mask (sigset_t *prev_mask);

struct lwp_info;
static struct lwp_info *add_lwp (ptid_t ptid);
static void purge_lwp_list (int pid);
static void delete_lwp (ptid_t ptid);
static struct lwp_info *find_lwp_pid (ptid_t ptid);

static int lwp_status_pending_p (struct lwp_info *lp);

static void save_stop_reason (struct lwp_info *lp);

static void close_proc_mem_file (pid_t pid);
static void open_proc_mem_file (ptid_t ptid);

/* Return TRUE if LWP is the leader thread of the process.  */

static bool
is_leader (lwp_info *lp)
{
  return lp->ptid.pid () == lp->ptid.lwp ();
}

\f
/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return lwp->ptid;
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->step;
}

\f
/* Trivial list manipulation functions to keep track of a list of
   new stopped processes.  */
static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}

/* Return the ptrace options that we want to try to enable.  */

static int
linux_nat_ptrace_options (int attached)
{
  int options = 0;

  if (!attached)
    options |= PTRACE_O_EXITKILL;

  options |= (PTRACE_O_TRACESYSGOOD
	      | PTRACE_O_TRACEVFORKDONE
	      | PTRACE_O_TRACEVFORK
	      | PTRACE_O_TRACEFORK
	      | PTRACE_O_TRACEEXEC);

  return options;
}
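
/* Illustrative sketch (not part of the original file): the option mask
   built above is what eventually reaches the kernel via
   PTRACE_SETOPTIONS.  In this tree that call is assumed to happen in
   linux_enable_event_reporting (nat/linux-ptrace.c), invoked from
   linux_init_ptrace_procfs below; schematically:

     int options = linux_nat_ptrace_options (attached);
     ptrace (PTRACE_SETOPTIONS, pid, 0, options);  */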

/* Initialize ptrace and procfs warnings and check for supported
   ptrace features given PID.

   ATTACHED should be nonzero iff we attached to the inferior.  */

static void
linux_init_ptrace_procfs (pid_t pid, int attached)
{
  int options = linux_nat_ptrace_options (attached);

  linux_enable_event_reporting (pid, options);
  linux_ptrace_init_warnings ();
  linux_proc_init_warnings ();
}

linux_nat_target::~linux_nat_target ()
{}

void
linux_nat_target::post_attach (int pid)
{
  linux_init_ptrace_procfs (pid, 1);
}

/* Implement the virtual inf_ptrace_target::post_startup_inferior method.  */

void
linux_nat_target::post_startup_inferior (ptid_t ptid)
{
  linux_init_ptrace_procfs (ptid.pid (), 0);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;

  for (const lwp_info *lp ATTRIBUTE_UNUSED : all_lwps ())
    if (lp->ptid.pid () == pid)
      count++;

  return count;
}

/* Deleter for lwp_info unique_ptr specialisation.  */

struct lwp_deleter
{
  void operator() (struct lwp_info *lwp) const
  {
    delete_lwp (lwp->ptid);
  }
};

/* A unique_ptr specialisation for lwp_info.  */

typedef std::unique_ptr<struct lwp_info, lwp_deleter> lwp_info_up;
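
/* Usage sketch (mirrors follow_fork below): moving a raw lwp_info
   pointer into an lwp_info_up guarantees delete_lwp runs when the
   owning scope exits, whichever path leaves it:

     lwp_info_up child_lp_ptr (child_lp);
     child_lp = nullptr;
     ...
     linux_target->low_prepare_to_resume (child_lp_ptr.get ());  */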

/* Target hook for follow_fork.  */

void
linux_nat_target::follow_fork (inferior *child_inf, ptid_t child_ptid,
			       target_waitkind fork_kind, bool follow_child,
			       bool detach_fork)
{
  inf_ptrace_target::follow_fork (child_inf, child_ptid, fork_kind,
				  follow_child, detach_fork);

  if (!follow_child)
    {
      bool has_vforked = fork_kind == TARGET_WAITKIND_VFORKED;
      ptid_t parent_ptid = inferior_ptid;
      int parent_pid = parent_ptid.lwp ();
      int child_pid = child_ptid.lwp ();

      /* We're already attached to the parent, by default.  */
      lwp_info *child_lp = add_lwp (child_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;

      /* Detach new forked process?  */
      if (detach_fork)
	{
	  int child_stop_signal = 0;
	  bool detach_child = true;

	  /* Move CHILD_LP into a unique_ptr and clear the source pointer
	     to prevent us doing anything stupid with it.  */
	  lwp_info_up child_lp_ptr (child_lp);
	  child_lp = nullptr;

	  linux_target->low_prepare_to_resume (child_lp_ptr.get ());

	  /* When debugging an inferior in an architecture that supports
	     hardware single stepping on a kernel without commit
	     6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
	     process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
	     set if the parent process had them set.
	     To work around this, single step the child process
	     once before detaching to clear the flags.  */

	  /* Note that we consult the parent's architecture instead of
	     the child's because there's no inferior for the child at
	     this point.  */
	  if (!gdbarch_software_single_step_p (target_thread_architecture
					       (parent_ptid)))
	    {
	      int status;

	      linux_disable_event_reporting (child_pid);
	      if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
		perror_with_name (_("Couldn't do single step"));
	      if (my_waitpid (child_pid, &status, 0) < 0)
		perror_with_name (_("Couldn't wait vfork process"));
	      else
		{
		  detach_child = WIFSTOPPED (status);
		  child_stop_signal = WSTOPSIG (status);
		}
	    }

	  if (detach_child)
	    {
	      int signo = child_stop_signal;

	      if (signo != 0
		  && !signal_pass_state (gdb_signal_from_host (signo)))
		signo = 0;
	      ptrace (PTRACE_DETACH, child_pid, 0, signo);

	      close_proc_mem_file (child_pid);
	    }
	}

      if (has_vforked)
	{
	  lwp_info *parent_lp = find_lwp_pid (parent_ptid);
	  linux_nat_debug_printf ("waiting for VFORK_DONE on %d", parent_pid);
	  parent_lp->stopped = 1;

	  /* We'll handle the VFORK_DONE event like any other
	     event, in target_wait.  */
	}
    }
  else
    {
      struct lwp_info *child_lp;

      child_lp = add_lwp (child_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;
    }
}

\f
int
linux_nat_target::insert_fork_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::remove_fork_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::insert_vfork_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::remove_vfork_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::insert_exec_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::remove_exec_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::set_syscall_catchpoint (int pid, bool needed, int any_count,
					  gdb::array_view<const int> syscall_counts)
{
  /* On GNU/Linux, we ignore the arguments.  It means that we only
     enable the syscall catchpoints, but do not disable them.

     Also, we do not use the `syscall_counts' information because we do not
     filter system calls here.  We let GDB do the logic for us.  */
  return 0;
}

/* List of known LWPs, keyed by LWP PID.  This speeds up the common
   case of mapping a PID returned from the kernel to our corresponding
   lwp_info data structure.  */
static htab_t lwp_lwpid_htab;

/* Calculate a hash from a lwp_info's LWP PID.  */

static hashval_t
lwp_info_hash (const void *ap)
{
  const struct lwp_info *lp = (struct lwp_info *) ap;
  pid_t pid = lp->ptid.lwp ();

  return iterative_hash_object (pid, 0);
}

/* Equality function for the lwp_info hash table.  Compares the LWP's
   PID.  */

static int
lwp_lwpid_htab_eq (const void *a, const void *b)
{
  const struct lwp_info *entry = (const struct lwp_info *) a;
  const struct lwp_info *element = (const struct lwp_info *) b;

  return entry->ptid.lwp () == element->ptid.lwp ();
}

/* Create the lwp_lwpid_htab hash table.  */

static void
lwp_lwpid_htab_create (void)
{
  lwp_lwpid_htab = htab_create (100, lwp_info_hash, lwp_lwpid_htab_eq, NULL);
}

/* Add LP to the hash table.  */

static void
lwp_lwpid_htab_add_lwp (struct lwp_info *lp)
{
  void **slot;

  slot = htab_find_slot (lwp_lwpid_htab, lp, INSERT);
  gdb_assert (slot != NULL && *slot == NULL);
  *slot = lp;
}

/* Head of doubly-linked list of known LWPs.  Sorted by reverse
   creation order.  This order is assumed in some cases.  E.g.,
   reaping status after killing all lwps of a process: the leader LWP
   must be reaped last.  */

static intrusive_list<lwp_info> lwp_list;

/* See linux-nat.h.  */

lwp_info_range
all_lwps ()
{
  return lwp_info_range (lwp_list.begin ());
}

/* See linux-nat.h.  */

lwp_info_safe_range
all_lwps_safe ()
{
  return lwp_info_safe_range (lwp_list.begin ());
}

/* Add LP to sorted-by-reverse-creation-order doubly-linked list.  */

static void
lwp_list_add (struct lwp_info *lp)
{
  lwp_list.push_front (*lp);
}

/* Remove LP from sorted-by-reverse-creation-order doubly-linked
   list.  */

static void
lwp_list_remove (struct lwp_info *lp)
{
  /* Remove from sorted-by-creation-order list.  */
  lwp_list.erase (lwp_list.iterator_to (*lp));
}

\f

/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
   _initialize_linux_nat.  */
static sigset_t suspend_mask;

/* Signals to block to make that sigsuspend work.  */
static sigset_t blocked_mask;

/* SIGCHLD action.  */
static struct sigaction sigchld_action;

/* Block child signals (SIGCHLD and linux threads signals), and store
   the previous mask in PREV_MASK.  */

static void
block_child_signals (sigset_t *prev_mask)
{
  /* Make sure SIGCHLD is blocked.  */
  if (!sigismember (&blocked_mask, SIGCHLD))
    sigaddset (&blocked_mask, SIGCHLD);

  gdb_sigmask (SIG_BLOCK, &blocked_mask, prev_mask);
}

/* Restore child signals mask, previously returned by
   block_child_signals.  */

static void
restore_child_signals_mask (sigset_t *prev_mask)
{
  gdb_sigmask (SIG_SETMASK, prev_mask, NULL);
}

/* Mask of signals to pass directly to the inferior.  */
static sigset_t pass_mask;

/* Update signals to pass to the inferior.  */
void
linux_nat_target::pass_signals
  (gdb::array_view<const unsigned char> pass_signals)
{
  int signo;

  sigemptyset (&pass_mask);

  for (signo = 1; signo < NSIG; signo++)
    {
      int target_signo = gdb_signal_from_host (signo);
      if (target_signo < pass_signals.size () && pass_signals[target_signo])
	sigaddset (&pass_mask, signo);
    }
}
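
/* Illustrative note (not part of the original file): pass_mask is
   consulted when an LWP stops with a signal; if the signal is a member,
   the stop is not reported to the core and the signal is re-delivered
   to the inferior instead.  The check in linux_nat_target::resume
   further down has this shape:

     if (!lp->step
	 && WSTOPSIG (lp->status)
	 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
       ...	// re-deliver the signal rather than reporting a stop  */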


/* Prototypes for local functions.  */
static int stop_wait_callback (struct lwp_info *lp);
static int resume_stopped_resumed_lwps (struct lwp_info *lp, const ptid_t wait_ptid);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);

\f

/* Destroy and free LP.  */

lwp_info::~lwp_info ()
{
  /* Let the arch specific bits release arch_lwp_info.  */
  linux_target->low_delete_thread (this->arch_private);
}

/* Traversal function for purge_lwp_list.  */

static int
lwp_lwpid_htab_remove_pid (void **slot, void *info)
{
  struct lwp_info *lp = (struct lwp_info *) *slot;
  int pid = *(int *) info;

  if (lp->ptid.pid () == pid)
    {
      htab_clear_slot (lwp_lwpid_htab, slot);
      lwp_list_remove (lp);
      delete lp;
    }

  return 1;
}

/* Remove all LWPs belonging to PID from the lwp list.  */

static void
purge_lwp_list (int pid)
{
  htab_traverse_noresize (lwp_lwpid_htab, lwp_lwpid_htab_remove_pid, &pid);
}

/* Add the LWP specified by PTID to the list.  PTID is the first LWP
   in the process.  Return a pointer to the structure describing the
   new LWP.

   This differs from add_lwp in that we don't let the arch specific
   bits know about this new thread.  Current clients of this callback
   take the opportunity to install watchpoints in the new thread, and
   we shouldn't do that for the first thread.  If we're spawning a
   child ("run"), the thread executes the shell wrapper first, and we
   shouldn't touch it until it execs the program we want to debug.
   For "attach", it'd be okay to call the callback, but it's not
   necessary, because watchpoints can't yet have been inserted into
   the inferior.  */

static struct lwp_info *
add_initial_lwp (ptid_t ptid)
{
  gdb_assert (ptid.lwp_p ());

  lwp_info *lp = new lwp_info (ptid);


  /* Add to sorted-by-reverse-creation-order list.  */
  lwp_list_add (lp);

  /* Add to keyed-by-pid htab.  */
  lwp_lwpid_htab_add_lwp (lp);

  return lp;
}

/* Add the LWP specified by PID to the list.  Return a pointer to the
   structure describing the new LWP.  The LWP should already be
   stopped.  */

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  lp = add_initial_lwp (ptid);

  /* Let the arch specific bits know about this new thread.  Current
     clients of this callback take the opportunity to install
     watchpoints in the new thread.  We don't do this for the first
     thread though.  See add_initial_lwp.  */
  linux_target->low_new_thread (lp);

  return lp;
}

/* Remove the LWP specified by PID from the list.  */

static void
delete_lwp (ptid_t ptid)
{
  lwp_info dummy (ptid);

  void **slot = htab_find_slot (lwp_lwpid_htab, &dummy, NO_INSERT);
  if (slot == NULL)
    return;

  lwp_info *lp = *(struct lwp_info **) slot;
  gdb_assert (lp != NULL);

  htab_clear_slot (lwp_lwpid_htab, slot);

  /* Remove from sorted-by-creation-order list.  */
  lwp_list_remove (lp);

  /* Release.  */
  delete lp;
}

/* Return a pointer to the structure describing the LWP corresponding
   to PID.  If no corresponding LWP could be found, return NULL.  */

static struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  int lwp;

  if (ptid.lwp_p ())
    lwp = ptid.lwp ();
  else
    lwp = ptid.pid ();

  lwp_info dummy (ptid_t (0, lwp));
  return (struct lwp_info *) htab_find (lwp_lwpid_htab, &dummy);
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   gdb::function_view<iterate_over_lwps_ftype> callback)
{
  for (lwp_info *lp : all_lwps_safe ())
    {
      if (lp->ptid.matches (filter))
	{
	  if (callback (lp) != 0)
	    return lp;
	}
    }

  return NULL;
}
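
/* Usage examples (both forms appear later in this file): callers pass
   either a plain callback function or a lambda through the
   gdb::function_view parameter:

     iterate_over_lwps (ptid_t (pid), stop_callback);

     iterate_over_lwps (scope_ptid, [=] (struct lwp_info *info)
       {
	 return linux_nat_resume_callback (info, lp);
       });  */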

/* Update our internal state when changing from one checkpoint to
   another indicated by NEW_PTID.  We can only switch single-threaded
   applications, so we only create one new LWP, and the previous list
   is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  purge_lwp_list (inferior_ptid.pid ());

  lp = add_lwp (new_ptid);
  lp->stopped = 1;

  /* This changes the thread's ptid while preserving the gdb thread
     num.  Also changes the inferior pid, while preserving the
     inferior num.  */
  thread_change_ptid (linux_target, inferior_ptid, new_ptid);

  /* We've just told GDB core that the thread changed target id, but,
     in fact, it really is a different thread, with different register
     contents.  */
  registers_changed ();
}

/* Handle the exit of a single thread LP.  */

static void
exit_lwp (struct lwp_info *lp)
{
  struct thread_info *th = find_thread_ptid (linux_target, lp->ptid);

  if (th)
    {
      if (print_thread_events)
	gdb_printf (_("[%s exited]\n"),
		    target_pid_to_str (lp->ptid).c_str ());

      delete_thread (th);
    }

  delete_lwp (lp->ptid);
}

/* Wait for the LWP specified by LP, which we have just attached to.
   Returns a wait status for that LWP, to cache.  */

static int
linux_nat_post_attach_wait (ptid_t ptid, int *signalled)
{
  pid_t new_pid, pid = ptid.lwp ();
  int status;

  if (linux_proc_pid_is_stopped (pid))
    {
      linux_nat_debug_printf ("Attaching to a stopped process");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
	 is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the SIGSTOP
	 (or a higher priority signal, just like normal PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, __WALL);
  gdb_assert (pid == new_pid);

  if (!WIFSTOPPED (status))
    {
      /* The pid we tried to attach has apparently just exited.  */
      linux_nat_debug_printf ("Failed to stop %d: %s", pid,
			      status_to_str (status).c_str ());
      return status;
    }

  if (WSTOPSIG (status) != SIGSTOP)
    {
      *signalled = 1;
      linux_nat_debug_printf ("Received %s after attaching",
			      status_to_str (status).c_str ());
    }

  return status;
}

void
linux_nat_target::create_inferior (const char *exec_file,
				   const std::string &allargs,
				   char **env, int from_tty)
{
  maybe_disable_address_space_randomization restore_personality
    (disable_randomization);

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

  /* Make sure we report all signals during startup.  */
  pass_signals ({});

  inf_ptrace_target::create_inferior (exec_file, allargs, env, from_tty);

  open_proc_mem_file (inferior_ptid);
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  struct lwp_info *lp;

  /* Ignore LWPs we're already attached to.  */
  lp = find_lwp_pid (ptid);
  if (lp == NULL)
    {
      int lwpid = ptid.lwp ();

      if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
	{
	  int err = errno;

	  /* Be quiet if we simply raced with the thread exiting.
	     EPERM is returned if the thread's task still exists, and
	     is marked as exited or zombie, as well as other
	     conditions, so in that case, confirm the status in
	     /proc/PID/status.  */
	  if (err == ESRCH
	      || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	    {
	      linux_nat_debug_printf
		("Cannot attach to lwp %d: thread is gone (%d: %s)",
		 lwpid, err, safe_strerror (err));

	    }
	  else
	    {
	      std::string reason
		= linux_ptrace_attach_fail_reason_string (ptid, err);

	      warning (_("Cannot attach to lwp %d: %s"),
		       lwpid, reason.c_str ());
	    }
	}
      else
	{
	  linux_nat_debug_printf ("PTRACE_ATTACH %s, 0, 0 (OK)",
				  ptid.to_string ().c_str ());

	  lp = add_lwp (ptid);

	  /* The next time we wait for this LWP we'll see a SIGSTOP as
	     PTRACE_ATTACH brings it to a halt.  */
	  lp->signalled = 1;

	  /* We need to wait for a stop before being able to make the
	     next ptrace call on this LWP.  */
	  lp->must_set_ptrace_flags = 1;

	  /* So that wait collects the SIGSTOP.  */
	  lp->resumed = 1;

	  /* Also add the LWP to gdb's thread list, in case a
	     matching libthread_db is not found (or the process uses
	     raw clone).  */
	  add_thread (linux_target, lp->ptid);
	  set_running (linux_target, lp->ptid, true);
	  set_executing (linux_target, lp->ptid, true);
	}

      return 1;
    }
  return 0;
}

void
linux_nat_target::attach (const char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;
  ptid_t ptid;

  /* Make sure we report all signals during attach.  */
  pass_signals ({});

  try
    {
      inf_ptrace_target::attach (args, from_tty);
    }
  catch (const gdb_exception_error &ex)
    {
      pid_t pid = parse_pid_to_attach (args);
      std::string reason = linux_ptrace_attach_fail_reason (pid);

      if (!reason.empty ())
	throw_error (ex.error, "warning: %s\n%s", reason.c_str (),
		     ex.what ());
      else
	throw_error (ex.error, "%s", ex.what ());
    }

  /* The ptrace base target adds the main thread with (pid,0,0)
     format.  Decorate it with lwp info.  */
  ptid = ptid_t (inferior_ptid.pid (),
		 inferior_ptid.pid ());
  thread_change_ptid (linux_target, inferior_ptid, ptid);

  /* Add the initial process as the first LWP to the list.  */
  lp = add_initial_lwp (ptid);

  status = linux_nat_post_attach_wait (lp->ptid, &lp->signalled);
  if (!WIFSTOPPED (status))
    {
      if (WIFEXITED (status))
	{
	  int exit_code = WEXITSTATUS (status);

	  target_terminal::ours ();
	  target_mourn_inferior (inferior_ptid);
	  if (exit_code == 0)
	    error (_("Unable to attach: program exited normally."));
	  else
	    error (_("Unable to attach: program exited with code %d."),
		   exit_code);
	}
      else if (WIFSIGNALED (status))
	{
	  enum gdb_signal signo;

	  target_terminal::ours ();
	  target_mourn_inferior (inferior_ptid);

	  signo = gdb_signal_from_host (WTERMSIG (status));
	  error (_("Unable to attach: program terminated with signal "
		   "%s, %s."),
		 gdb_signal_to_name (signo),
		 gdb_signal_to_string (signo));
	}

      internal_error (__FILE__, __LINE__,
		      _("unexpected status %d for PID %ld"),
		      status, (long) ptid.lwp ());
    }

  lp->stopped = 1;

  open_proc_mem_file (lp->ptid);

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  linux_nat_debug_printf ("waitpid %ld, saving status %s",
			  (long) lp->ptid.pid (),
			  status_to_str (status).c_str ());

  lp->status = status;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  The inferior may be using raw clone instead of
     using pthreads.  But even if it is using pthreads, thread_db
     walks structures in the inferior's address space to find the list
     of threads/LWPs, and those structures may well be corrupted.
     Note that once thread_db is loaded, we'll still use it to list
     threads and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (lp->ptid.pid (),
				  attach_proc_task_lwp_callback);
}

/* Ptrace-detach the thread with pid PID.  */

static void
detach_one_pid (int pid, int signo)
{
  if (ptrace (PTRACE_DETACH, pid, 0, signo) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (pid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       pid, safe_strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       pid, status);
	    }
	}
      else
	error (_("Can't detach %d: %s"),
	       pid, safe_strerror (save_errno));
    }
  else
    linux_nat_debug_printf ("PTRACE_DETACH (%d, %s, 0) (OK)",
			    pid, strsignal (signo));
}

/* Get pending signal of THREAD as a host signal number, for detaching
   purposes.  This is the signal the thread last stopped for, which we
   need to deliver to the thread when detaching, otherwise, it'd be
   suppressed/lost.  */

static int
get_detach_signal (struct lwp_info *lp)
{
  enum gdb_signal signo = GDB_SIGNAL_0;

  /* If we paused threads momentarily, we may have stored pending
     events in lp->status or lp->waitstatus (see stop_wait_callback),
     and GDB core hasn't seen any signal for those threads.
     Otherwise, the last signal reported to the core is found in the
     thread object's stop_signal.

     There's a corner case that isn't handled here at present.  Only
     if the thread stopped with a TARGET_WAITKIND_STOPPED does
     stop_signal make sense as a real signal to pass to the inferior.
     Some catchpoint related events, like
     TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
     to GDB_SIGNAL_SIGTRAP when the catchpoint triggers.  But,
     those traps are debug API (ptrace in our case) related and
     induced; the inferior wouldn't see them if it wasn't being
     traced.  Hence, we should never pass them to the inferior, even
     when set to pass state.  Since this corner case isn't handled by
     infrun.c when proceeding with a signal, for consistency, neither
     do we handle it here (or elsewhere in the file we check for
     signal pass state).  Normally SIGTRAP isn't set to pass state, so
     this is really a corner case.  */

  if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
    signo = GDB_SIGNAL_0;	/* a pending ptrace event, not a real signal.  */
  else if (lp->status)
    signo = gdb_signal_from_host (WSTOPSIG (lp->status));
  else
    {
      struct thread_info *tp = find_thread_ptid (linux_target, lp->ptid);

      if (target_is_non_stop_p () && !tp->executing ())
	{
	  if (tp->has_pending_waitstatus ())
	    {
	      /* If the thread has a pending event, and it was stopped with a
		 signal, use that signal to resume it.  If it has a pending
		 event of another kind, it was not stopped with a signal, so
		 resume it without a signal.  */
	      if (tp->pending_waitstatus ().kind () == TARGET_WAITKIND_STOPPED)
		signo = tp->pending_waitstatus ().sig ();
	      else
		signo = GDB_SIGNAL_0;
	    }
	  else
	    signo = tp->stop_signal ();
	}
      else if (!target_is_non_stop_p ())
	{
	  ptid_t last_ptid;
	  process_stratum_target *last_target;

	  get_last_target_status (&last_target, &last_ptid, nullptr);

	  if (last_target == linux_target
	      && lp->ptid.lwp () == last_ptid.lwp ())
	    signo = tp->stop_signal ();
	}
    }

  if (signo == GDB_SIGNAL_0)
    {
      linux_nat_debug_printf ("lwp %s has no pending signal",
			      lp->ptid.to_string ().c_str ());
    }
  else if (!signal_pass_state (signo))
    {
      linux_nat_debug_printf
	("lwp %s had signal %s but it is in no pass state",
	 lp->ptid.to_string ().c_str (), gdb_signal_to_string (signo));
    }
  else
    {
      linux_nat_debug_printf ("lwp %s has pending signal %s",
			      lp->ptid.to_string ().c_str (),
			      gdb_signal_to_string (signo));

      return gdb_signal_to_host (signo);
    }

  return 0;
}

/* Detach from LP.  If SIGNO_P is non-NULL, then it points to the
   signal number that should be passed to the LWP when detaching.
   Otherwise pass any pending signal the LWP may have, if any.  */

static void
detach_one_lwp (struct lwp_info *lp, int *signo_p)
{
  int lwpid = lp->ptid.lwp ();
  int signo;

  gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));

  /* If the lwp/thread we are about to detach has a pending fork event,
     there is a process GDB is attached to that the core of GDB doesn't know
     about.  Detach from it.  */

  /* Check in lwp_info::status.  */
  if (WIFSTOPPED (lp->status) && linux_is_extended_waitstatus (lp->status))
    {
      int event = linux_ptrace_get_extended_event (lp->status);

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  unsigned long child_pid;
	  int ret = ptrace (PTRACE_GETEVENTMSG, lp->ptid.lwp (), 0, &child_pid);
	  if (ret == 0)
	    detach_one_pid (child_pid, 0);
	  else
	    perror_warning_with_name (_("Failed to detach fork child"));
	}
    }

  /* Check in lwp_info::waitstatus.  */
  if (lp->waitstatus.kind () == TARGET_WAITKIND_VFORKED
      || lp->waitstatus.kind () == TARGET_WAITKIND_FORKED)
    detach_one_pid (lp->waitstatus.child_ptid ().pid (), 0);


  /* Check in thread_info::pending_waitstatus.  */
  thread_info *tp = find_thread_ptid (linux_target, lp->ptid);
  if (tp->has_pending_waitstatus ())
    {
      const target_waitstatus &ws = tp->pending_waitstatus ();

      if (ws.kind () == TARGET_WAITKIND_VFORKED
	  || ws.kind () == TARGET_WAITKIND_FORKED)
	detach_one_pid (ws.child_ptid ().pid (), 0);
    }

  /* Check in thread_info::pending_follow.  */
  if (tp->pending_follow.kind () == TARGET_WAITKIND_VFORKED
      || tp->pending_follow.kind () == TARGET_WAITKIND_FORKED)
    detach_one_pid (tp->pending_follow.child_ptid ().pid (), 0);

  if (lp->status != 0)
    linux_nat_debug_printf ("Pending %s for %s on detach.",
			    strsignal (WSTOPSIG (lp->status)),
			    lp->ptid.to_string ().c_str ());

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lp->signalled)
    {
      linux_nat_debug_printf ("Sending SIGCONT to %s",
			      lp->ptid.to_string ().c_str ());

      kill_lwp (lwpid, SIGCONT);
      lp->signalled = 0;
    }

  if (signo_p == NULL)
    {
      /* Pass on any pending signal for this LWP.  */
      signo = get_detach_signal (lp);
    }
  else
    signo = *signo_p;

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      linux_target->low_prepare_to_resume (lp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lp))
	throw;
    }

  detach_one_pid (lwpid, signo);

  delete_lwp (lp->ptid);
}

static int
detach_callback (struct lwp_info *lp)
{
  /* We don't actually detach from the thread group leader just yet.
     If the thread group exits, we must reap the zombie clone lwps
     before we're able to reap the leader.  */
  if (lp->ptid.lwp () != lp->ptid.pid ())
    detach_one_lwp (lp, NULL);
  return 0;
}

void
linux_nat_target::detach (inferior *inf, int from_tty)
{
  struct lwp_info *main_lwp;
  int pid = inf->pid;

  /* Don't unregister from the event loop, as there may be other
     inferiors running.  */

  /* Stop all threads before detaching.  ptrace requires that the
     thread is stopped to successfully detach.  */
  iterate_over_lwps (ptid_t (pid), stop_callback);
  /* ... and wait until all of them have reported back that
     they're no longer running.  */
  iterate_over_lwps (ptid_t (pid), stop_wait_callback);

  /* We can now safely remove breakpoints.  We don't do this earlier
     in common code because this target doesn't currently support
     writing memory while the inferior is running.  */
  remove_breakpoints_inf (current_inferior ());

  iterate_over_lwps (ptid_t (pid), detach_callback);

  /* Only the initial process should be left right now.  */
  gdb_assert (num_lwps (pid) == 1);

  main_lwp = find_lwp_pid (ptid_t (pid));

  if (forks_exist_p ())
    {
      /* Multi-fork case.  The current inferior_ptid is being detached
	 from, but there are other viable forks to debug.  Detach from
	 the current fork, and context-switch to the first
	 available.  */
      linux_fork_detach (from_tty);
    }
  else
    {
      target_announce_detach (from_tty);

      /* Pass on any pending signal for the last LWP.  */
      int signo = get_detach_signal (main_lwp);

      detach_one_lwp (main_lwp, &signo);

      detach_success (inf);
    }

  close_proc_mem_file (pid);
}
1432
8a99810d
PA
1433/* Resume execution of the inferior process. If STEP is nonzero,
1434 single-step it. If SIGNAL is nonzero, give it that signal. */
1435
1436static void
23f238d3
PA
1437linux_resume_one_lwp_throw (struct lwp_info *lp, int step,
1438 enum gdb_signal signo)
8a99810d 1439{
8a99810d 1440 lp->step = step;
9c02b525
PA
1441
1442 /* stop_pc doubles as the PC the LWP had when it was last resumed.
1443 We only presently need that if the LWP is stepped though (to
1444 handle the case of stepping a breakpoint instruction). */
1445 if (step)
1446 {
5b6d1e4f 1447 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
9c02b525
PA
1448
1449 lp->stop_pc = regcache_read_pc (regcache);
1450 }
1451 else
1452 lp->stop_pc = 0;
1453
135340af 1454 linux_target->low_prepare_to_resume (lp);
f6ac5f3d 1455 linux_target->low_resume (lp->ptid, step, signo);
23f238d3
PA
1456
1457 /* Successfully resumed. Clear state that no longer makes sense,
1458 and mark the LWP as running. Must not do this before resuming
1459 otherwise if that fails other code will be confused. E.g., we'd
1460 later try to stop the LWP and hang forever waiting for a stop
1461 status. Note that we must not throw after this is cleared,
1462 otherwise handle_zombie_lwp_error would get confused. */
8a99810d 1463 lp->stopped = 0;
1ad3de98 1464 lp->core = -1;
23f238d3 1465 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
5b6d1e4f 1466 registers_changed_ptid (linux_target, lp->ptid);
8a99810d
PA
1467}
1468
23f238d3
PA
1469/* Called when we try to resume a stopped LWP and that errors out. If
1470 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
1471 or about to become), discard the error, clear any pending status
1472 the LWP may have, and return true (we'll collect the exit status
1473 soon enough). Otherwise, return false. */
1474
1475static int
1476check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
1477{
1478 /* If we get an error after resuming the LWP successfully, we'd
1479 confuse !T state for the LWP being gone. */
1480 gdb_assert (lp->stopped);
1481
1482 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
1483 because even if ptrace failed with ESRCH, the tracee may be "not
1484 yet fully dead", but already refusing ptrace requests. In that
1485 case the tracee has 'R (Running)' state for a little bit
1486 (observed in Linux 3.18). See also the note on ESRCH in the
1487 ptrace(2) man page. Instead, check whether the LWP has any state
1488 other than ptrace-stopped. */
1489
1490 /* Don't assume anything if /proc/PID/status can't be read. */
e38504b3 1491 if (linux_proc_pid_is_trace_stopped_nowarn (lp->ptid.lwp ()) == 0)
23f238d3
PA
1492 {
1493 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
1494 lp->status = 0;
183be222 1495 lp->waitstatus.set_ignore ();
23f238d3
PA
1496 return 1;
1497 }
1498 return 0;
1499}
1500
1501/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
1502 disappears while we try to resume it. */
1503
1504static void
1505linux_resume_one_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1506{
a70b8144 1507 try
23f238d3
PA
1508 {
1509 linux_resume_one_lwp_throw (lp, step, signo);
1510 }
230d2906 1511 catch (const gdb_exception_error &ex)
23f238d3
PA
1512 {
1513 if (!check_ptrace_stopped_lwp_gone (lp))
eedc3f4f 1514 throw;
23f238d3 1515 }
23f238d3
PA
1516}
1517
d6b0e80f
AC
1518/* Resume LP. */
1519
25289eb2 1520static void
e5ef252a 1521resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
d6b0e80f 1522{
25289eb2 1523 if (lp->stopped)
6c95b8df 1524 {
5b6d1e4f 1525 struct inferior *inf = find_inferior_ptid (linux_target, lp->ptid);
25289eb2
PA
1526
1527 if (inf->vfork_child != NULL)
1528 {
9327494e 1529 linux_nat_debug_printf ("Not resuming %s (vfork parent)",
e53c95d4 1530 lp->ptid.to_string ().c_str ());
25289eb2 1531 }
8a99810d 1532 else if (!lwp_status_pending_p (lp))
25289eb2 1533 {
9327494e 1534 linux_nat_debug_printf ("Resuming sibling %s, %s, %s",
e53c95d4 1535 lp->ptid.to_string ().c_str (),
9327494e
SM
1536 (signo != GDB_SIGNAL_0
1537 ? strsignal (gdb_signal_to_host (signo))
1538 : "0"),
1539 step ? "step" : "resume");
25289eb2 1540
8a99810d 1541 linux_resume_one_lwp (lp, step, signo);
25289eb2
PA
1542 }
1543 else
1544 {
9327494e 1545 linux_nat_debug_printf ("Not resuming sibling %s (has pending)",
e53c95d4 1546 lp->ptid.to_string ().c_str ());
25289eb2 1547 }
6c95b8df 1548 }
25289eb2 1549 else
9327494e 1550 linux_nat_debug_printf ("Not resuming sibling %s (not stopped)",
e53c95d4 1551 lp->ptid.to_string ().c_str ());
25289eb2 1552}
d6b0e80f 1553
8817a6f2
PA
1554/* Callback for iterate_over_lwps. If LWP is EXCEPT, do nothing.
1555 Resume LWP with the last stop signal, if it is in pass state. */
e5ef252a 1556
25289eb2 1557static int
d3a70e03 1558linux_nat_resume_callback (struct lwp_info *lp, struct lwp_info *except)
25289eb2 1559{
e5ef252a
PA
1560 enum gdb_signal signo = GDB_SIGNAL_0;
1561
8817a6f2
PA
1562 if (lp == except)
1563 return 0;
1564
e5ef252a
PA
1565 if (lp->stopped)
1566 {
1567 struct thread_info *thread;
1568
5b6d1e4f 1569 thread = find_thread_ptid (linux_target, lp->ptid);
e5ef252a
PA
1570 if (thread != NULL)
1571 {
1edb66d8
SM
1572 signo = thread->stop_signal ();
1573 thread->set_stop_signal (GDB_SIGNAL_0);
e5ef252a
PA
1574 }
1575 }
1576
1577 resume_lwp (lp, 0, signo);
d6b0e80f
AC
1578 return 0;
1579}
1580
1581static int
d3a70e03 1582resume_clear_callback (struct lwp_info *lp)
d6b0e80f
AC
1583{
1584 lp->resumed = 0;
25289eb2 1585 lp->last_resume_kind = resume_stop;
d6b0e80f
AC
1586 return 0;
1587}
1588
1589static int
d3a70e03 1590resume_set_callback (struct lwp_info *lp)
d6b0e80f
AC
1591{
1592 lp->resumed = 1;
25289eb2 1593 lp->last_resume_kind = resume_continue;
d6b0e80f
AC
1594 return 0;
1595}
1596
f6ac5f3d 1597void
d51926f0 1598linux_nat_target::resume (ptid_t scope_ptid, int step, enum gdb_signal signo)
d6b0e80f
AC
1599{
1600 struct lwp_info *lp;
d6b0e80f 1601
9327494e
SM
1602 linux_nat_debug_printf ("Preparing to %s %s, %s, inferior_ptid %s",
1603 step ? "step" : "resume",
d51926f0 1604 scope_ptid.to_string ().c_str (),
9327494e
SM
1605 (signo != GDB_SIGNAL_0
1606 ? strsignal (gdb_signal_to_host (signo)) : "0"),
e53c95d4 1607 inferior_ptid.to_string ().c_str ());
76f50ad1 1608
7da6a5b9
LM
1609 /* Mark the lwps we're resuming as resumed and update their
1610 last_resume_kind to resume_continue. */
d51926f0 1611 iterate_over_lwps (scope_ptid, resume_set_callback);
d6b0e80f 1612
d51926f0 1613 lp = find_lwp_pid (inferior_ptid);
9f0bdab8 1614 gdb_assert (lp != NULL);
d6b0e80f 1615
9f0bdab8 1616 /* Remember if we're stepping. */
25289eb2 1617 lp->last_resume_kind = step ? resume_step : resume_continue;
d6b0e80f 1618
9f0bdab8
DJ
1619 /* If we have a pending wait status for this thread, there is no
1620 point in resuming the process. But first make sure that
1621 linux_nat_wait won't preemptively handle the event - we
1622 should never take this short-circuit if we are going to
1623 leave LP running, since we have skipped resuming all the
1624 other threads. This bit of code needs to be synchronized
1625 with linux_nat_wait. */
76f50ad1 1626
9f0bdab8
DJ
1627 if (lp->status && WIFSTOPPED (lp->status))
1628 {
2455069d
UW
1629 if (!lp->step
1630 && WSTOPSIG (lp->status)
1631 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
d6b0e80f 1632 {
9327494e
SM
1633 linux_nat_debug_printf
1634 ("Not short circuiting for ignored status 0x%x", lp->status);
9f0bdab8 1635
d6b0e80f
AC
1636 /* FIXME: What should we do if we are supposed to continue
1637 this thread with a signal? */
a493e3e2 1638 gdb_assert (signo == GDB_SIGNAL_0);
2ea28649 1639 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
9f0bdab8
DJ
1640 lp->status = 0;
1641 }
1642 }
76f50ad1 1643
8a99810d 1644 if (lwp_status_pending_p (lp))
9f0bdab8
DJ
1645 {
1646 /* FIXME: What should we do if we are supposed to continue
1647 this thread with a signal? */
a493e3e2 1648 gdb_assert (signo == GDB_SIGNAL_0);
76f50ad1 1649
9327494e
SM
1650 linux_nat_debug_printf ("Short circuiting for status 0x%x",
1651 lp->status);
d6b0e80f 1652
7feb7d06
PA
1653 if (target_can_async_p ())
1654 {
6a3753b3 1655 target_async (1);
7feb7d06
PA
1656 /* Tell the event loop we have something to process. */
1657 async_file_mark ();
1658 }
9f0bdab8 1659 return;
d6b0e80f
AC
1660 }
1661
d51926f0
PA
1662 /* No use iterating unless we're resuming other threads. */
1663 if (scope_ptid != lp->ptid)
1664 iterate_over_lwps (scope_ptid, [=] (struct lwp_info *info)
1665 {
1666 return linux_nat_resume_callback (info, lp);
1667 });
d90e17a7 1668
9327494e
SM
1669 linux_nat_debug_printf ("%s %s, %s (resume event thread)",
1670 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
e53c95d4 1671 lp->ptid.to_string ().c_str (),
9327494e
SM
1672 (signo != GDB_SIGNAL_0
1673 ? strsignal (gdb_signal_to_host (signo)) : "0"));
b84876c2 1674
2bf6fb9d 1675 linux_resume_one_lwp (lp, step, signo);
d6b0e80f
AC
1676}
1677
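/* Illustrative sketch, not part of linux-nat.c proper: the resume path
   above ultimately reduces to one of two ptrace requests per thread;
   the "data" argument carries the host signal number to inject on
   resumption (0 for none).  The helper name is hypothetical and error
   handling is omitted.  */

#include <sys/types.h>
#include <sys/ptrace.h>

static long
resume_one_thread_sketch (pid_t tid, int step, int host_signo)
{
  /* PTRACE_SINGLESTEP executes one instruction and stops with SIGTRAP;
     PTRACE_CONT runs until the next signal or ptrace stop.  */
  return ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, tid,
		 (void *) 0, (void *) (long) host_signo);
}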
c5f62d5f 1678/* Send a signal to an LWP. */
d6b0e80f
AC
1679
1680static int
1681kill_lwp (int lwpid, int signo)
1682{
4a6ed09b 1683 int ret;
d6b0e80f 1684
4a6ed09b
PA
1685 errno = 0;
1686 ret = syscall (__NR_tkill, lwpid, signo);
1687 if (errno == ENOSYS)
1688 {
1689 /* If tkill fails, then we are not using nptl threads, a
1690 configuration we no longer support. */
1691 perror_with_name (("tkill"));
1692 }
1693 return ret;
d6b0e80f
AC
1694}
1695
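/* Illustrative sketch, not part of linux-nat.c proper: thread-directed
   signalling, as used by kill_lwp above.  kill(2) is process-directed,
   so any thread with the signal unblocked may take it; tkill/tgkill
   target one specific thread, which is what the stop/resume machinery
   here depends on.  The helper name is hypothetical; tgkill is
   preferred where available because it also checks the thread group
   and so cannot hit a recycled tid.  */

#include <sys/types.h>
#include <sys/syscall.h>
#include <unistd.h>

static int
signal_one_thread_sketch (pid_t tgid, pid_t tid, int signo)
{
#ifdef __NR_tgkill
  return syscall (__NR_tgkill, tgid, tid, signo);
#else
  return syscall (__NR_tkill, tid, signo);
#endif
}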
ca2163eb
PA
1696/* Handle a GNU/Linux syscall trap wait response. If we see a syscall
1697 event, check if the core is interested in it: if not, ignore the
1698 event, and keep waiting; otherwise, we need to toggle the LWP's
1699 syscall entry/exit status, since the ptrace event itself doesn't
1700 indicate it, and report the trap to higher layers. */
1701
1702static int
1703linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
1704{
1705 struct target_waitstatus *ourstatus = &lp->waitstatus;
1706 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
5b6d1e4f 1707 thread_info *thread = find_thread_ptid (linux_target, lp->ptid);
00431a78 1708 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, thread);
ca2163eb
PA
1709
1710 if (stopping)
1711 {
1712 /* If we're stopping threads, there's a SIGSTOP pending, which
1713 makes it so that the LWP reports an immediate syscall return,
1714 followed by the SIGSTOP. Skip seeing that "return" using
1715 PTRACE_CONT directly, and let stop_wait_callback collect the
 1716	 SIGSTOP.  Later, when the thread is resumed, a new syscall entry
 1717	 event will be reported.  If we didn't do this (and returned 0), we'd
1718 leave a syscall entry pending, and our caller, by using
1719 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
1720 itself. Later, when the user re-resumes this LWP, we'd see
1721 another syscall entry event and we'd mistake it for a return.
1722
1723 If stop_wait_callback didn't force the SIGSTOP out of the LWP
1724 (leaving immediately with LWP->signalled set, without issuing
1725 a PTRACE_CONT), it would still be problematic to leave this
1726 syscall enter pending, as later when the thread is resumed,
1727 it would then see the same syscall exit mentioned above,
1728 followed by the delayed SIGSTOP, while the syscall didn't
1729 actually get to execute. It seems it would be even more
1730 confusing to the user. */
1731
9327494e
SM
1732 linux_nat_debug_printf
1733 ("ignoring syscall %d for LWP %ld (stopping threads), resuming with "
1734 "PTRACE_CONT for SIGSTOP", syscall_number, lp->ptid.lwp ());
ca2163eb
PA
1735
1736 lp->syscall_state = TARGET_WAITKIND_IGNORE;
e38504b3 1737 ptrace (PTRACE_CONT, lp->ptid.lwp (), 0, 0);
8817a6f2 1738 lp->stopped = 0;
ca2163eb
PA
1739 return 1;
1740 }
1741
bfd09d20
JS
1742 /* Always update the entry/return state, even if this particular
1743 syscall isn't interesting to the core now. In async mode,
1744 the user could install a new catchpoint for this syscall
1745 between syscall enter/return, and we'll need to know to
1746 report a syscall return if that happens. */
1747 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1748 ? TARGET_WAITKIND_SYSCALL_RETURN
1749 : TARGET_WAITKIND_SYSCALL_ENTRY);
1750
ca2163eb
PA
1751 if (catch_syscall_enabled ())
1752 {
ca2163eb
PA
1753 if (catching_syscall_number (syscall_number))
1754 {
1755 /* Alright, an event to report. */
183be222
SM
1756 if (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
1757 ourstatus->set_syscall_entry (syscall_number);
1758 else if (lp->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
1759 ourstatus->set_syscall_return (syscall_number);
1760 else
1761 gdb_assert_not_reached ("unexpected syscall state");
ca2163eb 1762
9327494e
SM
1763 linux_nat_debug_printf
1764 ("stopping for %s of syscall %d for LWP %ld",
1765 (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1766 ? "entry" : "return"), syscall_number, lp->ptid.lwp ());
1767
ca2163eb
PA
1768 return 0;
1769 }
1770
9327494e
SM
1771 linux_nat_debug_printf
1772 ("ignoring %s of syscall %d for LWP %ld",
1773 (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1774 ? "entry" : "return"), syscall_number, lp->ptid.lwp ());
ca2163eb
PA
1775 }
1776 else
1777 {
1778 /* If we had been syscall tracing, and hence used PT_SYSCALL
1779 before on this LWP, it could happen that the user removes all
1780 syscall catchpoints before we get to process this event.
1781 There are two noteworthy issues here:
1782
1783 - When stopped at a syscall entry event, resuming with
1784 PT_STEP still resumes executing the syscall and reports a
1785 syscall return.
1786
 1787	 - Only PT_SYSCALL catches syscall enters.  So if we last
 1788	 single-stepped this thread, this event can't be a syscall
 1789	 enter; it has to be a syscall exit.
1791
1792 The points above mean that the next resume, be it PT_STEP or
1793 PT_CONTINUE, can not trigger a syscall trace event. */
9327494e
SM
1794 linux_nat_debug_printf
1795 ("caught syscall event with no syscall catchpoints. %d for LWP %ld, "
1796 "ignoring", syscall_number, lp->ptid.lwp ());
ca2163eb
PA
1797 lp->syscall_state = TARGET_WAITKIND_IGNORE;
1798 }
1799
1800 /* The core isn't interested in this event. For efficiency, avoid
1801 stopping all threads only to have the core resume them all again.
1802 Since we're not stopping threads, if we're still syscall tracing
1803 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
1804 subsequent syscall. Simply resume using the inf-ptrace layer,
1805 which knows when to use PT_SYSCALL or PT_CONTINUE. */
1806
8a99810d 1807 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
ca2163eb
PA
1808 return 1;
1809}
1810
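/* Illustrative sketch, not part of linux-nat.c proper: the bare ptrace
   mechanics behind the function above.  With PTRACE_O_TRACESYSGOOD
   set, syscall stops report WSTOPSIG == (SIGTRAP | 0x80), and every
   syscall yields one stop on entry and one on exit; the tracer must
   pair them up itself, which is exactly the entry/exit toggling done
   above.  Error handling is omitted and the function name is
   hypothetical.  */

#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <signal.h>

static void
trace_syscalls_sketch (pid_t pid)
{
  int status;
  int entering = 0;

  ptrace (PTRACE_SETOPTIONS, pid, 0, PTRACE_O_TRACESYSGOOD);

  for (;;)
    {
      /* Run the tracee until its next syscall stop (or other event).  */
      ptrace (PTRACE_SYSCALL, pid, 0, 0);
      if (waitpid (pid, &status, 0) == -1 || !WIFSTOPPED (status))
	break;

      if (WSTOPSIG (status) == (SIGTRAP | 0x80))
	/* Stops alternate: entry, exit, entry, exit, ...  */
	entering = !entering;
    }
}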
3d799a95
DJ
1811/* Handle a GNU/Linux extended wait response. If we see a clone
1812 event, we need to add the new LWP to our list (and not report the
1813 trap to higher layers). This function returns non-zero if the
1814 event should be ignored and we should wait again. If STOPPING is
1815 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
1816
1817static int
4dd63d48 1818linux_handle_extended_wait (struct lwp_info *lp, int status)
d6b0e80f 1819{
e38504b3 1820 int pid = lp->ptid.lwp ();
3d799a95 1821 struct target_waitstatus *ourstatus = &lp->waitstatus;
89a5711c 1822 int event = linux_ptrace_get_extended_event (status);
d6b0e80f 1823
bfd09d20
JS
1824 /* All extended events we currently use are mid-syscall. Only
1825 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
1826 you have to be using PTRACE_SEIZE to get that. */
1827 lp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
1828
3d799a95
DJ
1829 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1830 || event == PTRACE_EVENT_CLONE)
d6b0e80f 1831 {
3d799a95
DJ
1832 unsigned long new_pid;
1833 int ret;
1834
1835 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 1836
3d799a95
DJ
1837 /* If we haven't already seen the new PID stop, wait for it now. */
1838 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1839 {
1840 /* The new child has a pending SIGSTOP. We can't affect it until it
1841 hits the SIGSTOP, but we're already attached. */
4a6ed09b 1842 ret = my_waitpid (new_pid, &status, __WALL);
3d799a95
DJ
1843 if (ret == -1)
1844 perror_with_name (_("waiting for new child"));
1845 else if (ret != new_pid)
1846 internal_error (__FILE__, __LINE__,
1847 _("wait returned unexpected PID %d"), ret);
1848 else if (!WIFSTOPPED (status))
1849 internal_error (__FILE__, __LINE__,
1850 _("wait returned unexpected status 0x%x"), status);
1851 }
1852
183be222 1853 ptid_t child_ptid (new_pid, new_pid);
3d799a95 1854
26cb8b7c
PA
1855 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
1856 {
8a89ddbd
PA
1857 open_proc_mem_file (child_ptid);
1858
26cb8b7c
PA
1859 /* The arch-specific native code may need to know about new
1860 forks even if those end up never mapped to an
1861 inferior. */
135340af 1862 linux_target->low_new_fork (lp, new_pid);
26cb8b7c 1863 }
1310c1b0
PFC
1864 else if (event == PTRACE_EVENT_CLONE)
1865 {
1866 linux_target->low_new_clone (lp, new_pid);
1867 }
26cb8b7c 1868
2277426b 1869 if (event == PTRACE_EVENT_FORK
e99b03dc 1870 && linux_fork_checkpointing_p (lp->ptid.pid ()))
2277426b 1871 {
2277426b
PA
1872 /* Handle checkpointing by linux-fork.c here as a special
1873 case. We don't want the follow-fork-mode or 'catch fork'
1874 to interfere with this. */
1875
1876 /* This won't actually modify the breakpoint list, but will
1877 physically remove the breakpoints from the child. */
184ea2f7 1878 detach_breakpoints (ptid_t (new_pid, new_pid));
2277426b
PA
1879
1880 /* Retain child fork in ptrace (stopped) state. */
14571dad
MS
1881 if (!find_fork_pid (new_pid))
1882 add_fork (new_pid);
2277426b
PA
1883
1884 /* Report as spurious, so that infrun doesn't want to follow
1885 this fork. We're actually doing an infcall in
1886 linux-fork.c. */
183be222 1887 ourstatus->set_spurious ();
2277426b
PA
1888
1889 /* Report the stop to the core. */
1890 return 0;
1891 }
1892
3d799a95 1893 if (event == PTRACE_EVENT_FORK)
183be222 1894 ourstatus->set_forked (child_ptid);
3d799a95 1895 else if (event == PTRACE_EVENT_VFORK)
183be222 1896 ourstatus->set_vforked (child_ptid);
4dd63d48 1897 else if (event == PTRACE_EVENT_CLONE)
3d799a95 1898 {
78768c4a
JK
1899 struct lwp_info *new_lp;
1900
183be222 1901 ourstatus->set_ignore ();
78768c4a 1902
9327494e
SM
1903 linux_nat_debug_printf
1904 ("Got clone event from LWP %d, new child is LWP %ld", pid, new_pid);
3c4d7e12 1905
184ea2f7 1906 new_lp = add_lwp (ptid_t (lp->ptid.pid (), new_pid));
4c28f408 1907 new_lp->stopped = 1;
4dd63d48 1908 new_lp->resumed = 1;
d6b0e80f 1909
2db9a427
PA
1910 /* If the thread_db layer is active, let it record the user
1911 level thread id and status, and add the thread to GDB's
1912 list. */
1913 if (!thread_db_notice_clone (lp->ptid, new_lp->ptid))
3d799a95 1914 {
2db9a427
PA
1915 /* The process is not using thread_db. Add the LWP to
1916 GDB's list. */
e38504b3 1917 target_post_attach (new_lp->ptid.lwp ());
5b6d1e4f 1918 add_thread (linux_target, new_lp->ptid);
2db9a427 1919 }
4c28f408 1920
2ee52aa4 1921 /* Even if we're stopping the thread for some reason
4dd63d48
PA
1922 internal to this module, from the perspective of infrun
1923 and the user/frontend, this new thread is running until
1924 it next reports a stop. */
719546c4
SM
1925 set_running (linux_target, new_lp->ptid, true);
1926 set_executing (linux_target, new_lp->ptid, true);
4c28f408 1927
4dd63d48 1928 if (WSTOPSIG (status) != SIGSTOP)
79395f92 1929 {
4dd63d48
PA
 1930	 /* This can happen if someone starts sending signals with
 1931	    a lower number than SIGSTOP (e.g. SIGUSR1) to the new
 1932	    thread before it gets a chance to run.
1933 This is an unlikely case, and harder to handle for
1934 fork / vfork than for clone, so we do not try - but
1935 we handle it for clone events here. */
1936
1937 new_lp->signalled = 1;
1938
79395f92
PA
1939 /* We created NEW_LP so it cannot yet contain STATUS. */
1940 gdb_assert (new_lp->status == 0);
1941
1942 /* Save the wait status to report later. */
9327494e
SM
1943 linux_nat_debug_printf
1944 ("waitpid of new LWP %ld, saving status %s",
8d06918f 1945 (long) new_lp->ptid.lwp (), status_to_str (status).c_str ());
79395f92
PA
1946 new_lp->status = status;
1947 }
aa01bd36
PA
1948 else if (report_thread_events)
1949 {
183be222 1950 new_lp->waitstatus.set_thread_created ();
aa01bd36
PA
1951 new_lp->status = status;
1952 }
79395f92 1953
3d799a95
DJ
1954 return 1;
1955 }
1956
1957 return 0;
d6b0e80f
AC
1958 }
1959
3d799a95
DJ
1960 if (event == PTRACE_EVENT_EXEC)
1961 {
9327494e 1962 linux_nat_debug_printf ("Got exec event from LWP %ld", lp->ptid.lwp ());
a75724bc 1963
8a89ddbd
PA
1964 /* Close the previous /proc/PID/mem file for this inferior,
1965 which was using the address space which is now gone.
1966 Reading/writing from this file would return 0/EOF. */
1967 close_proc_mem_file (lp->ptid.pid ());
1968
1969 /* Open a new file for the new address space. */
1970 open_proc_mem_file (lp->ptid);
05c06f31 1971
183be222
SM
1972 ourstatus->set_execd
1973 (make_unique_xstrdup (linux_proc_pid_to_exec_file (pid)));
3d799a95 1974
8af756ef
PA
1975 /* The thread that execed must have been resumed, but, when a
1976 thread execs, it changes its tid to the tgid, and the old
1977 tgid thread might have not been resumed. */
1978 lp->resumed = 1;
6c95b8df
PA
1979 return 0;
1980 }
1981
1982 if (event == PTRACE_EVENT_VFORK_DONE)
1983 {
9327494e 1984 linux_nat_debug_printf
5a0c4a06
SM
1985 ("Got PTRACE_EVENT_VFORK_DONE from LWP %ld",
1986 lp->ptid.lwp ());
1987 ourstatus->set_vfork_done ();
1988 return 0;
3d799a95
DJ
1989 }
1990
1991 internal_error (__FILE__, __LINE__,
1992 _("unknown ptrace event %d"), event);
d6b0e80f
AC
1993}
1994
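/* Illustrative sketch, not part of linux-nat.c proper: how an extended
   ptrace event is recovered from a raw wait status once the
   PTRACE_O_TRACEFORK/VFORK/CLONE/EXEC options are in effect.  The
   kernel reports such stops as a SIGTRAP with the event number in the
   high bits of the status, and the new child's pid is obtained with
   PTRACE_GETEVENTMSG -- the same mechanism the function above builds
   on.  Function names are hypothetical.  */

#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <signal.h>

static int
extended_event_of_status_sketch (int status)
{
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
    return status >> 16;	/* 0 if this is not an extended event.  */
  return 0;
}

static unsigned long
new_child_of_event_sketch (pid_t pid)
{
  unsigned long msg = 0;

  /* Only meaningful while stopped at a fork/vfork/clone event.  */
  ptrace (PTRACE_GETEVENTMSG, pid, 0, &msg);
  return msg;
}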
9c3a5d93
PA
1995/* Suspend waiting for a signal. We're mostly interested in
1996 SIGCHLD/SIGINT. */
1997
1998static void
1999wait_for_signal ()
2000{
9327494e 2001 linux_nat_debug_printf ("about to sigsuspend");
9c3a5d93
PA
2002 sigsuspend (&suspend_mask);
2003
2004 /* If the quit flag is set, it means that the user pressed Ctrl-C
2005 and we're debugging a process that is running on a separate
2006 terminal, so we must forward the Ctrl-C to the inferior. (If the
2007 inferior is sharing GDB's terminal, then the Ctrl-C reaches the
2008 inferior directly.) We must do this here because functions that
2009 need to block waiting for a signal loop forever until there's an
2010 event to report before returning back to the event loop. */
2011 if (!target_terminal::is_ours ())
2012 {
2013 if (check_quit_flag ())
2014 target_pass_ctrlc ();
2015 }
2016}
2017
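/* Illustrative sketch, not part of linux-nat.c proper: the race-free
   pattern that suspend_mask and sigsuspend implement above.  SIGCHLD
   is kept blocked while we decide whether to sleep; sigsuspend then
   atomically installs a mask with SIGCHLD unblocked and waits, so a
   SIGCHLD arriving in between cannot be lost.  Names here are
   hypothetical and error handling is minimal.  */

#include <signal.h>

static volatile sig_atomic_t got_sigchld_sketch;

static void
sigchld_handler_sketch (int signo)
{
  got_sigchld_sketch = 1;
}

static void
wait_for_sigchld_sketch (void)
{
  sigset_t block_mask, suspend_mask_sketch;

  signal (SIGCHLD, sigchld_handler_sketch);

  sigemptyset (&block_mask);
  sigaddset (&block_mask, SIGCHLD);
  /* Block SIGCHLD; the previous mask becomes the suspend mask.  */
  sigprocmask (SIG_BLOCK, &block_mask, &suspend_mask_sketch);
  sigdelset (&suspend_mask_sketch, SIGCHLD);

  while (!got_sigchld_sketch)
    sigsuspend (&suspend_mask_sketch);	/* Atomically unblock and wait.  */

  sigprocmask (SIG_UNBLOCK, &block_mask, NULL);
}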
d6b0e80f
AC
2018/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2019 exited. */
2020
2021static int
2022wait_lwp (struct lwp_info *lp)
2023{
2024 pid_t pid;
432b4d03 2025 int status = 0;
d6b0e80f 2026 int thread_dead = 0;
432b4d03 2027 sigset_t prev_mask;
d6b0e80f
AC
2028
2029 gdb_assert (!lp->stopped);
2030 gdb_assert (lp->status == 0);
2031
432b4d03
JK
2032 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2033 block_child_signals (&prev_mask);
2034
2035 for (;;)
d6b0e80f 2036 {
e38504b3 2037 pid = my_waitpid (lp->ptid.lwp (), &status, __WALL | WNOHANG);
a9f4bb21
PA
2038 if (pid == -1 && errno == ECHILD)
2039 {
2040 /* The thread has previously exited. We need to delete it
4a6ed09b
PA
2041 now because if this was a non-leader thread execing, we
2042 won't get an exit event. See comments on exec events at
2043 the top of the file. */
a9f4bb21 2044 thread_dead = 1;
9327494e 2045 linux_nat_debug_printf ("%s vanished.",
e53c95d4 2046 lp->ptid.to_string ().c_str ());
a9f4bb21 2047 }
432b4d03
JK
2048 if (pid != 0)
2049 break;
2050
2051 /* Bugs 10970, 12702.
2052 Thread group leader may have exited in which case we'll lock up in
2053 waitpid if there are other threads, even if they are all zombies too.
2054 Basically, we're not supposed to use waitpid this way.
4a6ed09b
PA
2055 tkill(pid,0) cannot be used here as it gets ESRCH for both
2056 for zombie and running processes.
432b4d03
JK
2057
2058 As a workaround, check if we're waiting for the thread group leader and
2059 if it's a zombie, and avoid calling waitpid if it is.
2060
2061 This is racy, what if the tgl becomes a zombie right after we check?
 2062	 Therefore always use WNOHANG with sigsuspend - it is equivalent to
5f572dec 2063	 a blocking waitpid, but linux_proc_pid_is_zombie is safe this way.  */
432b4d03 2064
e38504b3
TT
2065 if (lp->ptid.pid () == lp->ptid.lwp ()
2066 && linux_proc_pid_is_zombie (lp->ptid.lwp ()))
d6b0e80f 2067 {
d6b0e80f 2068 thread_dead = 1;
9327494e 2069 linux_nat_debug_printf ("Thread group leader %s vanished.",
e53c95d4 2070 lp->ptid.to_string ().c_str ());
432b4d03 2071 break;
d6b0e80f 2072 }
432b4d03
JK
2073
2074 /* Wait for next SIGCHLD and try again. This may let SIGCHLD handlers
2075 get invoked despite our caller had them intentionally blocked by
2076 block_child_signals. This is sensitive only to the loop of
2077 linux_nat_wait_1 and there if we get called my_waitpid gets called
2078 again before it gets to sigsuspend so we can safely let the handlers
2079 get executed here. */
9c3a5d93 2080 wait_for_signal ();
432b4d03
JK
2081 }
2082
2083 restore_child_signals_mask (&prev_mask);
2084
d6b0e80f
AC
2085 if (!thread_dead)
2086 {
e38504b3 2087 gdb_assert (pid == lp->ptid.lwp ());
d6b0e80f 2088
9327494e 2089 linux_nat_debug_printf ("waitpid %s received %s",
e53c95d4 2090 lp->ptid.to_string ().c_str (),
8d06918f 2091 status_to_str (status).c_str ());
d6b0e80f 2092
a9f4bb21
PA
2093 /* Check if the thread has exited. */
2094 if (WIFEXITED (status) || WIFSIGNALED (status))
2095 {
aa01bd36 2096 if (report_thread_events
e38504b3 2097 || lp->ptid.pid () == lp->ptid.lwp ())
69dde7dc 2098 {
9327494e 2099 linux_nat_debug_printf ("LWP %d exited.", lp->ptid.pid ());
69dde7dc 2100
aa01bd36 2101 /* If this is the leader exiting, it means the whole
69dde7dc
PA
2102 process is gone. Store the status to report to the
2103 core. Store it in lp->waitstatus, because lp->status
2104 would be ambiguous (W_EXITCODE(0,0) == 0). */
7509b829 2105 lp->waitstatus = host_status_to_waitstatus (status);
69dde7dc
PA
2106 return 0;
2107 }
2108
a9f4bb21 2109 thread_dead = 1;
9327494e 2110 linux_nat_debug_printf ("%s exited.",
e53c95d4 2111 lp->ptid.to_string ().c_str ());
a9f4bb21 2112 }
d6b0e80f
AC
2113 }
2114
2115 if (thread_dead)
2116 {
e26af52f 2117 exit_lwp (lp);
d6b0e80f
AC
2118 return 0;
2119 }
2120
2121 gdb_assert (WIFSTOPPED (status));
8817a6f2 2122 lp->stopped = 1;
d6b0e80f 2123
8784d563
PA
2124 if (lp->must_set_ptrace_flags)
2125 {
5b6d1e4f 2126 inferior *inf = find_inferior_pid (linux_target, lp->ptid.pid ());
de0d863e 2127 int options = linux_nat_ptrace_options (inf->attach_flag);
8784d563 2128
e38504b3 2129 linux_enable_event_reporting (lp->ptid.lwp (), options);
8784d563
PA
2130 lp->must_set_ptrace_flags = 0;
2131 }
2132
ca2163eb
PA
2133 /* Handle GNU/Linux's syscall SIGTRAPs. */
2134 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2135 {
2136 /* No longer need the sysgood bit. The ptrace event ends up
2137 recorded in lp->waitstatus if we care for it. We can carry
2138 on handling the event like a regular SIGTRAP from here
2139 on. */
2140 status = W_STOPCODE (SIGTRAP);
2141 if (linux_handle_syscall_trap (lp, 1))
2142 return wait_lwp (lp);
2143 }
bfd09d20
JS
2144 else
2145 {
2146 /* Almost all other ptrace-stops are known to be outside of system
2147 calls, with further exceptions in linux_handle_extended_wait. */
2148 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2149 }
ca2163eb 2150
d6b0e80f 2151 /* Handle GNU/Linux's extended waitstatus for trace events. */
89a5711c
DB
2152 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2153 && linux_is_extended_waitstatus (status))
d6b0e80f 2154 {
9327494e 2155 linux_nat_debug_printf ("Handling extended status 0x%06x", status);
4dd63d48 2156 linux_handle_extended_wait (lp, status);
20ba1ce6 2157 return 0;
d6b0e80f
AC
2158 }
2159
2160 return status;
2161}
2162
2163/* Send a SIGSTOP to LP. */
2164
2165static int
d3a70e03 2166stop_callback (struct lwp_info *lp)
d6b0e80f
AC
2167{
2168 if (!lp->stopped && !lp->signalled)
2169 {
2170 int ret;
2171
9327494e 2172 linux_nat_debug_printf ("kill %s **<SIGSTOP>**",
e53c95d4 2173 lp->ptid.to_string ().c_str ());
9327494e 2174
d6b0e80f 2175 errno = 0;
e38504b3 2176 ret = kill_lwp (lp->ptid.lwp (), SIGSTOP);
9327494e 2177 linux_nat_debug_printf ("lwp kill %d %s", ret,
d6b0e80f 2178 errno ? safe_strerror (errno) : "ERRNO-OK");
d6b0e80f
AC
2179
2180 lp->signalled = 1;
2181 gdb_assert (lp->status == 0);
2182 }
2183
2184 return 0;
2185}
2186
7b50312a
PA
2187/* Request a stop on LWP. */
2188
2189void
2190linux_stop_lwp (struct lwp_info *lwp)
2191{
d3a70e03 2192 stop_callback (lwp);
7b50312a
PA
2193}
2194
2db9a427
PA
2195/* See linux-nat.h */
2196
2197void
2198linux_stop_and_wait_all_lwps (void)
2199{
2200 /* Stop all LWP's ... */
d3a70e03 2201 iterate_over_lwps (minus_one_ptid, stop_callback);
2db9a427
PA
2202
2203 /* ... and wait until all of them have reported back that
2204 they're no longer running. */
d3a70e03 2205 iterate_over_lwps (minus_one_ptid, stop_wait_callback);
2db9a427
PA
2206}
2207
2208/* See linux-nat.h */
2209
2210void
2211linux_unstop_all_lwps (void)
2212{
2213 iterate_over_lwps (minus_one_ptid,
d3a70e03
TT
2214 [] (struct lwp_info *info)
2215 {
2216 return resume_stopped_resumed_lwps (info, minus_one_ptid);
2217 });
2db9a427
PA
2218}
2219
57380f4e 2220/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2221
2222static int
57380f4e
DJ
2223linux_nat_has_pending_sigint (int pid)
2224{
2225 sigset_t pending, blocked, ignored;
57380f4e
DJ
2226
2227 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2228
2229 if (sigismember (&pending, SIGINT)
2230 && !sigismember (&ignored, SIGINT))
2231 return 1;
2232
2233 return 0;
2234}
2235
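/* Illustrative sketch, not part of linux-nat.c proper: reading a
   thread's pending/ignored signal masks out of /proc/PID/status, which
   is roughly what linux_proc_pending_signals does for the check above.
   The SigPnd/ShdPnd/SigIgn lines carry hexadecimal masks in which bit
   N-1 stands for signal N.  The helper name is hypothetical and error
   handling is minimal.  */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>

static int
proc_sigint_pending_sketch (int pid)
{
  char path[64], line[256];
  unsigned long long pending = 0, shared = 0, ignored = 0;
  FILE *f;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;

  while (fgets (line, sizeof (line), f) != NULL)
    {
      if (strncmp (line, "SigPnd:", 7) == 0)
	pending = strtoull (line + 7, NULL, 16);
      else if (strncmp (line, "ShdPnd:", 7) == 0)
	shared = strtoull (line + 7, NULL, 16);
      else if (strncmp (line, "SigIgn:", 7) == 0)
	ignored = strtoull (line + 7, NULL, 16);
    }
  fclose (f);

  /* SIGINT is signal 2, i.e. bit 1 of the masks.  */
  return ((((pending | shared) & ~ignored) >> (SIGINT - 1)) & 1) != 0;
}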
2236/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2237
2238static int
d3a70e03 2239set_ignore_sigint (struct lwp_info *lp)
d6b0e80f 2240{
57380f4e
DJ
2241 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2242 flag to consume the next one. */
2243 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2244 && WSTOPSIG (lp->status) == SIGINT)
2245 lp->status = 0;
2246 else
2247 lp->ignore_sigint = 1;
2248
2249 return 0;
2250}
2251
2252/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2253 This function is called after we know the LWP has stopped; if the LWP
2254 stopped before the expected SIGINT was delivered, then it will never have
2255 arrived. Also, if the signal was delivered to a shared queue and consumed
2256 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2257
57380f4e
DJ
2258static void
2259maybe_clear_ignore_sigint (struct lwp_info *lp)
2260{
2261 if (!lp->ignore_sigint)
2262 return;
2263
e38504b3 2264 if (!linux_nat_has_pending_sigint (lp->ptid.lwp ()))
57380f4e 2265 {
9327494e 2266 linux_nat_debug_printf ("Clearing bogus flag for %s",
e53c95d4 2267 lp->ptid.to_string ().c_str ());
57380f4e
DJ
2268 lp->ignore_sigint = 0;
2269 }
2270}
2271
ebec9a0f
PA
2272/* Fetch the possible triggered data watchpoint info and store it in
2273 LP.
2274
2275 On some archs, like x86, that use debug registers to set
2276 watchpoints, it's possible that the way to know which watched
2277 address trapped, is to check the register that is used to select
2278 which address to watch. Problem is, between setting the watchpoint
2279 and reading back which data address trapped, the user may change
2280 the set of watchpoints, and, as a consequence, GDB changes the
2281 debug registers in the inferior. To avoid reading back a stale
2282 stopped-data-address when that happens, we cache in LP the fact
2283 that a watchpoint trapped, and the corresponding data address, as
2284 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2285 registers meanwhile, we have the cached data we can rely on. */
2286
9c02b525
PA
2287static int
2288check_stopped_by_watchpoint (struct lwp_info *lp)
ebec9a0f 2289{
2989a365 2290 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
ebec9a0f
PA
2291 inferior_ptid = lp->ptid;
2292
f6ac5f3d 2293 if (linux_target->low_stopped_by_watchpoint ())
ebec9a0f 2294 {
15c66dd6 2295 lp->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
f6ac5f3d
PA
2296 lp->stopped_data_address_p
2297 = linux_target->low_stopped_data_address (&lp->stopped_data_address);
ebec9a0f
PA
2298 }
2299
15c66dd6 2300 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
9c02b525
PA
2301}
2302
9c02b525 2303/* Returns true if the LWP had stopped for a watchpoint. */
ebec9a0f 2304
57810aa7 2305bool
f6ac5f3d 2306linux_nat_target::stopped_by_watchpoint ()
ebec9a0f
PA
2307{
2308 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2309
2310 gdb_assert (lp != NULL);
2311
15c66dd6 2312 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
ebec9a0f
PA
2313}
2314
57810aa7 2315bool
f6ac5f3d 2316linux_nat_target::stopped_data_address (CORE_ADDR *addr_p)
ebec9a0f
PA
2317{
2318 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2319
2320 gdb_assert (lp != NULL);
2321
2322 *addr_p = lp->stopped_data_address;
2323
2324 return lp->stopped_data_address_p;
2325}
2326
26ab7092
JK
2327/* Commonly any breakpoint / watchpoint generate only SIGTRAP. */
2328
135340af
PA
2329bool
2330linux_nat_target::low_status_is_event (int status)
26ab7092
JK
2331{
2332 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2333}
2334
57380f4e
DJ
2335/* Wait until LP is stopped. */
2336
2337static int
d3a70e03 2338stop_wait_callback (struct lwp_info *lp)
57380f4e 2339{
5b6d1e4f 2340 inferior *inf = find_inferior_ptid (linux_target, lp->ptid);
6c95b8df
PA
2341
2342 /* If this is a vfork parent, bail out, it is not going to report
2343 any SIGSTOP until the vfork is done with. */
2344 if (inf->vfork_child != NULL)
2345 return 0;
2346
d6b0e80f
AC
2347 if (!lp->stopped)
2348 {
2349 int status;
2350
2351 status = wait_lwp (lp);
2352 if (status == 0)
2353 return 0;
2354
57380f4e
DJ
2355 if (lp->ignore_sigint && WIFSTOPPED (status)
2356 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2357 {
57380f4e 2358 lp->ignore_sigint = 0;
d6b0e80f
AC
2359
2360 errno = 0;
e38504b3 2361 ptrace (PTRACE_CONT, lp->ptid.lwp (), 0, 0);
8817a6f2 2362 lp->stopped = 0;
9327494e
SM
2363 linux_nat_debug_printf
2364 ("PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)",
e53c95d4 2365 lp->ptid.to_string ().c_str (),
9327494e 2366 errno ? safe_strerror (errno) : "OK");
d6b0e80f 2367
d3a70e03 2368 return stop_wait_callback (lp);
d6b0e80f
AC
2369 }
2370
57380f4e
DJ
2371 maybe_clear_ignore_sigint (lp);
2372
d6b0e80f
AC
2373 if (WSTOPSIG (status) != SIGSTOP)
2374 {
e5ef252a 2375 /* The thread was stopped with a signal other than SIGSTOP. */
7feb7d06 2376
9327494e 2377 linux_nat_debug_printf ("Pending event %s in %s",
8d06918f 2378 status_to_str ((int) status).c_str (),
e53c95d4 2379 lp->ptid.to_string ().c_str ());
e5ef252a
PA
2380
2381 /* Save the sigtrap event. */
2382 lp->status = status;
e5ef252a 2383 gdb_assert (lp->signalled);
e7ad2f14 2384 save_stop_reason (lp);
d6b0e80f
AC
2385 }
2386 else
2387 {
7010835a 2388 /* We caught the SIGSTOP that we intended to catch. */
e5ef252a 2389
9327494e 2390 linux_nat_debug_printf ("Expected SIGSTOP caught for %s.",
e53c95d4 2391 lp->ptid.to_string ().c_str ());
e5ef252a 2392
d6b0e80f 2393 lp->signalled = 0;
7010835a
AB
2394
2395 /* If we are waiting for this stop so we can report the thread
2396 stopped then we need to record this status. Otherwise, we can
2397 now discard this stop event. */
2398 if (lp->last_resume_kind == resume_stop)
2399 {
2400 lp->status = status;
2401 save_stop_reason (lp);
2402 }
d6b0e80f
AC
2403 }
2404 }
2405
2406 return 0;
2407}
2408
9c02b525
PA
2409/* Return non-zero if LP has a wait status pending. Discard the
2410 pending event and resume the LWP if the event that originally
2411 caused the stop became uninteresting. */
d6b0e80f
AC
2412
2413static int
d3a70e03 2414status_callback (struct lwp_info *lp)
d6b0e80f
AC
2415{
2416 /* Only report a pending wait status if we pretend that this has
2417 indeed been resumed. */
ca2163eb
PA
2418 if (!lp->resumed)
2419 return 0;
2420
eb54c8bf
PA
2421 if (!lwp_status_pending_p (lp))
2422 return 0;
2423
15c66dd6
PA
2424 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
2425 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
9c02b525 2426 {
5b6d1e4f 2427 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
9c02b525
PA
2428 CORE_ADDR pc;
2429 int discard = 0;
2430
9c02b525
PA
2431 pc = regcache_read_pc (regcache);
2432
2433 if (pc != lp->stop_pc)
2434 {
9327494e 2435 linux_nat_debug_printf ("PC of %s changed. was=%s, now=%s",
e53c95d4 2436 lp->ptid.to_string ().c_str (),
9327494e
SM
2437 paddress (target_gdbarch (), lp->stop_pc),
2438 paddress (target_gdbarch (), pc));
9c02b525
PA
2439 discard = 1;
2440 }
faf09f01
PA
2441
2442#if !USE_SIGTRAP_SIGINFO
a01bda52 2443 else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
9c02b525 2444 {
9327494e 2445 linux_nat_debug_printf ("previous breakpoint of %s, at %s gone",
e53c95d4 2446 lp->ptid.to_string ().c_str (),
9327494e 2447 paddress (target_gdbarch (), lp->stop_pc));
9c02b525
PA
2448
2449 discard = 1;
2450 }
faf09f01 2451#endif
9c02b525
PA
2452
2453 if (discard)
2454 {
9327494e 2455 linux_nat_debug_printf ("pending event of %s cancelled.",
e53c95d4 2456 lp->ptid.to_string ().c_str ());
9c02b525
PA
2457
2458 lp->status = 0;
2459 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
2460 return 0;
2461 }
9c02b525
PA
2462 }
2463
eb54c8bf 2464 return 1;
d6b0e80f
AC
2465}
2466
d6b0e80f
AC
2467/* Count the LWP's that have had events. */
2468
2469static int
d3a70e03 2470count_events_callback (struct lwp_info *lp, int *count)
d6b0e80f 2471{
d6b0e80f
AC
2472 gdb_assert (count != NULL);
2473
9c02b525
PA
2474 /* Select only resumed LWPs that have an event pending. */
2475 if (lp->resumed && lwp_status_pending_p (lp))
d6b0e80f
AC
2476 (*count)++;
2477
2478 return 0;
2479}
2480
2481/* Select the LWP (if any) that is currently being single-stepped. */
2482
2483static int
d3a70e03 2484select_singlestep_lwp_callback (struct lwp_info *lp)
d6b0e80f 2485{
25289eb2
PA
2486 if (lp->last_resume_kind == resume_step
2487 && lp->status != 0)
d6b0e80f
AC
2488 return 1;
2489 else
2490 return 0;
2491}
2492
8a99810d
PA
2493/* Returns true if LP has a status pending. */
2494
2495static int
2496lwp_status_pending_p (struct lwp_info *lp)
2497{
2498 /* We check for lp->waitstatus in addition to lp->status, because we
2499 can have pending process exits recorded in lp->status and
2500 W_EXITCODE(0,0) happens to be 0. */
183be222 2501 return lp->status != 0 || lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE;
8a99810d
PA
2502}
2503
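/* Illustrative sketch, not part of linux-nat.c proper: why a raw wait
   status of zero is ambiguous, which is what the lp->waitstatus check
   above works around.  W_EXITCODE (0, 0) is 0, and WIFEXITED (0) is
   true, so "no event recorded" and "exited normally with status 0"
   would collide if only the integer status were kept.  */

#include <assert.h>
#include <sys/wait.h>

#ifndef W_EXITCODE
# define W_EXITCODE(ret, sig) ((ret) << 8 | (sig))
#endif

static void
wait_status_zero_is_ambiguous_sketch (void)
{
  int status = W_EXITCODE (0, 0);

  assert (status == 0);
  assert (WIFEXITED (status) && WEXITSTATUS (status) == 0);
}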
b90fc188 2504/* Select the Nth LWP that has had an event. */
d6b0e80f
AC
2505
2506static int
d3a70e03 2507select_event_lwp_callback (struct lwp_info *lp, int *selector)
d6b0e80f 2508{
d6b0e80f
AC
2509 gdb_assert (selector != NULL);
2510
9c02b525
PA
2511 /* Select only resumed LWPs that have an event pending. */
2512 if (lp->resumed && lwp_status_pending_p (lp))
d6b0e80f
AC
2513 if ((*selector)-- == 0)
2514 return 1;
2515
2516 return 0;
2517}
2518
e7ad2f14
PA
2519/* Called when the LWP stopped for a signal/trap. If it stopped for a
2520 trap check what caused it (breakpoint, watchpoint, trace, etc.),
2521 and save the result in the LWP's stop_reason field. If it stopped
2522 for a breakpoint, decrement the PC if necessary on the lwp's
2523 architecture. */
9c02b525 2524
e7ad2f14
PA
2525static void
2526save_stop_reason (struct lwp_info *lp)
710151dd 2527{
e7ad2f14
PA
2528 struct regcache *regcache;
2529 struct gdbarch *gdbarch;
515630c5 2530 CORE_ADDR pc;
9c02b525 2531 CORE_ADDR sw_bp_pc;
faf09f01
PA
2532#if USE_SIGTRAP_SIGINFO
2533 siginfo_t siginfo;
2534#endif
9c02b525 2535
e7ad2f14
PA
2536 gdb_assert (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON);
2537 gdb_assert (lp->status != 0);
2538
135340af 2539 if (!linux_target->low_status_is_event (lp->status))
e7ad2f14
PA
2540 return;
2541
a9deee17
PA
2542 inferior *inf = find_inferior_ptid (linux_target, lp->ptid);
2543 if (inf->starting_up)
2544 return;
2545
5b6d1e4f 2546 regcache = get_thread_regcache (linux_target, lp->ptid);
ac7936df 2547 gdbarch = regcache->arch ();
e7ad2f14 2548
9c02b525 2549 pc = regcache_read_pc (regcache);
527a273a 2550 sw_bp_pc = pc - gdbarch_decr_pc_after_break (gdbarch);
515630c5 2551
faf09f01
PA
2552#if USE_SIGTRAP_SIGINFO
2553 if (linux_nat_get_siginfo (lp->ptid, &siginfo))
2554 {
2555 if (siginfo.si_signo == SIGTRAP)
2556 {
e7ad2f14
PA
2557 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
2558 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
faf09f01 2559 {
e7ad2f14
PA
2560 /* The si_code is ambiguous on this arch -- check debug
2561 registers. */
2562 if (!check_stopped_by_watchpoint (lp))
2563 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
2564 }
2565 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
2566 {
2567 /* If we determine the LWP stopped for a SW breakpoint,
2568 trust it. Particularly don't check watchpoint
7da6a5b9 2569 registers, because, at least on s390, we'd find
e7ad2f14
PA
2570 stopped-by-watchpoint as long as there's a watchpoint
2571 set. */
faf09f01 2572 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
faf09f01 2573 }
e7ad2f14 2574 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
faf09f01 2575 {
e7ad2f14
PA
2576 /* This can indicate either a hardware breakpoint or
2577 hardware watchpoint. Check debug registers. */
2578 if (!check_stopped_by_watchpoint (lp))
2579 lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
faf09f01 2580 }
2bf6fb9d
PA
2581 else if (siginfo.si_code == TRAP_TRACE)
2582 {
9327494e 2583 linux_nat_debug_printf ("%s stopped by trace",
e53c95d4 2584 lp->ptid.to_string ().c_str ());
e7ad2f14
PA
2585
2586 /* We may have single stepped an instruction that
2587 triggered a watchpoint. In that case, on some
2588 architectures (such as x86), instead of TRAP_HWBKPT,
2589 si_code indicates TRAP_TRACE, and we need to check
2590 the debug registers separately. */
2591 check_stopped_by_watchpoint (lp);
2bf6fb9d 2592 }
faf09f01
PA
2593 }
2594 }
2595#else
9c02b525 2596 if ((!lp->step || lp->stop_pc == sw_bp_pc)
a01bda52 2597 && software_breakpoint_inserted_here_p (regcache->aspace (),
9c02b525 2598 sw_bp_pc))
710151dd 2599 {
9c02b525
PA
2600 /* The LWP was either continued, or stepped a software
2601 breakpoint instruction. */
e7ad2f14
PA
2602 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
2603 }
2604
a01bda52 2605 if (hardware_breakpoint_inserted_here_p (regcache->aspace (), pc))
e7ad2f14
PA
2606 lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
2607
2608 if (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
2609 check_stopped_by_watchpoint (lp);
2610#endif
2611
2612 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2613 {
9327494e 2614 linux_nat_debug_printf ("%s stopped by software breakpoint",
e53c95d4 2615 lp->ptid.to_string ().c_str ());
710151dd
PA
2616
2617 /* Back up the PC if necessary. */
9c02b525
PA
2618 if (pc != sw_bp_pc)
2619 regcache_write_pc (regcache, sw_bp_pc);
515630c5 2620
e7ad2f14
PA
2621 /* Update this so we record the correct stop PC below. */
2622 pc = sw_bp_pc;
710151dd 2623 }
e7ad2f14 2624 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
9c02b525 2625 {
9327494e 2626 linux_nat_debug_printf ("%s stopped by hardware breakpoint",
e53c95d4 2627 lp->ptid.to_string ().c_str ());
e7ad2f14
PA
2628 }
2629 else if (lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
2630 {
9327494e 2631 linux_nat_debug_printf ("%s stopped by hardware watchpoint",
e53c95d4 2632 lp->ptid.to_string ().c_str ());
9c02b525 2633 }
d6b0e80f 2634
e7ad2f14 2635 lp->stop_pc = pc;
d6b0e80f
AC
2636}
2637
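/* Illustrative sketch, not part of linux-nat.c proper: fetching the
   pending siginfo of a stopped tracee and classifying a SIGTRAP by its
   si_code, which is the basis of the USE_SIGTRAP_SIGINFO path above.
   TRAP_HWBKPT is Linux-specific and may not be defined on older
   systems; the function name is hypothetical.  */

#include <sys/types.h>
#include <sys/ptrace.h>
#include <signal.h>
#include <stdio.h>

static void
classify_sigtrap_sketch (pid_t tid)
{
  siginfo_t si;

  if (ptrace (PTRACE_GETSIGINFO, tid, 0, &si) != 0 || si.si_signo != SIGTRAP)
    return;

  if (si.si_code == TRAP_BRKPT)
    printf ("stopped by software breakpoint\n");
  else if (si.si_code == TRAP_TRACE)
    printf ("stopped by single-step\n");
#ifdef TRAP_HWBKPT
  else if (si.si_code == TRAP_HWBKPT)
    printf ("stopped by hardware breakpoint/watchpoint\n");
#endif
}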
faf09f01
PA
2638
2639/* Returns true if the LWP had stopped for a software breakpoint. */
2640
57810aa7 2641bool
f6ac5f3d 2642linux_nat_target::stopped_by_sw_breakpoint ()
faf09f01
PA
2643{
2644 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2645
2646 gdb_assert (lp != NULL);
2647
2648 return lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2649}
2650
2651/* Implement the supports_stopped_by_sw_breakpoint method. */
2652
57810aa7 2653bool
f6ac5f3d 2654linux_nat_target::supports_stopped_by_sw_breakpoint ()
faf09f01
PA
2655{
2656 return USE_SIGTRAP_SIGINFO;
2657}
2658
2659/* Returns true if the LWP had stopped for a hardware
2660 breakpoint/watchpoint. */
2661
57810aa7 2662bool
f6ac5f3d 2663linux_nat_target::stopped_by_hw_breakpoint ()
faf09f01
PA
2664{
2665 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2666
2667 gdb_assert (lp != NULL);
2668
2669 return lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2670}
2671
2672/* Implement the supports_stopped_by_hw_breakpoint method. */
2673
57810aa7 2674bool
f6ac5f3d 2675linux_nat_target::supports_stopped_by_hw_breakpoint ()
faf09f01
PA
2676{
2677 return USE_SIGTRAP_SIGINFO;
2678}
2679
d6b0e80f
AC
2680/* Select one LWP out of those that have events pending. */
2681
2682static void
d90e17a7 2683select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
d6b0e80f
AC
2684{
2685 int num_events = 0;
2686 int random_selector;
9c02b525 2687 struct lwp_info *event_lp = NULL;
d6b0e80f 2688
ac264b3b 2689 /* Record the wait status for the original LWP. */
d6b0e80f
AC
2690 (*orig_lp)->status = *status;
2691
9c02b525
PA
2692 /* In all-stop, give preference to the LWP that is being
2693 single-stepped. There will be at most one, and it will be the
2694 LWP that the core is most interested in. If we didn't do this,
2695 then we'd have to handle pending step SIGTRAPs somehow in case
2696 the core later continues the previously-stepped thread, as
2697 otherwise we'd report the pending SIGTRAP then, and the core, not
2698 having stepped the thread, wouldn't understand what the trap was
2699 for, and therefore would report it to the user as a random
2700 signal. */
fbea99ea 2701 if (!target_is_non_stop_p ())
d6b0e80f 2702 {
d3a70e03 2703 event_lp = iterate_over_lwps (filter, select_singlestep_lwp_callback);
9c02b525
PA
2704 if (event_lp != NULL)
2705 {
9327494e 2706 linux_nat_debug_printf ("Select single-step %s",
e53c95d4 2707 event_lp->ptid.to_string ().c_str ());
9c02b525 2708 }
d6b0e80f 2709 }
9c02b525
PA
2710
2711 if (event_lp == NULL)
d6b0e80f 2712 {
9c02b525 2713 /* Pick one at random, out of those which have had events. */
d6b0e80f 2714
9c02b525 2715 /* First see how many events we have. */
d3a70e03
TT
2716 iterate_over_lwps (filter,
2717 [&] (struct lwp_info *info)
2718 {
2719 return count_events_callback (info, &num_events);
2720 });
8bf3b159 2721 gdb_assert (num_events > 0);
d6b0e80f 2722
9c02b525
PA
2723 /* Now randomly pick a LWP out of those that have had
2724 events. */
d6b0e80f
AC
2725 random_selector = (int)
2726 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2727
9327494e
SM
2728 if (num_events > 1)
2729 linux_nat_debug_printf ("Found %d events, selecting #%d",
2730 num_events, random_selector);
d6b0e80f 2731
d3a70e03
TT
2732 event_lp
2733 = (iterate_over_lwps
2734 (filter,
2735 [&] (struct lwp_info *info)
2736 {
2737 return select_event_lwp_callback (info,
2738 &random_selector);
2739 }));
d6b0e80f
AC
2740 }
2741
2742 if (event_lp != NULL)
2743 {
2744 /* Switch the event LWP. */
2745 *orig_lp = event_lp;
2746 *status = event_lp->status;
2747 }
2748
2749 /* Flush the wait status for the event LWP. */
2750 (*orig_lp)->status = 0;
2751}
2752
2753/* Return non-zero if LP has been resumed. */
2754
2755static int
d3a70e03 2756resumed_callback (struct lwp_info *lp)
d6b0e80f
AC
2757{
2758 return lp->resumed;
2759}
2760
02f3fc28 2761/* Check if we should go on and pass this event to common code.
12d9289a 2762
897608ed
SM
2763 If so, save the status to the lwp_info structure associated to LWPID. */
2764
2765static void
9c02b525 2766linux_nat_filter_event (int lwpid, int status)
02f3fc28
PA
2767{
2768 struct lwp_info *lp;
89a5711c 2769 int event = linux_ptrace_get_extended_event (status);
02f3fc28 2770
f2907e49 2771 lp = find_lwp_pid (ptid_t (lwpid));
02f3fc28 2772
1abeb1e9
PA
2773 /* Check for events reported by anything not in our LWP list. */
2774 if (lp == nullptr)
0e5bf2a8 2775 {
1abeb1e9
PA
2776 if (WIFSTOPPED (status))
2777 {
2778 if (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC)
2779 {
2780 /* A non-leader thread exec'ed after we've seen the
2781 leader zombie, and removed it from our lists (in
2782 check_zombie_leaders). The non-leader thread changes
2783 its tid to the tgid. */
2784 linux_nat_debug_printf
2785 ("Re-adding thread group leader LWP %d after exec.",
2786 lwpid);
0e5bf2a8 2787
1abeb1e9
PA
2788 lp = add_lwp (ptid_t (lwpid, lwpid));
2789 lp->stopped = 1;
2790 lp->resumed = 1;
2791 add_thread (linux_target, lp->ptid);
2792 }
2793 else
2794 {
2795 /* A process we are controlling has forked and the new
2796 child's stop was reported to us by the kernel. Save
2797 its PID and go back to waiting for the fork event to
2798 be reported - the stopped process might be returned
2799 from waitpid before or after the fork event is. */
2800 linux_nat_debug_printf
2801 ("Saving LWP %d status %s in stopped_pids list",
2802 lwpid, status_to_str (status).c_str ());
2803 add_to_pid_list (&stopped_pids, lwpid, status);
2804 }
2805 }
2806 else
2807 {
2808 /* Don't report an event for the exit of an LWP not in our
2809 list, i.e. not part of any inferior we're debugging.
2810 This can happen if we detach from a program we originally
6cf20c46
PA
2811 forked and then it exits. However, note that we may have
2812 earlier deleted a leader of an inferior we're debugging,
2813 in check_zombie_leaders. Re-add it back here if so. */
2814 for (inferior *inf : all_inferiors (linux_target))
2815 {
2816 if (inf->pid == lwpid)
2817 {
2818 linux_nat_debug_printf
2819 ("Re-adding thread group leader LWP %d after exit.",
2820 lwpid);
2821
2822 lp = add_lwp (ptid_t (lwpid, lwpid));
2823 lp->resumed = 1;
2824 add_thread (linux_target, lp->ptid);
2825 break;
2826 }
2827 }
1abeb1e9 2828 }
0e5bf2a8 2829
1abeb1e9
PA
2830 if (lp == nullptr)
2831 return;
02f3fc28
PA
2832 }
2833
8817a6f2
PA
2834 /* This LWP is stopped now. (And if dead, this prevents it from
2835 ever being continued.) */
2836 lp->stopped = 1;
2837
8784d563
PA
2838 if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
2839 {
5b6d1e4f 2840 inferior *inf = find_inferior_pid (linux_target, lp->ptid.pid ());
de0d863e 2841 int options = linux_nat_ptrace_options (inf->attach_flag);
8784d563 2842
e38504b3 2843 linux_enable_event_reporting (lp->ptid.lwp (), options);
8784d563
PA
2844 lp->must_set_ptrace_flags = 0;
2845 }
2846
ca2163eb
PA
2847 /* Handle GNU/Linux's syscall SIGTRAPs. */
2848 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2849 {
2850 /* No longer need the sysgood bit. The ptrace event ends up
2851 recorded in lp->waitstatus if we care for it. We can carry
2852 on handling the event like a regular SIGTRAP from here
2853 on. */
2854 status = W_STOPCODE (SIGTRAP);
2855 if (linux_handle_syscall_trap (lp, 0))
897608ed 2856 return;
ca2163eb 2857 }
bfd09d20
JS
2858 else
2859 {
2860 /* Almost all other ptrace-stops are known to be outside of system
2861 calls, with further exceptions in linux_handle_extended_wait. */
2862 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2863 }
02f3fc28 2864
ca2163eb 2865 /* Handle GNU/Linux's extended waitstatus for trace events. */
89a5711c
DB
2866 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2867 && linux_is_extended_waitstatus (status))
02f3fc28 2868 {
9327494e
SM
2869 linux_nat_debug_printf ("Handling extended status 0x%06x", status);
2870
4dd63d48 2871 if (linux_handle_extended_wait (lp, status))
897608ed 2872 return;
02f3fc28
PA
2873 }
2874
2875 /* Check if the thread has exited. */
9c02b525
PA
2876 if (WIFEXITED (status) || WIFSIGNALED (status))
2877 {
6cf20c46 2878 if (!report_thread_events && !is_leader (lp))
02f3fc28 2879 {
9327494e 2880 linux_nat_debug_printf ("%s exited.",
e53c95d4 2881 lp->ptid.to_string ().c_str ());
9c02b525 2882
6cf20c46 2883 /* If this was not the leader exiting, then the exit signal
4a6ed09b
PA
2884 was not the end of the debugged application and should be
2885 ignored. */
2886 exit_lwp (lp);
897608ed 2887 return;
02f3fc28
PA
2888 }
2889
77598427
PA
2890 /* Note that even if the leader was ptrace-stopped, it can still
2891 exit, if e.g., some other thread brings down the whole
2892 process (calls `exit'). So don't assert that the lwp is
2893 resumed. */
9327494e
SM
2894 linux_nat_debug_printf ("LWP %ld exited (resumed=%d)",
2895 lp->ptid.lwp (), lp->resumed);
02f3fc28 2896
9c02b525
PA
 2897      /* Dead LWPs aren't expected to report a pending sigstop.  */
2898 lp->signalled = 0;
2899
2900 /* Store the pending event in the waitstatus, because
2901 W_EXITCODE(0,0) == 0. */
7509b829 2902 lp->waitstatus = host_status_to_waitstatus (status);
897608ed 2903 return;
02f3fc28
PA
2904 }
2905
02f3fc28
PA
2906 /* Make sure we don't report a SIGSTOP that we sent ourselves in
2907 an attempt to stop an LWP. */
2908 if (lp->signalled
2909 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2910 {
02f3fc28
PA
2911 lp->signalled = 0;
2912
2bf6fb9d 2913 if (lp->last_resume_kind == resume_stop)
25289eb2 2914 {
9327494e 2915 linux_nat_debug_printf ("resume_stop SIGSTOP caught for %s.",
e53c95d4 2916 lp->ptid.to_string ().c_str ());
2bf6fb9d
PA
2917 }
2918 else
2919 {
2920 /* This is a delayed SIGSTOP. Filter out the event. */
02f3fc28 2921
9327494e
SM
2922 linux_nat_debug_printf
2923 ("%s %s, 0, 0 (discard delayed SIGSTOP)",
2924 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
e53c95d4 2925 lp->ptid.to_string ().c_str ());
02f3fc28 2926
2bf6fb9d 2927 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
25289eb2 2928 gdb_assert (lp->resumed);
897608ed 2929 return;
25289eb2 2930 }
02f3fc28
PA
2931 }
2932
57380f4e
DJ
2933 /* Make sure we don't report a SIGINT that we have already displayed
2934 for another thread. */
2935 if (lp->ignore_sigint
2936 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
2937 {
9327494e 2938 linux_nat_debug_printf ("Delayed SIGINT caught for %s.",
e53c95d4 2939 lp->ptid.to_string ().c_str ());
57380f4e
DJ
2940
2941 /* This is a delayed SIGINT. */
2942 lp->ignore_sigint = 0;
2943
8a99810d 2944 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
9327494e
SM
2945 linux_nat_debug_printf ("%s %s, 0, 0 (discard SIGINT)",
2946 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
e53c95d4 2947 lp->ptid.to_string ().c_str ());
57380f4e
DJ
2948 gdb_assert (lp->resumed);
2949
2950 /* Discard the event. */
897608ed 2951 return;
57380f4e
DJ
2952 }
2953
9c02b525
PA
2954 /* Don't report signals that GDB isn't interested in, such as
2955 signals that are neither printed nor stopped upon. Stopping all
7da6a5b9 2956 threads can be a bit time-consuming, so if we want decent
9c02b525
PA
2957 performance with heavily multi-threaded programs, especially when
2958 they're using a high frequency timer, we'd better avoid it if we
2959 can. */
2960 if (WIFSTOPPED (status))
2961 {
2962 enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
2963
fbea99ea 2964 if (!target_is_non_stop_p ())
9c02b525
PA
2965 {
2966 /* Only do the below in all-stop, as we currently use SIGSTOP
2967 to implement target_stop (see linux_nat_stop) in
2968 non-stop. */
2969 if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
2970 {
2971 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
2972 forwarded to the entire process group, that is, all LWPs
2973 will receive it - unless they're using CLONE_THREAD to
2974 share signals. Since we only want to report it once, we
2975 mark it as ignored for all LWPs except this one. */
d3a70e03 2976 iterate_over_lwps (ptid_t (lp->ptid.pid ()), set_ignore_sigint);
9c02b525
PA
2977 lp->ignore_sigint = 0;
2978 }
2979 else
2980 maybe_clear_ignore_sigint (lp);
2981 }
2982
2983 /* When using hardware single-step, we need to report every signal.
c9587f88 2984 Otherwise, signals in pass_mask may be short-circuited
d8c06f22
AB
2985 except signals that might be caused by a breakpoint, or SIGSTOP
2986 if we sent the SIGSTOP and are waiting for it to arrive. */
9c02b525 2987 if (!lp->step
c9587f88 2988 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status))
d8c06f22 2989 && (WSTOPSIG (status) != SIGSTOP
5b6d1e4f 2990 || !find_thread_ptid (linux_target, lp->ptid)->stop_requested)
c9587f88 2991 && !linux_wstatus_maybe_breakpoint (status))
9c02b525
PA
2992 {
2993 linux_resume_one_lwp (lp, lp->step, signo);
9327494e
SM
2994 linux_nat_debug_printf
2995 ("%s %s, %s (preempt 'handle')",
2996 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
e53c95d4 2997 lp->ptid.to_string ().c_str (),
9327494e
SM
2998 (signo != GDB_SIGNAL_0
2999 ? strsignal (gdb_signal_to_host (signo)) : "0"));
897608ed 3000 return;
9c02b525
PA
3001 }
3002 }
3003
02f3fc28
PA
3004 /* An interesting event. */
3005 gdb_assert (lp);
ca2163eb 3006 lp->status = status;
e7ad2f14 3007 save_stop_reason (lp);
02f3fc28
PA
3008}
3009
0e5bf2a8
PA
3010/* Detect zombie thread group leaders, and "exit" them. We can't reap
3011 their exits until all other threads in the group have exited. */
3012
3013static void
3014check_zombie_leaders (void)
3015{
08036331 3016 for (inferior *inf : all_inferiors ())
0e5bf2a8
PA
3017 {
3018 struct lwp_info *leader_lp;
3019
3020 if (inf->pid == 0)
3021 continue;
3022
f2907e49 3023 leader_lp = find_lwp_pid (ptid_t (inf->pid));
0e5bf2a8
PA
3024 if (leader_lp != NULL
3025 /* Check if there are other threads in the group, as we may
6cf20c46
PA
3026 have raced with the inferior simply exiting. Note this
3027 isn't a watertight check. If the inferior is
3028 multi-threaded and is exiting, it may be we see the
3029 leader as zombie before we reap all the non-leader
3030 threads. See comments below. */
0e5bf2a8 3031 && num_lwps (inf->pid) > 1
5f572dec 3032 && linux_proc_pid_is_zombie (inf->pid))
0e5bf2a8 3033 {
6cf20c46
PA
3034 /* A zombie leader in a multi-threaded program can mean one
3035 of three things:
3036
3037 #1 - Only the leader exited, not the whole program, e.g.,
3038 with pthread_exit. Since we can't reap the leader's exit
3039 status until all other threads are gone and reaped too,
3040 we want to delete the zombie leader right away, as it
3041 can't be debugged, we can't read its registers, etc.
3042 This is the main reason we check for zombie leaders
3043 disappearing.
3044
3045 #2 - The whole thread-group/process exited (a group exit,
 3046	     via e.g. exit(3)), and there is (or will be shortly) an
3047 exit reported for each thread in the process, and then
3048 finally an exit for the leader once the non-leaders are
3049 reaped.
3050
3051 #3 - There are 3 or more threads in the group, and a
3052 thread other than the leader exec'd. See comments on
3053 exec events at the top of the file.
3054
3055 Ideally we would never delete the leader for case #2.
3056 Instead, we want to collect the exit status of each
3057 non-leader thread, and then finally collect the exit
3058 status of the leader as normal and use its exit code as
3059 whole-process exit code. Unfortunately, there's no
3060 race-free way to distinguish cases #1 and #2. We can't
3061 assume the exit events for the non-leaders threads are
3062 already pending in the kernel, nor can we assume the
3063 non-leader threads are in zombie state already. Between
3064 the leader becoming zombie and the non-leaders exiting
3065 and becoming zombie themselves, there's a small time
3066 window, so such a check would be racy. Temporarily
3067 pausing all threads and checking to see if all threads
3068 exit or not before re-resuming them would work in the
3069 case that all threads are running right now, but it
3070 wouldn't work if some thread is currently already
3071 ptrace-stopped, e.g., due to scheduler-locking.
3072
3073 So what we do is we delete the leader anyhow, and then
3074 later on when we see its exit status, we re-add it back.
3075 We also make sure that we only report a whole-process
3076 exit when we see the leader exiting, as opposed to when
3077 the last LWP in the LWP list exits, which can be a
3078 non-leader if we deleted the leader here. */
9327494e 3079 linux_nat_debug_printf ("Thread group leader %d zombie "
6cf20c46
PA
3080 "(it exited, or another thread execd), "
3081 "deleting it.",
9327494e 3082 inf->pid);
0e5bf2a8
PA
3083 exit_lwp (leader_lp);
3084 }
3085 }
3086}
3087
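/* Illustrative sketch, not part of linux-nat.c proper: how a zombie
   thread-group leader can be detected without reaping it, similar in
   spirit to the linux_proc_pid_is_zombie check used above.  The State:
   line of /proc/PID/status reads "Z (zombie)" for zombies.  The helper
   name is hypothetical and error handling is minimal.  */

#include <stdio.h>
#include <string.h>

static int
pid_is_zombie_sketch (int pid)
{
  char path[64], line[256];
  FILE *f;
  int zombie = 0;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;

  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	char state = 0;

	sscanf (line + 6, " %c", &state);
	zombie = (state == 'Z');
	break;
      }

  fclose (f);
  return zombie;
}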
aa01bd36
PA
3088/* Convenience function that is called when the kernel reports an exit
3089 event. This decides whether to report the event to GDB as a
3090 process exit event, a thread exit event, or to suppress the
3091 event. */
3092
3093static ptid_t
3094filter_exit_event (struct lwp_info *event_child,
3095 struct target_waitstatus *ourstatus)
3096{
3097 ptid_t ptid = event_child->ptid;
3098
6cf20c46 3099 if (!is_leader (event_child))
aa01bd36
PA
3100 {
3101 if (report_thread_events)
183be222 3102 ourstatus->set_thread_exited (0);
aa01bd36 3103 else
183be222 3104 ourstatus->set_ignore ();
aa01bd36
PA
3105
3106 exit_lwp (event_child);
3107 }
3108
3109 return ptid;
3110}
3111
d6b0e80f 3112static ptid_t
f6ac5f3d 3113linux_nat_wait_1 (ptid_t ptid, struct target_waitstatus *ourstatus,
b60cea74 3114 target_wait_flags target_options)
d6b0e80f 3115{
fc9b8e47 3116 sigset_t prev_mask;
4b60df3d 3117 enum resume_kind last_resume_kind;
12d9289a 3118 struct lwp_info *lp;
12d9289a 3119 int status;
d6b0e80f 3120
9327494e 3121 linux_nat_debug_printf ("enter");
b84876c2 3122
f973ed9c
DJ
3123 /* The first time we get here after starting a new inferior, we may
3124 not have added it to the LWP list yet - this is the earliest
3125 moment at which we know its PID. */
677c92fe 3126 if (ptid.is_pid () && find_lwp_pid (ptid) == nullptr)
f973ed9c 3127 {
677c92fe 3128 ptid_t lwp_ptid (ptid.pid (), ptid.pid ());
27c9d204 3129
677c92fe
SM
3130 /* Upgrade the main thread's ptid. */
3131 thread_change_ptid (linux_target, ptid, lwp_ptid);
3132 lp = add_initial_lwp (lwp_ptid);
f973ed9c
DJ
3133 lp->resumed = 1;
3134 }
3135
12696c10 3136 /* Make sure SIGCHLD is blocked until the sigsuspend below. */
7feb7d06 3137 block_child_signals (&prev_mask);
d6b0e80f 3138
d6b0e80f 3139 /* First check if there is a LWP with a wait status pending. */
d3a70e03 3140 lp = iterate_over_lwps (ptid, status_callback);
8a99810d 3141 if (lp != NULL)
d6b0e80f 3142 {
9327494e 3143 linux_nat_debug_printf ("Using pending wait status %s for %s.",
8d06918f 3144 status_to_str (lp->status).c_str (),
e53c95d4 3145 lp->ptid.to_string ().c_str ());
d6b0e80f
AC
3146 }
3147
9c02b525
PA
3148 /* But if we don't find a pending event, we'll have to wait. Always
3149 pull all events out of the kernel. We'll randomly select an
3150 event LWP out of all that have events, to prevent starvation. */
7feb7d06 3151
d90e17a7 3152 while (lp == NULL)
d6b0e80f
AC
3153 {
3154 pid_t lwpid;
3155
0e5bf2a8
PA
 3156      /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
3157 quirks:
3158
3159 - If the thread group leader exits while other threads in the
3160 thread group still exist, waitpid(TGID, ...) hangs. That
3161 waitpid won't return an exit status until the other threads
85102364 3162 in the group are reaped.
0e5bf2a8
PA
3163
3164 - When a non-leader thread execs, that thread just vanishes
3165 without reporting an exit (so we'd hang if we waited for it
3166 explicitly in that case). The exec event is reported to
3167 the TGID pid. */
3168
3169 errno = 0;
4a6ed09b 3170 lwpid = my_waitpid (-1, &status, __WALL | WNOHANG);
0e5bf2a8 3171
9327494e
SM
3172 linux_nat_debug_printf ("waitpid(-1, ...) returned %d, %s",
3173 lwpid,
3174 errno ? safe_strerror (errno) : "ERRNO-OK");
b84876c2 3175
d6b0e80f
AC
3176 if (lwpid > 0)
3177 {
9327494e 3178 linux_nat_debug_printf ("waitpid %ld received %s",
8d06918f
SM
3179 (long) lwpid,
3180 status_to_str (status).c_str ());
d6b0e80f 3181
9c02b525 3182 linux_nat_filter_event (lwpid, status);
0e5bf2a8
PA
3183 /* Retry until nothing comes out of waitpid. A single
3184 SIGCHLD can indicate more than one child stopped. */
3185 continue;
d6b0e80f
AC
3186 }
3187
20ba1ce6
PA
3188 /* Now that we've pulled all events out of the kernel, resume
3189 LWPs that don't have an interesting event to report. */
3190 iterate_over_lwps (minus_one_ptid,
d3a70e03
TT
3191 [] (struct lwp_info *info)
3192 {
3193 return resume_stopped_resumed_lwps (info, minus_one_ptid);
3194 });
20ba1ce6
PA
3195
3196 /* ... and find an LWP with a status to report to the core, if
3197 any. */
d3a70e03 3198 lp = iterate_over_lwps (ptid, status_callback);
9c02b525
PA
3199 if (lp != NULL)
3200 break;
3201
0e5bf2a8
PA
3202 /* Check for zombie thread group leaders. Those can't be reaped
3203 until all other threads in the thread group are. */
3204 check_zombie_leaders ();
d6b0e80f 3205
0e5bf2a8
PA
3206 /* If there are no resumed children left, bail. We'd be stuck
3207 forever in the sigsuspend call below otherwise. */
d3a70e03 3208 if (iterate_over_lwps (ptid, resumed_callback) == NULL)
0e5bf2a8 3209 {
9327494e 3210 linux_nat_debug_printf ("exit (no resumed LWP)");
b84876c2 3211
183be222 3212 ourstatus->set_no_resumed ();
b84876c2 3213
0e5bf2a8
PA
3214 restore_child_signals_mask (&prev_mask);
3215 return minus_one_ptid;
d6b0e80f 3216 }
28736962 3217
0e5bf2a8
PA
3218 /* No interesting event to report to the core. */
3219
3220 if (target_options & TARGET_WNOHANG)
3221 {
9327494e 3222 linux_nat_debug_printf ("exit (ignore)");
28736962 3223
183be222 3224 ourstatus->set_ignore ();
28736962
PA
3225 restore_child_signals_mask (&prev_mask);
3226 return minus_one_ptid;
3227 }
d6b0e80f
AC
3228
3229 /* We shouldn't end up here unless we want to try again. */
d90e17a7 3230 gdb_assert (lp == NULL);
0e5bf2a8
PA
3231
3232 /* Block until we get an event reported with SIGCHLD. */
9c3a5d93 3233 wait_for_signal ();
d6b0e80f
AC
3234 }
3235
d6b0e80f
AC
3236 gdb_assert (lp);
3237
ca2163eb
PA
3238 status = lp->status;
3239 lp->status = 0;
3240
fbea99ea 3241 if (!target_is_non_stop_p ())
4c28f408
PA
3242 {
3243 /* Now stop all other LWP's ... */
d3a70e03 3244 iterate_over_lwps (minus_one_ptid, stop_callback);
4c28f408
PA
3245
3246 /* ... and wait until all of them have reported back that
3247 they're no longer running. */
d3a70e03 3248 iterate_over_lwps (minus_one_ptid, stop_wait_callback);
9c02b525
PA
3249 }
3250
3251 /* If we're not waiting for a specific LWP, choose an event LWP from
3252 among those that have had events. Giving equal priority to all
3253 LWPs that have had events helps prevent starvation. */
d7e15655 3254 if (ptid == minus_one_ptid || ptid.is_pid ())
9c02b525
PA
3255 select_event_lwp (ptid, &lp, &status);
3256
3257 gdb_assert (lp != NULL);
3258
3259 /* Now that we've selected our final event LWP, un-adjust its PC if
faf09f01
PA
3260 it was a software breakpoint, and we can't reliably support the
3261 "stopped by software breakpoint" stop reason. */
3262 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3263 && !USE_SIGTRAP_SIGINFO)
9c02b525 3264 {
5b6d1e4f 3265 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
ac7936df 3266 struct gdbarch *gdbarch = regcache->arch ();
527a273a 3267 int decr_pc = gdbarch_decr_pc_after_break (gdbarch);
4c28f408 3268
9c02b525
PA
3269 if (decr_pc != 0)
3270 {
3271 CORE_ADDR pc;
d6b0e80f 3272
9c02b525
PA
3273 pc = regcache_read_pc (regcache);
3274 regcache_write_pc (regcache, pc + decr_pc);
3275 }
3276 }
e3e9f5a2 3277
9c02b525
PA
3278 /* We'll need this to determine whether to report a SIGSTOP as
3279 GDB_SIGNAL_0. Need to take a copy because resume_clear_callback
3280 clears it. */
3281 last_resume_kind = lp->last_resume_kind;
4b60df3d 3282
fbea99ea 3283 if (!target_is_non_stop_p ())
9c02b525 3284 {
e3e9f5a2
PA
3285 /* In all-stop, from the core's perspective, all LWPs are now
3286 stopped until a new resume action is sent over. */
d3a70e03 3287 iterate_over_lwps (minus_one_ptid, resume_clear_callback);
e3e9f5a2
PA
3288 }
3289 else
25289eb2 3290 {
d3a70e03 3291 resume_clear_callback (lp);
25289eb2 3292 }
d6b0e80f 3293
135340af 3294 if (linux_target->low_status_is_event (status))
d6b0e80f 3295 {
9327494e 3296 linux_nat_debug_printf ("trap ptid is %s.",
e53c95d4 3297 lp->ptid.to_string ().c_str ());
d6b0e80f 3298 }
d6b0e80f 3299
183be222 3300 if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
d6b0e80f
AC
3301 {
3302 *ourstatus = lp->waitstatus;
183be222 3303 lp->waitstatus.set_ignore ();
d6b0e80f
AC
3304 }
3305 else
7509b829 3306 *ourstatus = host_status_to_waitstatus (status);
d6b0e80f 3307
9327494e 3308 linux_nat_debug_printf ("exit");
b84876c2 3309
7feb7d06 3310 restore_child_signals_mask (&prev_mask);
1e225492 3311
4b60df3d 3312 if (last_resume_kind == resume_stop
183be222 3313 && ourstatus->kind () == TARGET_WAITKIND_STOPPED
25289eb2
PA
3314 && WSTOPSIG (status) == SIGSTOP)
3315 {
3316	 /* This thread was requested to stop by GDB with target_stop,
3317	    and it stopped cleanly, so report it as SIG0.  The
3318 use of SIGSTOP is an implementation detail. */
183be222 3319 ourstatus->set_stopped (GDB_SIGNAL_0);
25289eb2
PA
3320 }
3321
183be222
SM
3322 if (ourstatus->kind () == TARGET_WAITKIND_EXITED
3323 || ourstatus->kind () == TARGET_WAITKIND_SIGNALLED)
1e225492
JK
3324 lp->core = -1;
3325 else
2e794194 3326 lp->core = linux_common_core_of_thread (lp->ptid);
1e225492 3327
183be222 3328 if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
aa01bd36
PA
3329 return filter_exit_event (lp, ourstatus);
3330
f973ed9c 3331 return lp->ptid;
d6b0e80f
AC
3332}
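/* The loop above depends on three things working together: SIGCHLD is
   blocked up front, every pending child event is drained with
   waitpid (-1, ..., __WALL | WNOHANG), and only then does GDB sleep in
   sigsuspend with SIGCHLD temporarily unblocked, so no wakeup can be
   lost between the last waitpid and going to sleep.  The following is
   a minimal standalone sketch of that pattern (an illustration, not
   GDB code): it assumes <signal.h>, <sys/wait.h>, the Linux-specific
   __WALL flag, and an already-installed SIGCHLD handler; the helper
   name drain_and_wait_sketch is made up.  */

static pid_t
drain_and_wait_sketch (int *statusp)
{
  sigset_t chld_mask, suspend_mask_sketch, prev_mask_sketch;
  pid_t event_lwp = 0;

  sigemptyset (&chld_mask);
  sigaddset (&chld_mask, SIGCHLD);
  sigprocmask (SIG_BLOCK, &chld_mask, &prev_mask_sketch);

  while (event_lwp == 0)
    {
      int status;
      pid_t lwpid;

      /* Pull every queued event out of the kernel; a single SIGCHLD
	 may stand for several stopped children.  */
      while ((lwpid = waitpid (-1, &status, __WALL | WNOHANG)) > 0)
	{
	  /* A real implementation would filter and store each event
	     (see linux_nat_filter_event); here we just keep the first
	     one as "the" event to report.  */
	  if (event_lwp == 0)
	    {
	      event_lwp = lwpid;
	      *statusp = status;
	    }
	}

      if (event_lwp != 0)
	break;

      /* Atomically unblock SIGCHLD and sleep; sigsuspend returns once
	 the SIGCHLD handler has run, i.e. a child changed state.  */
      suspend_mask_sketch = prev_mask_sketch;
      sigdelset (&suspend_mask_sketch, SIGCHLD);
      sigsuspend (&suspend_mask_sketch);
    }

  sigprocmask (SIG_SETMASK, &prev_mask_sketch, NULL);
  return event_lwp;
}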
3333
e3e9f5a2
PA
3334/* Resume LWPs that are currently stopped without any pending status
3335 to report, but are resumed from the core's perspective. */
3336
3337static int
d3a70e03 3338resume_stopped_resumed_lwps (struct lwp_info *lp, const ptid_t wait_ptid)
e3e9f5a2 3339{
4dd63d48
PA
3340 if (!lp->stopped)
3341 {
9327494e 3342 linux_nat_debug_printf ("NOT resuming LWP %s, not stopped",
e53c95d4 3343 lp->ptid.to_string ().c_str ());
4dd63d48
PA
3344 }
3345 else if (!lp->resumed)
3346 {
9327494e 3347 linux_nat_debug_printf ("NOT resuming LWP %s, not resumed",
e53c95d4 3348 lp->ptid.to_string ().c_str ());
4dd63d48
PA
3349 }
3350 else if (lwp_status_pending_p (lp))
3351 {
9327494e 3352 linux_nat_debug_printf ("NOT resuming LWP %s, has pending status",
e53c95d4 3353 lp->ptid.to_string ().c_str ());
4dd63d48
PA
3354 }
3355 else
e3e9f5a2 3356 {
5b6d1e4f 3357 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
ac7936df 3358 struct gdbarch *gdbarch = regcache->arch ();
336060f3 3359
a70b8144 3360 try
e3e9f5a2 3361 {
23f238d3
PA
3362 CORE_ADDR pc = regcache_read_pc (regcache);
3363 int leave_stopped = 0;
e3e9f5a2 3364
23f238d3
PA
3365 /* Don't bother if there's a breakpoint at PC that we'd hit
3366 immediately, and we're not waiting for this LWP. */
d3a70e03 3367 if (!lp->ptid.matches (wait_ptid))
23f238d3 3368 {
a01bda52 3369 if (breakpoint_inserted_here_p (regcache->aspace (), pc))
23f238d3
PA
3370 leave_stopped = 1;
3371 }
e3e9f5a2 3372
23f238d3
PA
3373 if (!leave_stopped)
3374 {
9327494e
SM
3375 linux_nat_debug_printf
3376 ("resuming stopped-resumed LWP %s at %s: step=%d",
e53c95d4 3377 lp->ptid.to_string ().c_str (), paddress (gdbarch, pc),
9327494e 3378 lp->step);
23f238d3
PA
3379
3380 linux_resume_one_lwp_throw (lp, lp->step, GDB_SIGNAL_0);
3381 }
3382 }
230d2906 3383 catch (const gdb_exception_error &ex)
23f238d3
PA
3384 {
3385 if (!check_ptrace_stopped_lwp_gone (lp))
eedc3f4f 3386 throw;
23f238d3 3387 }
e3e9f5a2
PA
3388 }
3389
3390 return 0;
3391}
3392
f6ac5f3d
PA
3393ptid_t
3394linux_nat_target::wait (ptid_t ptid, struct target_waitstatus *ourstatus,
b60cea74 3395 target_wait_flags target_options)
7feb7d06
PA
3396{
3397 ptid_t event_ptid;
3398
e53c95d4 3399 linux_nat_debug_printf ("[%s], [%s]", ptid.to_string ().c_str (),
9327494e 3400 target_options_to_string (target_options).c_str ());
7feb7d06
PA
3401
3402 /* Flush the async file first. */
d9d41e78 3403 if (target_is_async_p ())
7feb7d06
PA
3404 async_file_flush ();
3405
e3e9f5a2
PA
3406 /* Resume LWPs that are currently stopped without any pending status
3407 to report, but are resumed from the core's perspective. LWPs get
3408 in this state if we find them stopping at a time we're not
3409 interested in reporting the event (target_wait on a
3410 specific_process, for example, see linux_nat_wait_1), and
3411 meanwhile the event became uninteresting. Don't bother resuming
3412 LWPs we're not going to wait for if they'd stop immediately. */
fbea99ea 3413 if (target_is_non_stop_p ())
d3a70e03
TT
3414 iterate_over_lwps (minus_one_ptid,
3415 [=] (struct lwp_info *info)
3416 {
3417 return resume_stopped_resumed_lwps (info, ptid);
3418 });
e3e9f5a2 3419
f6ac5f3d 3420 event_ptid = linux_nat_wait_1 (ptid, ourstatus, target_options);
7feb7d06
PA
3421
3422 /* If we requested any event, and something came out, assume there
3423 may be more. If we requested a specific lwp or process, also
3424 assume there may be more. */
d9d41e78 3425 if (target_is_async_p ()
183be222
SM
3426 && ((ourstatus->kind () != TARGET_WAITKIND_IGNORE
3427 && ourstatus->kind () != TARGET_WAITKIND_NO_RESUMED)
d7e15655 3428 || ptid != minus_one_ptid))
7feb7d06
PA
3429 async_file_mark ();
3430
7feb7d06
PA
3431 return event_ptid;
3432}
3433
1d2736d4
PA
3434/* Kill one LWP. */
3435
3436static void
3437kill_one_lwp (pid_t pid)
d6b0e80f 3438{
ed731959
JK
3439 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3440
3441 errno = 0;
1d2736d4 3442 kill_lwp (pid, SIGKILL);
9327494e 3443
ed731959 3444 if (debug_linux_nat)
57745c90
PA
3445 {
3446 int save_errno = errno;
3447
9327494e
SM
3448 linux_nat_debug_printf
3449 ("kill (SIGKILL) %ld, 0, 0 (%s)", (long) pid,
3450 save_errno != 0 ? safe_strerror (save_errno) : "OK");
57745c90 3451 }
ed731959
JK
3452
3453 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3454
d6b0e80f 3455 errno = 0;
1d2736d4 3456 ptrace (PTRACE_KILL, pid, 0, 0);
d6b0e80f 3457 if (debug_linux_nat)
57745c90
PA
3458 {
3459 int save_errno = errno;
3460
9327494e
SM
3461 linux_nat_debug_printf
3462 ("PTRACE_KILL %ld, 0, 0 (%s)", (long) pid,
3463 save_errno ? safe_strerror (save_errno) : "OK");
57745c90 3464 }
d6b0e80f
AC
3465}
3466
1d2736d4
PA
3467/* Wait for an LWP to die. */
3468
3469static void
3470kill_wait_one_lwp (pid_t pid)
d6b0e80f 3471{
1d2736d4 3472 pid_t res;
d6b0e80f
AC
3473
3474 /* We must make sure that there are no pending events (delayed
3475	 SIGSTOPs, pending SIGTRAPs, etc.), so that the current
3476 program doesn't interfere with any following debugging session. */
3477
d6b0e80f
AC
3478 do
3479 {
1d2736d4
PA
3480 res = my_waitpid (pid, NULL, __WALL);
3481 if (res != (pid_t) -1)
d6b0e80f 3482 {
9327494e
SM
3483 linux_nat_debug_printf ("wait %ld received unknown.", (long) pid);
3484
4a6ed09b
PA
3485 /* The Linux kernel sometimes fails to kill a thread
3486 completely after PTRACE_KILL; that goes from the stop
3487 point in do_fork out to the one in get_signal_to_deliver
3488 and waits again. So kill it again. */
1d2736d4 3489 kill_one_lwp (pid);
d6b0e80f
AC
3490 }
3491 }
1d2736d4
PA
3492 while (res == pid);
3493
3494 gdb_assert (res == -1 && errno == ECHILD);
3495}
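/* A standalone sketch (not GDB code) of the kill-and-reap sequence
   implemented by kill_one_lwp and kill_wait_one_lwp above, using raw
   system calls instead of GDB's kill_lwp and my_waitpid wrappers.
   tgkill targets a single LWP, where plain kill would address the
   whole thread group.  Assumes <signal.h>, <sys/ptrace.h>,
   <sys/syscall.h>, <sys/wait.h> and <unistd.h>; the helper name is
   made up.  */

static void
kill_and_reap_lwp_sketch (pid_t tgid, pid_t lwp)
{
  pid_t res;

  /* SIGKILL first: PTRACE_KILL on its own may merely resume a
     stopped LWP.  */
  syscall (SYS_tgkill, (int) tgid, (int) lwp, SIGKILL);
  ptrace (PTRACE_KILL, lwp, 0, 0);

  /* Reap until the LWP is really gone; if it stops again on its way
     out, kill it again.  */
  do
    {
      res = waitpid (lwp, NULL, __WALL);
      if (res == lwp)
	{
	  syscall (SYS_tgkill, (int) tgid, (int) lwp, SIGKILL);
	  ptrace (PTRACE_KILL, lwp, 0, 0);
	}
    }
  while (res == lwp);

  /* At this point waitpid has returned -1 with errno == ECHILD.  */
}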
3496
3497/* Callback for iterate_over_lwps. */
d6b0e80f 3498
1d2736d4 3499static int
d3a70e03 3500kill_callback (struct lwp_info *lp)
1d2736d4 3501{
e38504b3 3502 kill_one_lwp (lp->ptid.lwp ());
d6b0e80f
AC
3503 return 0;
3504}
3505
1d2736d4
PA
3506/* Callback for iterate_over_lwps. */
3507
3508static int
d3a70e03 3509kill_wait_callback (struct lwp_info *lp)
1d2736d4 3510{
e38504b3 3511 kill_wait_one_lwp (lp->ptid.lwp ());
1d2736d4
PA
3512 return 0;
3513}
3514
3515/* Kill the fork children of any threads of inferior INF that are
3516 stopped at a fork event. */
3517
3518static void
3519kill_unfollowed_fork_children (struct inferior *inf)
3520{
08036331
PA
3521 for (thread_info *thread : inf->non_exited_threads ())
3522 {
3523 struct target_waitstatus *ws = &thread->pending_follow;
1d2736d4 3524
183be222
SM
3525 if (ws->kind () == TARGET_WAITKIND_FORKED
3526 || ws->kind () == TARGET_WAITKIND_VFORKED)
08036331 3527 {
183be222 3528 ptid_t child_ptid = ws->child_ptid ();
08036331
PA
3529 int child_pid = child_ptid.pid ();
3530 int child_lwp = child_ptid.lwp ();
3531
3532 kill_one_lwp (child_lwp);
3533 kill_wait_one_lwp (child_lwp);
3534
3535 /* Let the arch-specific native code know this process is
3536 gone. */
3537 linux_target->low_forget_process (child_pid);
3538 }
3539 }
1d2736d4
PA
3540}
3541
f6ac5f3d
PA
3542void
3543linux_nat_target::kill ()
d6b0e80f 3544{
f973ed9c
DJ
3545 /* If we're stopped while forking and we haven't followed yet,
3546 kill the other task. We need to do this first because the
3547 parent will be sleeping if this is a vfork. */
1d2736d4 3548 kill_unfollowed_fork_children (current_inferior ());
f973ed9c
DJ
3549
3550 if (forks_exist_p ())
7feb7d06 3551 linux_fork_killall ();
f973ed9c
DJ
3552 else
3553 {
e99b03dc 3554 ptid_t ptid = ptid_t (inferior_ptid.pid ());
e0881a8e 3555
4c28f408 3556 /* Stop all threads before killing them, since ptrace requires
30baf67b 3557 that the thread is stopped to successfully PTRACE_KILL. */
d3a70e03 3558 iterate_over_lwps (ptid, stop_callback);
4c28f408
PA
3559 /* ... and wait until all of them have reported back that
3560 they're no longer running. */
d3a70e03 3561 iterate_over_lwps (ptid, stop_wait_callback);
4c28f408 3562
f973ed9c 3563 /* Kill all LWP's ... */
d3a70e03 3564 iterate_over_lwps (ptid, kill_callback);
f973ed9c
DJ
3565
3566 /* ... and wait until we've flushed all events. */
d3a70e03 3567 iterate_over_lwps (ptid, kill_wait_callback);
f973ed9c
DJ
3568 }
3569
bc1e6c81 3570 target_mourn_inferior (inferior_ptid);
d6b0e80f
AC
3571}
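/* A standalone sketch (not GDB code) of the ordering used above:
   ptrace can only reliably PTRACE_KILL a stopped tracee, so each LWP
   is first stopped with SIGSTOP and waited for, and only then killed.
   GDB's stop_callback / stop_wait_callback do the stopping across all
   LWPs; this sketch handles a single LWP and reuses the hypothetical
   kill_and_reap_lwp_sketch from the earlier sketch.  Assumes
   <signal.h>, <sys/syscall.h>, <sys/wait.h> and <unistd.h>.  */

static void
stop_then_kill_lwp_sketch (pid_t tgid, pid_t lwp)
{
  int status;

  /* Ask the LWP to stop and wait until a stop is reported.  */
  syscall (SYS_tgkill, (int) tgid, (int) lwp, SIGSTOP);
  if (waitpid (lwp, &status, __WALL) == lwp && WIFSTOPPED (status))
    {
      /* Now it is safe to kill and reap it.  */
      kill_and_reap_lwp_sketch (tgid, lwp);
    }
}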
3572
f6ac5f3d
PA
3573void
3574linux_nat_target::mourn_inferior ()
d6b0e80f 3575{
e99b03dc 3576 int pid = inferior_ptid.pid ();
26cb8b7c
PA
3577
3578 purge_lwp_list (pid);
d6b0e80f 3579
8a89ddbd 3580 close_proc_mem_file (pid);
05c06f31 3581
f973ed9c 3582 if (! forks_exist_p ())
d90e17a7 3583 /* Normal case, no other forks available. */
f6ac5f3d 3584 inf_ptrace_target::mourn_inferior ();
f973ed9c
DJ
3585 else
3586 /* Multi-fork case. The current inferior_ptid has exited, but
3587 there are other viable forks to debug. Delete the exiting
3588 one and context-switch to the first available. */
3589 linux_fork_mourn_inferior ();
26cb8b7c
PA
3590
3591 /* Let the arch-specific native code know this process is gone. */
135340af 3592 linux_target->low_forget_process (pid);
d6b0e80f
AC
3593}
3594
5b009018
PA
3595/* Convert a native/host siginfo object, into/from the siginfo in the
3596 layout of the inferiors' architecture. */
3597
3598static void
a5362b9a 3599siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
5b009018 3600{
135340af
PA
3601 /* If the low target didn't do anything, then just do a straight
3602 memcpy. */
3603 if (!linux_target->low_siginfo_fixup (siginfo, inf_siginfo, direction))
5b009018
PA
3604 {
3605 if (direction == 1)
a5362b9a 3606 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5b009018 3607 else
a5362b9a 3608 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5b009018
PA
3609 }
3610}
3611
9b409511 3612static enum target_xfer_status
f6ac5f3d 3613linux_xfer_siginfo (enum target_object object,
dda83cd7 3614 const char *annex, gdb_byte *readbuf,
9b409511
YQ
3615 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3616 ULONGEST *xfered_len)
4aa995e1 3617{
4aa995e1 3618 int pid;
a5362b9a
TS
3619 siginfo_t siginfo;
3620 gdb_byte inf_siginfo[sizeof (siginfo_t)];
4aa995e1
PA
3621
3622 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
3623 gdb_assert (readbuf || writebuf);
3624
e38504b3 3625 pid = inferior_ptid.lwp ();
4aa995e1 3626 if (pid == 0)
e99b03dc 3627 pid = inferior_ptid.pid ();
4aa995e1
PA
3628
3629 if (offset > sizeof (siginfo))
2ed4b548 3630 return TARGET_XFER_E_IO;
4aa995e1
PA
3631
3632 errno = 0;
3633 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3634 if (errno != 0)
2ed4b548 3635 return TARGET_XFER_E_IO;
4aa995e1 3636
5b009018
PA
3637 /* When GDB is built as a 64-bit application, ptrace writes into
3638 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3639 inferior with a 64-bit GDB should look the same as debugging it
3640 with a 32-bit GDB, we need to convert it. GDB core always sees
3641 the converted layout, so any read/write will have to be done
3642 post-conversion. */
3643 siginfo_fixup (&siginfo, inf_siginfo, 0);
3644
4aa995e1
PA
3645 if (offset + len > sizeof (siginfo))
3646 len = sizeof (siginfo) - offset;
3647
3648 if (readbuf != NULL)
5b009018 3649 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
3650 else
3651 {
5b009018
PA
3652 memcpy (inf_siginfo + offset, writebuf, len);
3653
3654 /* Convert back to ptrace layout before flushing it out. */
3655 siginfo_fixup (&siginfo, inf_siginfo, 1);
3656
4aa995e1
PA
3657 errno = 0;
3658 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3659 if (errno != 0)
2ed4b548 3660 return TARGET_XFER_E_IO;
4aa995e1
PA
3661 }
3662
9b409511
YQ
3663 *xfered_len = len;
3664 return TARGET_XFER_OK;
4aa995e1
PA
3665}
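/* A standalone sketch (not GDB code) of the raw ptrace calls that
   linux_xfer_siginfo wraps above: fetch the stopped LWP's siginfo_t,
   modify it, and write it back.  The siginfo_fixup step above only
   matters when GDB and the inferior use different siginfo layouts
   (e.g. a 64-bit GDB debugging a 32-bit inferior); for a same-layout
   session the structure can be used directly, as here.  Assumes
   <signal.h>, <sys/ptrace.h> and <errno.h>, and that LWP is
   ptrace-stopped; the helper name is made up.  */

static int
siginfo_round_trip_sketch (pid_t lwp, int new_signo)
{
  siginfo_t si;

  errno = 0;
  if (ptrace (PTRACE_GETSIGINFO, lwp, (void *) 0, &si) != 0)
    return -1;

  /* For example, rewrite the signal number that will be reported.  */
  si.si_signo = new_signo;

  errno = 0;
  if (ptrace (PTRACE_SETSIGINFO, lwp, (void *) 0, &si) != 0)
    return -1;

  return 0;
}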
3666
9b409511 3667static enum target_xfer_status
f6ac5f3d
PA
3668linux_nat_xfer_osdata (enum target_object object,
3669 const char *annex, gdb_byte *readbuf,
3670 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3671 ULONGEST *xfered_len);
3672
f6ac5f3d 3673static enum target_xfer_status
05c06f31
PA
3674linux_proc_xfer_memory_partial (gdb_byte *readbuf, const gdb_byte *writebuf,
3675 ULONGEST offset, LONGEST len, ULONGEST *xfered_len);
f6ac5f3d
PA
3676
3677enum target_xfer_status
3678linux_nat_target::xfer_partial (enum target_object object,
3679 const char *annex, gdb_byte *readbuf,
3680 const gdb_byte *writebuf,
3681 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
d6b0e80f 3682{
4aa995e1 3683 if (object == TARGET_OBJECT_SIGNAL_INFO)
f6ac5f3d 3684 return linux_xfer_siginfo (object, annex, readbuf, writebuf,
9b409511 3685 offset, len, xfered_len);
4aa995e1 3686
c35b1492
PA
3687 /* The target is connected but no live inferior is selected. Pass
3688 this request down to a lower stratum (e.g., the executable
3689 file). */
d7e15655 3690 if (object == TARGET_OBJECT_MEMORY && inferior_ptid == null_ptid)
9b409511 3691 return TARGET_XFER_EOF;
c35b1492 3692
f6ac5f3d
PA
3693 if (object == TARGET_OBJECT_AUXV)
3694 return memory_xfer_auxv (this, object, annex, readbuf, writebuf,
3695 offset, len, xfered_len);
3696
3697 if (object == TARGET_OBJECT_OSDATA)
3698 return linux_nat_xfer_osdata (object, annex, readbuf, writebuf,
3699 offset, len, xfered_len);
d6b0e80f 3700
f6ac5f3d
PA
3701 if (object == TARGET_OBJECT_MEMORY)
3702 {
05c06f31
PA
3703 /* GDB calculates all addresses in the largest possible address
3704	 width.  The address must be masked to the address width before
3705	 its final use by linux_proc_xfer_memory_partial.
3706
3707 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
f6ac5f3d
PA
3708 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
3709
3710 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
3711 offset &= ((ULONGEST) 1 << addr_bit) - 1;
f6ac5f3d 3712
05c06f31
PA
3713 return linux_proc_xfer_memory_partial (readbuf, writebuf,
3714 offset, len, xfered_len);
3715 }
f6ac5f3d
PA
3716
3717 return inf_ptrace_target::xfer_partial (object, annex, readbuf, writebuf,
3718 offset, len, xfered_len);
d6b0e80f
AC
3719}
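/* A small standalone sketch (not GDB code) of the TARGET_OBJECT_MEMORY
   masking above: a 32-bit inferior address that has been sign-extended
   to 64 bits, e.g. 0xffffffffb7fff000, must be truncated back to the
   architecture's address width (0xb7fff000 for ADDR_BIT == 32) before
   it can serve as an offset into /proc/PID/mem.  ADDR_BIT stands in
   for gdbarch_addr_bit; the helper name is made up.  */

static unsigned long long
mask_to_addr_width_sketch (unsigned long long offset, int addr_bit)
{
  /* Guard the shift: shifting by the full width is undefined.  */
  if (addr_bit < (int) (sizeof (unsigned long long) * 8))
    offset &= ((unsigned long long) 1 << addr_bit) - 1;
  return offset;
}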
3720
57810aa7 3721bool
f6ac5f3d 3722linux_nat_target::thread_alive (ptid_t ptid)
28439f5e 3723{
4a6ed09b
PA
3724	 /* As long as a PTID is in the LWP list, consider it alive.  */
3725 return find_lwp_pid (ptid) != NULL;
28439f5e
PA
3726}
3727
8a06aea7
PA
3728/* Implement the to_update_thread_list target method for this
3729 target. */
3730
f6ac5f3d
PA
3731void
3732linux_nat_target::update_thread_list ()
8a06aea7 3733{
4a6ed09b
PA
3734 /* We add/delete threads from the list as clone/exit events are
3735 processed, so just try deleting exited threads still in the
3736 thread list. */
3737 delete_exited_threads ();
a6904d5a
PA
3738
3739 /* Update the processor core that each lwp/thread was last seen
3740 running on. */
901b9821 3741 for (lwp_info *lwp : all_lwps ())
1ad3de98
PA
3742 {
3743	 /* Avoid accessing /proc if the thread hasn't run since the last
3744	    time we fetched the thread's core.  Accessing /proc becomes
3745 noticeably expensive when we have thousands of LWPs. */
3746 if (lwp->core == -1)
3747 lwp->core = linux_common_core_of_thread (lwp->ptid);
3748 }
8a06aea7
PA
3749}
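/* On Linux, the CPU a task last ran on is exposed as the "processor"
   field (field 39) of /proc/PID/task/TID/stat.  The following is a
   standalone sketch of reading it directly; it is an illustration of
   where the cached value above comes from, not the
   linux_common_core_of_thread helper the code actually calls.
   Assumes <stdio.h>, <stdlib.h> and <string.h>; the helper name is
   made up.  */

static int
core_of_lwp_sketch (pid_t pid, pid_t tid)
{
  char path[64], buf[1024];
  FILE *f;
  size_t n;
  char *p;
  int field;

  snprintf (path, sizeof path, "/proc/%d/task/%d/stat",
	    (int) pid, (int) tid);
  f = fopen (path, "r");
  if (f == NULL)
    return -1;
  n = fread (buf, 1, sizeof buf - 1, f);
  fclose (f);
  buf[n] = '\0';

  /* Field 2 (the command name) is parenthesized and may itself
     contain spaces or ')', so scan from the last ')'.  */
  p = strrchr (buf, ')');
  if (p == NULL)
    return -1;
  p++;				/* Now at the space before field 3.  */

  /* Walk forward to the space before field 39 ("processor").  */
  for (field = 3; field < 39; field++)
    {
      p = strchr (p + 1, ' ');
      if (p == NULL)
	return -1;
    }

  return atoi (p + 1);
}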
3750
a068643d 3751std::string
f6ac5f3d 3752linux_nat_target::pid_to_str (ptid_t ptid)
d6b0e80f 3753{
15a9e13e 3754 if (ptid.lwp_p ()
e38504b3 3755 && (ptid.pid () != ptid.lwp ()
e99b03dc 3756 || num_lwps (ptid.pid ()) > 1))
a068643d 3757 return string_printf ("LWP %ld", ptid.lwp ());
d6b0e80f
AC
3758
3759 return normal_pid_to_str (ptid);
3760}
3761
f6ac5f3d
PA
3762const char *
3763linux_nat_target::thread_name (struct thread_info *thr)
4694da01 3764{
79efa585 3765 return linux_proc_tid_get_name (thr->ptid);
4694da01
TT
3766}
3767
dba24537
AC
3768/* Accepts an integer PID; returns a string representing a file that
3769 can be opened to get the symbols for the child process. */
3770
0e90c441 3771const char *
f6ac5f3d 3772linux_nat_target::pid_to_exec_file (int pid)
dba24537 3773{
e0d86d2c 3774 return linux_proc_pid_to_exec_file (pid);
dba24537
AC
3775}
3776
8a89ddbd
PA
3777/* Object representing an open /proc/PID/mem file.  We keep one such
3778 file open per inferior.
3779
3780 It might be tempting to think about only ever opening one file at
3781 most for all inferiors, closing/reopening the file as we access
3782 memory of different inferiors, to minimize number of file
3783 descriptors open, which can otherwise run into resource limits.
3784 However, that does not work correctly -- if the inferior execs and
3785	 we haven't processed the exec event yet and then open a
3786	 /proc/PID/mem file, we get a mem file accessing the post-exec
3787	 address space while thinking we opened it for the pre-exec address
3788 space. That is dangerous as we can poke memory (e.g. clearing
3789 breakpoints) in the post-exec memory by mistake, corrupting the
3790 inferior. For that reason, we open the mem file as early as
3791 possible, right after spawning, forking or attaching to the
3792 inferior, when the inferior is stopped and thus before it has a
3793 chance of execing.
3794
3795 Note that after opening the file, even if the thread we opened it
3796 for subsequently exits, the open file is still usable for accessing
3797 memory. It's only when the whole process exits or execs that the
3798 file becomes invalid, at which point reads/writes return EOF. */
3799
3800class proc_mem_file
3801{
3802public:
3803 proc_mem_file (ptid_t ptid, int fd)
3804 : m_ptid (ptid), m_fd (fd)
3805 {
3806 gdb_assert (m_fd != -1);
3807 }
05c06f31 3808
8a89ddbd 3809 ~proc_mem_file ()
05c06f31 3810 {
89662f69 3811 linux_nat_debug_printf ("closing fd %d for /proc/%d/task/%ld/mem",
8a89ddbd
PA
3812 m_fd, m_ptid.pid (), m_ptid.lwp ());
3813 close (m_fd);
05c06f31 3814 }
05c06f31 3815
8a89ddbd
PA
3816 DISABLE_COPY_AND_ASSIGN (proc_mem_file);
3817
3818 int fd ()
3819 {
3820 return m_fd;
3821 }
3822
3823private:
3824 /* The LWP this file was opened for. Just for debugging
3825 purposes. */
3826 ptid_t m_ptid;
3827
3828 /* The file descriptor. */
3829 int m_fd = -1;
3830};
3831
3832/* The map between an inferior process id, and the open /proc/PID/mem
3833 file. This is stored in a map instead of in a per-inferior
3834 structure because we need to be able to access memory of processes
3835 which don't have a corresponding struct inferior object. E.g.,
3836 with "detach-on-fork on" (the default), and "follow-fork parent"
3837 (also default), we don't create an inferior for the fork child, but
3838 we still need to remove breakpoints from the fork child's
3839 memory. */
3840static std::unordered_map<int, proc_mem_file> proc_mem_file_map;
3841
3842/* Close the /proc/PID/mem file for PID. */
05c06f31
PA
3843
3844static void
8a89ddbd 3845close_proc_mem_file (pid_t pid)
dba24537 3846{
8a89ddbd 3847 proc_mem_file_map.erase (pid);
05c06f31 3848}
dba24537 3849
8a89ddbd
PA
3850/* Open the /proc/PID/mem file for the process (thread group) of PTID.
3851 We actually open /proc/PID/task/LWP/mem, as that's the LWP we know
3852 exists and is stopped right now. We prefer the
3853 /proc/PID/task/LWP/mem form over /proc/LWP/mem to avoid tid-reuse
3854 races, just in case this is ever called on an already-waited
3855 LWP. */
dba24537 3856
8a89ddbd
PA
3857static void
3858open_proc_mem_file (ptid_t ptid)
05c06f31 3859{
8a89ddbd
PA
3860 auto iter = proc_mem_file_map.find (ptid.pid ());
3861 gdb_assert (iter == proc_mem_file_map.end ());
dba24537 3862
8a89ddbd
PA
3863 char filename[64];
3864 xsnprintf (filename, sizeof filename,
3865 "/proc/%d/task/%ld/mem", ptid.pid (), ptid.lwp ());
3866
3867 int fd = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();
05c06f31 3868
8a89ddbd
PA
3869 if (fd == -1)
3870 {
3871 warning (_("opening /proc/PID/mem file for lwp %d.%ld failed: %s (%d)"),
3872 ptid.pid (), ptid.lwp (),
3873 safe_strerror (errno), errno);
3874 return;
05c06f31
PA
3875 }
3876
8a89ddbd
PA
3877 proc_mem_file_map.emplace (std::piecewise_construct,
3878 std::forward_as_tuple (ptid.pid ()),
3879 std::forward_as_tuple (ptid, fd));
3880
9221923c 3881 linux_nat_debug_printf ("opened fd %d for lwp %d.%ld",
8a89ddbd
PA
3882 fd, ptid.pid (), ptid.lwp ());
3883}
3884
3885/* Implement the to_xfer_partial target method using /proc/PID/mem.
3886 Because we can use a single read/write call, this can be much more
3887 efficient than banging away at PTRACE_PEEKTEXT. Also, unlike
3888 PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running
3889 threads. */
3890
3891static enum target_xfer_status
3892linux_proc_xfer_memory_partial (gdb_byte *readbuf, const gdb_byte *writebuf,
3893 ULONGEST offset, LONGEST len,
3894 ULONGEST *xfered_len)
3895{
3896 ssize_t ret;
3897
3898 auto iter = proc_mem_file_map.find (inferior_ptid.pid ());
3899 if (iter == proc_mem_file_map.end ())
3900 return TARGET_XFER_EOF;
3901
3902 int fd = iter->second.fd ();
3903
3904 gdb_assert (fd != -1);
dba24537 3905
a379284a
AA
3906 /* Use pread64/pwrite64 if available, since they save a syscall and can
3907 handle 64-bit offsets even on 32-bit platforms (for instance, SPARC
3908 debugging a SPARC64 application). */
dba24537 3909#ifdef HAVE_PREAD64
a379284a
AA
3910 ret = (readbuf ? pread64 (fd, readbuf, len, offset)
3911 : pwrite64 (fd, writebuf, len, offset));
dba24537 3912#else
a379284a
AA
3913 ret = lseek (fd, offset, SEEK_SET);
3914 if (ret != -1)
3915 ret = (readbuf ? read (fd, readbuf, len)
3916 : write (fd, writebuf, len));
dba24537 3917#endif
dba24537 3918
05c06f31
PA
3919 if (ret == -1)
3920 {
9221923c 3921 linux_nat_debug_printf ("accessing fd %d for pid %d failed: %s (%d)",
8a89ddbd 3922 fd, inferior_ptid.pid (),
05c06f31 3923 safe_strerror (errno), errno);
284b6bb5 3924 return TARGET_XFER_E_IO;
05c06f31
PA
3925 }
3926 else if (ret == 0)
3927 {
8a89ddbd
PA
3928 /* EOF means the address space is gone, the whole process exited
3929 or execed. */
9221923c 3930 linux_nat_debug_printf ("accessing fd %d for pid %d got EOF",
8a89ddbd 3931 fd, inferior_ptid.pid ());
05c06f31
PA
3932 return TARGET_XFER_EOF;
3933 }
9b409511
YQ
3934 else
3935 {
8a89ddbd 3936 *xfered_len = ret;
9b409511
YQ
3937 return TARGET_XFER_OK;
3938 }
05c06f31 3939}
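/* A standalone sketch (not GDB code) of the access pattern used
   above: open /proc/PID/task/LWP/mem and read any inferior address
   with a single pread64, which works even while other threads of the
   inferior keep running.  In GDB the descriptor is opened early and
   cached in proc_mem_file_map; for brevity this sketch opens and
   closes it around a single read.  Assumes <fcntl.h>, <stdio.h> and
   <unistd.h>, plus the Linux O_LARGEFILE/pread64 interfaces; the
   helper name is made up.  */

static ssize_t
read_inferior_memory_sketch (pid_t pid, pid_t lwp,
			     unsigned long long addr,
			     void *buf, size_t len)
{
  char filename[64];
  int fd;
  ssize_t ret;

  snprintf (filename, sizeof filename,
	    "/proc/%d/task/%d/mem", (int) pid, (int) lwp);
  fd = open (filename, O_RDWR | O_LARGEFILE | O_CLOEXEC);
  if (fd == -1)
    return -1;

  /* One syscall, 64-bit offset, no need to stop the inferior.  */
  ret = pread64 (fd, buf, len, addr);

  close (fd);
  return ret;
}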
efcbbd14 3940
dba24537
AC
3941/* Parse LINE as a signal set and add its set bits to SIGS. */
3942
3943static void
3944add_line_to_sigset (const char *line, sigset_t *sigs)
3945{
3946 int len = strlen (line) - 1;
3947 const char *p;
3948 int signum;
3949
3950 if (line[len] != '\n')
8a3fe4f8 3951 error (_("Could not parse signal set: %s"), line);
dba24537
AC
3952
3953 p = line;
3954 signum = len * 4;
3955 while (len-- > 0)
3956 {
3957 int digit;
3958
3959 if (*p >= '0' && *p <= '9')
3960 digit = *p - '0';
3961 else if (*p >= 'a' && *p <= 'f')
3962 digit = *p - 'a' + 10;
3963 else
8a3fe4f8 3964 error (_("Could not parse signal set: %s"), line);
dba24537
AC
3965
3966 signum -= 4;
3967
3968 if (digit & 1)
3969 sigaddset (sigs, signum + 1);
3970 if (digit & 2)
3971 sigaddset (sigs, signum + 2);
3972 if (digit & 4)
3973 sigaddset (sigs, signum + 3);
3974 if (digit & 8)
3975 sigaddset (sigs, signum + 4);
3976
3977 p++;
3978 }
3979}
3980
3981/* Find process PID's pending signals from /proc/pid/status and set
3982 SIGS to match. */
3983
3984void
3e43a32a
MS
3985linux_proc_pending_signals (int pid, sigset_t *pending,
3986 sigset_t *blocked, sigset_t *ignored)
dba24537 3987{
d8d2a3ee 3988 char buffer[PATH_MAX], fname[PATH_MAX];
dba24537
AC
3989
3990 sigemptyset (pending);
3991 sigemptyset (blocked);
3992 sigemptyset (ignored);
cde33bf1 3993 xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
d419f42d 3994 gdb_file_up procfile = gdb_fopen_cloexec (fname, "r");
dba24537 3995 if (procfile == NULL)
8a3fe4f8 3996 error (_("Could not open %s"), fname);
dba24537 3997
d419f42d 3998 while (fgets (buffer, PATH_MAX, procfile.get ()) != NULL)
dba24537
AC
3999 {
4000 /* Normal queued signals are on the SigPnd line in the status
4001 file. However, 2.6 kernels also have a "shared" pending
4002 queue for delivering signals to a thread group, so check for
4003 a ShdPnd line also.
4004
4005 Unfortunately some Red Hat kernels include the shared pending
4006 queue but not the ShdPnd status field. */
4007
61012eef 4008 if (startswith (buffer, "SigPnd:\t"))
dba24537 4009 add_line_to_sigset (buffer + 8, pending);
61012eef 4010 else if (startswith (buffer, "ShdPnd:\t"))
dba24537 4011 add_line_to_sigset (buffer + 8, pending);
61012eef 4012 else if (startswith (buffer, "SigBlk:\t"))
dba24537 4013 add_line_to_sigset (buffer + 8, blocked);
61012eef 4014 else if (startswith (buffer, "SigIgn:\t"))
dba24537
AC
4015 add_line_to_sigset (buffer + 8, ignored);
4016 }
dba24537
AC
4017}
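/* Example use of linux_proc_pending_signals above (a sketch, not
   existing GDB code): check what process PID is currently doing with
   SIGINT.  A status line such as "SigBlk: ..." is a hex mask with bit
   N-1 set for signal N, which is what add_line_to_sigset decodes.
   Assumes <signal.h> and <stdio.h>; the helper name is made up.  */

static void
report_sigint_state_sketch (int pid)
{
  sigset_t pending, blocked, ignored;

  linux_proc_pending_signals (pid, &pending, &blocked, &ignored);

  if (sigismember (&pending, SIGINT))
    printf ("SIGINT is pending for process %d\n", pid);
  else if (sigismember (&blocked, SIGINT))
    printf ("SIGINT is blocked by process %d\n", pid);
  else if (sigismember (&ignored, SIGINT))
    printf ("SIGINT is ignored by process %d\n", pid);
}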
4018
9b409511 4019static enum target_xfer_status
f6ac5f3d 4020linux_nat_xfer_osdata (enum target_object object,
e0881a8e 4021 const char *annex, gdb_byte *readbuf,
9b409511
YQ
4022 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4023 ULONGEST *xfered_len)
07e059b5 4024{
07e059b5
VP
4025 gdb_assert (object == TARGET_OBJECT_OSDATA);
4026
9b409511
YQ
4027 *xfered_len = linux_common_xfer_osdata (annex, readbuf, offset, len);
4028 if (*xfered_len == 0)
4029 return TARGET_XFER_EOF;
4030 else
4031 return TARGET_XFER_OK;
07e059b5
VP
4032}
4033
f6ac5f3d
PA
4034std::vector<static_tracepoint_marker>
4035linux_nat_target::static_tracepoint_markers_by_strid (const char *strid)
5808517f
YQ
4036{
4037 char s[IPA_CMD_BUF_SIZE];
e99b03dc 4038 int pid = inferior_ptid.pid ();
5d9310c4 4039 std::vector<static_tracepoint_marker> markers;
256642e8 4040 const char *p = s;
184ea2f7 4041 ptid_t ptid = ptid_t (pid, 0);
5d9310c4 4042 static_tracepoint_marker marker;
5808517f
YQ
4043
4044 /* Pause all */
4045 target_stop (ptid);
4046
4047 memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
4048 s[sizeof ("qTfSTM")] = 0;
4049
42476b70 4050 agent_run_command (pid, s, strlen (s) + 1);
5808517f 4051
1db93f14
TT
4052 /* Unpause all. */
4053 SCOPE_EXIT { target_continue_no_signal (ptid); };
5808517f
YQ
4054
4055 while (*p++ == 'm')
4056 {
5808517f
YQ
4057 do
4058 {
5d9310c4 4059 parse_static_tracepoint_marker_definition (p, &p, &marker);
5808517f 4060
5d9310c4
SM
4061 if (strid == NULL || marker.str_id == strid)
4062 markers.push_back (std::move (marker));
5808517f
YQ
4063 }
4064 while (*p++ == ','); /* comma-separated list */
4065
4066 memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
4067 s[sizeof ("qTsSTM")] = 0;
42476b70 4068 agent_run_command (pid, s, strlen (s) + 1);
5808517f
YQ
4069 p = s;
4070 }
4071
5808517f
YQ
4072 return markers;
4073}
4074
b84876c2
PA
4075/* target_can_async_p implementation. */
4076
57810aa7 4077bool
f6ac5f3d 4078linux_nat_target::can_async_p ()
b84876c2 4079{
fce6cd34
AB
4080 /* This flag should be checked in the common target.c code. */
4081 gdb_assert (target_async_permitted);
4082
4083	 /* Otherwise, this target is always able to support async mode.  */
4084 return true;
b84876c2
PA
4085}
4086
57810aa7 4087bool
f6ac5f3d 4088linux_nat_target::supports_non_stop ()
9908b566 4089{
f80c8ec4 4090 return true;
9908b566
VP
4091}
4092
fbea99ea
PA
4093/* to_always_non_stop_p implementation. */
4094
57810aa7 4095bool
f6ac5f3d 4096linux_nat_target::always_non_stop_p ()
fbea99ea 4097{
f80c8ec4 4098 return true;
fbea99ea
PA
4099}
4100
57810aa7 4101bool
f6ac5f3d 4102linux_nat_target::supports_multi_process ()
d90e17a7 4103{
aee91db3 4104 return true;
d90e17a7
PA
4105}
4106
57810aa7 4107bool
f6ac5f3d 4108linux_nat_target::supports_disable_randomization ()
03583c20 4109{
f80c8ec4 4110 return true;
03583c20
UW
4111}
4112
7feb7d06
PA
4113/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4114   it notifies the event loop that some child has changed state; and it
4115   allows linux_nat_wait_1 above to use sigsuspend to wait for the
4116   arrival of a SIGCHLD.  */
4117
b84876c2 4118static void
7feb7d06 4119sigchld_handler (int signo)
b84876c2 4120{
7feb7d06
PA
4121 int old_errno = errno;
4122
01124a23 4123 if (debug_linux_nat)
da5bd37e 4124 gdb_stdlog->write_async_safe ("sigchld\n", sizeof ("sigchld\n") - 1);
7feb7d06 4125
b146ba14
JB
4126 if (signo == SIGCHLD)
4127 {
4128 /* Let the event loop know that there are events to handle. */
4129 linux_nat_target::async_file_mark_if_open ();
4130 }
7feb7d06
PA
4131
4132 errno = old_errno;
4133}
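/* A standalone sketch (not GDB code) of the self-pipe trick behind
   sigchld_handler / async_file_mark_if_open above: the only work done
   in the handler is an async-signal-safe write of one byte to a
   non-blocking pipe, and the event loop learns about the SIGCHLD by
   polling the pipe's read end.  The pipe would be created at setup
   time, e.g. with pipe2 (event_pipe_sketch, O_NONBLOCK | O_CLOEXEC).
   Assumes <errno.h>, <fcntl.h>, <signal.h> and <unistd.h>; the names
   are made up.  */

static int event_pipe_sketch[2] = { -1, -1 };

static void
sigchld_handler_sketch (int signo)
{
  int old_errno = errno;

  if (signo == SIGCHLD && event_pipe_sketch[1] != -1)
    {
      char c = '+';

      /* write is async-signal-safe.  If the pipe is already full, the
	 failed write is fine: one queued byte is enough to wake the
	 event loop.  */
      (void) write (event_pipe_sketch[1], &c, 1);
    }

  errno = old_errno;
}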
4134
4135/* Callback registered with the target events file descriptor. */
4136
4137static void
4138handle_target_event (int error, gdb_client_data client_data)
4139{
b1a35af2 4140 inferior_event_handler (INF_REG_EVENT);
7feb7d06
PA
4141}
4142
b84876c2
PA
4143/* target_async implementation. */
4144
f6ac5f3d
PA
4145void
4146linux_nat_target::async (int enable)
b84876c2 4147{
b146ba14
JB
4148 if ((enable != 0) == is_async_p ())
4149 return;
4150
4151 /* Block child signals while we create/destroy the pipe, as their
4152 handler writes to it. */
4153 gdb::block_signals blocker;
4154
6a3753b3 4155 if (enable)
b84876c2 4156 {
b146ba14
JB
4157 if (!async_file_open ())
4158 internal_error (__FILE__, __LINE__, "creating event pipe failed.");
4159
4160 add_file_handler (async_wait_fd (), handle_target_event, NULL,
4161 "linux-nat");
4162
4163 /* There may be pending events to handle. Tell the event loop
4164 to poll them. */
4165 async_file_mark ();
b84876c2
PA
4166 }
4167 else
4168 {
b146ba14
JB
4169 delete_file_handler (async_wait_fd ());
4170 async_file_close ();
b84876c2 4171 }
b84876c2
PA
4172}
4173
a493e3e2 4174/* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
252fbfc8
PA
4175 event came out. */
4176
4c28f408 4177static int
d3a70e03 4178linux_nat_stop_lwp (struct lwp_info *lwp)
4c28f408 4179{
d90e17a7 4180 if (!lwp->stopped)
252fbfc8 4181 {
9327494e 4182 linux_nat_debug_printf ("running -> suspending %s",
e53c95d4 4183 lwp->ptid.to_string ().c_str ());
252fbfc8 4184
252fbfc8 4185
25289eb2
PA
4186 if (lwp->last_resume_kind == resume_stop)
4187 {
9327494e
SM
4188 linux_nat_debug_printf ("already stopping LWP %ld at GDB's request",
4189 lwp->ptid.lwp ());
25289eb2
PA
4190 return 0;
4191 }
252fbfc8 4192
d3a70e03 4193 stop_callback (lwp);
25289eb2 4194 lwp->last_resume_kind = resume_stop;
d90e17a7
PA
4195 }
4196 else
4197 {
4198 /* Already known to be stopped; do nothing. */
252fbfc8 4199
d90e17a7
PA
4200 if (debug_linux_nat)
4201 {
5b6d1e4f 4202 if (find_thread_ptid (linux_target, lwp->ptid)->stop_requested)
9327494e 4203 linux_nat_debug_printf ("already stopped/stop_requested %s",
e53c95d4 4204 lwp->ptid.to_string ().c_str ());
d90e17a7 4205 else
9327494e 4206 linux_nat_debug_printf ("already stopped/no stop_requested yet %s",
e53c95d4 4207 lwp->ptid.to_string ().c_str ());
252fbfc8
PA
4208 }
4209 }
4c28f408
PA
4210 return 0;
4211}
4212
f6ac5f3d
PA
4213void
4214linux_nat_target::stop (ptid_t ptid)
4c28f408 4215{
b6e52a0b 4216 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
d3a70e03 4217 iterate_over_lwps (ptid, linux_nat_stop_lwp);
bfedc46a
PA
4218}
4219
c0694254
PA
4220/* When requests are passed down from the linux-nat layer to the
4221 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
4222 used. The address space pointer is stored in the inferior object,
4223 but the common code that is passed such ptid can't tell whether
4224 lwpid is a "main" process id or not (it assumes so). We reverse
4225 look up the "main" process id from the lwp here. */
4226
f6ac5f3d
PA
4227struct address_space *
4228linux_nat_target::thread_address_space (ptid_t ptid)
c0694254
PA
4229{
4230 struct lwp_info *lwp;
4231 struct inferior *inf;
4232 int pid;
4233
e38504b3 4234 if (ptid.lwp () == 0)
c0694254
PA
4235 {
4236 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
4237 tgid. */
4238 lwp = find_lwp_pid (ptid);
e99b03dc 4239 pid = lwp->ptid.pid ();
c0694254
PA
4240 }
4241 else
4242 {
4243 /* A (pid,lwpid,0) ptid. */
e99b03dc 4244 pid = ptid.pid ();
c0694254
PA
4245 }
4246
5b6d1e4f 4247 inf = find_inferior_pid (this, pid);
c0694254
PA
4248 gdb_assert (inf != NULL);
4249 return inf->aspace;
4250}
4251
dc146f7c
VP
4252/* Return the cached value of the processor core for thread PTID. */
4253
f6ac5f3d
PA
4254int
4255linux_nat_target::core_of_thread (ptid_t ptid)
dc146f7c
VP
4256{
4257 struct lwp_info *info = find_lwp_pid (ptid);
e0881a8e 4258
dc146f7c
VP
4259 if (info)
4260 return info->core;
4261 return -1;
4262}
4263
7a6a1731
GB
4264/* Implementation of to_filesystem_is_local. */
4265
57810aa7 4266bool
f6ac5f3d 4267linux_nat_target::filesystem_is_local ()
7a6a1731
GB
4268{
4269 struct inferior *inf = current_inferior ();
4270
4271 if (inf->fake_pid_p || inf->pid == 0)
57810aa7 4272 return true;
7a6a1731
GB
4273
4274 return linux_ns_same (inf->pid, LINUX_NS_MNT);
4275}
4276
4277/* Convert the INF argument passed to a to_fileio_* method
4278 to a process ID suitable for passing to its corresponding
4279 linux_mntns_* function. If INF is non-NULL then the
4280 caller is requesting the filesystem seen by INF. If INF
4281 is NULL then the caller is requesting the filesystem seen
4282	 by GDB itself.  We fall back to GDB's filesystem in the case
4283 that INF is non-NULL but its PID is unknown. */
4284
4285static pid_t
4286linux_nat_fileio_pid_of (struct inferior *inf)
4287{
4288 if (inf == NULL || inf->fake_pid_p || inf->pid == 0)
4289 return getpid ();
4290 else
4291 return inf->pid;
4292}
4293
4294/* Implementation of to_fileio_open. */
4295
f6ac5f3d
PA
4296int
4297linux_nat_target::fileio_open (struct inferior *inf, const char *filename,
4298 int flags, int mode, int warn_if_slow,
4299 int *target_errno)
7a6a1731
GB
4300{
4301 int nat_flags;
4302 mode_t nat_mode;
4303 int fd;
4304
4305 if (fileio_to_host_openflags (flags, &nat_flags) == -1
4306 || fileio_to_host_mode (mode, &nat_mode) == -1)
4307 {
4308 *target_errno = FILEIO_EINVAL;
4309 return -1;
4310 }
4311
4312 fd = linux_mntns_open_cloexec (linux_nat_fileio_pid_of (inf),
4313 filename, nat_flags, nat_mode);
4314 if (fd == -1)
4315 *target_errno = host_to_fileio_error (errno);
4316
4317 return fd;
4318}
4319
4320/* Implementation of to_fileio_readlink. */
4321
f6ac5f3d
PA
4322gdb::optional<std::string>
4323linux_nat_target::fileio_readlink (struct inferior *inf, const char *filename,
4324 int *target_errno)
7a6a1731
GB
4325{
4326 char buf[PATH_MAX];
4327 int len;
7a6a1731
GB
4328
4329 len = linux_mntns_readlink (linux_nat_fileio_pid_of (inf),
4330 filename, buf, sizeof (buf));
4331 if (len < 0)
4332 {
4333 *target_errno = host_to_fileio_error (errno);
e0d3522b 4334 return {};
7a6a1731
GB
4335 }
4336
e0d3522b 4337 return std::string (buf, len);
7a6a1731
GB
4338}
4339
4340/* Implementation of to_fileio_unlink. */
4341
f6ac5f3d
PA
4342int
4343linux_nat_target::fileio_unlink (struct inferior *inf, const char *filename,
4344 int *target_errno)
7a6a1731
GB
4345{
4346 int ret;
4347
4348 ret = linux_mntns_unlink (linux_nat_fileio_pid_of (inf),
4349 filename);
4350 if (ret == -1)
4351 *target_errno = host_to_fileio_error (errno);
4352
4353 return ret;
4354}
4355
aa01bd36
PA
4356/* Implementation of the to_thread_events method. */
4357
f6ac5f3d
PA
4358void
4359linux_nat_target::thread_events (int enable)
aa01bd36
PA
4360{
4361 report_thread_events = enable;
4362}
4363
f6ac5f3d
PA
4364linux_nat_target::linux_nat_target ()
4365{
f973ed9c
DJ
4366 /* We don't change the stratum; this target will sit at
4367 process_stratum and thread_db will set at thread_stratum. This
4368 is a little strange, since this is a multi-threaded-capable
4369 target, but we want to be on the stack below thread_db, and we
4370 also want to be used for single-threaded processes. */
f973ed9c
DJ
4371}
4372
f865ee35
JK
4373/* See linux-nat.h. */
4374
4375int
4376linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
9f0bdab8 4377{
da559b09 4378 int pid;
9f0bdab8 4379
e38504b3 4380 pid = ptid.lwp ();
da559b09 4381 if (pid == 0)
e99b03dc 4382 pid = ptid.pid ();
f865ee35 4383
da559b09
JK
4384 errno = 0;
4385 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo);
4386 if (errno != 0)
4387 {
4388 memset (siginfo, 0, sizeof (*siginfo));
4389 return 0;
4390 }
f865ee35 4391 return 1;
9f0bdab8
DJ
4392}
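/* Example use of linux_nat_get_siginfo above (a sketch, not existing
   GDB code): for an LWP stopped by a SIGSEGV, print the faulting
   address the kernel recorded in the siginfo.  Assumes <stdio.h>; the
   helper name is made up.  */

static void
report_segv_address_sketch (ptid_t ptid)
{
  siginfo_t siginfo;

  if (linux_nat_get_siginfo (ptid, &siginfo)
      && siginfo.si_signo == SIGSEGV)
    printf ("LWP %ld faulted at address %p\n",
	    (long) ptid.lwp (), siginfo.si_addr);
}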
4393
7b669087
GB
4394/* See nat/linux-nat.h. */
4395
4396ptid_t
4397current_lwp_ptid (void)
4398{
15a9e13e 4399 gdb_assert (inferior_ptid.lwp_p ());
7b669087
GB
4400 return inferior_ptid;
4401}
4402
6c265988 4403void _initialize_linux_nat ();
d6b0e80f 4404void
6c265988 4405_initialize_linux_nat ()
d6b0e80f 4406{
8864ef42 4407 add_setshow_boolean_cmd ("linux-nat", class_maintenance,
b6e52a0b
AB
4408 &debug_linux_nat, _("\
4409Set debugging of GNU/Linux native target."), _(" \
4410Show debugging of GNU/Linux native target."), _(" \
4411When on, print debug messages relating to the GNU/Linux native target."),
4412 nullptr,
4413 show_debug_linux_nat,
4414 &setdebuglist, &showdebuglist);
b84876c2 4415
7a6a1731
GB
4416 add_setshow_boolean_cmd ("linux-namespaces", class_maintenance,
4417 &debug_linux_namespaces, _("\
4418Set debugging of GNU/Linux namespaces module."), _("\
4419Show debugging of GNU/Linux namespaces module."), _("\
4420Enables printf debugging output."),
4421 NULL,
4422 NULL,
4423 &setdebuglist, &showdebuglist);
4424
7feb7d06
PA
4425 /* Install a SIGCHLD handler. */
4426 sigchld_action.sa_handler = sigchld_handler;
4427 sigemptyset (&sigchld_action.sa_mask);
4428 sigchld_action.sa_flags = SA_RESTART;
b84876c2
PA
4429
4430 /* Make it the default. */
7feb7d06 4431 sigaction (SIGCHLD, &sigchld_action, NULL);
d6b0e80f
AC
4432
4433 /* Make sure we don't block SIGCHLD during a sigsuspend. */
21987b9c 4434 gdb_sigmask (SIG_SETMASK, NULL, &suspend_mask);
d6b0e80f
AC
4435 sigdelset (&suspend_mask, SIGCHLD);
4436
7feb7d06 4437 sigemptyset (&blocked_mask);
774113b0
PA
4438
4439 lwp_lwpid_htab_create ();
d6b0e80f
AC
4440}
4441\f
4442
4443/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4444 the GNU/Linux Threads library and therefore doesn't really belong
4445 here. */
4446
089436f7
TV
4447/* NPTL reserves the first two RT signals, but does not provide any
4448 way for the debugger to query the signal numbers - fortunately
4449 they don't change. */
4450static int lin_thread_signals[] = { __SIGRTMIN, __SIGRTMIN + 1 };
d6b0e80f 4451
089436f7
TV
4452/* See linux-nat.h. */
4453
4454unsigned int
4455lin_thread_get_thread_signal_num (void)
d6b0e80f 4456{
089436f7
TV
4457 return sizeof (lin_thread_signals) / sizeof (lin_thread_signals[0]);
4458}
d6b0e80f 4459
089436f7
TV
4460/* See linux-nat.h. */
4461
4462int
4463lin_thread_get_thread_signal (unsigned int i)
4464{
4465 gdb_assert (i < lin_thread_get_thread_signal_num ());
4466 return lin_thread_signals[i];
d6b0e80f 4467}