/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001-2022 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "inferior.h"
#include "infrun.h"
#include "target.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdbsupport/gdb_wait.h"
#include <unistd.h>
#include <sys/syscall.h>
#include "nat/gdb_ptrace.h"
#include "linux-nat.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "gdbcmd.h"
#include "regcache.h"
#include "regset.h"
#include "inf-child.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/procfs.h>		/* for elf_gregset etc.  */
#include "elf-bfd.h"		/* for elfcore_write_* */
#include "gregset.h"		/* for gregset */
#include "gdbcore.h"		/* for get_exec_file */
#include <ctype.h>		/* for isdigit */
#include <sys/stat.h>		/* for struct stat */
#include <fcntl.h>		/* for O_RDONLY */
#include "inf-loop.h"
#include "gdbsupport/event-loop.h"
#include "event-top.h"
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include "xml-support.h"
#include <sys/vfs.h>
#include "solib.h"
#include "nat/linux-osdata.h"
#include "linux-tdep.h"
#include "symfile.h"
#include "gdbsupport/agent.h"
#include "tracepoint.h"
#include "gdbsupport/buffer.h"
#include "target-descriptions.h"
#include "gdbsupport/filestuff.h"
#include "objfiles.h"
#include "nat/linux-namespaces.h"
#include "gdbsupport/block-signals.h"
#include "gdbsupport/fileio.h"
#include "gdbsupport/scope-exit.h"
#include "gdbsupport/gdb-sigmask.h"
#include "gdbsupport/common-debug.h"
#include <unordered_map>

/* This comment documents high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid,
passing the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid is not quite good:

- If the thread group leader exits while other threads in the thread
  group still exist, waitpid(TGID, ...) hangs.  That waitpid won't
  return an exit status until the other threads in the group are
  reaped.

- When a non-leader thread execs, that thread just vanishes without
  reporting an exit (so we'd hang if we waited for it explicitly in
  that case).  The exec event is instead reported to the TGID pid.

The solution is to always use -1 and WNOHANG, together with
sigsuspend.

First, we use non-blocking waitpid to check for events.  If nothing is
found, we use sigsuspend to wait for SIGCHLD.  When SIGCHLD arrives,
it means something happened to a child process.  As soon as we know
there's an event, we get back to calling nonblocking waitpid.

Note that SIGCHLD should be blocked between waitpid and sigsuspend
calls, so that we don't miss a signal.  If SIGCHLD arrives in between,
when it's blocked, the signal becomes pending and sigsuspend
immediately notices it and returns.

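As a rough sketch only (simplified, with error handling omitted; the
real logic lives in linux_nat_wait and the stop/wait callbacks below),
the scheme described above amounts to:

    sigset_t prev_mask;
    int status;
    pid_t pid;

    block_child_signals (&prev_mask);   // SIGCHLD is now blocked
    while ((pid = waitpid (-1, &status, WNOHANG | __WALL)) <= 0)
      {
        // Nothing reported yet: atomically unblock SIGCHLD and sleep.
        // A SIGCHLD that arrived while it was blocked is already
        // pending, so sigsuspend returns immediately and we poll again.
        sigsuspend (&prev_mask);
      }
    // Process the event in STATUS for PID, then go back to the
    // non-blocking waitpid.
    restore_child_signals_mask (&prev_mask);
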
Waiting for events in async mode (TARGET_WNOHANG)
=================================================

In async mode, GDB should always be ready to handle both user input
and target events, so neither blocking waitpid nor sigsuspend are
viable options.  Instead, we should asynchronously notify the GDB main
event loop whenever there's an unprocessed event from the target.  We
detect asynchronous target events by handling SIGCHLD signals.  To
notify the event loop about target events, an event pipe is used
--- the pipe is registered as a waitable event source in the event loop,
the event loop select/poll's on the read end of this pipe (as well as on
other event sources, e.g., stdin), and the SIGCHLD handler marks the
event pipe to raise an event.  This is more portable than relying on
pselect/ppoll, since on kernels that lack those syscalls, libc
emulates them with select/poll+sigprocmask, and that is racy
(a.k.a. plain broken).

Obviously, if we fail to notify the event loop when there's a target
event, it's bad.  OTOH, if we notify the event loop when there's no
event from the target, linux_nat_wait will detect that there's no real
event to report, and return an event of type TARGET_WAITKIND_IGNORE.
This is mostly harmless, but it will waste time and is better avoided.

The main design point is that every time GDB is outside linux-nat.c,
we have a SIGCHLD handler installed that is called when something
happens to the target and notifies the GDB event loop.  Whenever GDB
core decides to handle the event, and calls into linux-nat.c, we
process things as in sync mode, except that we never block in
sigsuspend.

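For illustration only, the essence of this event-pipe ("self-pipe")
trick is sketched below.  GDB's real implementation uses its event-loop
and event-pipe facilities rather than these raw calls, and the names
used here are purely illustrative:

    static int linux_nat_event_pipe[2];  // [0] read end, [1] write end

    static void
    sigchld_handler (int signo)
    {
      int saved_errno = errno;
      char c = '+';

      // Async-signal-safe: just mark the pipe.  The event loop,
      // select/poll'ing on linux_nat_event_pipe[0], wakes up and later
      // calls back into linux-nat.c, which does the real waitpid work.
      if (write (linux_nat_event_pipe[1], &c, 1) < 0)
        ;  // nothing useful to do from a signal handler
      errno = saved_errno;
    }
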
While processing an event, we may end up momentarily blocked in
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
return quickly.  E.g., in all-stop mode, before reporting to the core
that an LWP hit a breakpoint, all LWPs are stopped by sending them
SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
Note that this is different from blocking indefinitely waiting for the
next event --- here, we're already handling an event.

Use of signals
==============

We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of another
signal is not entirely significant; we just need a signal to be delivered,
so that we can intercept it.  SIGSTOP's advantage is that it can not be
blocked.  A disadvantage is that it is not a real-time signal, so it can only
be queued once; we do not keep track of other sources of SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL.  But we can't
use them, because they have special behavior when the signal is generated -
not when it is delivered.  SIGCONT resumes the entire thread group and SIGKILL
kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the thread we
tkill'd.  But we never let the SIGSTOP be delivered; we always intercept and
cancel it (by PTRACE_CONT without passing SIGSTOP).

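Concretely, stopping one LWP and later resuming it without the stop ever
being seen by the program looks roughly like this (simplified; the real
code is in kill_lwp, stop_callback and stop_wait_callback below):

    kill_lwp (lwpid, SIGSTOP);            // tkill: queue SIGSTOP for this LWP
    my_waitpid (lwpid, &status, __WALL);  // intercept the stop notification;
                                          // WSTOPSIG (status) == SIGSTOP here
    ...                                   // inspect the LWP while it's stopped
    ptrace (PTRACE_CONT, lwpid, 0, 0);    // resume passing signal 0, i.e.,
                                          // cancel the SIGSTOP instead of
                                          // delivering it to the thread group
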
We could use a real-time signal instead.  This would solve those problems; we
could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
generates it, and there are races with trying to find a signal that is not
blocked.

Exec events
===========

The case of a thread group (process) with 3 or more threads, in which a
thread other than the leader execs, is worth detailing:

On an exec, the Linux kernel destroys all threads except the execing
one in the thread group, and resets the execing thread's tid to the
tgid.  No exit notification is sent for the execing thread -- from the
ptracer's perspective, it appears as though the execing thread just
vanishes.  Until we reap all other threads except the leader and the
execing thread, the leader will be zombie, and the execing thread will
be in `D (disc sleep)' state.  As soon as all other threads are
reaped, the execing thread changes its tid to the tgid, and the
previous (zombie) leader vanishes, giving place to the "new"
leader.  */
a0ef4274 185
dba24537
AC
186#ifndef O_LARGEFILE
187#define O_LARGEFILE 0
188#endif
0274a8ce 189
f6ac5f3d
PA
190struct linux_nat_target *linux_target;
191
433bbbf8 192/* Does the current host support PTRACE_GETREGSET? */
0bdb2f78 193enum tribool have_ptrace_getregset = TRIBOOL_UNKNOWN;
433bbbf8 194
b6e52a0b
AB
195/* When true, print debug messages relating to the linux native target. */
196
197static bool debug_linux_nat;
198
8864ef42 199/* Implement 'show debug linux-nat'. */
b6e52a0b 200
920d2a44
AC
201static void
202show_debug_linux_nat (struct ui_file *file, int from_tty,
203 struct cmd_list_element *c, const char *value)
204{
6cb06a8c
TT
205 gdb_printf (file, _("Debugging of GNU/Linux native targets is %s.\n"),
206 value);
920d2a44 207}
d6b0e80f 208
17417fb0 209/* Print a linux-nat debug statement. */
9327494e
SM
210
211#define linux_nat_debug_printf(fmt, ...) \
74b773fc 212 debug_prefixed_printf_cond (debug_linux_nat, "linux-nat", fmt, ##__VA_ARGS__)
9327494e 213
b6e52a0b
AB
214/* Print "linux-nat" enter/exit debug statements. */
215
216#define LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT \
217 scoped_debug_enter_exit (debug_linux_nat, "linux-nat")
218
ae087d01
DJ
219struct simple_pid_list
220{
221 int pid;
3d799a95 222 int status;
ae087d01
DJ
223 struct simple_pid_list *next;
224};
05c309a8 225static struct simple_pid_list *stopped_pids;
ae087d01 226
aa01bd36
PA
227/* Whether target_thread_events is in effect. */
228static int report_thread_events;
229
7feb7d06
PA
230static int kill_lwp (int lwpid, int signo);
231
d3a70e03 232static int stop_callback (struct lwp_info *lp);
7feb7d06
PA
233
234static void block_child_signals (sigset_t *prev_mask);
235static void restore_child_signals_mask (sigset_t *prev_mask);
2277426b
PA
236
237struct lwp_info;
238static struct lwp_info *add_lwp (ptid_t ptid);
239static void purge_lwp_list (int pid);
4403d8e9 240static void delete_lwp (ptid_t ptid);
2277426b
PA
241static struct lwp_info *find_lwp_pid (ptid_t ptid);
242
8a99810d
PA
243static int lwp_status_pending_p (struct lwp_info *lp);
244
e7ad2f14
PA
245static void save_stop_reason (struct lwp_info *lp);
246
1bcb0708 247static bool proc_mem_file_is_writable ();
8a89ddbd
PA
248static void close_proc_mem_file (pid_t pid);
249static void open_proc_mem_file (ptid_t ptid);
05c06f31 250
6cf20c46
PA
251/* Return TRUE if LWP is the leader thread of the process. */
252
253static bool
254is_leader (lwp_info *lp)
255{
256 return lp->ptid.pid () == lp->ptid.lwp ();
257}
258
cff068da
GB
259\f
260/* LWP accessors. */
261
262/* See nat/linux-nat.h. */
263
264ptid_t
265ptid_of_lwp (struct lwp_info *lwp)
266{
267 return lwp->ptid;
268}
269
270/* See nat/linux-nat.h. */
271
4b134ca1
GB
272void
273lwp_set_arch_private_info (struct lwp_info *lwp,
274 struct arch_lwp_info *info)
275{
276 lwp->arch_private = info;
277}
278
279/* See nat/linux-nat.h. */
280
281struct arch_lwp_info *
282lwp_arch_private_info (struct lwp_info *lwp)
283{
284 return lwp->arch_private;
285}
286
287/* See nat/linux-nat.h. */
288
cff068da
GB
289int
290lwp_is_stopped (struct lwp_info *lwp)
291{
292 return lwp->stopped;
293}
294
295/* See nat/linux-nat.h. */
296
297enum target_stop_reason
298lwp_stop_reason (struct lwp_info *lwp)
299{
300 return lwp->stop_reason;
301}
302
0e00e962
AA
303/* See nat/linux-nat.h. */
304
305int
306lwp_is_stepping (struct lwp_info *lwp)
307{
308 return lwp->step;
309}
310
ae087d01
DJ
311\f
312/* Trivial list manipulation functions to keep track of a list of
313 new stopped processes. */
314static void
3d799a95 315add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
ae087d01 316{
8d749320 317 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
e0881a8e 318
ae087d01 319 new_pid->pid = pid;
3d799a95 320 new_pid->status = status;
ae087d01
DJ
321 new_pid->next = *listp;
322 *listp = new_pid;
323}
324
325static int
46a96992 326pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
ae087d01
DJ
327{
328 struct simple_pid_list **p;
329
330 for (p = listp; *p != NULL; p = &(*p)->next)
331 if ((*p)->pid == pid)
332 {
333 struct simple_pid_list *next = (*p)->next;
e0881a8e 334
46a96992 335 *statusp = (*p)->status;
ae087d01
DJ
336 xfree (*p);
337 *p = next;
338 return 1;
339 }
340 return 0;
341}
342
de0d863e
DB
343/* Return the ptrace options that we want to try to enable. */
344
345static int
346linux_nat_ptrace_options (int attached)
347{
348 int options = 0;
349
350 if (!attached)
351 options |= PTRACE_O_EXITKILL;
352
353 options |= (PTRACE_O_TRACESYSGOOD
354 | PTRACE_O_TRACEVFORKDONE
355 | PTRACE_O_TRACEVFORK
356 | PTRACE_O_TRACEFORK
357 | PTRACE_O_TRACEEXEC);
358
359 return options;
360}
361
1b919490
VB
362/* Initialize ptrace and procfs warnings and check for supported
363 ptrace features given PID.
beed38b8
JB
364
365 ATTACHED should be nonzero iff we attached to the inferior. */
3993f6b1
DJ
366
367static void
1b919490 368linux_init_ptrace_procfs (pid_t pid, int attached)
3993f6b1 369{
de0d863e
DB
370 int options = linux_nat_ptrace_options (attached);
371
372 linux_enable_event_reporting (pid, options);
96d7229d 373 linux_ptrace_init_warnings ();
1b919490 374 linux_proc_init_warnings ();
4de4c07c
DJ
375}
376
f6ac5f3d
PA
377linux_nat_target::~linux_nat_target ()
378{}
379
380void
381linux_nat_target::post_attach (int pid)
4de4c07c 382{
1b919490 383 linux_init_ptrace_procfs (pid, 1);
4de4c07c
DJ
384}
385
200fd287
AB
386/* Implement the virtual inf_ptrace_target::post_startup_inferior method. */
387
f6ac5f3d
PA
388void
389linux_nat_target::post_startup_inferior (ptid_t ptid)
4de4c07c 390{
1b919490 391 linux_init_ptrace_procfs (ptid.pid (), 0);
4de4c07c
DJ
392}
393
4403d8e9
JK
394/* Return the number of known LWPs in the tgid given by PID. */
395
396static int
397num_lwps (int pid)
398{
399 int count = 0;
4403d8e9 400
901b9821 401 for (const lwp_info *lp ATTRIBUTE_UNUSED : all_lwps ())
e99b03dc 402 if (lp->ptid.pid () == pid)
4403d8e9
JK
403 count++;
404
405 return count;
406}
407
169bb27b 408/* Deleter for lwp_info unique_ptr specialisation. */
4403d8e9 409
169bb27b 410struct lwp_deleter
4403d8e9 411{
169bb27b
AB
412 void operator() (struct lwp_info *lwp) const
413 {
414 delete_lwp (lwp->ptid);
415 }
416};
4403d8e9 417
169bb27b
AB
418/* A unique_ptr specialisation for lwp_info. */
419
420typedef std::unique_ptr<struct lwp_info, lwp_deleter> lwp_info_up;
4403d8e9 421
82d1f134 422/* Target hook for follow_fork. */
d83ad864 423
e97007b6 424void
82d1f134
SM
425linux_nat_target::follow_fork (inferior *child_inf, ptid_t child_ptid,
426 target_waitkind fork_kind, bool follow_child,
427 bool detach_fork)
3993f6b1 428{
82d1f134
SM
429 inf_ptrace_target::follow_fork (child_inf, child_ptid, fork_kind,
430 follow_child, detach_fork);
431
d83ad864 432 if (!follow_child)
4de4c07c 433 {
3a849a34
SM
434 bool has_vforked = fork_kind == TARGET_WAITKIND_VFORKED;
435 ptid_t parent_ptid = inferior_ptid;
3a849a34
SM
436 int parent_pid = parent_ptid.lwp ();
437 int child_pid = child_ptid.lwp ();
4de4c07c 438
1777feb0 439 /* We're already attached to the parent, by default. */
3a849a34 440 lwp_info *child_lp = add_lwp (child_ptid);
d83ad864
DB
441 child_lp->stopped = 1;
442 child_lp->last_resume_kind = resume_stop;
4de4c07c 443
ac264b3b
MS
444 /* Detach new forked process? */
445 if (detach_fork)
f75c00e4 446 {
95347337
AB
447 int child_stop_signal = 0;
448 bool detach_child = true;
4403d8e9 449
169bb27b
AB
450 /* Move CHILD_LP into a unique_ptr and clear the source pointer
451 to prevent us doing anything stupid with it. */
452 lwp_info_up child_lp_ptr (child_lp);
453 child_lp = nullptr;
454
455 linux_target->low_prepare_to_resume (child_lp_ptr.get ());
c077881a
HZ
456
457 /* When debugging an inferior in an architecture that supports
458 hardware single stepping on a kernel without commit
459 6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
460 process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
461 set if the parent process had them set.
462 To work around this, single step the child process
463 once before detaching to clear the flags. */
464
2fd9d7ca
PA
465 /* Note that we consult the parent's architecture instead of
466 the child's because there's no inferior for the child at
467 this point. */
c077881a 468 if (!gdbarch_software_single_step_p (target_thread_architecture
2fd9d7ca 469 (parent_ptid)))
c077881a 470 {
95347337
AB
471 int status;
472
c077881a
HZ
473 linux_disable_event_reporting (child_pid);
474 if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
475 perror_with_name (_("Couldn't do single step"));
476 if (my_waitpid (child_pid, &status, 0) < 0)
477 perror_with_name (_("Couldn't wait vfork process"));
95347337
AB
478 else
479 {
480 detach_child = WIFSTOPPED (status);
481 child_stop_signal = WSTOPSIG (status);
482 }
c077881a
HZ
483 }
484
95347337 485 if (detach_child)
9caaaa83 486 {
95347337 487 int signo = child_stop_signal;
9caaaa83 488
9caaaa83
PA
489 if (signo != 0
490 && !signal_pass_state (gdb_signal_from_host (signo)))
491 signo = 0;
492 ptrace (PTRACE_DETACH, child_pid, 0, signo);
8a89ddbd
PA
493
494 close_proc_mem_file (child_pid);
9caaaa83 495 }
ac264b3b 496 }
9016a515
DJ
497
498 if (has_vforked)
499 {
a2885186
SM
500 lwp_info *parent_lp = find_lwp_pid (parent_ptid);
501 linux_nat_debug_printf ("waiting for VFORK_DONE on %d", parent_pid);
502 parent_lp->stopped = 1;
6c95b8df 503
a2885186
SM
504 /* We'll handle the VFORK_DONE event like any other
505 event, in target_wait. */
9016a515 506 }
4de4c07c 507 }
3993f6b1 508 else
4de4c07c 509 {
3ced3da4 510 struct lwp_info *child_lp;
4de4c07c 511
82d1f134 512 child_lp = add_lwp (child_ptid);
3ced3da4 513 child_lp->stopped = 1;
25289eb2 514 child_lp->last_resume_kind = resume_stop;
4de4c07c 515 }
4de4c07c
DJ
516}
517
4de4c07c 518\f
f6ac5f3d
PA
519int
520linux_nat_target::insert_fork_catchpoint (int pid)
4de4c07c 521{
a2885186 522 return 0;
3993f6b1
DJ
523}
524
f6ac5f3d
PA
525int
526linux_nat_target::remove_fork_catchpoint (int pid)
eb73ad13
PA
527{
528 return 0;
529}
530
f6ac5f3d
PA
531int
532linux_nat_target::insert_vfork_catchpoint (int pid)
3993f6b1 533{
a2885186 534 return 0;
3993f6b1
DJ
535}
536
f6ac5f3d
PA
537int
538linux_nat_target::remove_vfork_catchpoint (int pid)
eb73ad13
PA
539{
540 return 0;
541}
542
f6ac5f3d
PA
543int
544linux_nat_target::insert_exec_catchpoint (int pid)
3993f6b1 545{
a2885186 546 return 0;
3993f6b1
DJ
547}
548
f6ac5f3d
PA
549int
550linux_nat_target::remove_exec_catchpoint (int pid)
eb73ad13
PA
551{
552 return 0;
553}
554
f6ac5f3d
PA
555int
556linux_nat_target::set_syscall_catchpoint (int pid, bool needed, int any_count,
557 gdb::array_view<const int> syscall_counts)
a96d9b2e 558{
a96d9b2e
SDJ
559 /* On GNU/Linux, we ignore the arguments. It means that we only
560 enable the syscall catchpoints, but do not disable them.
77b06cd7 561
649a140c 562 Also, we do not use the `syscall_counts' information because we do not
a96d9b2e
SDJ
563 filter system calls here. We let GDB do the logic for us. */
564 return 0;
565}
566
774113b0
PA
567/* List of known LWPs, keyed by LWP PID. This speeds up the common
568 case of mapping a PID returned from the kernel to our corresponding
569 lwp_info data structure. */
570static htab_t lwp_lwpid_htab;
571
572/* Calculate a hash from a lwp_info's LWP PID. */
573
574static hashval_t
575lwp_info_hash (const void *ap)
576{
577 const struct lwp_info *lp = (struct lwp_info *) ap;
e38504b3 578 pid_t pid = lp->ptid.lwp ();
774113b0
PA
579
580 return iterative_hash_object (pid, 0);
581}
582
583/* Equality function for the lwp_info hash table. Compares the LWP's
584 PID. */
585
586static int
587lwp_lwpid_htab_eq (const void *a, const void *b)
588{
589 const struct lwp_info *entry = (const struct lwp_info *) a;
590 const struct lwp_info *element = (const struct lwp_info *) b;
591
e38504b3 592 return entry->ptid.lwp () == element->ptid.lwp ();
774113b0
PA
593}
594
595/* Create the lwp_lwpid_htab hash table. */
596
597static void
598lwp_lwpid_htab_create (void)
599{
600 lwp_lwpid_htab = htab_create (100, lwp_info_hash, lwp_lwpid_htab_eq, NULL);
601}
602
603/* Add LP to the hash table. */
604
605static void
606lwp_lwpid_htab_add_lwp (struct lwp_info *lp)
607{
608 void **slot;
609
610 slot = htab_find_slot (lwp_lwpid_htab, lp, INSERT);
611 gdb_assert (slot != NULL && *slot == NULL);
612 *slot = lp;
613}
614
615/* Head of doubly-linked list of known LWPs. Sorted by reverse
616 creation order. This order is assumed in some cases. E.g.,
617 reaping status after killing alls lwps of a process: the leader LWP
618 must be reaped last. */
901b9821
SM
619
620static intrusive_list<lwp_info> lwp_list;
621
622/* See linux-nat.h. */
623
624lwp_info_range
625all_lwps ()
626{
627 return lwp_info_range (lwp_list.begin ());
628}
629
630/* See linux-nat.h. */
631
632lwp_info_safe_range
633all_lwps_safe ()
634{
635 return lwp_info_safe_range (lwp_list.begin ());
636}
774113b0
PA
637
638/* Add LP to sorted-by-reverse-creation-order doubly-linked list. */
639
640static void
641lwp_list_add (struct lwp_info *lp)
642{
901b9821 643 lwp_list.push_front (*lp);
774113b0
PA
644}
645
646/* Remove LP from sorted-by-reverse-creation-order doubly-linked
647 list. */
648
649static void
650lwp_list_remove (struct lwp_info *lp)
651{
652 /* Remove from sorted-by-creation-order list. */
901b9821 653 lwp_list.erase (lwp_list.iterator_to (*lp));
774113b0
PA
654}
655
d6b0e80f
AC
656\f
657
d6b0e80f
AC
658/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
659 _initialize_linux_nat. */
660static sigset_t suspend_mask;
661
7feb7d06
PA
662/* Signals to block to make that sigsuspend work. */
663static sigset_t blocked_mask;
664
665/* SIGCHLD action. */
6bd434d6 666static struct sigaction sigchld_action;
b84876c2 667
7feb7d06
PA
668/* Block child signals (SIGCHLD and linux threads signals), and store
669 the previous mask in PREV_MASK. */
84e46146 670
7feb7d06
PA
671static void
672block_child_signals (sigset_t *prev_mask)
673{
674 /* Make sure SIGCHLD is blocked. */
675 if (!sigismember (&blocked_mask, SIGCHLD))
676 sigaddset (&blocked_mask, SIGCHLD);
677
21987b9c 678 gdb_sigmask (SIG_BLOCK, &blocked_mask, prev_mask);
7feb7d06
PA
679}
680
681/* Restore child signals mask, previously returned by
682 block_child_signals. */
683
684static void
685restore_child_signals_mask (sigset_t *prev_mask)
686{
21987b9c 687 gdb_sigmask (SIG_SETMASK, prev_mask, NULL);
7feb7d06 688}
2455069d
UW
689
690/* Mask of signals to pass directly to the inferior. */
691static sigset_t pass_mask;
692
693/* Update signals to pass to the inferior. */
f6ac5f3d 694void
adc6a863
PA
695linux_nat_target::pass_signals
696 (gdb::array_view<const unsigned char> pass_signals)
2455069d
UW
697{
698 int signo;
699
700 sigemptyset (&pass_mask);
701
702 for (signo = 1; signo < NSIG; signo++)
703 {
2ea28649 704 int target_signo = gdb_signal_from_host (signo);
adc6a863 705 if (target_signo < pass_signals.size () && pass_signals[target_signo])
dda83cd7 706 sigaddset (&pass_mask, signo);
2455069d
UW
707 }
708}
709
d6b0e80f
AC
710\f
711
712/* Prototypes for local functions. */
d3a70e03
TT
713static int stop_wait_callback (struct lwp_info *lp);
714static int resume_stopped_resumed_lwps (struct lwp_info *lp, const ptid_t wait_ptid);
ced2dffb 715static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
710151dd 716
d6b0e80f 717\f
d6b0e80f 718
7b50312a
PA
719/* Destroy and free LP. */
720
676362df 721lwp_info::~lwp_info ()
7b50312a 722{
466eecee 723 /* Let the arch specific bits release arch_lwp_info. */
676362df 724 linux_target->low_delete_thread (this->arch_private);
7b50312a
PA
725}
726
774113b0 727/* Traversal function for purge_lwp_list. */
d90e17a7 728
774113b0
PA
729static int
730lwp_lwpid_htab_remove_pid (void **slot, void *info)
d90e17a7 731{
774113b0
PA
732 struct lwp_info *lp = (struct lwp_info *) *slot;
733 int pid = *(int *) info;
d90e17a7 734
e99b03dc 735 if (lp->ptid.pid () == pid)
d90e17a7 736 {
774113b0
PA
737 htab_clear_slot (lwp_lwpid_htab, slot);
738 lwp_list_remove (lp);
676362df 739 delete lp;
774113b0 740 }
d90e17a7 741
774113b0
PA
742 return 1;
743}
d90e17a7 744
774113b0
PA
745/* Remove all LWPs belong to PID from the lwp list. */
746
747static void
748purge_lwp_list (int pid)
749{
750 htab_traverse_noresize (lwp_lwpid_htab, lwp_lwpid_htab_remove_pid, &pid);
d90e17a7
PA
751}
752
26cb8b7c
PA
753/* Add the LWP specified by PTID to the list. PTID is the first LWP
754 in the process. Return a pointer to the structure describing the
755 new LWP.
756
757 This differs from add_lwp in that we don't let the arch specific
758 bits know about this new thread. Current clients of this callback
759 take the opportunity to install watchpoints in the new thread, and
760 we shouldn't do that for the first thread. If we're spawning a
761 child ("run"), the thread executes the shell wrapper first, and we
762 shouldn't touch it until it execs the program we want to debug.
763 For "attach", it'd be okay to call the callback, but it's not
764 necessary, because watchpoints can't yet have been inserted into
765 the inferior. */
d6b0e80f
AC
766
767static struct lwp_info *
26cb8b7c 768add_initial_lwp (ptid_t ptid)
d6b0e80f 769{
15a9e13e 770 gdb_assert (ptid.lwp_p ());
d6b0e80f 771
b0f6c8d2 772 lwp_info *lp = new lwp_info (ptid);
d6b0e80f 773
d6b0e80f 774
774113b0
PA
775 /* Add to sorted-by-reverse-creation-order list. */
776 lwp_list_add (lp);
777
778 /* Add to keyed-by-pid htab. */
779 lwp_lwpid_htab_add_lwp (lp);
d6b0e80f 780
26cb8b7c
PA
781 return lp;
782}
783
784/* Add the LWP specified by PID to the list. Return a pointer to the
785 structure describing the new LWP. The LWP should already be
786 stopped. */
787
788static struct lwp_info *
789add_lwp (ptid_t ptid)
790{
791 struct lwp_info *lp;
792
793 lp = add_initial_lwp (ptid);
794
6e012a6c
PA
795 /* Let the arch specific bits know about this new thread. Current
796 clients of this callback take the opportunity to install
26cb8b7c
PA
797 watchpoints in the new thread. We don't do this for the first
798 thread though. See add_initial_lwp. */
135340af 799 linux_target->low_new_thread (lp);
9f0bdab8 800
d6b0e80f
AC
801 return lp;
802}
803
804/* Remove the LWP specified by PID from the list. */
805
806static void
807delete_lwp (ptid_t ptid)
808{
b0f6c8d2 809 lwp_info dummy (ptid);
d6b0e80f 810
b0f6c8d2 811 void **slot = htab_find_slot (lwp_lwpid_htab, &dummy, NO_INSERT);
774113b0
PA
812 if (slot == NULL)
813 return;
d6b0e80f 814
b0f6c8d2 815 lwp_info *lp = *(struct lwp_info **) slot;
774113b0 816 gdb_assert (lp != NULL);
d6b0e80f 817
774113b0 818 htab_clear_slot (lwp_lwpid_htab, slot);
d6b0e80f 819
774113b0
PA
820 /* Remove from sorted-by-creation-order list. */
821 lwp_list_remove (lp);
d6b0e80f 822
774113b0 823 /* Release. */
676362df 824 delete lp;
d6b0e80f
AC
825}
826
827/* Return a pointer to the structure describing the LWP corresponding
828 to PID. If no corresponding LWP could be found, return NULL. */
829
830static struct lwp_info *
831find_lwp_pid (ptid_t ptid)
832{
d6b0e80f
AC
833 int lwp;
834
15a9e13e 835 if (ptid.lwp_p ())
e38504b3 836 lwp = ptid.lwp ();
d6b0e80f 837 else
e99b03dc 838 lwp = ptid.pid ();
d6b0e80f 839
b0f6c8d2
SM
840 lwp_info dummy (ptid_t (0, lwp));
841 return (struct lwp_info *) htab_find (lwp_lwpid_htab, &dummy);
d6b0e80f
AC
842}
843
6d4ee8c6 844/* See nat/linux-nat.h. */
d6b0e80f
AC
845
846struct lwp_info *
d90e17a7 847iterate_over_lwps (ptid_t filter,
d3a70e03 848 gdb::function_view<iterate_over_lwps_ftype> callback)
d6b0e80f 849{
901b9821 850 for (lwp_info *lp : all_lwps_safe ())
d6b0e80f 851 {
26a57c92 852 if (lp->ptid.matches (filter))
d90e17a7 853 {
d3a70e03 854 if (callback (lp) != 0)
d90e17a7
PA
855 return lp;
856 }
d6b0e80f
AC
857 }
858
859 return NULL;
860}
861
2277426b
PA
862/* Update our internal state when changing from one checkpoint to
863 another indicated by NEW_PTID. We can only switch single-threaded
864 applications, so we only create one new LWP, and the previous list
865 is discarded. */
f973ed9c
DJ
866
867void
868linux_nat_switch_fork (ptid_t new_ptid)
869{
870 struct lwp_info *lp;
871
e99b03dc 872 purge_lwp_list (inferior_ptid.pid ());
2277426b 873
f973ed9c
DJ
874 lp = add_lwp (new_ptid);
875 lp->stopped = 1;
e26af52f 876
2277426b
PA
877 /* This changes the thread's ptid while preserving the gdb thread
878 num. Also changes the inferior pid, while preserving the
879 inferior num. */
5b6d1e4f 880 thread_change_ptid (linux_target, inferior_ptid, new_ptid);
2277426b
PA
881
882 /* We've just told GDB core that the thread changed target id, but,
883 in fact, it really is a different thread, with different register
884 contents. */
885 registers_changed ();
e26af52f
DJ
886}
887
e26af52f
DJ
888/* Handle the exit of a single thread LP. */
889
890static void
891exit_lwp (struct lwp_info *lp)
892{
5b6d1e4f 893 struct thread_info *th = find_thread_ptid (linux_target, lp->ptid);
063bfe2e
VP
894
895 if (th)
e26af52f 896 {
17faa917 897 if (print_thread_events)
6cb06a8c
TT
898 gdb_printf (_("[%s exited]\n"),
899 target_pid_to_str (lp->ptid).c_str ());
17faa917 900
00431a78 901 delete_thread (th);
e26af52f
DJ
902 }
903
904 delete_lwp (lp->ptid);
905}
906
a0ef4274
DJ
907/* Wait for the LWP specified by LP, which we have just attached to.
908 Returns a wait status for that LWP, to cache. */
909
910static int
22827c51 911linux_nat_post_attach_wait (ptid_t ptid, int *signalled)
a0ef4274 912{
e38504b3 913 pid_t new_pid, pid = ptid.lwp ();
a0ef4274
DJ
914 int status;
915
644cebc9 916 if (linux_proc_pid_is_stopped (pid))
a0ef4274 917 {
9327494e 918 linux_nat_debug_printf ("Attaching to a stopped process");
a0ef4274
DJ
919
920 /* The process is definitely stopped. It is in a job control
921 stop, unless the kernel predates the TASK_STOPPED /
922 TASK_TRACED distinction, in which case it might be in a
923 ptrace stop. Make sure it is in a ptrace stop; from there we
924 can kill it, signal it, et cetera.
925
dda83cd7 926 First make sure there is a pending SIGSTOP. Since we are
a0ef4274
DJ
927 already attached, the process can not transition from stopped
928 to running without a PTRACE_CONT; so we know this signal will
929 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
930 probably already in the queue (unless this kernel is old
931 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
932 is not an RT signal, it can only be queued once. */
933 kill_lwp (pid, SIGSTOP);
934
935 /* Finally, resume the stopped process. This will deliver the SIGSTOP
936 (or a higher priority signal, just like normal PTRACE_ATTACH). */
937 ptrace (PTRACE_CONT, pid, 0, 0);
938 }
939
940 /* Make sure the initial process is stopped. The user-level threads
941 layer might want to poke around in the inferior, and that won't
942 work if things haven't stabilized yet. */
4a6ed09b 943 new_pid = my_waitpid (pid, &status, __WALL);
dacc9cb2
PP
944 gdb_assert (pid == new_pid);
945
946 if (!WIFSTOPPED (status))
947 {
948 /* The pid we tried to attach has apparently just exited. */
9327494e 949 linux_nat_debug_printf ("Failed to stop %d: %s", pid,
8d06918f 950 status_to_str (status).c_str ());
dacc9cb2
PP
951 return status;
952 }
a0ef4274
DJ
953
954 if (WSTOPSIG (status) != SIGSTOP)
955 {
956 *signalled = 1;
9327494e 957 linux_nat_debug_printf ("Received %s after attaching",
8d06918f 958 status_to_str (status).c_str ());
a0ef4274
DJ
959 }
960
961 return status;
962}
963
f6ac5f3d
PA
964void
965linux_nat_target::create_inferior (const char *exec_file,
966 const std::string &allargs,
967 char **env, int from_tty)
b84876c2 968{
41272101
TT
969 maybe_disable_address_space_randomization restore_personality
970 (disable_randomization);
b84876c2
PA
971
972 /* The fork_child mechanism is synchronous and calls target_wait, so
973 we have to mask the async mode. */
974
2455069d 975 /* Make sure we report all signals during startup. */
adc6a863 976 pass_signals ({});
2455069d 977
f6ac5f3d 978 inf_ptrace_target::create_inferior (exec_file, allargs, env, from_tty);
8a89ddbd
PA
979
980 open_proc_mem_file (inferior_ptid);
b84876c2
PA
981}
982
8784d563
PA
983/* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
984 already attached. Returns true if a new LWP is found, false
985 otherwise. */
986
987static int
988attach_proc_task_lwp_callback (ptid_t ptid)
989{
990 struct lwp_info *lp;
991
992 /* Ignore LWPs we're already attached to. */
993 lp = find_lwp_pid (ptid);
994 if (lp == NULL)
995 {
e38504b3 996 int lwpid = ptid.lwp ();
8784d563
PA
997
998 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
999 {
1000 int err = errno;
1001
1002 /* Be quiet if we simply raced with the thread exiting.
1003 EPERM is returned if the thread's task still exists, and
1004 is marked as exited or zombie, as well as other
1005 conditions, so in that case, confirm the status in
1006 /proc/PID/status. */
1007 if (err == ESRCH
1008 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1009 {
9327494e
SM
1010 linux_nat_debug_printf
1011 ("Cannot attach to lwp %d: thread is gone (%d: %s)",
1012 lwpid, err, safe_strerror (err));
1013
8784d563
PA
1014 }
1015 else
1016 {
4d9b86e1 1017 std::string reason
50fa3001 1018 = linux_ptrace_attach_fail_reason_string (ptid, err);
4d9b86e1 1019
f71f0b0d 1020 warning (_("Cannot attach to lwp %d: %s"),
4d9b86e1 1021 lwpid, reason.c_str ());
8784d563
PA
1022 }
1023 }
1024 else
1025 {
9327494e 1026 linux_nat_debug_printf ("PTRACE_ATTACH %s, 0, 0 (OK)",
e53c95d4 1027 ptid.to_string ().c_str ());
8784d563
PA
1028
1029 lp = add_lwp (ptid);
8784d563
PA
1030
1031 /* The next time we wait for this LWP we'll see a SIGSTOP as
1032 PTRACE_ATTACH brings it to a halt. */
1033 lp->signalled = 1;
1034
1035 /* We need to wait for a stop before being able to make the
1036 next ptrace call on this LWP. */
1037 lp->must_set_ptrace_flags = 1;
026a9174
PA
1038
1039 /* So that wait collects the SIGSTOP. */
1040 lp->resumed = 1;
1041
1042 /* Also add the LWP to gdb's thread list, in case a
1043 matching libthread_db is not found (or the process uses
1044 raw clone). */
5b6d1e4f 1045 add_thread (linux_target, lp->ptid);
719546c4
SM
1046 set_running (linux_target, lp->ptid, true);
1047 set_executing (linux_target, lp->ptid, true);
8784d563
PA
1048 }
1049
1050 return 1;
1051 }
1052 return 0;
1053}
1054
f6ac5f3d
PA
1055void
1056linux_nat_target::attach (const char *args, int from_tty)
d6b0e80f
AC
1057{
1058 struct lwp_info *lp;
d6b0e80f 1059 int status;
af990527 1060 ptid_t ptid;
d6b0e80f 1061
2455069d 1062 /* Make sure we report all signals during attach. */
adc6a863 1063 pass_signals ({});
2455069d 1064
a70b8144 1065 try
87b0bb13 1066 {
f6ac5f3d 1067 inf_ptrace_target::attach (args, from_tty);
87b0bb13 1068 }
230d2906 1069 catch (const gdb_exception_error &ex)
87b0bb13
JK
1070 {
1071 pid_t pid = parse_pid_to_attach (args);
50fa3001 1072 std::string reason = linux_ptrace_attach_fail_reason (pid);
87b0bb13 1073
4d9b86e1 1074 if (!reason.empty ())
3d6e9d23
TT
1075 throw_error (ex.error, "warning: %s\n%s", reason.c_str (),
1076 ex.what ());
7ae1a6a6 1077 else
3d6e9d23 1078 throw_error (ex.error, "%s", ex.what ());
87b0bb13 1079 }
d6b0e80f 1080
af990527
PA
1081 /* The ptrace base target adds the main thread with (pid,0,0)
1082 format. Decorate it with lwp info. */
e99b03dc 1083 ptid = ptid_t (inferior_ptid.pid (),
184ea2f7 1084 inferior_ptid.pid ());
5b6d1e4f 1085 thread_change_ptid (linux_target, inferior_ptid, ptid);
af990527 1086
9f0bdab8 1087 /* Add the initial process as the first LWP to the list. */
26cb8b7c 1088 lp = add_initial_lwp (ptid);
a0ef4274 1089
22827c51 1090 status = linux_nat_post_attach_wait (lp->ptid, &lp->signalled);
dacc9cb2
PP
1091 if (!WIFSTOPPED (status))
1092 {
1093 if (WIFEXITED (status))
1094 {
1095 int exit_code = WEXITSTATUS (status);
1096
223ffa71 1097 target_terminal::ours ();
bc1e6c81 1098 target_mourn_inferior (inferior_ptid);
dacc9cb2
PP
1099 if (exit_code == 0)
1100 error (_("Unable to attach: program exited normally."));
1101 else
1102 error (_("Unable to attach: program exited with code %d."),
1103 exit_code);
1104 }
1105 else if (WIFSIGNALED (status))
1106 {
2ea28649 1107 enum gdb_signal signo;
dacc9cb2 1108
223ffa71 1109 target_terminal::ours ();
bc1e6c81 1110 target_mourn_inferior (inferior_ptid);
dacc9cb2 1111
2ea28649 1112 signo = gdb_signal_from_host (WTERMSIG (status));
dacc9cb2
PP
1113 error (_("Unable to attach: program terminated with signal "
1114 "%s, %s."),
2ea28649
PA
1115 gdb_signal_to_name (signo),
1116 gdb_signal_to_string (signo));
dacc9cb2
PP
1117 }
1118
f34652de 1119 internal_error (_("unexpected status %d for PID %ld"),
e38504b3 1120 status, (long) ptid.lwp ());
dacc9cb2
PP
1121 }
1122
a0ef4274 1123 lp->stopped = 1;
9f0bdab8 1124
8a89ddbd
PA
1125 open_proc_mem_file (lp->ptid);
1126
a0ef4274 1127 /* Save the wait status to report later. */
d6b0e80f 1128 lp->resumed = 1;
9327494e 1129 linux_nat_debug_printf ("waitpid %ld, saving status %s",
8d06918f
SM
1130 (long) lp->ptid.pid (),
1131 status_to_str (status).c_str ());
710151dd 1132
7feb7d06
PA
1133 lp->status = status;
1134
8784d563
PA
1135 /* We must attach to every LWP. If /proc is mounted, use that to
1136 find them now. The inferior may be using raw clone instead of
1137 using pthreads. But even if it is using pthreads, thread_db
1138 walks structures in the inferior's address space to find the list
1139 of threads/LWPs, and those structures may well be corrupted.
1140 Note that once thread_db is loaded, we'll still use it to list
1141 threads and associate pthread info with each LWP. */
e99b03dc 1142 linux_proc_attach_tgid_threads (lp->ptid.pid (),
8784d563 1143 attach_proc_task_lwp_callback);
d6b0e80f
AC
1144}
1145
4a3ee32a
SM
1146/* Ptrace-detach the thread with pid PID. */
1147
1148static void
1149detach_one_pid (int pid, int signo)
1150{
1151 if (ptrace (PTRACE_DETACH, pid, 0, signo) < 0)
1152 {
1153 int save_errno = errno;
1154
1155 /* We know the thread exists, so ESRCH must mean the lwp is
1156 zombie. This can happen if one of the already-detached
1157 threads exits the whole thread group. In that case we're
1158 still attached, and must reap the lwp. */
1159 if (save_errno == ESRCH)
1160 {
1161 int ret, status;
1162
1163 ret = my_waitpid (pid, &status, __WALL);
1164 if (ret == -1)
1165 {
1166 warning (_("Couldn't reap LWP %d while detaching: %s"),
1167 pid, safe_strerror (errno));
1168 }
1169 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1170 {
1171 warning (_("Reaping LWP %d while detaching "
1172 "returned unexpected status 0x%x"),
1173 pid, status);
1174 }
1175 }
1176 else
1177 error (_("Can't detach %d: %s"),
1178 pid, safe_strerror (save_errno));
1179 }
1180 else
1181 linux_nat_debug_printf ("PTRACE_DETACH (%d, %s, 0) (OK)",
1182 pid, strsignal (signo));
1183}
1184
ced2dffb
PA
1185/* Get pending signal of THREAD as a host signal number, for detaching
1186 purposes. This is the signal the thread last stopped for, which we
1187 need to deliver to the thread when detaching, otherwise, it'd be
1188 suppressed/lost. */
1189
a0ef4274 1190static int
ced2dffb 1191get_detach_signal (struct lwp_info *lp)
a0ef4274 1192{
a493e3e2 1193 enum gdb_signal signo = GDB_SIGNAL_0;
ca2163eb
PA
1194
1195 /* If we paused threads momentarily, we may have stored pending
1196 events in lp->status or lp->waitstatus (see stop_wait_callback),
1197 and GDB core hasn't seen any signal for those threads.
1198 Otherwise, the last signal reported to the core is found in the
1199 thread object's stop_signal.
1200
1201 There's a corner case that isn't handled here at present. Only
1202 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1203 stop_signal make sense as a real signal to pass to the inferior.
1204 Some catchpoint related events, like
1205 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
a493e3e2 1206 to GDB_SIGNAL_SIGTRAP when the catchpoint triggers. But,
ca2163eb
PA
1207 those traps are debug API (ptrace in our case) related and
1208 induced; the inferior wouldn't see them if it wasn't being
1209 traced. Hence, we should never pass them to the inferior, even
1210 when set to pass state. Since this corner case isn't handled by
1211 infrun.c when proceeding with a signal, for consistency, neither
1212 do we handle it here (or elsewhere in the file we check for
1213 signal pass state). Normally SIGTRAP isn't set to pass state, so
1214 this is really a corner case. */
1215
183be222 1216 if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
a493e3e2 1217 signo = GDB_SIGNAL_0; /* a pending ptrace event, not a real signal. */
ca2163eb 1218 else if (lp->status)
2ea28649 1219 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
00431a78 1220 else
ca2163eb 1221 {
5b6d1e4f 1222 struct thread_info *tp = find_thread_ptid (linux_target, lp->ptid);
e0881a8e 1223
611841bb 1224 if (target_is_non_stop_p () && !tp->executing ())
ca2163eb 1225 {
1edb66d8 1226 if (tp->has_pending_waitstatus ())
df5ad102
SM
1227 {
1228 /* If the thread has a pending event, and it was stopped with a
1229 signal, use that signal to resume it. If it has a pending
1230 event of another kind, it was not stopped with a signal, so
1231 resume it without a signal. */
1232 if (tp->pending_waitstatus ().kind () == TARGET_WAITKIND_STOPPED)
1233 signo = tp->pending_waitstatus ().sig ();
1234 else
1235 signo = GDB_SIGNAL_0;
1236 }
00431a78 1237 else
1edb66d8 1238 signo = tp->stop_signal ();
00431a78
PA
1239 }
1240 else if (!target_is_non_stop_p ())
1241 {
00431a78 1242 ptid_t last_ptid;
5b6d1e4f 1243 process_stratum_target *last_target;
00431a78 1244
5b6d1e4f 1245 get_last_target_status (&last_target, &last_ptid, nullptr);
e0881a8e 1246
5b6d1e4f
PA
1247 if (last_target == linux_target
1248 && lp->ptid.lwp () == last_ptid.lwp ())
1edb66d8 1249 signo = tp->stop_signal ();
4c28f408 1250 }
ca2163eb 1251 }
4c28f408 1252
a493e3e2 1253 if (signo == GDB_SIGNAL_0)
ca2163eb 1254 {
9327494e 1255 linux_nat_debug_printf ("lwp %s has no pending signal",
e53c95d4 1256 lp->ptid.to_string ().c_str ());
ca2163eb
PA
1257 }
1258 else if (!signal_pass_state (signo))
1259 {
9327494e
SM
1260 linux_nat_debug_printf
1261 ("lwp %s had signal %s but it is in no pass state",
e53c95d4 1262 lp->ptid.to_string ().c_str (), gdb_signal_to_string (signo));
a0ef4274 1263 }
a0ef4274 1264 else
4c28f408 1265 {
9327494e 1266 linux_nat_debug_printf ("lwp %s has pending signal %s",
e53c95d4 1267 lp->ptid.to_string ().c_str (),
9327494e 1268 gdb_signal_to_string (signo));
ced2dffb
PA
1269
1270 return gdb_signal_to_host (signo);
4c28f408 1271 }
a0ef4274
DJ
1272
1273 return 0;
1274}
1275
ced2dffb
PA
1276/* Detach from LP. If SIGNO_P is non-NULL, then it points to the
1277 signal number that should be passed to the LWP when detaching.
1278 Otherwise pass any pending signal the LWP may have, if any. */
1279
1280static void
1281detach_one_lwp (struct lwp_info *lp, int *signo_p)
d6b0e80f 1282{
e38504b3 1283 int lwpid = lp->ptid.lwp ();
ced2dffb
PA
1284 int signo;
1285
d6b0e80f
AC
1286 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1287
df5ad102
SM
1288 /* If the lwp/thread we are about to detach has a pending fork event,
1289 there is a process GDB is attached to that the core of GDB doesn't know
1290 about. Detach from it. */
1291
1292 /* Check in lwp_info::status. */
1293 if (WIFSTOPPED (lp->status) && linux_is_extended_waitstatus (lp->status))
1294 {
1295 int event = linux_ptrace_get_extended_event (lp->status);
1296
1297 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
1298 {
1299 unsigned long child_pid;
1300 int ret = ptrace (PTRACE_GETEVENTMSG, lp->ptid.lwp (), 0, &child_pid);
1301 if (ret == 0)
1302 detach_one_pid (child_pid, 0);
1303 else
1304 perror_warning_with_name (_("Failed to detach fork child"));
1305 }
1306 }
1307
1308 /* Check in lwp_info::waitstatus. */
1309 if (lp->waitstatus.kind () == TARGET_WAITKIND_VFORKED
1310 || lp->waitstatus.kind () == TARGET_WAITKIND_FORKED)
1311 detach_one_pid (lp->waitstatus.child_ptid ().pid (), 0);
1312
1313
1314 /* Check in thread_info::pending_waitstatus. */
1315 thread_info *tp = find_thread_ptid (linux_target, lp->ptid);
1316 if (tp->has_pending_waitstatus ())
1317 {
1318 const target_waitstatus &ws = tp->pending_waitstatus ();
1319
1320 if (ws.kind () == TARGET_WAITKIND_VFORKED
1321 || ws.kind () == TARGET_WAITKIND_FORKED)
1322 detach_one_pid (ws.child_ptid ().pid (), 0);
1323 }
1324
1325 /* Check in thread_info::pending_follow. */
1326 if (tp->pending_follow.kind () == TARGET_WAITKIND_VFORKED
1327 || tp->pending_follow.kind () == TARGET_WAITKIND_FORKED)
1328 detach_one_pid (tp->pending_follow.child_ptid ().pid (), 0);
1329
9327494e
SM
1330 if (lp->status != 0)
1331 linux_nat_debug_printf ("Pending %s for %s on detach.",
1332 strsignal (WSTOPSIG (lp->status)),
e53c95d4 1333 lp->ptid.to_string ().c_str ());
d6b0e80f 1334
a0ef4274
DJ
1335 /* If there is a pending SIGSTOP, get rid of it. */
1336 if (lp->signalled)
d6b0e80f 1337 {
9327494e 1338 linux_nat_debug_printf ("Sending SIGCONT to %s",
e53c95d4 1339 lp->ptid.to_string ().c_str ());
d6b0e80f 1340
ced2dffb 1341 kill_lwp (lwpid, SIGCONT);
d6b0e80f 1342 lp->signalled = 0;
d6b0e80f
AC
1343 }
1344
ced2dffb 1345 if (signo_p == NULL)
d6b0e80f 1346 {
a0ef4274 1347 /* Pass on any pending signal for this LWP. */
ced2dffb
PA
1348 signo = get_detach_signal (lp);
1349 }
1350 else
1351 signo = *signo_p;
a0ef4274 1352
ced2dffb
PA
1353 /* Preparing to resume may try to write registers, and fail if the
1354 lwp is zombie. If that happens, ignore the error. We'll handle
1355 it below, when detach fails with ESRCH. */
a70b8144 1356 try
ced2dffb 1357 {
135340af 1358 linux_target->low_prepare_to_resume (lp);
ced2dffb 1359 }
230d2906 1360 catch (const gdb_exception_error &ex)
ced2dffb
PA
1361 {
1362 if (!check_ptrace_stopped_lwp_gone (lp))
eedc3f4f 1363 throw;
ced2dffb 1364 }
d6b0e80f 1365
4a3ee32a 1366 detach_one_pid (lwpid, signo);
ced2dffb
PA
1367
1368 delete_lwp (lp->ptid);
1369}
d6b0e80f 1370
ced2dffb 1371static int
d3a70e03 1372detach_callback (struct lwp_info *lp)
ced2dffb
PA
1373{
1374 /* We don't actually detach from the thread group leader just yet.
1375 If the thread group exits, we must reap the zombie clone lwps
1376 before we're able to reap the leader. */
e38504b3 1377 if (lp->ptid.lwp () != lp->ptid.pid ())
ced2dffb 1378 detach_one_lwp (lp, NULL);
d6b0e80f
AC
1379 return 0;
1380}
1381
f6ac5f3d
PA
1382void
1383linux_nat_target::detach (inferior *inf, int from_tty)
d6b0e80f 1384{
d90e17a7 1385 struct lwp_info *main_lwp;
bc09b0c1 1386 int pid = inf->pid;
a0ef4274 1387
ae5e0686
MK
1388 /* Don't unregister from the event loop, as there may be other
1389 inferiors running. */
b84876c2 1390
4c28f408 1391 /* Stop all threads before detaching. ptrace requires that the
30baf67b 1392 thread is stopped to successfully detach. */
d3a70e03 1393 iterate_over_lwps (ptid_t (pid), stop_callback);
4c28f408
PA
1394 /* ... and wait until all of them have reported back that
1395 they're no longer running. */
d3a70e03 1396 iterate_over_lwps (ptid_t (pid), stop_wait_callback);
4c28f408 1397
e87f0fe8
PA
1398 /* We can now safely remove breakpoints. We don't this in earlier
1399 in common code because this target doesn't currently support
1400 writing memory while the inferior is running. */
1401 remove_breakpoints_inf (current_inferior ());
1402
d3a70e03 1403 iterate_over_lwps (ptid_t (pid), detach_callback);
d6b0e80f
AC
1404
1405 /* Only the initial process should be left right now. */
bc09b0c1 1406 gdb_assert (num_lwps (pid) == 1);
d90e17a7 1407
f2907e49 1408 main_lwp = find_lwp_pid (ptid_t (pid));
d6b0e80f 1409
7a7d3353
PA
1410 if (forks_exist_p ())
1411 {
1412 /* Multi-fork case. The current inferior_ptid is being detached
1413 from, but there are other viable forks to debug. Detach from
1414 the current fork, and context-switch to the first
1415 available. */
6bd6f3b6 1416 linux_fork_detach (from_tty);
7a7d3353
PA
1417 }
1418 else
ced2dffb 1419 {
ced2dffb
PA
1420 target_announce_detach (from_tty);
1421
6bd6f3b6
SM
1422 /* Pass on any pending signal for the last LWP. */
1423 int signo = get_detach_signal (main_lwp);
ced2dffb
PA
1424
1425 detach_one_lwp (main_lwp, &signo);
1426
f6ac5f3d 1427 detach_success (inf);
ced2dffb 1428 }
05c06f31 1429
8a89ddbd 1430 close_proc_mem_file (pid);
d6b0e80f
AC
1431}
1432
8a99810d
PA
1433/* Resume execution of the inferior process. If STEP is nonzero,
1434 single-step it. If SIGNAL is nonzero, give it that signal. */
1435
1436static void
23f238d3
PA
1437linux_resume_one_lwp_throw (struct lwp_info *lp, int step,
1438 enum gdb_signal signo)
8a99810d 1439{
8a99810d 1440 lp->step = step;
9c02b525
PA
1441
1442 /* stop_pc doubles as the PC the LWP had when it was last resumed.
1443 We only presently need that if the LWP is stepped though (to
1444 handle the case of stepping a breakpoint instruction). */
1445 if (step)
1446 {
5b6d1e4f 1447 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
9c02b525
PA
1448
1449 lp->stop_pc = regcache_read_pc (regcache);
1450 }
1451 else
1452 lp->stop_pc = 0;
1453
135340af 1454 linux_target->low_prepare_to_resume (lp);
f6ac5f3d 1455 linux_target->low_resume (lp->ptid, step, signo);
23f238d3
PA
1456
1457 /* Successfully resumed. Clear state that no longer makes sense,
1458 and mark the LWP as running. Must not do this before resuming
1459 otherwise if that fails other code will be confused. E.g., we'd
1460 later try to stop the LWP and hang forever waiting for a stop
1461 status. Note that we must not throw after this is cleared,
1462 otherwise handle_zombie_lwp_error would get confused. */
8a99810d 1463 lp->stopped = 0;
1ad3de98 1464 lp->core = -1;
23f238d3 1465 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
5b6d1e4f 1466 registers_changed_ptid (linux_target, lp->ptid);
8a99810d
PA
1467}
1468
23f238d3
PA
1469/* Called when we try to resume a stopped LWP and that errors out. If
1470 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
1471 or about to become), discard the error, clear any pending status
1472 the LWP may have, and return true (we'll collect the exit status
1473 soon enough). Otherwise, return false. */
1474
1475static int
1476check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
1477{
1478 /* If we get an error after resuming the LWP successfully, we'd
1479 confuse !T state for the LWP being gone. */
1480 gdb_assert (lp->stopped);
1481
1482 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
1483 because even if ptrace failed with ESRCH, the tracee may be "not
1484 yet fully dead", but already refusing ptrace requests. In that
1485 case the tracee has 'R (Running)' state for a little bit
1486 (observed in Linux 3.18). See also the note on ESRCH in the
1487 ptrace(2) man page. Instead, check whether the LWP has any state
1488 other than ptrace-stopped. */
1489
1490 /* Don't assume anything if /proc/PID/status can't be read. */
e38504b3 1491 if (linux_proc_pid_is_trace_stopped_nowarn (lp->ptid.lwp ()) == 0)
23f238d3
PA
1492 {
1493 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
1494 lp->status = 0;
183be222 1495 lp->waitstatus.set_ignore ();
23f238d3
PA
1496 return 1;
1497 }
1498 return 0;
1499}
1500
1501/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
1502 disappears while we try to resume it. */
1503
1504static void
1505linux_resume_one_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1506{
a70b8144 1507 try
23f238d3
PA
1508 {
1509 linux_resume_one_lwp_throw (lp, step, signo);
1510 }
230d2906 1511 catch (const gdb_exception_error &ex)
23f238d3
PA
1512 {
1513 if (!check_ptrace_stopped_lwp_gone (lp))
eedc3f4f 1514 throw;
23f238d3 1515 }
23f238d3
PA
1516}
1517
d6b0e80f
AC
1518/* Resume LP. */
1519
25289eb2 1520static void
e5ef252a 1521resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
d6b0e80f 1522{
25289eb2 1523 if (lp->stopped)
6c95b8df 1524 {
5b6d1e4f 1525 struct inferior *inf = find_inferior_ptid (linux_target, lp->ptid);
25289eb2
PA
1526
1527 if (inf->vfork_child != NULL)
1528 {
9327494e 1529 linux_nat_debug_printf ("Not resuming %s (vfork parent)",
e53c95d4 1530 lp->ptid.to_string ().c_str ());
25289eb2 1531 }
8a99810d 1532 else if (!lwp_status_pending_p (lp))
25289eb2 1533 {
9327494e 1534 linux_nat_debug_printf ("Resuming sibling %s, %s, %s",
e53c95d4 1535 lp->ptid.to_string ().c_str (),
9327494e
SM
1536 (signo != GDB_SIGNAL_0
1537 ? strsignal (gdb_signal_to_host (signo))
1538 : "0"),
1539 step ? "step" : "resume");
25289eb2 1540
8a99810d 1541 linux_resume_one_lwp (lp, step, signo);
25289eb2
PA
1542 }
1543 else
1544 {
9327494e 1545 linux_nat_debug_printf ("Not resuming sibling %s (has pending)",
e53c95d4 1546 lp->ptid.to_string ().c_str ());
25289eb2 1547 }
6c95b8df 1548 }
25289eb2 1549 else
9327494e 1550 linux_nat_debug_printf ("Not resuming sibling %s (not stopped)",
e53c95d4 1551 lp->ptid.to_string ().c_str ());
25289eb2 1552}
d6b0e80f 1553
8817a6f2
PA
1554/* Callback for iterate_over_lwps. If LWP is EXCEPT, do nothing.
1555 Resume LWP with the last stop signal, if it is in pass state. */
e5ef252a 1556
25289eb2 1557static int
d3a70e03 1558linux_nat_resume_callback (struct lwp_info *lp, struct lwp_info *except)
25289eb2 1559{
e5ef252a
PA
1560 enum gdb_signal signo = GDB_SIGNAL_0;
1561
8817a6f2
PA
1562 if (lp == except)
1563 return 0;
1564
e5ef252a
PA
1565 if (lp->stopped)
1566 {
1567 struct thread_info *thread;
1568
5b6d1e4f 1569 thread = find_thread_ptid (linux_target, lp->ptid);
e5ef252a
PA
1570 if (thread != NULL)
1571 {
1edb66d8
SM
1572 signo = thread->stop_signal ();
1573 thread->set_stop_signal (GDB_SIGNAL_0);
e5ef252a
PA
1574 }
1575 }
1576
1577 resume_lwp (lp, 0, signo);
d6b0e80f
AC
1578 return 0;
1579}
1580
1581static int
d3a70e03 1582resume_clear_callback (struct lwp_info *lp)
d6b0e80f
AC
1583{
1584 lp->resumed = 0;
25289eb2 1585 lp->last_resume_kind = resume_stop;
d6b0e80f
AC
1586 return 0;
1587}
1588
1589static int
d3a70e03 1590resume_set_callback (struct lwp_info *lp)
d6b0e80f
AC
1591{
1592 lp->resumed = 1;
25289eb2 1593 lp->last_resume_kind = resume_continue;
d6b0e80f
AC
1594 return 0;
1595}
1596
f6ac5f3d 1597void
d51926f0 1598linux_nat_target::resume (ptid_t scope_ptid, int step, enum gdb_signal signo)
d6b0e80f
AC
1599{
1600 struct lwp_info *lp;
d6b0e80f 1601
9327494e
SM
1602 linux_nat_debug_printf ("Preparing to %s %s, %s, inferior_ptid %s",
1603 step ? "step" : "resume",
d51926f0 1604 scope_ptid.to_string ().c_str (),
9327494e
SM
1605 (signo != GDB_SIGNAL_0
1606 ? strsignal (gdb_signal_to_host (signo)) : "0"),
e53c95d4 1607 inferior_ptid.to_string ().c_str ());
76f50ad1 1608
7da6a5b9
LM
1609 /* Mark the lwps we're resuming as resumed and update their
1610 last_resume_kind to resume_continue. */
d51926f0 1611 iterate_over_lwps (scope_ptid, resume_set_callback);
d6b0e80f 1612
d51926f0 1613 lp = find_lwp_pid (inferior_ptid);
9f0bdab8 1614 gdb_assert (lp != NULL);
d6b0e80f 1615
9f0bdab8 1616 /* Remember if we're stepping. */
25289eb2 1617 lp->last_resume_kind = step ? resume_step : resume_continue;
d6b0e80f 1618
9f0bdab8
DJ
1619 /* If we have a pending wait status for this thread, there is no
1620 point in resuming the process. But first make sure that
1621 linux_nat_wait won't preemptively handle the event - we
1622 should never take this short-circuit if we are going to
1623 leave LP running, since we have skipped resuming all the
1624 other threads. This bit of code needs to be synchronized
1625 with linux_nat_wait. */
76f50ad1 1626
9f0bdab8
DJ
1627 if (lp->status && WIFSTOPPED (lp->status))
1628 {
2455069d
UW
1629 if (!lp->step
1630 && WSTOPSIG (lp->status)
1631 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
d6b0e80f 1632 {
9327494e
SM
1633 linux_nat_debug_printf
1634 ("Not short circuiting for ignored status 0x%x", lp->status);
9f0bdab8 1635
d6b0e80f
AC
1636 /* FIXME: What should we do if we are supposed to continue
1637 this thread with a signal? */
a493e3e2 1638 gdb_assert (signo == GDB_SIGNAL_0);
2ea28649 1639 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
9f0bdab8
DJ
1640 lp->status = 0;
1641 }
1642 }
76f50ad1 1643
8a99810d 1644 if (lwp_status_pending_p (lp))
9f0bdab8
DJ
1645 {
1646 /* FIXME: What should we do if we are supposed to continue
1647 this thread with a signal? */
a493e3e2 1648 gdb_assert (signo == GDB_SIGNAL_0);
76f50ad1 1649
9327494e
SM
1650 linux_nat_debug_printf ("Short circuiting for status 0x%x",
1651 lp->status);
d6b0e80f 1652
7feb7d06
PA
1653 if (target_can_async_p ())
1654 {
4a570176 1655 target_async (true);
7feb7d06
PA
1656 /* Tell the event loop we have something to process. */
1657 async_file_mark ();
1658 }
9f0bdab8 1659 return;
d6b0e80f
AC
1660 }
1661
d51926f0
PA
1662 /* No use iterating unless we're resuming other threads. */
1663 if (scope_ptid != lp->ptid)
1664 iterate_over_lwps (scope_ptid, [=] (struct lwp_info *info)
1665 {
1666 return linux_nat_resume_callback (info, lp);
1667 });
d90e17a7 1668
9327494e
SM
1669 linux_nat_debug_printf ("%s %s, %s (resume event thread)",
1670 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
e53c95d4 1671 lp->ptid.to_string ().c_str (),
9327494e
SM
1672 (signo != GDB_SIGNAL_0
1673 ? strsignal (gdb_signal_to_host (signo)) : "0"));
b84876c2 1674
2bf6fb9d 1675 linux_resume_one_lwp (lp, step, signo);
d6b0e80f
AC
1676}
1677
c5f62d5f 1678/* Send a signal to an LWP. */
d6b0e80f
AC
1679
1680static int
1681kill_lwp (int lwpid, int signo)
1682{
4a6ed09b 1683 int ret;
d6b0e80f 1684
4a6ed09b
PA
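 /* Use the tkill syscall rather than kill so that SIGNO is delivered
    to this specific LWP only, not to the whole thread group. */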
1685 errno = 0;
1686 ret = syscall (__NR_tkill, lwpid, signo);
1687 if (errno == ENOSYS)
1688 {
1689 /* If tkill fails, then we are not using nptl threads, a
1690 configuration we no longer support. */
1691 perror_with_name (("tkill"));
1692 }
1693 return ret;
d6b0e80f
AC
1694}
1695
ca2163eb
PA
1696/* Handle a GNU/Linux syscall trap wait response. If we see a syscall
1697 event, check if the core is interested in it: if not, ignore the
1698 event, and keep waiting; otherwise, we need to toggle the LWP's
1699 syscall entry/exit status, since the ptrace event itself doesn't
1700 indicate it, and report the trap to higher layers. */
1701
1702static int
1703linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
1704{
1705 struct target_waitstatus *ourstatus = &lp->waitstatus;
1706 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
5b6d1e4f 1707 thread_info *thread = find_thread_ptid (linux_target, lp->ptid);
00431a78 1708 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, thread);
ca2163eb
PA
1709
1710 if (stopping)
1711 {
1712 /* If we're stopping threads, there's a SIGSTOP pending, which
1713 makes it so that the LWP reports an immediate syscall return,
1714 followed by the SIGSTOP. Skip seeing that "return" using
1715 PTRACE_CONT directly, and let stop_wait_callback collect the
 1716 SIGSTOP. Later when the thread is resumed, a new syscall
 1717 entry event will be reported. If we didn't do this (and returned 0), we'd
1718 leave a syscall entry pending, and our caller, by using
1719 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
1720 itself. Later, when the user re-resumes this LWP, we'd see
1721 another syscall entry event and we'd mistake it for a return.
1722
1723 If stop_wait_callback didn't force the SIGSTOP out of the LWP
1724 (leaving immediately with LWP->signalled set, without issuing
1725 a PTRACE_CONT), it would still be problematic to leave this
1726 syscall enter pending, as later when the thread is resumed,
1727 it would then see the same syscall exit mentioned above,
1728 followed by the delayed SIGSTOP, while the syscall didn't
1729 actually get to execute. It seems it would be even more
1730 confusing to the user. */
1731
9327494e
SM
1732 linux_nat_debug_printf
1733 ("ignoring syscall %d for LWP %ld (stopping threads), resuming with "
1734 "PTRACE_CONT for SIGSTOP", syscall_number, lp->ptid.lwp ());
ca2163eb
PA
1735
1736 lp->syscall_state = TARGET_WAITKIND_IGNORE;
e38504b3 1737 ptrace (PTRACE_CONT, lp->ptid.lwp (), 0, 0);
8817a6f2 1738 lp->stopped = 0;
ca2163eb
PA
1739 return 1;
1740 }
1741
bfd09d20
JS
1742 /* Always update the entry/return state, even if this particular
1743 syscall isn't interesting to the core now. In async mode,
1744 the user could install a new catchpoint for this syscall
1745 between syscall enter/return, and we'll need to know to
1746 report a syscall return if that happens. */
1747 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1748 ? TARGET_WAITKIND_SYSCALL_RETURN
1749 : TARGET_WAITKIND_SYSCALL_ENTRY);
1750
ca2163eb
PA
1751 if (catch_syscall_enabled ())
1752 {
ca2163eb
PA
1753 if (catching_syscall_number (syscall_number))
1754 {
1755 /* Alright, an event to report. */
183be222
SM
1756 if (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
1757 ourstatus->set_syscall_entry (syscall_number);
1758 else if (lp->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
1759 ourstatus->set_syscall_return (syscall_number);
1760 else
1761 gdb_assert_not_reached ("unexpected syscall state");
ca2163eb 1762
9327494e
SM
1763 linux_nat_debug_printf
1764 ("stopping for %s of syscall %d for LWP %ld",
1765 (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1766 ? "entry" : "return"), syscall_number, lp->ptid.lwp ());
1767
ca2163eb
PA
1768 return 0;
1769 }
1770
9327494e
SM
1771 linux_nat_debug_printf
1772 ("ignoring %s of syscall %d for LWP %ld",
1773 (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1774 ? "entry" : "return"), syscall_number, lp->ptid.lwp ());
ca2163eb
PA
1775 }
1776 else
1777 {
1778 /* If we had been syscall tracing, and hence used PT_SYSCALL
1779 before on this LWP, it could happen that the user removes all
1780 syscall catchpoints before we get to process this event.
1781 There are two noteworthy issues here:
1782
1783 - When stopped at a syscall entry event, resuming with
1784 PT_STEP still resumes executing the syscall and reports a
1785 syscall return.
1786
 1787 - Only PT_SYSCALL catches syscall enters. If we last
 1788 single-stepped this thread, then this event can't be a
 1789 syscall enter; it has to be a syscall exit.
1791
1792 The points above mean that the next resume, be it PT_STEP or
 1793 PT_CONTINUE, cannot trigger a syscall trace event. */
9327494e
SM
1794 linux_nat_debug_printf
1795 ("caught syscall event with no syscall catchpoints. %d for LWP %ld, "
1796 "ignoring", syscall_number, lp->ptid.lwp ());
ca2163eb
PA
1797 lp->syscall_state = TARGET_WAITKIND_IGNORE;
1798 }
1799
1800 /* The core isn't interested in this event. For efficiency, avoid
1801 stopping all threads only to have the core resume them all again.
1802 Since we're not stopping threads, if we're still syscall tracing
1803 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
1804 subsequent syscall. Simply resume using the inf-ptrace layer,
1805 which knows when to use PT_SYSCALL or PT_CONTINUE. */
1806
8a99810d 1807 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
ca2163eb
PA
1808 return 1;
1809}
1810
3d799a95
DJ
1811/* Handle a GNU/Linux extended wait response. If we see a clone
1812 event, we need to add the new LWP to our list (and not report the
1813 trap to higher layers). This function returns non-zero if the
1814 event should be ignored and we should wait again. If STOPPING is
1815 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
1816
1817static int
4dd63d48 1818linux_handle_extended_wait (struct lwp_info *lp, int status)
d6b0e80f 1819{
e38504b3 1820 int pid = lp->ptid.lwp ();
3d799a95 1821 struct target_waitstatus *ourstatus = &lp->waitstatus;
89a5711c 1822 int event = linux_ptrace_get_extended_event (status);
d6b0e80f 1823
bfd09d20
JS
1824 /* All extended events we currently use are mid-syscall. Only
1825 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
1826 you have to be using PTRACE_SEIZE to get that. */
1827 lp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
1828
3d799a95
DJ
1829 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1830 || event == PTRACE_EVENT_CLONE)
d6b0e80f 1831 {
3d799a95
DJ
1832 unsigned long new_pid;
1833 int ret;
1834
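 /* For fork/vfork/clone events, PTRACE_GETEVENTMSG retrieves the
    PID of the new child. */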
1835 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 1836
3d799a95
DJ
1837 /* If we haven't already seen the new PID stop, wait for it now. */
1838 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1839 {
1840 /* The new child has a pending SIGSTOP. We can't affect it until it
1841 hits the SIGSTOP, but we're already attached. */
4a6ed09b 1842 ret = my_waitpid (new_pid, &status, __WALL);
3d799a95
DJ
1843 if (ret == -1)
1844 perror_with_name (_("waiting for new child"));
1845 else if (ret != new_pid)
f34652de 1846 internal_error (_("wait returned unexpected PID %d"), ret);
3d799a95 1847 else if (!WIFSTOPPED (status))
f34652de 1848 internal_error (_("wait returned unexpected status 0x%x"), status);
3d799a95
DJ
1849 }
1850
183be222 1851 ptid_t child_ptid (new_pid, new_pid);
3d799a95 1852
26cb8b7c
PA
1853 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
1854 {
8a89ddbd
PA
1855 open_proc_mem_file (child_ptid);
1856
26cb8b7c
PA
1857 /* The arch-specific native code may need to know about new
1858 forks even if those end up never mapped to an
1859 inferior. */
135340af 1860 linux_target->low_new_fork (lp, new_pid);
26cb8b7c 1861 }
1310c1b0
PFC
1862 else if (event == PTRACE_EVENT_CLONE)
1863 {
1864 linux_target->low_new_clone (lp, new_pid);
1865 }
26cb8b7c 1866
2277426b 1867 if (event == PTRACE_EVENT_FORK
e99b03dc 1868 && linux_fork_checkpointing_p (lp->ptid.pid ()))
2277426b 1869 {
2277426b
PA
1870 /* Handle checkpointing by linux-fork.c here as a special
1871 case. We don't want the follow-fork-mode or 'catch fork'
1872 to interfere with this. */
1873
1874 /* This won't actually modify the breakpoint list, but will
1875 physically remove the breakpoints from the child. */
184ea2f7 1876 detach_breakpoints (ptid_t (new_pid, new_pid));
2277426b
PA
1877
1878 /* Retain child fork in ptrace (stopped) state. */
14571dad
MS
1879 if (!find_fork_pid (new_pid))
1880 add_fork (new_pid);
2277426b
PA
1881
1882 /* Report as spurious, so that infrun doesn't want to follow
1883 this fork. We're actually doing an infcall in
1884 linux-fork.c. */
183be222 1885 ourstatus->set_spurious ();
2277426b
PA
1886
1887 /* Report the stop to the core. */
1888 return 0;
1889 }
1890
3d799a95 1891 if (event == PTRACE_EVENT_FORK)
183be222 1892 ourstatus->set_forked (child_ptid);
3d799a95 1893 else if (event == PTRACE_EVENT_VFORK)
183be222 1894 ourstatus->set_vforked (child_ptid);
4dd63d48 1895 else if (event == PTRACE_EVENT_CLONE)
3d799a95 1896 {
78768c4a
JK
1897 struct lwp_info *new_lp;
1898
183be222 1899 ourstatus->set_ignore ();
78768c4a 1900
9327494e
SM
1901 linux_nat_debug_printf
1902 ("Got clone event from LWP %d, new child is LWP %ld", pid, new_pid);
3c4d7e12 1903
184ea2f7 1904 new_lp = add_lwp (ptid_t (lp->ptid.pid (), new_pid));
4c28f408 1905 new_lp->stopped = 1;
4dd63d48 1906 new_lp->resumed = 1;
d6b0e80f 1907
2db9a427
PA
1908 /* If the thread_db layer is active, let it record the user
1909 level thread id and status, and add the thread to GDB's
1910 list. */
1911 if (!thread_db_notice_clone (lp->ptid, new_lp->ptid))
3d799a95 1912 {
2db9a427
PA
1913 /* The process is not using thread_db. Add the LWP to
1914 GDB's list. */
5b6d1e4f 1915 add_thread (linux_target, new_lp->ptid);
2db9a427 1916 }
4c28f408 1917
2ee52aa4 1918 /* Even if we're stopping the thread for some reason
4dd63d48
PA
1919 internal to this module, from the perspective of infrun
1920 and the user/frontend, this new thread is running until
1921 it next reports a stop. */
719546c4
SM
1922 set_running (linux_target, new_lp->ptid, true);
1923 set_executing (linux_target, new_lp->ptid, true);
4c28f408 1924
4dd63d48 1925 if (WSTOPSIG (status) != SIGSTOP)
79395f92 1926 {
4dd63d48
PA
 1927 /* This can happen if someone starts sending signals with
 1928 a lower number than SIGSTOP (e.g. SIGUSR1) to the new
 1929 thread before it gets a chance to run.
1930 This is an unlikely case, and harder to handle for
1931 fork / vfork than for clone, so we do not try - but
1932 we handle it for clone events here. */
1933
1934 new_lp->signalled = 1;
1935
79395f92
PA
1936 /* We created NEW_LP so it cannot yet contain STATUS. */
1937 gdb_assert (new_lp->status == 0);
1938
1939 /* Save the wait status to report later. */
9327494e
SM
1940 linux_nat_debug_printf
1941 ("waitpid of new LWP %ld, saving status %s",
8d06918f 1942 (long) new_lp->ptid.lwp (), status_to_str (status).c_str ());
79395f92
PA
1943 new_lp->status = status;
1944 }
aa01bd36
PA
1945 else if (report_thread_events)
1946 {
183be222 1947 new_lp->waitstatus.set_thread_created ();
aa01bd36
PA
1948 new_lp->status = status;
1949 }
79395f92 1950
3d799a95
DJ
1951 return 1;
1952 }
1953
1954 return 0;
d6b0e80f
AC
1955 }
1956
3d799a95
DJ
1957 if (event == PTRACE_EVENT_EXEC)
1958 {
9327494e 1959 linux_nat_debug_printf ("Got exec event from LWP %ld", lp->ptid.lwp ());
a75724bc 1960
8a89ddbd
PA
1961 /* Close the previous /proc/PID/mem file for this inferior,
1962 which was using the address space which is now gone.
1963 Reading/writing from this file would return 0/EOF. */
1964 close_proc_mem_file (lp->ptid.pid ());
1965
1966 /* Open a new file for the new address space. */
1967 open_proc_mem_file (lp->ptid);
05c06f31 1968
183be222
SM
1969 ourstatus->set_execd
1970 (make_unique_xstrdup (linux_proc_pid_to_exec_file (pid)));
3d799a95 1971
8af756ef
PA
1972 /* The thread that execed must have been resumed, but, when a
1973 thread execs, it changes its tid to the tgid, and the old
 1974 tgid thread might not have been resumed. */
1975 lp->resumed = 1;
6c95b8df
PA
1976 return 0;
1977 }
1978
1979 if (event == PTRACE_EVENT_VFORK_DONE)
1980 {
9327494e 1981 linux_nat_debug_printf
5a0c4a06
SM
1982 ("Got PTRACE_EVENT_VFORK_DONE from LWP %ld",
1983 lp->ptid.lwp ());
1984 ourstatus->set_vfork_done ();
1985 return 0;
3d799a95
DJ
1986 }
1987
f34652de 1988 internal_error (_("unknown ptrace event %d"), event);
d6b0e80f
AC
1989}
1990
9c3a5d93
PA
1991/* Suspend waiting for a signal. We're mostly interested in
1992 SIGCHLD/SIGINT. */
1993
1994static void
1995wait_for_signal ()
1996{
9327494e 1997 linux_nat_debug_printf ("about to sigsuspend");
9c3a5d93
PA
1998 sigsuspend (&suspend_mask);
1999
2000 /* If the quit flag is set, it means that the user pressed Ctrl-C
2001 and we're debugging a process that is running on a separate
2002 terminal, so we must forward the Ctrl-C to the inferior. (If the
2003 inferior is sharing GDB's terminal, then the Ctrl-C reaches the
2004 inferior directly.) We must do this here because functions that
2005 need to block waiting for a signal loop forever until there's an
2006 event to report before returning back to the event loop. */
2007 if (!target_terminal::is_ours ())
2008 {
2009 if (check_quit_flag ())
2010 target_pass_ctrlc ();
2011 }
2012}
2013
d6b0e80f
AC
2014/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2015 exited. */
2016
2017static int
2018wait_lwp (struct lwp_info *lp)
2019{
2020 pid_t pid;
432b4d03 2021 int status = 0;
d6b0e80f 2022 int thread_dead = 0;
432b4d03 2023 sigset_t prev_mask;
d6b0e80f
AC
2024
2025 gdb_assert (!lp->stopped);
2026 gdb_assert (lp->status == 0);
2027
432b4d03
JK
2028 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2029 block_child_signals (&prev_mask);
2030
2031 for (;;)
d6b0e80f 2032 {
e38504b3 2033 pid = my_waitpid (lp->ptid.lwp (), &status, __WALL | WNOHANG);
a9f4bb21
PA
2034 if (pid == -1 && errno == ECHILD)
2035 {
2036 /* The thread has previously exited. We need to delete it
4a6ed09b
PA
2037 now because if this was a non-leader thread execing, we
2038 won't get an exit event. See comments on exec events at
2039 the top of the file. */
a9f4bb21 2040 thread_dead = 1;
9327494e 2041 linux_nat_debug_printf ("%s vanished.",
e53c95d4 2042 lp->ptid.to_string ().c_str ());
a9f4bb21 2043 }
432b4d03
JK
2044 if (pid != 0)
2045 break;
2046
2047 /* Bugs 10970, 12702.
2048 Thread group leader may have exited in which case we'll lock up in
2049 waitpid if there are other threads, even if they are all zombies too.
2050 Basically, we're not supposed to use waitpid this way.
4a6ed09b
PA
2051 tkill(pid,0) cannot be used here as it gets ESRCH for both
 2052 zombie and running processes.
432b4d03
JK
2053
2054 As a workaround, check if we're waiting for the thread group leader and
2055 if it's a zombie, and avoid calling waitpid if it is.
2056
 2057 This is racy: what if the tgl becomes a zombie right after we check?
 2058 Therefore always use WNOHANG with sigsuspend - it is equivalent to
5f572dec 2059 a blocking waitpid, but linux_proc_pid_is_zombie is safe this way. */
432b4d03 2060
e38504b3
TT
2061 if (lp->ptid.pid () == lp->ptid.lwp ()
2062 && linux_proc_pid_is_zombie (lp->ptid.lwp ()))
d6b0e80f 2063 {
d6b0e80f 2064 thread_dead = 1;
9327494e 2065 linux_nat_debug_printf ("Thread group leader %s vanished.",
e53c95d4 2066 lp->ptid.to_string ().c_str ());
432b4d03 2067 break;
d6b0e80f 2068 }
432b4d03
JK
2069
 2070 /* Wait for next SIGCHLD and try again. This may let SIGCHLD handlers
 2071 get invoked even though our caller had them intentionally blocked by
 2072 block_child_signals. This only matters for the loop in
 2073 linux_nat_wait_1, and there, if we get called, my_waitpid gets called
 2074 again before it gets to sigsuspend, so we can safely let the handlers
 2075 get executed here. */
9c3a5d93 2076 wait_for_signal ();
432b4d03
JK
2077 }
2078
2079 restore_child_signals_mask (&prev_mask);
2080
d6b0e80f
AC
2081 if (!thread_dead)
2082 {
e38504b3 2083 gdb_assert (pid == lp->ptid.lwp ());
d6b0e80f 2084
9327494e 2085 linux_nat_debug_printf ("waitpid %s received %s",
e53c95d4 2086 lp->ptid.to_string ().c_str (),
8d06918f 2087 status_to_str (status).c_str ());
d6b0e80f 2088
a9f4bb21
PA
2089 /* Check if the thread has exited. */
2090 if (WIFEXITED (status) || WIFSIGNALED (status))
2091 {
aa01bd36 2092 if (report_thread_events
e38504b3 2093 || lp->ptid.pid () == lp->ptid.lwp ())
69dde7dc 2094 {
9327494e 2095 linux_nat_debug_printf ("LWP %d exited.", lp->ptid.pid ());
69dde7dc 2096
aa01bd36 2097 /* If this is the leader exiting, it means the whole
69dde7dc
PA
2098 process is gone. Store the status to report to the
2099 core. Store it in lp->waitstatus, because lp->status
2100 would be ambiguous (W_EXITCODE(0,0) == 0). */
7509b829 2101 lp->waitstatus = host_status_to_waitstatus (status);
69dde7dc
PA
2102 return 0;
2103 }
2104
a9f4bb21 2105 thread_dead = 1;
9327494e 2106 linux_nat_debug_printf ("%s exited.",
e53c95d4 2107 lp->ptid.to_string ().c_str ());
a9f4bb21 2108 }
d6b0e80f
AC
2109 }
2110
2111 if (thread_dead)
2112 {
e26af52f 2113 exit_lwp (lp);
d6b0e80f
AC
2114 return 0;
2115 }
2116
2117 gdb_assert (WIFSTOPPED (status));
8817a6f2 2118 lp->stopped = 1;
d6b0e80f 2119
8784d563
PA
2120 if (lp->must_set_ptrace_flags)
2121 {
5b6d1e4f 2122 inferior *inf = find_inferior_pid (linux_target, lp->ptid.pid ());
de0d863e 2123 int options = linux_nat_ptrace_options (inf->attach_flag);
8784d563 2124
e38504b3 2125 linux_enable_event_reporting (lp->ptid.lwp (), options);
8784d563
PA
2126 lp->must_set_ptrace_flags = 0;
2127 }
2128
ca2163eb
PA
2129 /* Handle GNU/Linux's syscall SIGTRAPs. */
2130 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2131 {
2132 /* No longer need the sysgood bit. The ptrace event ends up
2133 recorded in lp->waitstatus if we care for it. We can carry
2134 on handling the event like a regular SIGTRAP from here
2135 on. */
2136 status = W_STOPCODE (SIGTRAP);
2137 if (linux_handle_syscall_trap (lp, 1))
2138 return wait_lwp (lp);
2139 }
bfd09d20
JS
2140 else
2141 {
2142 /* Almost all other ptrace-stops are known to be outside of system
2143 calls, with further exceptions in linux_handle_extended_wait. */
2144 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2145 }
ca2163eb 2146
d6b0e80f 2147 /* Handle GNU/Linux's extended waitstatus for trace events. */
89a5711c
DB
2148 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2149 && linux_is_extended_waitstatus (status))
d6b0e80f 2150 {
9327494e 2151 linux_nat_debug_printf ("Handling extended status 0x%06x", status);
4dd63d48 2152 linux_handle_extended_wait (lp, status);
20ba1ce6 2153 return 0;
d6b0e80f
AC
2154 }
2155
2156 return status;
2157}
2158
2159/* Send a SIGSTOP to LP. */
2160
2161static int
d3a70e03 2162stop_callback (struct lwp_info *lp)
d6b0e80f
AC
2163{
2164 if (!lp->stopped && !lp->signalled)
2165 {
2166 int ret;
2167
9327494e 2168 linux_nat_debug_printf ("kill %s **<SIGSTOP>**",
e53c95d4 2169 lp->ptid.to_string ().c_str ());
9327494e 2170
d6b0e80f 2171 errno = 0;
e38504b3 2172 ret = kill_lwp (lp->ptid.lwp (), SIGSTOP);
9327494e 2173 linux_nat_debug_printf ("lwp kill %d %s", ret,
d6b0e80f 2174 errno ? safe_strerror (errno) : "ERRNO-OK");
d6b0e80f
AC
2175
2176 lp->signalled = 1;
2177 gdb_assert (lp->status == 0);
2178 }
2179
2180 return 0;
2181}
2182
7b50312a
PA
2183/* Request a stop on LWP. */
2184
2185void
2186linux_stop_lwp (struct lwp_info *lwp)
2187{
d3a70e03 2188 stop_callback (lwp);
7b50312a
PA
2189}
2190
2db9a427
PA
2191/* See linux-nat.h */
2192
2193void
2194linux_stop_and_wait_all_lwps (void)
2195{
2196 /* Stop all LWP's ... */
d3a70e03 2197 iterate_over_lwps (minus_one_ptid, stop_callback);
2db9a427
PA
2198
2199 /* ... and wait until all of them have reported back that
2200 they're no longer running. */
d3a70e03 2201 iterate_over_lwps (minus_one_ptid, stop_wait_callback);
2db9a427
PA
2202}
2203
2204/* See linux-nat.h */
2205
2206void
2207linux_unstop_all_lwps (void)
2208{
2209 iterate_over_lwps (minus_one_ptid,
d3a70e03
TT
2210 [] (struct lwp_info *info)
2211 {
2212 return resume_stopped_resumed_lwps (info, minus_one_ptid);
2213 });
2db9a427
PA
2214}
2215
57380f4e 2216/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2217
2218static int
57380f4e
DJ
2219linux_nat_has_pending_sigint (int pid)
2220{
2221 sigset_t pending, blocked, ignored;
57380f4e
DJ
2222
2223 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2224
2225 if (sigismember (&pending, SIGINT)
2226 && !sigismember (&ignored, SIGINT))
2227 return 1;
2228
2229 return 0;
2230}
2231
2232/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2233
2234static int
d3a70e03 2235set_ignore_sigint (struct lwp_info *lp)
d6b0e80f 2236{
57380f4e
DJ
2237 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2238 flag to consume the next one. */
2239 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2240 && WSTOPSIG (lp->status) == SIGINT)
2241 lp->status = 0;
2242 else
2243 lp->ignore_sigint = 1;
2244
2245 return 0;
2246}
2247
2248/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2249 This function is called after we know the LWP has stopped; if the LWP
2250 stopped before the expected SIGINT was delivered, then it will never have
2251 arrived. Also, if the signal was delivered to a shared queue and consumed
2252 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2253
57380f4e
DJ
2254static void
2255maybe_clear_ignore_sigint (struct lwp_info *lp)
2256{
2257 if (!lp->ignore_sigint)
2258 return;
2259
e38504b3 2260 if (!linux_nat_has_pending_sigint (lp->ptid.lwp ()))
57380f4e 2261 {
9327494e 2262 linux_nat_debug_printf ("Clearing bogus flag for %s",
e53c95d4 2263 lp->ptid.to_string ().c_str ());
57380f4e
DJ
2264 lp->ignore_sigint = 0;
2265 }
2266}
2267
ebec9a0f
PA
2268/* Fetch the possible triggered data watchpoint info and store it in
2269 LP.
2270
2271 On some archs, like x86, that use debug registers to set
2272 watchpoints, it's possible that the way to know which watched
2273 address trapped, is to check the register that is used to select
2274 which address to watch. Problem is, between setting the watchpoint
2275 and reading back which data address trapped, the user may change
2276 the set of watchpoints, and, as a consequence, GDB changes the
2277 debug registers in the inferior. To avoid reading back a stale
2278 stopped-data-address when that happens, we cache in LP the fact
2279 that a watchpoint trapped, and the corresponding data address, as
2280 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2281 registers meanwhile, we have the cached data we can rely on. */
2282
9c02b525
PA
2283static int
2284check_stopped_by_watchpoint (struct lwp_info *lp)
ebec9a0f 2285{
2989a365 2286 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
ebec9a0f
PA
2287 inferior_ptid = lp->ptid;
2288
f6ac5f3d 2289 if (linux_target->low_stopped_by_watchpoint ())
ebec9a0f 2290 {
15c66dd6 2291 lp->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
f6ac5f3d
PA
2292 lp->stopped_data_address_p
2293 = linux_target->low_stopped_data_address (&lp->stopped_data_address);
ebec9a0f
PA
2294 }
2295
15c66dd6 2296 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
9c02b525
PA
2297}
2298
9c02b525 2299/* Returns true if the LWP had stopped for a watchpoint. */
ebec9a0f 2300
57810aa7 2301bool
f6ac5f3d 2302linux_nat_target::stopped_by_watchpoint ()
ebec9a0f
PA
2303{
2304 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2305
2306 gdb_assert (lp != NULL);
2307
15c66dd6 2308 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
ebec9a0f
PA
2309}
2310
57810aa7 2311bool
f6ac5f3d 2312linux_nat_target::stopped_data_address (CORE_ADDR *addr_p)
ebec9a0f
PA
2313{
2314 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2315
2316 gdb_assert (lp != NULL);
2317
2318 *addr_p = lp->stopped_data_address;
2319
2320 return lp->stopped_data_address_p;
2321}
2322
26ab7092
JK
 2323 /* Commonly any breakpoint / watchpoint generates only SIGTRAP. */
2324
135340af
PA
2325bool
2326linux_nat_target::low_status_is_event (int status)
26ab7092
JK
2327{
2328 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2329}
2330
57380f4e
DJ
2331/* Wait until LP is stopped. */
2332
2333static int
d3a70e03 2334stop_wait_callback (struct lwp_info *lp)
57380f4e 2335{
5b6d1e4f 2336 inferior *inf = find_inferior_ptid (linux_target, lp->ptid);
6c95b8df
PA
2337
 2338 /* If this is a vfork parent, bail out; it is not going to report
 2339 any SIGSTOP until the vfork is done. */
2340 if (inf->vfork_child != NULL)
2341 return 0;
2342
d6b0e80f
AC
2343 if (!lp->stopped)
2344 {
2345 int status;
2346
2347 status = wait_lwp (lp);
2348 if (status == 0)
2349 return 0;
2350
57380f4e
DJ
2351 if (lp->ignore_sigint && WIFSTOPPED (status)
2352 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2353 {
57380f4e 2354 lp->ignore_sigint = 0;
d6b0e80f
AC
2355
2356 errno = 0;
e38504b3 2357 ptrace (PTRACE_CONT, lp->ptid.lwp (), 0, 0);
8817a6f2 2358 lp->stopped = 0;
9327494e
SM
2359 linux_nat_debug_printf
2360 ("PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)",
e53c95d4 2361 lp->ptid.to_string ().c_str (),
9327494e 2362 errno ? safe_strerror (errno) : "OK");
d6b0e80f 2363
d3a70e03 2364 return stop_wait_callback (lp);
d6b0e80f
AC
2365 }
2366
57380f4e
DJ
2367 maybe_clear_ignore_sigint (lp);
2368
d6b0e80f
AC
2369 if (WSTOPSIG (status) != SIGSTOP)
2370 {
e5ef252a 2371 /* The thread was stopped with a signal other than SIGSTOP. */
7feb7d06 2372
9327494e 2373 linux_nat_debug_printf ("Pending event %s in %s",
8d06918f 2374 status_to_str ((int) status).c_str (),
e53c95d4 2375 lp->ptid.to_string ().c_str ());
e5ef252a
PA
2376
2377 /* Save the sigtrap event. */
2378 lp->status = status;
e5ef252a 2379 gdb_assert (lp->signalled);
e7ad2f14 2380 save_stop_reason (lp);
d6b0e80f
AC
2381 }
2382 else
2383 {
7010835a 2384 /* We caught the SIGSTOP that we intended to catch. */
e5ef252a 2385
9327494e 2386 linux_nat_debug_printf ("Expected SIGSTOP caught for %s.",
e53c95d4 2387 lp->ptid.to_string ().c_str ());
e5ef252a 2388
d6b0e80f 2389 lp->signalled = 0;
7010835a
AB
2390
2391 /* If we are waiting for this stop so we can report the thread
2392 stopped then we need to record this status. Otherwise, we can
2393 now discard this stop event. */
2394 if (lp->last_resume_kind == resume_stop)
2395 {
2396 lp->status = status;
2397 save_stop_reason (lp);
2398 }
d6b0e80f
AC
2399 }
2400 }
2401
2402 return 0;
2403}
2404
9c02b525
PA
2405/* Return non-zero if LP has a wait status pending. Discard the
2406 pending event and resume the LWP if the event that originally
2407 caused the stop became uninteresting. */
d6b0e80f
AC
2408
2409static int
d3a70e03 2410status_callback (struct lwp_info *lp)
d6b0e80f
AC
2411{
2412 /* Only report a pending wait status if we pretend that this has
2413 indeed been resumed. */
ca2163eb
PA
2414 if (!lp->resumed)
2415 return 0;
2416
eb54c8bf
PA
2417 if (!lwp_status_pending_p (lp))
2418 return 0;
2419
15c66dd6
PA
2420 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
2421 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
9c02b525 2422 {
5b6d1e4f 2423 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
9c02b525
PA
2424 CORE_ADDR pc;
2425 int discard = 0;
2426
9c02b525
PA
2427 pc = regcache_read_pc (regcache);
2428
2429 if (pc != lp->stop_pc)
2430 {
9327494e 2431 linux_nat_debug_printf ("PC of %s changed. was=%s, now=%s",
e53c95d4 2432 lp->ptid.to_string ().c_str (),
9327494e
SM
2433 paddress (target_gdbarch (), lp->stop_pc),
2434 paddress (target_gdbarch (), pc));
9c02b525
PA
2435 discard = 1;
2436 }
faf09f01
PA
2437
2438#if !USE_SIGTRAP_SIGINFO
a01bda52 2439 else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
9c02b525 2440 {
9327494e 2441 linux_nat_debug_printf ("previous breakpoint of %s, at %s gone",
e53c95d4 2442 lp->ptid.to_string ().c_str (),
9327494e 2443 paddress (target_gdbarch (), lp->stop_pc));
9c02b525
PA
2444
2445 discard = 1;
2446 }
faf09f01 2447#endif
9c02b525
PA
2448
2449 if (discard)
2450 {
9327494e 2451 linux_nat_debug_printf ("pending event of %s cancelled.",
e53c95d4 2452 lp->ptid.to_string ().c_str ());
9c02b525
PA
2453
2454 lp->status = 0;
2455 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
2456 return 0;
2457 }
9c02b525
PA
2458 }
2459
eb54c8bf 2460 return 1;
d6b0e80f
AC
2461}
2462
d6b0e80f
AC
2463/* Count the LWP's that have had events. */
2464
2465static int
d3a70e03 2466count_events_callback (struct lwp_info *lp, int *count)
d6b0e80f 2467{
d6b0e80f
AC
2468 gdb_assert (count != NULL);
2469
9c02b525
PA
2470 /* Select only resumed LWPs that have an event pending. */
2471 if (lp->resumed && lwp_status_pending_p (lp))
d6b0e80f
AC
2472 (*count)++;
2473
2474 return 0;
2475}
2476
2477/* Select the LWP (if any) that is currently being single-stepped. */
2478
2479static int
d3a70e03 2480select_singlestep_lwp_callback (struct lwp_info *lp)
d6b0e80f 2481{
25289eb2
PA
2482 if (lp->last_resume_kind == resume_step
2483 && lp->status != 0)
d6b0e80f
AC
2484 return 1;
2485 else
2486 return 0;
2487}
2488
8a99810d
PA
2489/* Returns true if LP has a status pending. */
2490
2491static int
2492lwp_status_pending_p (struct lwp_info *lp)
2493{
2494 /* We check for lp->waitstatus in addition to lp->status, because we
2495 can have pending process exits recorded in lp->status and
2496 W_EXITCODE(0,0) happens to be 0. */
183be222 2497 return lp->status != 0 || lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE;
8a99810d
PA
2498}
2499
b90fc188 2500/* Select the Nth LWP that has had an event. */
d6b0e80f
AC
2501
2502static int
d3a70e03 2503select_event_lwp_callback (struct lwp_info *lp, int *selector)
d6b0e80f 2504{
d6b0e80f
AC
2505 gdb_assert (selector != NULL);
2506
9c02b525
PA
2507 /* Select only resumed LWPs that have an event pending. */
2508 if (lp->resumed && lwp_status_pending_p (lp))
d6b0e80f
AC
2509 if ((*selector)-- == 0)
2510 return 1;
2511
2512 return 0;
2513}
2514
e7ad2f14
PA
2515/* Called when the LWP stopped for a signal/trap. If it stopped for a
2516 trap check what caused it (breakpoint, watchpoint, trace, etc.),
2517 and save the result in the LWP's stop_reason field. If it stopped
2518 for a breakpoint, decrement the PC if necessary on the lwp's
2519 architecture. */
9c02b525 2520
e7ad2f14
PA
2521static void
2522save_stop_reason (struct lwp_info *lp)
710151dd 2523{
e7ad2f14
PA
2524 struct regcache *regcache;
2525 struct gdbarch *gdbarch;
515630c5 2526 CORE_ADDR pc;
9c02b525 2527 CORE_ADDR sw_bp_pc;
faf09f01
PA
2528#if USE_SIGTRAP_SIGINFO
2529 siginfo_t siginfo;
2530#endif
9c02b525 2531
e7ad2f14
PA
2532 gdb_assert (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON);
2533 gdb_assert (lp->status != 0);
2534
135340af 2535 if (!linux_target->low_status_is_event (lp->status))
e7ad2f14
PA
2536 return;
2537
a9deee17
PA
2538 inferior *inf = find_inferior_ptid (linux_target, lp->ptid);
2539 if (inf->starting_up)
2540 return;
2541
5b6d1e4f 2542 regcache = get_thread_regcache (linux_target, lp->ptid);
ac7936df 2543 gdbarch = regcache->arch ();
e7ad2f14 2544
9c02b525 2545 pc = regcache_read_pc (regcache);
527a273a 2546 sw_bp_pc = pc - gdbarch_decr_pc_after_break (gdbarch);
515630c5 2547
faf09f01
PA
2548#if USE_SIGTRAP_SIGINFO
2549 if (linux_nat_get_siginfo (lp->ptid, &siginfo))
2550 {
2551 if (siginfo.si_signo == SIGTRAP)
2552 {
e7ad2f14
PA
2553 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
2554 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
faf09f01 2555 {
e7ad2f14
PA
2556 /* The si_code is ambiguous on this arch -- check debug
2557 registers. */
2558 if (!check_stopped_by_watchpoint (lp))
2559 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
2560 }
2561 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
2562 {
2563 /* If we determine the LWP stopped for a SW breakpoint,
2564 trust it. Particularly don't check watchpoint
7da6a5b9 2565 registers, because, at least on s390, we'd find
e7ad2f14
PA
2566 stopped-by-watchpoint as long as there's a watchpoint
2567 set. */
faf09f01 2568 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
faf09f01 2569 }
e7ad2f14 2570 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
faf09f01 2571 {
e7ad2f14
PA
2572 /* This can indicate either a hardware breakpoint or
2573 hardware watchpoint. Check debug registers. */
2574 if (!check_stopped_by_watchpoint (lp))
2575 lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
faf09f01 2576 }
2bf6fb9d
PA
2577 else if (siginfo.si_code == TRAP_TRACE)
2578 {
9327494e 2579 linux_nat_debug_printf ("%s stopped by trace",
e53c95d4 2580 lp->ptid.to_string ().c_str ());
e7ad2f14
PA
2581
2582 /* We may have single stepped an instruction that
2583 triggered a watchpoint. In that case, on some
2584 architectures (such as x86), instead of TRAP_HWBKPT,
2585 si_code indicates TRAP_TRACE, and we need to check
2586 the debug registers separately. */
2587 check_stopped_by_watchpoint (lp);
2bf6fb9d 2588 }
faf09f01
PA
2589 }
2590 }
2591#else
9c02b525 2592 if ((!lp->step || lp->stop_pc == sw_bp_pc)
a01bda52 2593 && software_breakpoint_inserted_here_p (regcache->aspace (),
9c02b525 2594 sw_bp_pc))
710151dd 2595 {
9c02b525
PA
2596 /* The LWP was either continued, or stepped a software
2597 breakpoint instruction. */
e7ad2f14
PA
2598 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
2599 }
2600
a01bda52 2601 if (hardware_breakpoint_inserted_here_p (regcache->aspace (), pc))
e7ad2f14
PA
2602 lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
2603
2604 if (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
2605 check_stopped_by_watchpoint (lp);
2606#endif
2607
2608 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2609 {
9327494e 2610 linux_nat_debug_printf ("%s stopped by software breakpoint",
e53c95d4 2611 lp->ptid.to_string ().c_str ());
710151dd
PA
2612
2613 /* Back up the PC if necessary. */
9c02b525
PA
2614 if (pc != sw_bp_pc)
2615 regcache_write_pc (regcache, sw_bp_pc);
515630c5 2616
e7ad2f14
PA
2617 /* Update this so we record the correct stop PC below. */
2618 pc = sw_bp_pc;
710151dd 2619 }
e7ad2f14 2620 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
9c02b525 2621 {
9327494e 2622 linux_nat_debug_printf ("%s stopped by hardware breakpoint",
e53c95d4 2623 lp->ptid.to_string ().c_str ());
e7ad2f14
PA
2624 }
2625 else if (lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
2626 {
9327494e 2627 linux_nat_debug_printf ("%s stopped by hardware watchpoint",
e53c95d4 2628 lp->ptid.to_string ().c_str ());
9c02b525 2629 }
d6b0e80f 2630
e7ad2f14 2631 lp->stop_pc = pc;
d6b0e80f
AC
2632}
2633
faf09f01
PA
2634
2635/* Returns true if the LWP had stopped for a software breakpoint. */
2636
57810aa7 2637bool
f6ac5f3d 2638linux_nat_target::stopped_by_sw_breakpoint ()
faf09f01
PA
2639{
2640 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2641
2642 gdb_assert (lp != NULL);
2643
2644 return lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2645}
2646
2647/* Implement the supports_stopped_by_sw_breakpoint method. */
2648
57810aa7 2649bool
f6ac5f3d 2650linux_nat_target::supports_stopped_by_sw_breakpoint ()
faf09f01
PA
2651{
2652 return USE_SIGTRAP_SIGINFO;
2653}
2654
2655/* Returns true if the LWP had stopped for a hardware
2656 breakpoint/watchpoint. */
2657
57810aa7 2658bool
f6ac5f3d 2659linux_nat_target::stopped_by_hw_breakpoint ()
faf09f01
PA
2660{
2661 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2662
2663 gdb_assert (lp != NULL);
2664
2665 return lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2666}
2667
2668/* Implement the supports_stopped_by_hw_breakpoint method. */
2669
57810aa7 2670bool
f6ac5f3d 2671linux_nat_target::supports_stopped_by_hw_breakpoint ()
faf09f01
PA
2672{
2673 return USE_SIGTRAP_SIGINFO;
2674}
2675
d6b0e80f
AC
2676/* Select one LWP out of those that have events pending. */
2677
2678static void
d90e17a7 2679select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
d6b0e80f
AC
2680{
2681 int num_events = 0;
2682 int random_selector;
9c02b525 2683 struct lwp_info *event_lp = NULL;
d6b0e80f 2684
ac264b3b 2685 /* Record the wait status for the original LWP. */
d6b0e80f
AC
2686 (*orig_lp)->status = *status;
2687
9c02b525
PA
2688 /* In all-stop, give preference to the LWP that is being
2689 single-stepped. There will be at most one, and it will be the
2690 LWP that the core is most interested in. If we didn't do this,
2691 then we'd have to handle pending step SIGTRAPs somehow in case
2692 the core later continues the previously-stepped thread, as
2693 otherwise we'd report the pending SIGTRAP then, and the core, not
2694 having stepped the thread, wouldn't understand what the trap was
2695 for, and therefore would report it to the user as a random
2696 signal. */
fbea99ea 2697 if (!target_is_non_stop_p ())
d6b0e80f 2698 {
d3a70e03 2699 event_lp = iterate_over_lwps (filter, select_singlestep_lwp_callback);
9c02b525
PA
2700 if (event_lp != NULL)
2701 {
9327494e 2702 linux_nat_debug_printf ("Select single-step %s",
e53c95d4 2703 event_lp->ptid.to_string ().c_str ());
9c02b525 2704 }
d6b0e80f 2705 }
9c02b525
PA
2706
2707 if (event_lp == NULL)
d6b0e80f 2708 {
9c02b525 2709 /* Pick one at random, out of those which have had events. */
d6b0e80f 2710
9c02b525 2711 /* First see how many events we have. */
d3a70e03
TT
2712 iterate_over_lwps (filter,
2713 [&] (struct lwp_info *info)
2714 {
2715 return count_events_callback (info, &num_events);
2716 });
8bf3b159 2717 gdb_assert (num_events > 0);
d6b0e80f 2718
9c02b525
PA
2719 /* Now randomly pick a LWP out of those that have had
2720 events. */
d6b0e80f
AC
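 /* rand () / (RAND_MAX + 1.0) lies in [0, 1), so the expression
    below yields an index in the range [0, num_events). */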
2721 random_selector = (int)
2722 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2723
9327494e
SM
2724 if (num_events > 1)
2725 linux_nat_debug_printf ("Found %d events, selecting #%d",
2726 num_events, random_selector);
d6b0e80f 2727
d3a70e03
TT
2728 event_lp
2729 = (iterate_over_lwps
2730 (filter,
2731 [&] (struct lwp_info *info)
2732 {
2733 return select_event_lwp_callback (info,
2734 &random_selector);
2735 }));
d6b0e80f
AC
2736 }
2737
2738 if (event_lp != NULL)
2739 {
2740 /* Switch the event LWP. */
2741 *orig_lp = event_lp;
2742 *status = event_lp->status;
2743 }
2744
2745 /* Flush the wait status for the event LWP. */
2746 (*orig_lp)->status = 0;
2747}
2748
2749/* Return non-zero if LP has been resumed. */
2750
2751static int
d3a70e03 2752resumed_callback (struct lwp_info *lp)
d6b0e80f
AC
2753{
2754 return lp->resumed;
2755}
2756
02f3fc28 2757/* Check if we should go on and pass this event to common code.
12d9289a 2758
897608ed
SM
2759 If so, save the status to the lwp_info structure associated to LWPID. */
2760
2761static void
9c02b525 2762linux_nat_filter_event (int lwpid, int status)
02f3fc28
PA
2763{
2764 struct lwp_info *lp;
89a5711c 2765 int event = linux_ptrace_get_extended_event (status);
02f3fc28 2766
f2907e49 2767 lp = find_lwp_pid (ptid_t (lwpid));
02f3fc28 2768
1abeb1e9
PA
2769 /* Check for events reported by anything not in our LWP list. */
2770 if (lp == nullptr)
0e5bf2a8 2771 {
1abeb1e9
PA
2772 if (WIFSTOPPED (status))
2773 {
2774 if (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC)
2775 {
2776 /* A non-leader thread exec'ed after we've seen the
2777 leader zombie, and removed it from our lists (in
2778 check_zombie_leaders). The non-leader thread changes
2779 its tid to the tgid. */
2780 linux_nat_debug_printf
2781 ("Re-adding thread group leader LWP %d after exec.",
2782 lwpid);
0e5bf2a8 2783
1abeb1e9
PA
2784 lp = add_lwp (ptid_t (lwpid, lwpid));
2785 lp->stopped = 1;
2786 lp->resumed = 1;
2787 add_thread (linux_target, lp->ptid);
2788 }
2789 else
2790 {
2791 /* A process we are controlling has forked and the new
2792 child's stop was reported to us by the kernel. Save
2793 its PID and go back to waiting for the fork event to
2794 be reported - the stopped process might be returned
2795 from waitpid before or after the fork event is. */
2796 linux_nat_debug_printf
2797 ("Saving LWP %d status %s in stopped_pids list",
2798 lwpid, status_to_str (status).c_str ());
2799 add_to_pid_list (&stopped_pids, lwpid, status);
2800 }
2801 }
2802 else
2803 {
2804 /* Don't report an event for the exit of an LWP not in our
2805 list, i.e. not part of any inferior we're debugging.
2806 This can happen if we detach from a program we originally
6cf20c46
PA
2807 forked and then it exits. However, note that we may have
2808 earlier deleted a leader of an inferior we're debugging,
2809 in check_zombie_leaders. Re-add it back here if so. */
2810 for (inferior *inf : all_inferiors (linux_target))
2811 {
2812 if (inf->pid == lwpid)
2813 {
2814 linux_nat_debug_printf
2815 ("Re-adding thread group leader LWP %d after exit.",
2816 lwpid);
2817
2818 lp = add_lwp (ptid_t (lwpid, lwpid));
2819 lp->resumed = 1;
2820 add_thread (linux_target, lp->ptid);
2821 break;
2822 }
2823 }
1abeb1e9 2824 }
0e5bf2a8 2825
1abeb1e9
PA
2826 if (lp == nullptr)
2827 return;
02f3fc28
PA
2828 }
2829
8817a6f2
PA
2830 /* This LWP is stopped now. (And if dead, this prevents it from
2831 ever being continued.) */
2832 lp->stopped = 1;
2833
8784d563
PA
2834 if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
2835 {
5b6d1e4f 2836 inferior *inf = find_inferior_pid (linux_target, lp->ptid.pid ());
de0d863e 2837 int options = linux_nat_ptrace_options (inf->attach_flag);
8784d563 2838
e38504b3 2839 linux_enable_event_reporting (lp->ptid.lwp (), options);
8784d563
PA
2840 lp->must_set_ptrace_flags = 0;
2841 }
2842
ca2163eb
PA
2843 /* Handle GNU/Linux's syscall SIGTRAPs. */
2844 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2845 {
2846 /* No longer need the sysgood bit. The ptrace event ends up
2847 recorded in lp->waitstatus if we care for it. We can carry
2848 on handling the event like a regular SIGTRAP from here
2849 on. */
2850 status = W_STOPCODE (SIGTRAP);
2851 if (linux_handle_syscall_trap (lp, 0))
897608ed 2852 return;
ca2163eb 2853 }
bfd09d20
JS
2854 else
2855 {
2856 /* Almost all other ptrace-stops are known to be outside of system
2857 calls, with further exceptions in linux_handle_extended_wait. */
2858 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2859 }
02f3fc28 2860
ca2163eb 2861 /* Handle GNU/Linux's extended waitstatus for trace events. */
89a5711c
DB
2862 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2863 && linux_is_extended_waitstatus (status))
02f3fc28 2864 {
9327494e
SM
2865 linux_nat_debug_printf ("Handling extended status 0x%06x", status);
2866
4dd63d48 2867 if (linux_handle_extended_wait (lp, status))
897608ed 2868 return;
02f3fc28
PA
2869 }
2870
2871 /* Check if the thread has exited. */
9c02b525
PA
2872 if (WIFEXITED (status) || WIFSIGNALED (status))
2873 {
6cf20c46 2874 if (!report_thread_events && !is_leader (lp))
02f3fc28 2875 {
9327494e 2876 linux_nat_debug_printf ("%s exited.",
e53c95d4 2877 lp->ptid.to_string ().c_str ());
9c02b525 2878
6cf20c46 2879 /* If this was not the leader exiting, then the exit signal
4a6ed09b
PA
2880 was not the end of the debugged application and should be
2881 ignored. */
2882 exit_lwp (lp);
897608ed 2883 return;
02f3fc28
PA
2884 }
2885
77598427
PA
2886 /* Note that even if the leader was ptrace-stopped, it can still
2887 exit, if e.g., some other thread brings down the whole
2888 process (calls `exit'). So don't assert that the lwp is
2889 resumed. */
9327494e
SM
2890 linux_nat_debug_printf ("LWP %ld exited (resumed=%d)",
2891 lp->ptid.lwp (), lp->resumed);
02f3fc28 2892
9c02b525
PA
 2893 /* Dead LWPs aren't expected to report a pending sigstop. */
2894 lp->signalled = 0;
2895
2896 /* Store the pending event in the waitstatus, because
2897 W_EXITCODE(0,0) == 0. */
7509b829 2898 lp->waitstatus = host_status_to_waitstatus (status);
897608ed 2899 return;
02f3fc28
PA
2900 }
2901
02f3fc28
PA
2902 /* Make sure we don't report a SIGSTOP that we sent ourselves in
2903 an attempt to stop an LWP. */
2904 if (lp->signalled
2905 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2906 {
02f3fc28
PA
2907 lp->signalled = 0;
2908
2bf6fb9d 2909 if (lp->last_resume_kind == resume_stop)
25289eb2 2910 {
9327494e 2911 linux_nat_debug_printf ("resume_stop SIGSTOP caught for %s.",
e53c95d4 2912 lp->ptid.to_string ().c_str ());
2bf6fb9d
PA
2913 }
2914 else
2915 {
2916 /* This is a delayed SIGSTOP. Filter out the event. */
02f3fc28 2917
9327494e
SM
2918 linux_nat_debug_printf
2919 ("%s %s, 0, 0 (discard delayed SIGSTOP)",
2920 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
e53c95d4 2921 lp->ptid.to_string ().c_str ());
02f3fc28 2922
2bf6fb9d 2923 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
25289eb2 2924 gdb_assert (lp->resumed);
897608ed 2925 return;
25289eb2 2926 }
02f3fc28
PA
2927 }
2928
57380f4e
DJ
2929 /* Make sure we don't report a SIGINT that we have already displayed
2930 for another thread. */
2931 if (lp->ignore_sigint
2932 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
2933 {
9327494e 2934 linux_nat_debug_printf ("Delayed SIGINT caught for %s.",
e53c95d4 2935 lp->ptid.to_string ().c_str ());
57380f4e
DJ
2936
2937 /* This is a delayed SIGINT. */
2938 lp->ignore_sigint = 0;
2939
8a99810d 2940 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
9327494e
SM
2941 linux_nat_debug_printf ("%s %s, 0, 0 (discard SIGINT)",
2942 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
e53c95d4 2943 lp->ptid.to_string ().c_str ());
57380f4e
DJ
2944 gdb_assert (lp->resumed);
2945
2946 /* Discard the event. */
897608ed 2947 return;
57380f4e
DJ
2948 }
2949
9c02b525
PA
2950 /* Don't report signals that GDB isn't interested in, such as
2951 signals that are neither printed nor stopped upon. Stopping all
7da6a5b9 2952 threads can be a bit time-consuming, so if we want decent
9c02b525
PA
2953 performance with heavily multi-threaded programs, especially when
2954 they're using a high frequency timer, we'd better avoid it if we
2955 can. */
2956 if (WIFSTOPPED (status))
2957 {
2958 enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
2959
fbea99ea 2960 if (!target_is_non_stop_p ())
9c02b525
PA
2961 {
2962 /* Only do the below in all-stop, as we currently use SIGSTOP
2963 to implement target_stop (see linux_nat_stop) in
2964 non-stop. */
2965 if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
2966 {
2967 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
2968 forwarded to the entire process group, that is, all LWPs
2969 will receive it - unless they're using CLONE_THREAD to
2970 share signals. Since we only want to report it once, we
2971 mark it as ignored for all LWPs except this one. */
d3a70e03 2972 iterate_over_lwps (ptid_t (lp->ptid.pid ()), set_ignore_sigint);
9c02b525
PA
2973 lp->ignore_sigint = 0;
2974 }
2975 else
2976 maybe_clear_ignore_sigint (lp);
2977 }
2978
2979 /* When using hardware single-step, we need to report every signal.
c9587f88 2980 Otherwise, signals in pass_mask may be short-circuited
d8c06f22
AB
2981 except signals that might be caused by a breakpoint, or SIGSTOP
2982 if we sent the SIGSTOP and are waiting for it to arrive. */
9c02b525 2983 if (!lp->step
c9587f88 2984 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status))
d8c06f22 2985 && (WSTOPSIG (status) != SIGSTOP
5b6d1e4f 2986 || !find_thread_ptid (linux_target, lp->ptid)->stop_requested)
c9587f88 2987 && !linux_wstatus_maybe_breakpoint (status))
9c02b525
PA
2988 {
2989 linux_resume_one_lwp (lp, lp->step, signo);
9327494e
SM
2990 linux_nat_debug_printf
2991 ("%s %s, %s (preempt 'handle')",
2992 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
e53c95d4 2993 lp->ptid.to_string ().c_str (),
9327494e
SM
2994 (signo != GDB_SIGNAL_0
2995 ? strsignal (gdb_signal_to_host (signo)) : "0"));
897608ed 2996 return;
9c02b525
PA
2997 }
2998 }
2999
02f3fc28
PA
3000 /* An interesting event. */
3001 gdb_assert (lp);
ca2163eb 3002 lp->status = status;
e7ad2f14 3003 save_stop_reason (lp);
02f3fc28
PA
3004}
3005
0e5bf2a8
PA
3006/* Detect zombie thread group leaders, and "exit" them. We can't reap
3007 their exits until all other threads in the group have exited. */
3008
3009static void
3010check_zombie_leaders (void)
3011{
08036331 3012 for (inferior *inf : all_inferiors ())
0e5bf2a8
PA
3013 {
3014 struct lwp_info *leader_lp;
3015
3016 if (inf->pid == 0)
3017 continue;
3018
f2907e49 3019 leader_lp = find_lwp_pid (ptid_t (inf->pid));
0e5bf2a8
PA
3020 if (leader_lp != NULL
3021 /* Check if there are other threads in the group, as we may
6cf20c46
PA
3022 have raced with the inferior simply exiting. Note this
3023 isn't a watertight check. If the inferior is
3024 multi-threaded and is exiting, it may be we see the
3025 leader as zombie before we reap all the non-leader
3026 threads. See comments below. */
0e5bf2a8 3027 && num_lwps (inf->pid) > 1
5f572dec 3028 && linux_proc_pid_is_zombie (inf->pid))
0e5bf2a8 3029 {
6cf20c46
PA
3030 /* A zombie leader in a multi-threaded program can mean one
3031 of three things:
3032
3033 #1 - Only the leader exited, not the whole program, e.g.,
3034 with pthread_exit. Since we can't reap the leader's exit
3035 status until all other threads are gone and reaped too,
3036 we want to delete the zombie leader right away, as it
3037 can't be debugged, we can't read its registers, etc.
3038 This is the main reason we check for zombie leaders
3039 disappearing.
3040
3041 #2 - The whole thread-group/process exited (a group exit,
 3042 via e.g. exit(3)), and there is (or will be shortly) an
3043 exit reported for each thread in the process, and then
3044 finally an exit for the leader once the non-leaders are
3045 reaped.
3046
3047 #3 - There are 3 or more threads in the group, and a
3048 thread other than the leader exec'd. See comments on
3049 exec events at the top of the file.
3050
3051 Ideally we would never delete the leader for case #2.
3052 Instead, we want to collect the exit status of each
3053 non-leader thread, and then finally collect the exit
3054 status of the leader as normal and use its exit code as
3055 whole-process exit code. Unfortunately, there's no
3056 race-free way to distinguish cases #1 and #2. We can't
3057 assume the exit events for the non-leaders threads are
3058 already pending in the kernel, nor can we assume the
3059 non-leader threads are in zombie state already. Between
3060 the leader becoming zombie and the non-leaders exiting
3061 and becoming zombie themselves, there's a small time
3062 window, so such a check would be racy. Temporarily
3063 pausing all threads and checking to see if all threads
3064 exit or not before re-resuming them would work in the
3065 case that all threads are running right now, but it
3066 wouldn't work if some thread is currently already
3067 ptrace-stopped, e.g., due to scheduler-locking.
3068
3069 So what we do is we delete the leader anyhow, and then
3070 later on when we see its exit status, we re-add it back.
3071 We also make sure that we only report a whole-process
3072 exit when we see the leader exiting, as opposed to when
3073 the last LWP in the LWP list exits, which can be a
3074 non-leader if we deleted the leader here. */
9327494e 3075 linux_nat_debug_printf ("Thread group leader %d zombie "
6cf20c46
PA
3076 "(it exited, or another thread execd), "
3077 "deleting it.",
9327494e 3078 inf->pid);
0e5bf2a8
PA
3079 exit_lwp (leader_lp);
3080 }
3081 }
3082}
3083
aa01bd36
PA
3084/* Convenience function that is called when the kernel reports an exit
3085 event. This decides whether to report the event to GDB as a
3086 process exit event, a thread exit event, or to suppress the
3087 event. */
3088
3089static ptid_t
3090filter_exit_event (struct lwp_info *event_child,
3091 struct target_waitstatus *ourstatus)
3092{
3093 ptid_t ptid = event_child->ptid;
3094
6cf20c46 3095 if (!is_leader (event_child))
aa01bd36
PA
3096 {
3097 if (report_thread_events)
183be222 3098 ourstatus->set_thread_exited (0);
aa01bd36 3099 else
183be222 3100 ourstatus->set_ignore ();
aa01bd36
PA
3101
3102 exit_lwp (event_child);
3103 }
3104
3105 return ptid;
3106}
3107
d6b0e80f 3108static ptid_t
f6ac5f3d 3109linux_nat_wait_1 (ptid_t ptid, struct target_waitstatus *ourstatus,
b60cea74 3110 target_wait_flags target_options)
d6b0e80f 3111{
fc9b8e47 3112 sigset_t prev_mask;
4b60df3d 3113 enum resume_kind last_resume_kind;
12d9289a 3114 struct lwp_info *lp;
12d9289a 3115 int status;
d6b0e80f 3116
9327494e 3117 linux_nat_debug_printf ("enter");
b84876c2 3118
f973ed9c
DJ
3119 /* The first time we get here after starting a new inferior, we may
3120 not have added it to the LWP list yet - this is the earliest
3121 moment at which we know its PID. */
677c92fe 3122 if (ptid.is_pid () && find_lwp_pid (ptid) == nullptr)
f973ed9c 3123 {
677c92fe 3124 ptid_t lwp_ptid (ptid.pid (), ptid.pid ());
27c9d204 3125
677c92fe
SM
3126 /* Upgrade the main thread's ptid. */
3127 thread_change_ptid (linux_target, ptid, lwp_ptid);
3128 lp = add_initial_lwp (lwp_ptid);
f973ed9c
DJ
3129 lp->resumed = 1;
3130 }
3131
12696c10 3132 /* Make sure SIGCHLD is blocked until the sigsuspend below. */
7feb7d06 3133 block_child_signals (&prev_mask);
d6b0e80f 3134
d6b0e80f 3135 /* First check if there is a LWP with a wait status pending. */
d3a70e03 3136 lp = iterate_over_lwps (ptid, status_callback);
8a99810d 3137 if (lp != NULL)
d6b0e80f 3138 {
9327494e 3139 linux_nat_debug_printf ("Using pending wait status %s for %s.",
8d06918f 3140 status_to_str (lp->status).c_str (),
e53c95d4 3141 lp->ptid.to_string ().c_str ());
d6b0e80f
AC
3142 }
3143
9c02b525
PA
3144 /* But if we don't find a pending event, we'll have to wait. Always
3145 pull all events out of the kernel. We'll randomly select an
3146 event LWP out of all that have events, to prevent starvation. */
7feb7d06 3147
d90e17a7 3148 while (lp == NULL)
d6b0e80f
AC
3149 {
3150 pid_t lwpid;
3151
0e5bf2a8
PA
 3152 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
3153 quirks:
3154
3155 - If the thread group leader exits while other threads in the
3156 thread group still exist, waitpid(TGID, ...) hangs. That
3157 waitpid won't return an exit status until the other threads
85102364 3158 in the group are reaped.
0e5bf2a8
PA
3159
3160 - When a non-leader thread execs, that thread just vanishes
3161 without reporting an exit (so we'd hang if we waited for it
3162 explicitly in that case). The exec event is reported to
3163 the TGID pid. */
3164
3165 errno = 0;
4a6ed09b 3166 lwpid = my_waitpid (-1, &status, __WALL | WNOHANG);
0e5bf2a8 3167
9327494e
SM
3168 linux_nat_debug_printf ("waitpid(-1, ...) returned %d, %s",
3169 lwpid,
3170 errno ? safe_strerror (errno) : "ERRNO-OK");
b84876c2 3171
d6b0e80f
AC
3172 if (lwpid > 0)
3173 {
9327494e 3174 linux_nat_debug_printf ("waitpid %ld received %s",
8d06918f
SM
3175 (long) lwpid,
3176 status_to_str (status).c_str ());
d6b0e80f 3177
9c02b525 3178 linux_nat_filter_event (lwpid, status);
0e5bf2a8
PA
3179 /* Retry until nothing comes out of waitpid. A single
3180 SIGCHLD can indicate more than one child stopped. */
3181 continue;
d6b0e80f
AC
3182 }
3183
20ba1ce6
PA
3184 /* Now that we've pulled all events out of the kernel, resume
3185 LWPs that don't have an interesting event to report. */
3186 iterate_over_lwps (minus_one_ptid,
d3a70e03
TT
3187 [] (struct lwp_info *info)
3188 {
3189 return resume_stopped_resumed_lwps (info, minus_one_ptid);
3190 });
20ba1ce6
PA
3191
3192 /* ... and find an LWP with a status to report to the core, if
3193 any. */
d3a70e03 3194 lp = iterate_over_lwps (ptid, status_callback);
9c02b525
PA
3195 if (lp != NULL)
3196 break;
3197
0e5bf2a8
PA
3198 /* Check for zombie thread group leaders. Those can't be reaped
3199 until all other threads in the thread group are. */
3200 check_zombie_leaders ();
d6b0e80f 3201
0e5bf2a8
PA
3202 /* If there are no resumed children left, bail. We'd be stuck
3203 forever in the sigsuspend call below otherwise. */
d3a70e03 3204 if (iterate_over_lwps (ptid, resumed_callback) == NULL)
0e5bf2a8 3205 {
9327494e 3206 linux_nat_debug_printf ("exit (no resumed LWP)");
b84876c2 3207
183be222 3208 ourstatus->set_no_resumed ();
b84876c2 3209
0e5bf2a8
PA
3210 restore_child_signals_mask (&prev_mask);
3211 return minus_one_ptid;
d6b0e80f 3212 }
28736962 3213
0e5bf2a8
PA
3214 /* No interesting event to report to the core. */
3215
3216 if (target_options & TARGET_WNOHANG)
3217 {
9327494e 3218 linux_nat_debug_printf ("exit (ignore)");
28736962 3219
183be222 3220 ourstatus->set_ignore ();
28736962
PA
3221 restore_child_signals_mask (&prev_mask);
3222 return minus_one_ptid;
3223 }
d6b0e80f
AC
3224
3225 /* We shouldn't end up here unless we want to try again. */
d90e17a7 3226 gdb_assert (lp == NULL);
0e5bf2a8
PA
3227
3228 /* Block until we get an event reported with SIGCHLD. */
9c3a5d93 3229 wait_for_signal ();
d6b0e80f
AC
3230 }
3231
d6b0e80f
AC
3232 gdb_assert (lp);
3233
ca2163eb
PA
3234 status = lp->status;
3235 lp->status = 0;
3236
fbea99ea 3237 if (!target_is_non_stop_p ())
4c28f408
PA
3238 {
3239 /* Now stop all other LWP's ... */
d3a70e03 3240 iterate_over_lwps (minus_one_ptid, stop_callback);
4c28f408
PA
3241
3242 /* ... and wait until all of them have reported back that
3243 they're no longer running. */
d3a70e03 3244 iterate_over_lwps (minus_one_ptid, stop_wait_callback);
9c02b525
PA
3245 }
3246
3247 /* If we're not waiting for a specific LWP, choose an event LWP from
3248 among those that have had events. Giving equal priority to all
3249 LWPs that have had events helps prevent starvation. */
d7e15655 3250 if (ptid == minus_one_ptid || ptid.is_pid ())
9c02b525
PA
3251 select_event_lwp (ptid, &lp, &status);
3252
3253 gdb_assert (lp != NULL);
3254
3255 /* Now that we've selected our final event LWP, un-adjust its PC if
faf09f01
PA
3256 it was a software breakpoint, and we can't reliably support the
3257 "stopped by software breakpoint" stop reason. */
3258 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3259 && !USE_SIGTRAP_SIGINFO)
9c02b525 3260 {
5b6d1e4f 3261 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
ac7936df 3262 struct gdbarch *gdbarch = regcache->arch ();
527a273a 3263 int decr_pc = gdbarch_decr_pc_after_break (gdbarch);
4c28f408 3264
9c02b525
PA
3265 if (decr_pc != 0)
3266 {
3267 CORE_ADDR pc;
d6b0e80f 3268
9c02b525
PA
3269 pc = regcache_read_pc (regcache);
3270 regcache_write_pc (regcache, pc + decr_pc);
3271 }
3272 }
e3e9f5a2 3273
9c02b525
PA
3274 /* We'll need this to determine whether to report a SIGSTOP as
3275 GDB_SIGNAL_0. Need to take a copy because resume_clear_callback
3276 clears it. */
3277 last_resume_kind = lp->last_resume_kind;
4b60df3d 3278
fbea99ea 3279 if (!target_is_non_stop_p ())
9c02b525 3280 {
e3e9f5a2
PA
3281 /* In all-stop, from the core's perspective, all LWPs are now
3282 stopped until a new resume action is sent over. */
d3a70e03 3283 iterate_over_lwps (minus_one_ptid, resume_clear_callback);
e3e9f5a2
PA
3284 }
3285 else
25289eb2 3286 {
d3a70e03 3287 resume_clear_callback (lp);
25289eb2 3288 }
d6b0e80f 3289
135340af 3290 if (linux_target->low_status_is_event (status))
d6b0e80f 3291 {
9327494e 3292 linux_nat_debug_printf ("trap ptid is %s.",
e53c95d4 3293 lp->ptid.to_string ().c_str ());
d6b0e80f 3294 }
d6b0e80f 3295
183be222 3296 if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
d6b0e80f
AC
3297 {
3298 *ourstatus = lp->waitstatus;
183be222 3299 lp->waitstatus.set_ignore ();
d6b0e80f
AC
3300 }
3301 else
7509b829 3302 *ourstatus = host_status_to_waitstatus (status);
d6b0e80f 3303
9327494e 3304 linux_nat_debug_printf ("exit");
b84876c2 3305
7feb7d06 3306 restore_child_signals_mask (&prev_mask);
1e225492 3307
4b60df3d 3308 if (last_resume_kind == resume_stop
183be222 3309 && ourstatus->kind () == TARGET_WAITKIND_STOPPED
25289eb2
PA
3310 && WSTOPSIG (status) == SIGSTOP)
3311 {
3312 /* A thread that has been requested to stop by GDB with
3313 target_stop, and it stopped cleanly, so report as SIG0. The
3314 use of SIGSTOP is an implementation detail. */
183be222 3315 ourstatus->set_stopped (GDB_SIGNAL_0);
25289eb2
PA
3316 }
3317
183be222
SM
3318 if (ourstatus->kind () == TARGET_WAITKIND_EXITED
3319 || ourstatus->kind () == TARGET_WAITKIND_SIGNALLED)
1e225492
JK
3320 lp->core = -1;
3321 else
2e794194 3322 lp->core = linux_common_core_of_thread (lp->ptid);
1e225492 3323
183be222 3324 if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
aa01bd36
PA
3325 return filter_exit_event (lp, ourstatus);
3326
f973ed9c 3327 return lp->ptid;
d6b0e80f
AC
3328}
3329
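A stripped-down, standalone sketch (not GDB code; the bookkeeping helper here is an invented stand-in for linux_nat_filter_event and status_callback) of the shape described in the kernel/ptrace-quirks comment above: drain every pending child event with waitpid (-1, ..., __WALL | WNOHANG), and only block in sigsuspend once nothing interesting has been collected.

#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>

#ifndef __WALL
#define __WALL 0x40000000	/* Wait for any child, cloned or not.  */
#endif

/* Illustrative stand-in for the LWP bookkeeping done above.  */
static int interesting_events;

static void
record_event (pid_t pid, int status)
{
  (void) pid;
  (void) status;
  interesting_events++;
}

static void
drain_then_sleep (const sigset_t *suspend_mask)
{
  while (interesting_events == 0)
    {
      int status;
      pid_t pid;

      /* Pull every pending event out of the kernel without blocking;
	 a single SIGCHLD may stand for several stopped children.  */
      while ((pid = waitpid (-1, &status, __WALL | WNOHANG)) > 0)
	record_event (pid, status);

      if (interesting_events > 0)
	break;

      /* Nothing to report yet: atomically unblock SIGCHLD and sleep
	 until the handler runs, as linux_nat_wait_1 does via
	 wait_for_signal.  */
      sigsuspend (suspend_mask);
    }
}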
e3e9f5a2
PA
3330/* Resume LWPs that are currently stopped without any pending status
3331 to report, but are resumed from the core's perspective. */
3332
3333static int
d3a70e03 3334resume_stopped_resumed_lwps (struct lwp_info *lp, const ptid_t wait_ptid)
e3e9f5a2 3335{
4dd63d48
PA
3336 if (!lp->stopped)
3337 {
9327494e 3338 linux_nat_debug_printf ("NOT resuming LWP %s, not stopped",
e53c95d4 3339 lp->ptid.to_string ().c_str ());
4dd63d48
PA
3340 }
3341 else if (!lp->resumed)
3342 {
9327494e 3343 linux_nat_debug_printf ("NOT resuming LWP %s, not resumed",
e53c95d4 3344 lp->ptid.to_string ().c_str ());
4dd63d48
PA
3345 }
3346 else if (lwp_status_pending_p (lp))
3347 {
9327494e 3348 linux_nat_debug_printf ("NOT resuming LWP %s, has pending status",
e53c95d4 3349 lp->ptid.to_string ().c_str ());
4dd63d48
PA
3350 }
3351 else
e3e9f5a2 3352 {
5b6d1e4f 3353 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
ac7936df 3354 struct gdbarch *gdbarch = regcache->arch ();
336060f3 3355
a70b8144 3356 try
e3e9f5a2 3357 {
23f238d3
PA
3358 CORE_ADDR pc = regcache_read_pc (regcache);
3359 int leave_stopped = 0;
e3e9f5a2 3360
23f238d3
PA
3361 /* Don't bother if there's a breakpoint at PC that we'd hit
3362 immediately, and we're not waiting for this LWP. */
d3a70e03 3363 if (!lp->ptid.matches (wait_ptid))
23f238d3 3364 {
a01bda52 3365 if (breakpoint_inserted_here_p (regcache->aspace (), pc))
23f238d3
PA
3366 leave_stopped = 1;
3367 }
e3e9f5a2 3368
23f238d3
PA
3369 if (!leave_stopped)
3370 {
9327494e
SM
3371 linux_nat_debug_printf
3372 ("resuming stopped-resumed LWP %s at %s: step=%d",
e53c95d4 3373 lp->ptid.to_string ().c_str (), paddress (gdbarch, pc),
9327494e 3374 lp->step);
23f238d3
PA
3375
3376 linux_resume_one_lwp_throw (lp, lp->step, GDB_SIGNAL_0);
3377 }
3378 }
230d2906 3379 catch (const gdb_exception_error &ex)
23f238d3
PA
3380 {
3381 if (!check_ptrace_stopped_lwp_gone (lp))
eedc3f4f 3382 throw;
23f238d3 3383 }
e3e9f5a2
PA
3384 }
3385
3386 return 0;
3387}
3388
f6ac5f3d
PA
3389ptid_t
3390linux_nat_target::wait (ptid_t ptid, struct target_waitstatus *ourstatus,
b60cea74 3391 target_wait_flags target_options)
7feb7d06
PA
3392{
3393 ptid_t event_ptid;
3394
e53c95d4 3395 linux_nat_debug_printf ("[%s], [%s]", ptid.to_string ().c_str (),
9327494e 3396 target_options_to_string (target_options).c_str ());
7feb7d06
PA
3397
3398 /* Flush the async file first. */
d9d41e78 3399 if (target_is_async_p ())
7feb7d06
PA
3400 async_file_flush ();
3401
e3e9f5a2
PA
3402 /* Resume LWPs that are currently stopped without any pending status
3403 to report, but are resumed from the core's perspective. LWPs get
3404 in this state if we find them stopping at a time we're not
3405 interested in reporting the event (target_wait on a
3406	 specific process, for example, see linux_nat_wait_1), and
3407 meanwhile the event became uninteresting. Don't bother resuming
3408 LWPs we're not going to wait for if they'd stop immediately. */
fbea99ea 3409 if (target_is_non_stop_p ())
d3a70e03
TT
3410 iterate_over_lwps (minus_one_ptid,
3411 [=] (struct lwp_info *info)
3412 {
3413 return resume_stopped_resumed_lwps (info, ptid);
3414 });
e3e9f5a2 3415
f6ac5f3d 3416 event_ptid = linux_nat_wait_1 (ptid, ourstatus, target_options);
7feb7d06
PA
3417
3418 /* If we requested any event, and something came out, assume there
3419 may be more. If we requested a specific lwp or process, also
3420 assume there may be more. */
d9d41e78 3421 if (target_is_async_p ()
183be222
SM
3422 && ((ourstatus->kind () != TARGET_WAITKIND_IGNORE
3423 && ourstatus->kind () != TARGET_WAITKIND_NO_RESUMED)
d7e15655 3424 || ptid != minus_one_ptid))
7feb7d06
PA
3425 async_file_mark ();
3426
7feb7d06
PA
3427 return event_ptid;
3428}
3429
1d2736d4
PA
3430/* Kill one LWP. */
3431
3432static void
3433kill_one_lwp (pid_t pid)
d6b0e80f 3434{
ed731959
JK
3435 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3436
3437 errno = 0;
1d2736d4 3438 kill_lwp (pid, SIGKILL);
9327494e 3439
ed731959 3440 if (debug_linux_nat)
57745c90
PA
3441 {
3442 int save_errno = errno;
3443
9327494e
SM
3444 linux_nat_debug_printf
3445 ("kill (SIGKILL) %ld, 0, 0 (%s)", (long) pid,
3446 save_errno != 0 ? safe_strerror (save_errno) : "OK");
57745c90 3447 }
ed731959
JK
3448
3449 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3450
d6b0e80f 3451 errno = 0;
1d2736d4 3452 ptrace (PTRACE_KILL, pid, 0, 0);
d6b0e80f 3453 if (debug_linux_nat)
57745c90
PA
3454 {
3455 int save_errno = errno;
3456
9327494e
SM
3457 linux_nat_debug_printf
3458 ("PTRACE_KILL %ld, 0, 0 (%s)", (long) pid,
3459 save_errno ? safe_strerror (save_errno) : "OK");
57745c90 3460 }
d6b0e80f
AC
3461}
3462
1d2736d4
PA
3463/* Wait for an LWP to die. */
3464
3465static void
3466kill_wait_one_lwp (pid_t pid)
d6b0e80f 3467{
1d2736d4 3468 pid_t res;
d6b0e80f
AC
3469
3470 /* We must make sure that there are no pending events (delayed
3471	 SIGSTOPs, pending SIGTRAPs, etc.), so that the current
3472	 program doesn't interfere with any following debugging session. */
3473
d6b0e80f
AC
3474 do
3475 {
1d2736d4
PA
3476 res = my_waitpid (pid, NULL, __WALL);
3477 if (res != (pid_t) -1)
d6b0e80f 3478 {
9327494e
SM
3479 linux_nat_debug_printf ("wait %ld received unknown.", (long) pid);
3480
4a6ed09b
PA
3481 /* The Linux kernel sometimes fails to kill a thread
3482 completely after PTRACE_KILL; that goes from the stop
3483 point in do_fork out to the one in get_signal_to_deliver
3484 and waits again. So kill it again. */
1d2736d4 3485 kill_one_lwp (pid);
d6b0e80f
AC
3486 }
3487 }
1d2736d4
PA
3488 while (res == pid);
3489
3490 gdb_assert (res == -1 && errno == ECHILD);
3491}
3492
3493/* Callback for iterate_over_lwps. */
d6b0e80f 3494
1d2736d4 3495static int
d3a70e03 3496kill_callback (struct lwp_info *lp)
1d2736d4 3497{
e38504b3 3498 kill_one_lwp (lp->ptid.lwp ());
d6b0e80f
AC
3499 return 0;
3500}
3501
1d2736d4
PA
3502/* Callback for iterate_over_lwps. */
3503
3504static int
d3a70e03 3505kill_wait_callback (struct lwp_info *lp)
1d2736d4 3506{
e38504b3 3507 kill_wait_one_lwp (lp->ptid.lwp ());
1d2736d4
PA
3508 return 0;
3509}
3510
3511/* Kill the fork children of any threads of inferior INF that are
3512 stopped at a fork event. */
3513
3514static void
3515kill_unfollowed_fork_children (struct inferior *inf)
3516{
08036331
PA
3517 for (thread_info *thread : inf->non_exited_threads ())
3518 {
3519 struct target_waitstatus *ws = &thread->pending_follow;
1d2736d4 3520
183be222
SM
3521 if (ws->kind () == TARGET_WAITKIND_FORKED
3522 || ws->kind () == TARGET_WAITKIND_VFORKED)
08036331 3523 {
183be222 3524 ptid_t child_ptid = ws->child_ptid ();
08036331
PA
3525 int child_pid = child_ptid.pid ();
3526 int child_lwp = child_ptid.lwp ();
3527
3528 kill_one_lwp (child_lwp);
3529 kill_wait_one_lwp (child_lwp);
3530
3531 /* Let the arch-specific native code know this process is
3532 gone. */
3533 linux_target->low_forget_process (child_pid);
3534 }
3535 }
1d2736d4
PA
3536}
3537
f6ac5f3d
PA
3538void
3539linux_nat_target::kill ()
d6b0e80f 3540{
f973ed9c
DJ
3541 /* If we're stopped while forking and we haven't followed yet,
3542 kill the other task. We need to do this first because the
3543 parent will be sleeping if this is a vfork. */
1d2736d4 3544 kill_unfollowed_fork_children (current_inferior ());
f973ed9c
DJ
3545
3546 if (forks_exist_p ())
7feb7d06 3547 linux_fork_killall ();
f973ed9c
DJ
3548 else
3549 {
e99b03dc 3550 ptid_t ptid = ptid_t (inferior_ptid.pid ());
e0881a8e 3551
4c28f408 3552 /* Stop all threads before killing them, since ptrace requires
30baf67b 3553 that the thread is stopped to successfully PTRACE_KILL. */
d3a70e03 3554 iterate_over_lwps (ptid, stop_callback);
4c28f408
PA
3555 /* ... and wait until all of them have reported back that
3556 they're no longer running. */
d3a70e03 3557 iterate_over_lwps (ptid, stop_wait_callback);
4c28f408 3558
f973ed9c 3559 /* Kill all LWP's ... */
d3a70e03 3560 iterate_over_lwps (ptid, kill_callback);
f973ed9c
DJ
3561
3562 /* ... and wait until we've flushed all events. */
d3a70e03 3563 iterate_over_lwps (ptid, kill_wait_callback);
f973ed9c
DJ
3564 }
3565
bc1e6c81 3566 target_mourn_inferior (inferior_ptid);
d6b0e80f
AC
3567}
3568
f6ac5f3d
PA
3569void
3570linux_nat_target::mourn_inferior ()
d6b0e80f 3571{
e99b03dc 3572 int pid = inferior_ptid.pid ();
26cb8b7c
PA
3573
3574 purge_lwp_list (pid);
d6b0e80f 3575
8a89ddbd 3576 close_proc_mem_file (pid);
05c06f31 3577
f973ed9c 3578 if (! forks_exist_p ())
d90e17a7 3579 /* Normal case, no other forks available. */
f6ac5f3d 3580 inf_ptrace_target::mourn_inferior ();
f973ed9c
DJ
3581 else
3582 /* Multi-fork case. The current inferior_ptid has exited, but
3583 there are other viable forks to debug. Delete the exiting
3584 one and context-switch to the first available. */
3585 linux_fork_mourn_inferior ();
26cb8b7c
PA
3586
3587 /* Let the arch-specific native code know this process is gone. */
135340af 3588 linux_target->low_forget_process (pid);
d6b0e80f
AC
3589}
3590
5b009018
PA
3591/* Convert a native/host siginfo object, into/from the siginfo in the
3592 layout of the inferiors' architecture. */
3593
3594static void
a5362b9a 3595siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
5b009018 3596{
135340af
PA
3597 /* If the low target didn't do anything, then just do a straight
3598 memcpy. */
3599 if (!linux_target->low_siginfo_fixup (siginfo, inf_siginfo, direction))
5b009018
PA
3600 {
3601 if (direction == 1)
a5362b9a 3602 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5b009018 3603 else
a5362b9a 3604 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5b009018
PA
3605 }
3606}
3607
9b409511 3608static enum target_xfer_status
f6ac5f3d 3609linux_xfer_siginfo (enum target_object object,
dda83cd7 3610 const char *annex, gdb_byte *readbuf,
9b409511
YQ
3611 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3612 ULONGEST *xfered_len)
4aa995e1 3613{
a5362b9a
TS
3614 siginfo_t siginfo;
3615 gdb_byte inf_siginfo[sizeof (siginfo_t)];
4aa995e1
PA
3616
3617 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
3618 gdb_assert (readbuf || writebuf);
3619
4aa995e1 3620 if (offset > sizeof (siginfo))
2ed4b548 3621 return TARGET_XFER_E_IO;
4aa995e1 3622
d29ea328 3623 if (!linux_nat_get_siginfo (inferior_ptid, &siginfo))
2ed4b548 3624 return TARGET_XFER_E_IO;
4aa995e1 3625
5b009018
PA
3626 /* When GDB is built as a 64-bit application, ptrace writes into
3627 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3628 inferior with a 64-bit GDB should look the same as debugging it
3629 with a 32-bit GDB, we need to convert it. GDB core always sees
3630 the converted layout, so any read/write will have to be done
3631 post-conversion. */
3632 siginfo_fixup (&siginfo, inf_siginfo, 0);
3633
4aa995e1
PA
3634 if (offset + len > sizeof (siginfo))
3635 len = sizeof (siginfo) - offset;
3636
3637 if (readbuf != NULL)
5b009018 3638 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
3639 else
3640 {
5b009018
PA
3641 memcpy (inf_siginfo + offset, writebuf, len);
3642
3643 /* Convert back to ptrace layout before flushing it out. */
3644 siginfo_fixup (&siginfo, inf_siginfo, 1);
3645
d29ea328 3646 int pid = get_ptrace_pid (inferior_ptid);
4aa995e1
PA
3647 errno = 0;
3648 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3649 if (errno != 0)
2ed4b548 3650 return TARGET_XFER_E_IO;
4aa995e1
PA
3651 }
3652
9b409511
YQ
3653 *xfered_len = len;
3654 return TARGET_XFER_OK;
4aa995e1
PA
3655}
3656
9b409511 3657static enum target_xfer_status
f6ac5f3d
PA
3658linux_nat_xfer_osdata (enum target_object object,
3659 const char *annex, gdb_byte *readbuf,
3660 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3661 ULONGEST *xfered_len);
3662
f6ac5f3d 3663static enum target_xfer_status
f9f593dd
SM
3664linux_proc_xfer_memory_partial (int pid, gdb_byte *readbuf,
3665 const gdb_byte *writebuf, ULONGEST offset,
3666 LONGEST len, ULONGEST *xfered_len);
f6ac5f3d
PA
3667
3668enum target_xfer_status
3669linux_nat_target::xfer_partial (enum target_object object,
3670 const char *annex, gdb_byte *readbuf,
3671 const gdb_byte *writebuf,
3672 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
d6b0e80f 3673{
4aa995e1 3674 if (object == TARGET_OBJECT_SIGNAL_INFO)
f6ac5f3d 3675 return linux_xfer_siginfo (object, annex, readbuf, writebuf,
9b409511 3676 offset, len, xfered_len);
4aa995e1 3677
c35b1492
PA
3678 /* The target is connected but no live inferior is selected. Pass
3679 this request down to a lower stratum (e.g., the executable
3680 file). */
d7e15655 3681 if (object == TARGET_OBJECT_MEMORY && inferior_ptid == null_ptid)
9b409511 3682 return TARGET_XFER_EOF;
c35b1492 3683
f6ac5f3d
PA
3684 if (object == TARGET_OBJECT_AUXV)
3685 return memory_xfer_auxv (this, object, annex, readbuf, writebuf,
3686 offset, len, xfered_len);
3687
3688 if (object == TARGET_OBJECT_OSDATA)
3689 return linux_nat_xfer_osdata (object, annex, readbuf, writebuf,
3690 offset, len, xfered_len);
d6b0e80f 3691
f6ac5f3d
PA
3692 if (object == TARGET_OBJECT_MEMORY)
3693 {
05c06f31
PA
3694 /* GDB calculates all addresses in the largest possible address
3695 width. The address width must be masked before its final use
3696 by linux_proc_xfer_partial.
3697
3698 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
f6ac5f3d
PA
3699 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
3700
3701 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
3702 offset &= ((ULONGEST) 1 << addr_bit) - 1;
f6ac5f3d 3703
dd09fe0d
KS
3704 /* If /proc/pid/mem is writable, don't fallback to ptrace. If
3705 the write via /proc/pid/mem fails because the inferior execed
3706 (and we haven't seen the exec event yet), a subsequent ptrace
3707 poke would incorrectly write memory to the post-exec address
3708 space, while the core was trying to write to the pre-exec
3709 address space. */
3710 if (proc_mem_file_is_writable ())
f9f593dd
SM
3711 return linux_proc_xfer_memory_partial (inferior_ptid.pid (), readbuf,
3712 writebuf, offset, len,
3713 xfered_len);
05c06f31 3714 }
f6ac5f3d
PA
3715
3716 return inf_ptrace_target::xfer_partial (object, annex, readbuf, writebuf,
3717 offset, len, xfered_len);
d6b0e80f
AC
3718}
3719
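A quick standalone illustration (not GDB code) of the address masking done above for TARGET_OBJECT_MEMORY: an address that arrives sign-extended into 64-bit arithmetic is reduced to the inferior's address width, and the width is compared first so the shift never reaches the full word size.

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  /* A 32-bit address sign-extended into a 64-bit value.  */
  uint64_t offset = 0xffffffff80001000ULL;
  int addr_bit = 32;	/* What gdbarch_addr_bit reports for a 32-bit inferior.  */

  /* Compare first so we never shift by the full width (undefined).  */
  if (addr_bit < (int) (sizeof (uint64_t) * 8))
    offset &= ((uint64_t) 1 << addr_bit) - 1;

  assert (offset == 0x80001000ULL);
  return 0;
}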
57810aa7 3720bool
f6ac5f3d 3721linux_nat_target::thread_alive (ptid_t ptid)
28439f5e 3722{
4a6ed09b
PA
3723	 /* As long as a PTID is in the LWP list, consider it alive. */
3724 return find_lwp_pid (ptid) != NULL;
28439f5e
PA
3725}
3726
8a06aea7
PA
3727/* Implement the to_update_thread_list target method for this
3728 target. */
3729
f6ac5f3d
PA
3730void
3731linux_nat_target::update_thread_list ()
8a06aea7 3732{
4a6ed09b
PA
3733 /* We add/delete threads from the list as clone/exit events are
3734 processed, so just try deleting exited threads still in the
3735 thread list. */
3736 delete_exited_threads ();
a6904d5a
PA
3737
3738 /* Update the processor core that each lwp/thread was last seen
3739 running on. */
901b9821 3740 for (lwp_info *lwp : all_lwps ())
1ad3de98
PA
3741 {
3742	 /* Avoid accessing /proc if the thread hasn't run since the last
3743 time we fetched the thread's core. Accessing /proc becomes
3744 noticeably expensive when we have thousands of LWPs. */
3745 if (lwp->core == -1)
3746 lwp->core = linux_common_core_of_thread (lwp->ptid);
3747 }
8a06aea7
PA
3748}
3749
a068643d 3750std::string
f6ac5f3d 3751linux_nat_target::pid_to_str (ptid_t ptid)
d6b0e80f 3752{
15a9e13e 3753 if (ptid.lwp_p ()
e38504b3 3754 && (ptid.pid () != ptid.lwp ()
e99b03dc 3755 || num_lwps (ptid.pid ()) > 1))
a068643d 3756 return string_printf ("LWP %ld", ptid.lwp ());
d6b0e80f
AC
3757
3758 return normal_pid_to_str (ptid);
3759}
3760
f6ac5f3d
PA
3761const char *
3762linux_nat_target::thread_name (struct thread_info *thr)
4694da01 3763{
79efa585 3764 return linux_proc_tid_get_name (thr->ptid);
4694da01
TT
3765}
3766
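For context, thread names on Linux come from procfs (the per-thread comm entry, /proc/<pid>/task/<tid>/comm), which is presumably what linux_proc_tid_get_name consults; a minimal standalone sketch that prints the calling process's own name:

#include <stdio.h>

int
main (void)
{
  char name[64];
  /* For a single-threaded program, /proc/self/comm is the calling
     thread's comm entry.  */
  FILE *f = fopen ("/proc/self/comm", "r");

  if (f != NULL)
    {
      if (fgets (name, sizeof name, f) != NULL)
	printf ("name: %s", name);	/* Already newline-terminated.  */
      fclose (f);
    }
  return 0;
}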
dba24537
AC
3767/* Accepts an integer PID; Returns a string representing a file that
3768 can be opened to get the symbols for the child process. */
3769
0e90c441 3770const char *
f6ac5f3d 3771linux_nat_target::pid_to_exec_file (int pid)
dba24537 3772{
e0d86d2c 3773 return linux_proc_pid_to_exec_file (pid);
dba24537
AC
3774}
3775
8a89ddbd
PA
3776/* Object representing an open /proc/PID/mem file. We keep one such
3777 file open per inferior.
3778
3779 It might be tempting to think about only ever opening one file at
3780 most for all inferiors, closing/reopening the file as we access
3781 memory of different inferiors, to minimize number of file
3782 descriptors open, which can otherwise run into resource limits.
3783 However, that does not work correctly -- if the inferior execs and
3784	 we haven't processed the exec event yet, and we opened a
3785 /proc/PID/mem file, we will get a mem file accessing the post-exec
3786 address space, thinking we're opening it for the pre-exec address
3787 space. That is dangerous as we can poke memory (e.g. clearing
3788 breakpoints) in the post-exec memory by mistake, corrupting the
3789 inferior. For that reason, we open the mem file as early as
3790 possible, right after spawning, forking or attaching to the
3791 inferior, when the inferior is stopped and thus before it has a
3792 chance of execing.
3793
3794 Note that after opening the file, even if the thread we opened it
3795 for subsequently exits, the open file is still usable for accessing
3796 memory. It's only when the whole process exits or execs that the
3797 file becomes invalid, at which point reads/writes return EOF. */
3798
3799class proc_mem_file
3800{
3801public:
3802 proc_mem_file (ptid_t ptid, int fd)
3803 : m_ptid (ptid), m_fd (fd)
3804 {
3805 gdb_assert (m_fd != -1);
3806 }
05c06f31 3807
8a89ddbd 3808 ~proc_mem_file ()
05c06f31 3809 {
89662f69 3810 linux_nat_debug_printf ("closing fd %d for /proc/%d/task/%ld/mem",
8a89ddbd
PA
3811 m_fd, m_ptid.pid (), m_ptid.lwp ());
3812 close (m_fd);
05c06f31 3813 }
05c06f31 3814
8a89ddbd
PA
3815 DISABLE_COPY_AND_ASSIGN (proc_mem_file);
3816
3817 int fd ()
3818 {
3819 return m_fd;
3820 }
3821
3822private:
3823 /* The LWP this file was opened for. Just for debugging
3824 purposes. */
3825 ptid_t m_ptid;
3826
3827 /* The file descriptor. */
3828 int m_fd = -1;
3829};
3830
3831/* The map between an inferior process id, and the open /proc/PID/mem
3832 file. This is stored in a map instead of in a per-inferior
3833 structure because we need to be able to access memory of processes
3834 which don't have a corresponding struct inferior object. E.g.,
3835 with "detach-on-fork on" (the default), and "follow-fork parent"
3836 (also default), we don't create an inferior for the fork child, but
3837 we still need to remove breakpoints from the fork child's
3838 memory. */
3839static std::unordered_map<int, proc_mem_file> proc_mem_file_map;
3840
3841/* Close the /proc/PID/mem file for PID. */
05c06f31
PA
3842
3843static void
8a89ddbd 3844close_proc_mem_file (pid_t pid)
dba24537 3845{
8a89ddbd 3846 proc_mem_file_map.erase (pid);
05c06f31 3847}
dba24537 3848
8a89ddbd
PA
3849/* Open the /proc/PID/mem file for the process (thread group) of PTID.
3850 We actually open /proc/PID/task/LWP/mem, as that's the LWP we know
3851 exists and is stopped right now. We prefer the
3852 /proc/PID/task/LWP/mem form over /proc/LWP/mem to avoid tid-reuse
3853 races, just in case this is ever called on an already-waited
3854 LWP. */
dba24537 3855
8a89ddbd
PA
3856static void
3857open_proc_mem_file (ptid_t ptid)
05c06f31 3858{
8a89ddbd
PA
3859 auto iter = proc_mem_file_map.find (ptid.pid ());
3860 gdb_assert (iter == proc_mem_file_map.end ());
dba24537 3861
8a89ddbd
PA
3862 char filename[64];
3863 xsnprintf (filename, sizeof filename,
3864 "/proc/%d/task/%ld/mem", ptid.pid (), ptid.lwp ());
3865
3866 int fd = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();
05c06f31 3867
8a89ddbd
PA
3868 if (fd == -1)
3869 {
3870 warning (_("opening /proc/PID/mem file for lwp %d.%ld failed: %s (%d)"),
3871 ptid.pid (), ptid.lwp (),
3872 safe_strerror (errno), errno);
3873 return;
05c06f31
PA
3874 }
3875
8a89ddbd
PA
3876 proc_mem_file_map.emplace (std::piecewise_construct,
3877 std::forward_as_tuple (ptid.pid ()),
3878 std::forward_as_tuple (ptid, fd));
3879
9221923c 3880 linux_nat_debug_printf ("opened fd %d for lwp %d.%ld",
8a89ddbd
PA
3881 fd, ptid.pid (), ptid.lwp ());
3882}
3883
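Because proc_mem_file is non-copyable (DISABLE_COPY_AND_ASSIGN), the emplace call above has to construct the value in place; a small self-contained illustration of that std::piecewise_construct idiom, using a made-up mapped type:

#include <cassert>
#include <tuple>
#include <unordered_map>
#include <utility>

struct mapped_value
{
  mapped_value (int pid, int fd) : pid (pid), fd (fd) {}
  mapped_value (const mapped_value &) = delete;
  mapped_value &operator= (const mapped_value &) = delete;

  int pid;
  int fd;
};

int
main ()
{
  std::unordered_map<int, mapped_value> map;

  /* The key's and the mapped type's constructor arguments are forwarded
     as separate tuples, so no copy or move of mapped_value is needed.  */
  map.emplace (std::piecewise_construct,
	       std::forward_as_tuple (42),
	       std::forward_as_tuple (42, 7));

  assert (map.at (42).fd == 7);
  return 0;
}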
1bcb0708
PA
3884/* Helper for linux_proc_xfer_memory_partial and
3885 proc_mem_file_is_writable. FD is the already opened /proc/pid/mem
3886 file, and PID is the pid of the corresponding process. The rest of
3887 the arguments are like linux_proc_xfer_memory_partial's. */
8a89ddbd
PA
3888
3889static enum target_xfer_status
1bcb0708
PA
3890linux_proc_xfer_memory_partial_fd (int fd, int pid,
3891 gdb_byte *readbuf, const gdb_byte *writebuf,
3892 ULONGEST offset, LONGEST len,
3893 ULONGEST *xfered_len)
8a89ddbd
PA
3894{
3895 ssize_t ret;
3896
8a89ddbd 3897 gdb_assert (fd != -1);
dba24537 3898
a379284a
AA
3899 /* Use pread64/pwrite64 if available, since they save a syscall and can
3900 handle 64-bit offsets even on 32-bit platforms (for instance, SPARC
3901 debugging a SPARC64 application). */
dba24537 3902#ifdef HAVE_PREAD64
a379284a
AA
3903 ret = (readbuf ? pread64 (fd, readbuf, len, offset)
3904 : pwrite64 (fd, writebuf, len, offset));
dba24537 3905#else
a379284a
AA
3906 ret = lseek (fd, offset, SEEK_SET);
3907 if (ret != -1)
3908 ret = (readbuf ? read (fd, readbuf, len)
3909 : write (fd, writebuf, len));
dba24537 3910#endif
dba24537 3911
05c06f31
PA
3912 if (ret == -1)
3913 {
9221923c 3914 linux_nat_debug_printf ("accessing fd %d for pid %d failed: %s (%d)",
1bcb0708 3915 fd, pid, safe_strerror (errno), errno);
284b6bb5 3916 return TARGET_XFER_E_IO;
05c06f31
PA
3917 }
3918 else if (ret == 0)
3919 {
8a89ddbd
PA
3920 /* EOF means the address space is gone, the whole process exited
3921 or execed. */
9221923c 3922 linux_nat_debug_printf ("accessing fd %d for pid %d got EOF",
1bcb0708 3923 fd, pid);
05c06f31
PA
3924 return TARGET_XFER_EOF;
3925 }
9b409511
YQ
3926 else
3927 {
8a89ddbd 3928 *xfered_len = ret;
9b409511
YQ
3929 return TARGET_XFER_OK;
3930 }
05c06f31 3931}
efcbbd14 3932
1bcb0708
PA
3933/* Implement the to_xfer_partial target method using /proc/PID/mem.
3934 Because we can use a single read/write call, this can be much more
3935 efficient than banging away at PTRACE_PEEKTEXT. Also, unlike
3936 PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running
3937 threads. */
3938
3939static enum target_xfer_status
f9f593dd
SM
3940linux_proc_xfer_memory_partial (int pid, gdb_byte *readbuf,
3941 const gdb_byte *writebuf, ULONGEST offset,
3942 LONGEST len, ULONGEST *xfered_len)
1bcb0708 3943{
1bcb0708
PA
3944 auto iter = proc_mem_file_map.find (pid);
3945 if (iter == proc_mem_file_map.end ())
3946 return TARGET_XFER_EOF;
3947
3948 int fd = iter->second.fd ();
3949
3950 return linux_proc_xfer_memory_partial_fd (fd, pid, readbuf, writebuf, offset,
3951 len, xfered_len);
3952}
3953
3954/* Check whether /proc/pid/mem is writable in the current kernel, and
3955 return true if so. It wasn't writable before Linux 2.6.39, but
3956 there's no way to know whether the feature was backported to older
3957 kernels. So we check to see if it works. The result is cached,
3958	 and this is guaranteed to be called once early at startup. */
3959
3960static bool
3961proc_mem_file_is_writable ()
3962{
3963 static gdb::optional<bool> writable;
3964
3965 if (writable.has_value ())
3966 return *writable;
3967
3968 writable.emplace (false);
3969
3970 /* We check whether /proc/pid/mem is writable by trying to write to
3971 one of our variables via /proc/self/mem. */
3972
3973 int fd = gdb_open_cloexec ("/proc/self/mem", O_RDWR | O_LARGEFILE, 0).release ();
3974
3975 if (fd == -1)
3976 {
3977 warning (_("opening /proc/self/mem file failed: %s (%d)"),
3978 safe_strerror (errno), errno);
3979 return *writable;
3980 }
3981
3982 SCOPE_EXIT { close (fd); };
3983
3984 /* This is the variable we try to write to. Note OFFSET below. */
3985 volatile gdb_byte test_var = 0;
3986
3987 gdb_byte writebuf[] = {0x55};
3988 ULONGEST offset = (uintptr_t) &test_var;
3989 ULONGEST xfered_len;
3990
3991 enum target_xfer_status res
3992 = linux_proc_xfer_memory_partial_fd (fd, getpid (), nullptr, writebuf,
3993 offset, 1, &xfered_len);
3994
3995 if (res == TARGET_XFER_OK)
3996 {
3997 gdb_assert (xfered_len == 1);
3998 gdb_assert (test_var == 0x55);
3999 /* Success. */
4000 *writable = true;
4001 }
4002
4003 return *writable;
4004}
4005
dba24537
AC
4006/* Parse LINE as a signal set and add its set bits to SIGS. */
4007
4008static void
4009add_line_to_sigset (const char *line, sigset_t *sigs)
4010{
4011 int len = strlen (line) - 1;
4012 const char *p;
4013 int signum;
4014
4015 if (line[len] != '\n')
8a3fe4f8 4016 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4017
4018 p = line;
4019 signum = len * 4;
4020 while (len-- > 0)
4021 {
4022 int digit;
4023
4024 if (*p >= '0' && *p <= '9')
4025 digit = *p - '0';
4026 else if (*p >= 'a' && *p <= 'f')
4027 digit = *p - 'a' + 10;
4028 else
8a3fe4f8 4029 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4030
4031 signum -= 4;
4032
4033 if (digit & 1)
4034 sigaddset (sigs, signum + 1);
4035 if (digit & 2)
4036 sigaddset (sigs, signum + 2);
4037 if (digit & 4)
4038 sigaddset (sigs, signum + 3);
4039 if (digit & 8)
4040 sigaddset (sigs, signum + 4);
4041
4042 p++;
4043 }
4044}
4045
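As a concrete, standalone illustration (not GDB code) of the format parsed by add_line_to_sigset above and consumed by linux_proc_pending_signals below: each status field is a 64-bit hexadecimal mask in which bit N-1 stands for signal N, so a SigPnd value of 0000000000000002 means signal 2 (SIGINT) is pending.

#include <assert.h>
#include <signal.h>
#include <stdlib.h>

int
main (void)
{
  /* The hex field of a line such as "SigPnd:\t0000000000000002\n".  */
  unsigned long long mask = strtoull ("0000000000000002", NULL, 16);

  /* Bit N-1 set means signal N is a member; bit 1 is SIGINT (2).  */
  assert ((mask & (1ULL << (SIGINT - 1))) != 0);
  return 0;
}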
4046/* Find process PID's pending, blocked and ignored signals from
4047   /proc/pid/status and set PENDING, BLOCKED and IGNORED to match. */
4048
4049void
3e43a32a
MS
4050linux_proc_pending_signals (int pid, sigset_t *pending,
4051 sigset_t *blocked, sigset_t *ignored)
dba24537 4052{
d8d2a3ee 4053 char buffer[PATH_MAX], fname[PATH_MAX];
dba24537
AC
4054
4055 sigemptyset (pending);
4056 sigemptyset (blocked);
4057 sigemptyset (ignored);
cde33bf1 4058 xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
d419f42d 4059 gdb_file_up procfile = gdb_fopen_cloexec (fname, "r");
dba24537 4060 if (procfile == NULL)
8a3fe4f8 4061 error (_("Could not open %s"), fname);
dba24537 4062
d419f42d 4063 while (fgets (buffer, PATH_MAX, procfile.get ()) != NULL)
dba24537
AC
4064 {
4065 /* Normal queued signals are on the SigPnd line in the status
4066 file. However, 2.6 kernels also have a "shared" pending
4067 queue for delivering signals to a thread group, so check for
4068 a ShdPnd line also.
4069
4070 Unfortunately some Red Hat kernels include the shared pending
4071 queue but not the ShdPnd status field. */
4072
61012eef 4073 if (startswith (buffer, "SigPnd:\t"))
dba24537 4074 add_line_to_sigset (buffer + 8, pending);
61012eef 4075 else if (startswith (buffer, "ShdPnd:\t"))
dba24537 4076 add_line_to_sigset (buffer + 8, pending);
61012eef 4077 else if (startswith (buffer, "SigBlk:\t"))
dba24537 4078 add_line_to_sigset (buffer + 8, blocked);
61012eef 4079 else if (startswith (buffer, "SigIgn:\t"))
dba24537
AC
4080 add_line_to_sigset (buffer + 8, ignored);
4081 }
dba24537
AC
4082}
4083
9b409511 4084static enum target_xfer_status
f6ac5f3d 4085linux_nat_xfer_osdata (enum target_object object,
e0881a8e 4086 const char *annex, gdb_byte *readbuf,
9b409511
YQ
4087 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4088 ULONGEST *xfered_len)
07e059b5 4089{
07e059b5
VP
4090 gdb_assert (object == TARGET_OBJECT_OSDATA);
4091
9b409511
YQ
4092 *xfered_len = linux_common_xfer_osdata (annex, readbuf, offset, len);
4093 if (*xfered_len == 0)
4094 return TARGET_XFER_EOF;
4095 else
4096 return TARGET_XFER_OK;
07e059b5
VP
4097}
4098
f6ac5f3d
PA
4099std::vector<static_tracepoint_marker>
4100linux_nat_target::static_tracepoint_markers_by_strid (const char *strid)
5808517f
YQ
4101{
4102 char s[IPA_CMD_BUF_SIZE];
e99b03dc 4103 int pid = inferior_ptid.pid ();
5d9310c4 4104 std::vector<static_tracepoint_marker> markers;
256642e8 4105 const char *p = s;
184ea2f7 4106 ptid_t ptid = ptid_t (pid, 0);
5d9310c4 4107 static_tracepoint_marker marker;
5808517f
YQ
4108
4109 /* Pause all */
4110 target_stop (ptid);
4111
4112 memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
4113 s[sizeof ("qTfSTM")] = 0;
4114
42476b70 4115 agent_run_command (pid, s, strlen (s) + 1);
5808517f 4116
1db93f14
TT
4117 /* Unpause all. */
4118 SCOPE_EXIT { target_continue_no_signal (ptid); };
5808517f
YQ
4119
4120 while (*p++ == 'm')
4121 {
5808517f
YQ
4122 do
4123 {
5d9310c4 4124 parse_static_tracepoint_marker_definition (p, &p, &marker);
5808517f 4125
5d9310c4
SM
4126 if (strid == NULL || marker.str_id == strid)
4127 markers.push_back (std::move (marker));
5808517f
YQ
4128 }
4129 while (*p++ == ','); /* comma-separated list */
4130
4131 memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
4132 s[sizeof ("qTsSTM")] = 0;
42476b70 4133 agent_run_command (pid, s, strlen (s) + 1);
5808517f
YQ
4134 p = s;
4135 }
4136
5808517f
YQ
4137 return markers;
4138}
4139
b84876c2
PA
4140/* target_can_async_p implementation. */
4141
57810aa7 4142bool
f6ac5f3d 4143linux_nat_target::can_async_p ()
b84876c2 4144{
fce6cd34
AB
4145 /* This flag should be checked in the common target.c code. */
4146 gdb_assert (target_async_permitted);
4147
4148	 /* Otherwise, this target is always able to support async mode. */
4149 return true;
b84876c2
PA
4150}
4151
57810aa7 4152bool
f6ac5f3d 4153linux_nat_target::supports_non_stop ()
9908b566 4154{
f80c8ec4 4155 return true;
9908b566
VP
4156}
4157
fbea99ea
PA
4158/* to_always_non_stop_p implementation. */
4159
57810aa7 4160bool
f6ac5f3d 4161linux_nat_target::always_non_stop_p ()
fbea99ea 4162{
f80c8ec4 4163 return true;
fbea99ea
PA
4164}
4165
57810aa7 4166bool
f6ac5f3d 4167linux_nat_target::supports_multi_process ()
d90e17a7 4168{
aee91db3 4169 return true;
d90e17a7
PA
4170}
4171
57810aa7 4172bool
f6ac5f3d 4173linux_nat_target::supports_disable_randomization ()
03583c20 4174{
f80c8ec4 4175 return true;
03583c20
UW
4176}
4177
7feb7d06
PA
4178/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4179   it notifies the event loop so we notice when any child changes
4180   state; and it lets us use sigsuspend in linux_nat_wait_1 above to
4181   wait for the arrival of a SIGCHLD. */
4182
b84876c2 4183static void
7feb7d06 4184sigchld_handler (int signo)
b84876c2 4185{
7feb7d06
PA
4186 int old_errno = errno;
4187
01124a23 4188 if (debug_linux_nat)
da5bd37e 4189 gdb_stdlog->write_async_safe ("sigchld\n", sizeof ("sigchld\n") - 1);
7feb7d06 4190
b146ba14
JB
4191 if (signo == SIGCHLD)
4192 {
4193 /* Let the event loop know that there are events to handle. */
4194 linux_nat_target::async_file_mark_if_open ();
4195 }
7feb7d06
PA
4196
4197 errno = old_errno;
4198}
4199
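A minimal standalone sketch (the names are invented for the example, not GDB's API) of the pattern this handler relies on: the handler limits itself to an async-signal-safe write on a pipe whose read end the event loop polls, and it is installed with SA_RESTART just as _initialize_linux_nat does below.

#include <errno.h>
#include <signal.h>
#include <string.h>
#include <unistd.h>

/* event_pipe[0] is polled by the event loop; event_pipe[1] is written
   from the signal handler.  A real implementation would also mark both
   ends non-blocking and close-on-exec.  */
static int event_pipe[2];

static void
example_sigchld_handler (int signo)
{
  int saved_errno = errno;
  char byte = '+';

  (void) signo;
  /* write(2) is async-signal-safe; errors are deliberately ignored.  */
  (void) write (event_pipe[1], &byte, 1);
  errno = saved_errno;
}

static int
example_install (void)
{
  struct sigaction sa;

  if (pipe (event_pipe) != 0)
    return -1;

  memset (&sa, 0, sizeof sa);
  sa.sa_handler = example_sigchld_handler;
  sigemptyset (&sa.sa_mask);
  sa.sa_flags = SA_RESTART;
  return sigaction (SIGCHLD, &sa, NULL);
}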
4200/* Callback registered with the target events file descriptor. */
4201
4202static void
4203handle_target_event (int error, gdb_client_data client_data)
4204{
b1a35af2 4205 inferior_event_handler (INF_REG_EVENT);
7feb7d06
PA
4206}
4207
b84876c2
PA
4208/* target_async implementation. */
4209
f6ac5f3d 4210void
4a570176 4211linux_nat_target::async (bool enable)
b84876c2 4212{
4a570176 4213 if (enable == is_async_p ())
b146ba14
JB
4214 return;
4215
4216 /* Block child signals while we create/destroy the pipe, as their
4217 handler writes to it. */
4218 gdb::block_signals blocker;
4219
6a3753b3 4220 if (enable)
b84876c2 4221 {
b146ba14 4222 if (!async_file_open ())
f34652de 4223 internal_error ("creating event pipe failed.");
b146ba14
JB
4224
4225 add_file_handler (async_wait_fd (), handle_target_event, NULL,
4226 "linux-nat");
4227
4228 /* There may be pending events to handle. Tell the event loop
4229 to poll them. */
4230 async_file_mark ();
b84876c2
PA
4231 }
4232 else
4233 {
b146ba14
JB
4234 delete_file_handler (async_wait_fd ());
4235 async_file_close ();
b84876c2 4236 }
b84876c2
PA
4237}
4238
a493e3e2 4239/* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
252fbfc8
PA
4240 event came out. */
4241
4c28f408 4242static int
d3a70e03 4243linux_nat_stop_lwp (struct lwp_info *lwp)
4c28f408 4244{
d90e17a7 4245 if (!lwp->stopped)
252fbfc8 4246 {
9327494e 4247 linux_nat_debug_printf ("running -> suspending %s",
e53c95d4 4248 lwp->ptid.to_string ().c_str ());
252fbfc8 4249
252fbfc8 4250
25289eb2
PA
4251 if (lwp->last_resume_kind == resume_stop)
4252 {
9327494e
SM
4253 linux_nat_debug_printf ("already stopping LWP %ld at GDB's request",
4254 lwp->ptid.lwp ());
25289eb2
PA
4255 return 0;
4256 }
252fbfc8 4257
d3a70e03 4258 stop_callback (lwp);
25289eb2 4259 lwp->last_resume_kind = resume_stop;
d90e17a7
PA
4260 }
4261 else
4262 {
4263 /* Already known to be stopped; do nothing. */
252fbfc8 4264
d90e17a7
PA
4265 if (debug_linux_nat)
4266 {
5b6d1e4f 4267 if (find_thread_ptid (linux_target, lwp->ptid)->stop_requested)
9327494e 4268 linux_nat_debug_printf ("already stopped/stop_requested %s",
e53c95d4 4269 lwp->ptid.to_string ().c_str ());
d90e17a7 4270 else
9327494e 4271 linux_nat_debug_printf ("already stopped/no stop_requested yet %s",
e53c95d4 4272 lwp->ptid.to_string ().c_str ());
252fbfc8
PA
4273 }
4274 }
4c28f408
PA
4275 return 0;
4276}
4277
f6ac5f3d
PA
4278void
4279linux_nat_target::stop (ptid_t ptid)
4c28f408 4280{
b6e52a0b 4281 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
d3a70e03 4282 iterate_over_lwps (ptid, linux_nat_stop_lwp);
bfedc46a
PA
4283}
4284
c0694254
PA
4285/* When requests are passed down from the linux-nat layer to the
4286 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
4287 used. The address space pointer is stored in the inferior object,
4288 but the common code that is passed such ptid can't tell whether
4289 lwpid is a "main" process id or not (it assumes so). We reverse
4290 look up the "main" process id from the lwp here. */
4291
f6ac5f3d
PA
4292struct address_space *
4293linux_nat_target::thread_address_space (ptid_t ptid)
c0694254
PA
4294{
4295 struct lwp_info *lwp;
4296 struct inferior *inf;
4297 int pid;
4298
e38504b3 4299 if (ptid.lwp () == 0)
c0694254
PA
4300 {
4301 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
4302 tgid. */
4303 lwp = find_lwp_pid (ptid);
e99b03dc 4304 pid = lwp->ptid.pid ();
c0694254
PA
4305 }
4306 else
4307 {
4308 /* A (pid,lwpid,0) ptid. */
e99b03dc 4309 pid = ptid.pid ();
c0694254
PA
4310 }
4311
5b6d1e4f 4312 inf = find_inferior_pid (this, pid);
c0694254
PA
4313 gdb_assert (inf != NULL);
4314 return inf->aspace;
4315}
4316
dc146f7c
VP
4317/* Return the cached value of the processor core for thread PTID. */
4318
f6ac5f3d
PA
4319int
4320linux_nat_target::core_of_thread (ptid_t ptid)
dc146f7c
VP
4321{
4322 struct lwp_info *info = find_lwp_pid (ptid);
e0881a8e 4323
dc146f7c
VP
4324 if (info)
4325 return info->core;
4326 return -1;
4327}
4328
7a6a1731
GB
4329/* Implementation of to_filesystem_is_local. */
4330
57810aa7 4331bool
f6ac5f3d 4332linux_nat_target::filesystem_is_local ()
7a6a1731
GB
4333{
4334 struct inferior *inf = current_inferior ();
4335
4336 if (inf->fake_pid_p || inf->pid == 0)
57810aa7 4337 return true;
7a6a1731
GB
4338
4339 return linux_ns_same (inf->pid, LINUX_NS_MNT);
4340}
4341
4342/* Convert the INF argument passed to a to_fileio_* method
4343 to a process ID suitable for passing to its corresponding
4344 linux_mntns_* function. If INF is non-NULL then the
4345 caller is requesting the filesystem seen by INF. If INF
4346 is NULL then the caller is requesting the filesystem seen
4347 by the GDB. We fall back to GDB's filesystem in the case
4348 that INF is non-NULL but its PID is unknown. */
4349
4350static pid_t
4351linux_nat_fileio_pid_of (struct inferior *inf)
4352{
4353 if (inf == NULL || inf->fake_pid_p || inf->pid == 0)
4354 return getpid ();
4355 else
4356 return inf->pid;
4357}
4358
4359/* Implementation of to_fileio_open. */
4360
f6ac5f3d
PA
4361int
4362linux_nat_target::fileio_open (struct inferior *inf, const char *filename,
4363 int flags, int mode, int warn_if_slow,
b872057a 4364 fileio_error *target_errno)
7a6a1731
GB
4365{
4366 int nat_flags;
4367 mode_t nat_mode;
4368 int fd;
4369
4370 if (fileio_to_host_openflags (flags, &nat_flags) == -1
4371 || fileio_to_host_mode (mode, &nat_mode) == -1)
4372 {
4373 *target_errno = FILEIO_EINVAL;
4374 return -1;
4375 }
4376
4377 fd = linux_mntns_open_cloexec (linux_nat_fileio_pid_of (inf),
4378 filename, nat_flags, nat_mode);
4379 if (fd == -1)
4380 *target_errno = host_to_fileio_error (errno);
4381
4382 return fd;
4383}
4384
4385/* Implementation of to_fileio_readlink. */
4386
f6ac5f3d
PA
4387gdb::optional<std::string>
4388linux_nat_target::fileio_readlink (struct inferior *inf, const char *filename,
b872057a 4389 fileio_error *target_errno)
7a6a1731
GB
4390{
4391 char buf[PATH_MAX];
4392 int len;
7a6a1731
GB
4393
4394 len = linux_mntns_readlink (linux_nat_fileio_pid_of (inf),
4395 filename, buf, sizeof (buf));
4396 if (len < 0)
4397 {
4398 *target_errno = host_to_fileio_error (errno);
e0d3522b 4399 return {};
7a6a1731
GB
4400 }
4401
e0d3522b 4402 return std::string (buf, len);
7a6a1731
GB
4403}
4404
4405/* Implementation of to_fileio_unlink. */
4406
f6ac5f3d
PA
4407int
4408linux_nat_target::fileio_unlink (struct inferior *inf, const char *filename,
b872057a 4409 fileio_error *target_errno)
7a6a1731
GB
4410{
4411 int ret;
4412
4413 ret = linux_mntns_unlink (linux_nat_fileio_pid_of (inf),
4414 filename);
4415 if (ret == -1)
4416 *target_errno = host_to_fileio_error (errno);
4417
4418 return ret;
4419}
4420
aa01bd36
PA
4421/* Implementation of the to_thread_events method. */
4422
f6ac5f3d
PA
4423void
4424linux_nat_target::thread_events (int enable)
aa01bd36
PA
4425{
4426 report_thread_events = enable;
4427}
4428
f6ac5f3d
PA
4429linux_nat_target::linux_nat_target ()
4430{
f973ed9c
DJ
4431 /* We don't change the stratum; this target will sit at
4432 process_stratum and thread_db will set at thread_stratum. This
4433 is a little strange, since this is a multi-threaded-capable
4434 target, but we want to be on the stack below thread_db, and we
4435 also want to be used for single-threaded processes. */
f973ed9c
DJ
4436}
4437
f865ee35
JK
4438/* See linux-nat.h. */
4439
ef632b4b 4440bool
f865ee35 4441linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
9f0bdab8 4442{
0acd1110 4443 int pid = get_ptrace_pid (ptid);
7cc662bc 4444 return ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo) == 0;
9f0bdab8
DJ
4445}
4446
7b669087
GB
4447/* See nat/linux-nat.h. */
4448
4449ptid_t
4450current_lwp_ptid (void)
4451{
15a9e13e 4452 gdb_assert (inferior_ptid.lwp_p ());
7b669087
GB
4453 return inferior_ptid;
4454}
4455
6c265988 4456void _initialize_linux_nat ();
d6b0e80f 4457void
6c265988 4458_initialize_linux_nat ()
d6b0e80f 4459{
8864ef42 4460 add_setshow_boolean_cmd ("linux-nat", class_maintenance,
b6e52a0b
AB
4461 &debug_linux_nat, _("\
4462Set debugging of GNU/Linux native target."), _(" \
4463Show debugging of GNU/Linux native target."), _(" \
4464When on, print debug messages relating to the GNU/Linux native target."),
4465 nullptr,
4466 show_debug_linux_nat,
4467 &setdebuglist, &showdebuglist);
b84876c2 4468
7a6a1731
GB
4469 add_setshow_boolean_cmd ("linux-namespaces", class_maintenance,
4470 &debug_linux_namespaces, _("\
4471Set debugging of GNU/Linux namespaces module."), _("\
4472Show debugging of GNU/Linux namespaces module."), _("\
4473Enables printf debugging output."),
4474 NULL,
4475 NULL,
4476 &setdebuglist, &showdebuglist);
4477
7feb7d06
PA
4478 /* Install a SIGCHLD handler. */
4479 sigchld_action.sa_handler = sigchld_handler;
4480 sigemptyset (&sigchld_action.sa_mask);
4481 sigchld_action.sa_flags = SA_RESTART;
b84876c2
PA
4482
4483 /* Make it the default. */
7feb7d06 4484 sigaction (SIGCHLD, &sigchld_action, NULL);
d6b0e80f
AC
4485
4486 /* Make sure we don't block SIGCHLD during a sigsuspend. */
21987b9c 4487 gdb_sigmask (SIG_SETMASK, NULL, &suspend_mask);
d6b0e80f
AC
4488 sigdelset (&suspend_mask, SIGCHLD);
4489
7feb7d06 4490 sigemptyset (&blocked_mask);
774113b0
PA
4491
4492 lwp_lwpid_htab_create ();
1bcb0708
PA
4493
4494 proc_mem_file_is_writable ();
d6b0e80f
AC
4495}
4496\f
4497
4498/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4499 the GNU/Linux Threads library and therefore doesn't really belong
4500 here. */
4501
089436f7
TV
4502/* NPTL reserves the first two RT signals, but does not provide any
4503 way for the debugger to query the signal numbers - fortunately
4504 they don't change. */
4505static int lin_thread_signals[] = { __SIGRTMIN, __SIGRTMIN + 1 };
d6b0e80f 4506
089436f7
TV
4507/* See linux-nat.h. */
4508
4509unsigned int
4510lin_thread_get_thread_signal_num (void)
d6b0e80f 4511{
089436f7
TV
4512 return sizeof (lin_thread_signals) / sizeof (lin_thread_signals[0]);
4513}
d6b0e80f 4514
089436f7
TV
4515/* See linux-nat.h. */
4516
4517int
4518lin_thread_get_thread_signal (unsigned int i)
4519{
4520 gdb_assert (i < lin_thread_get_thread_signal_num ());
4521 return lin_thread_signals[i];
d6b0e80f 4522}