]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/linux-nat.c
Unify gdb printf functions
[thirdparty/binutils-gdb.git] / gdb / linux-nat.c
CommitLineData
3993f6b1 1/* GNU/Linux native-dependent code common to multiple platforms.
dba24537 2
4a94e368 3 Copyright (C) 2001-2022 Free Software Foundation, Inc.
3993f6b1
DJ
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
a9762ec7 9 the Free Software Foundation; either version 3 of the License, or
3993f6b1
DJ
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
a9762ec7 18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
3993f6b1
DJ
19
20#include "defs.h"
21#include "inferior.h"
45741a9c 22#include "infrun.h"
3993f6b1 23#include "target.h"
96d7229d
LM
24#include "nat/linux-nat.h"
25#include "nat/linux-waitpid.h"
268a13a5 26#include "gdbsupport/gdb_wait.h"
d6b0e80f
AC
27#include <unistd.h>
28#include <sys/syscall.h>
5826e159 29#include "nat/gdb_ptrace.h"
0274a8ce 30#include "linux-nat.h"
125f8a3d
GB
31#include "nat/linux-ptrace.h"
32#include "nat/linux-procfs.h"
8cc73a39 33#include "nat/linux-personality.h"
ac264b3b 34#include "linux-fork.h"
d6b0e80f
AC
35#include "gdbthread.h"
36#include "gdbcmd.h"
37#include "regcache.h"
4f844a66 38#include "regset.h"
dab06dbe 39#include "inf-child.h"
10d6c8cd
DJ
40#include "inf-ptrace.h"
41#include "auxv.h"
1777feb0 42#include <sys/procfs.h> /* for elf_gregset etc. */
dba24537
AC
43#include "elf-bfd.h" /* for elfcore_write_* */
44#include "gregset.h" /* for gregset */
45#include "gdbcore.h" /* for get_exec_file */
46#include <ctype.h> /* for isdigit */
53ce3c39 47#include <sys/stat.h> /* for struct stat */
dba24537 48#include <fcntl.h> /* for O_RDONLY */
b84876c2 49#include "inf-loop.h"
400b5eca 50#include "gdbsupport/event-loop.h"
b84876c2 51#include "event-top.h"
07e059b5
VP
52#include <pwd.h>
53#include <sys/types.h>
2978b111 54#include <dirent.h>
07e059b5 55#include "xml-support.h"
efcbbd14 56#include <sys/vfs.h>
6c95b8df 57#include "solib.h"
125f8a3d 58#include "nat/linux-osdata.h"
6432734d 59#include "linux-tdep.h"
7dcd53a0 60#include "symfile.h"
268a13a5 61#include "gdbsupport/agent.h"
5808517f 62#include "tracepoint.h"
268a13a5 63#include "gdbsupport/buffer.h"
6ecd4729 64#include "target-descriptions.h"
268a13a5 65#include "gdbsupport/filestuff.h"
77e371c0 66#include "objfiles.h"
7a6a1731 67#include "nat/linux-namespaces.h"
b146ba14 68#include "gdbsupport/block-signals.h"
268a13a5
TT
69#include "gdbsupport/fileio.h"
70#include "gdbsupport/scope-exit.h"
21987b9c 71#include "gdbsupport/gdb-sigmask.h"
ba988419 72#include "gdbsupport/common-debug.h"
8a89ddbd 73#include <unordered_map>
efcbbd14 74
1777feb0 75/* This comment documents high-level logic of this file.
8a77dff3
VP
76
77Waiting for events in sync mode
78===============================
79
4a6ed09b
PA
80When waiting for an event in a specific thread, we just use waitpid,
81passing the specific pid, and not passing WNOHANG.
82
83When waiting for an event in all threads, waitpid is not quite good:
84
85- If the thread group leader exits while other threads in the thread
86 group still exist, waitpid(TGID, ...) hangs. That waitpid won't
87 return an exit status until the other threads in the group are
88 reaped.
89
90- When a non-leader thread execs, that thread just vanishes without
91 reporting an exit (so we'd hang if we waited for it explicitly in
92 that case). The exec event is instead reported to the TGID pid.
93
94The solution is to always use -1 and WNOHANG, together with
95sigsuspend.
96
97First, we use non-blocking waitpid to check for events. If nothing is
98found, we use sigsuspend to wait for SIGCHLD. When SIGCHLD arrives,
99it means something happened to a child process. As soon as we know
100there's an event, we get back to calling nonblocking waitpid.
101
102Note that SIGCHLD should be blocked between waitpid and sigsuspend
103calls, so that we don't miss a signal. If SIGCHLD arrives in between,
104when it's blocked, the signal becomes pending and sigsuspend
105immediately notices it and returns.
106
107Waiting for events in async mode (TARGET_WNOHANG)
108=================================================
8a77dff3 109
7feb7d06
PA
110In async mode, GDB should always be ready to handle both user input
111and target events, so neither blocking waitpid nor sigsuspend are
112viable options. Instead, we should asynchronously notify the GDB main
113event loop whenever there's an unprocessed event from the target. We
114detect asynchronous target events by handling SIGCHLD signals. To
c150bdf0
JB
115notify the event loop about target events, an event pipe is used
116--- the pipe is registered as waitable event source in the event loop,
7feb7d06 117the event loop select/poll's on the read end of this pipe (as well on
c150bdf0
JB
118other event sources, e.g., stdin), and the SIGCHLD handler marks the
119event pipe to raise an event. This is more portable than relying on
7feb7d06
PA
120pselect/ppoll, since on kernels that lack those syscalls, libc
121emulates them with select/poll+sigprocmask, and that is racy
122(a.k.a. plain broken).
123
124Obviously, if we fail to notify the event loop if there's a target
125event, it's bad. OTOH, if we notify the event loop when there's no
126event from the target, linux_nat_wait will detect that there's no real
127event to report, and return event of type TARGET_WAITKIND_IGNORE.
128This is mostly harmless, but it will waste time and is better avoided.
129
130The main design point is that every time GDB is outside linux-nat.c,
131we have a SIGCHLD handler installed that is called when something
132happens to the target and notifies the GDB event loop. Whenever GDB
133core decides to handle the event, and calls into linux-nat.c, we
134process things as in sync mode, except that the we never block in
135sigsuspend.
136
137While processing an event, we may end up momentarily blocked in
138waitpid calls. Those waitpid calls, while blocking, are guarantied to
139return quickly. E.g., in all-stop mode, before reporting to the core
140that an LWP hit a breakpoint, all LWPs are stopped by sending them
141SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
142Note that this is different from blocking indefinitely waiting for the
143next event --- here, we're already handling an event.
8a77dff3
VP
144
145Use of signals
146==============
147
148We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
149signal is not entirely significant; we just need for a signal to be delivered,
150so that we can intercept it. SIGSTOP's advantage is that it can not be
151blocked. A disadvantage is that it is not a real-time signal, so it can only
152be queued once; we do not keep track of other sources of SIGSTOP.
153
154Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
155use them, because they have special behavior when the signal is generated -
156not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
157kills the entire thread group.
158
159A delivered SIGSTOP would stop the entire thread group, not just the thread we
160tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
161cancel it (by PTRACE_CONT without passing SIGSTOP).
162
163We could use a real-time signal instead. This would solve those problems; we
164could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
165But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
166generates it, and there are races with trying to find a signal that is not
4a6ed09b
PA
167blocked.
168
169Exec events
170===========
171
172The case of a thread group (process) with 3 or more threads, and a
173thread other than the leader execs is worth detailing:
174
175On an exec, the Linux kernel destroys all threads except the execing
176one in the thread group, and resets the execing thread's tid to the
177tgid. No exit notification is sent for the execing thread -- from the
178ptracer's perspective, it appears as though the execing thread just
179vanishes. Until we reap all other threads except the leader and the
180execing thread, the leader will be zombie, and the execing thread will
181be in `D (disc sleep)' state. As soon as all other threads are
182reaped, the execing thread changes its tid to the tgid, and the
183previous (zombie) leader vanishes, giving place to the "new"
184leader. */
a0ef4274 185
dba24537
AC
186#ifndef O_LARGEFILE
187#define O_LARGEFILE 0
188#endif
0274a8ce 189
f6ac5f3d
PA
190struct linux_nat_target *linux_target;
191
433bbbf8 192/* Does the current host support PTRACE_GETREGSET? */
0bdb2f78 193enum tribool have_ptrace_getregset = TRIBOOL_UNKNOWN;
433bbbf8 194
b6e52a0b
AB
195/* When true, print debug messages relating to the linux native target. */
196
197static bool debug_linux_nat;
198
8864ef42 199/* Implement 'show debug linux-nat'. */
b6e52a0b 200
920d2a44
AC
201static void
202show_debug_linux_nat (struct ui_file *file, int from_tty,
203 struct cmd_list_element *c, const char *value)
204{
6cb06a8c
TT
205 gdb_printf (file, _("Debugging of GNU/Linux native targets is %s.\n"),
206 value);
920d2a44 207}
d6b0e80f 208
17417fb0 209/* Print a linux-nat debug statement. */
9327494e
SM
210
211#define linux_nat_debug_printf(fmt, ...) \
74b773fc 212 debug_prefixed_printf_cond (debug_linux_nat, "linux-nat", fmt, ##__VA_ARGS__)
9327494e 213
b6e52a0b
AB
214/* Print "linux-nat" enter/exit debug statements. */
215
216#define LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT \
217 scoped_debug_enter_exit (debug_linux_nat, "linux-nat")
218
ae087d01
DJ
219struct simple_pid_list
220{
221 int pid;
3d799a95 222 int status;
ae087d01
DJ
223 struct simple_pid_list *next;
224};
05c309a8 225static struct simple_pid_list *stopped_pids;
ae087d01 226
aa01bd36
PA
227/* Whether target_thread_events is in effect. */
228static int report_thread_events;
229
7feb7d06
PA
230static int kill_lwp (int lwpid, int signo);
231
d3a70e03 232static int stop_callback (struct lwp_info *lp);
7feb7d06
PA
233
234static void block_child_signals (sigset_t *prev_mask);
235static void restore_child_signals_mask (sigset_t *prev_mask);
2277426b
PA
236
237struct lwp_info;
238static struct lwp_info *add_lwp (ptid_t ptid);
239static void purge_lwp_list (int pid);
4403d8e9 240static void delete_lwp (ptid_t ptid);
2277426b
PA
241static struct lwp_info *find_lwp_pid (ptid_t ptid);
242
8a99810d
PA
243static int lwp_status_pending_p (struct lwp_info *lp);
244
e7ad2f14
PA
245static void save_stop_reason (struct lwp_info *lp);
246
8a89ddbd
PA
247static void close_proc_mem_file (pid_t pid);
248static void open_proc_mem_file (ptid_t ptid);
05c06f31 249
6cf20c46
PA
250/* Return TRUE if LWP is the leader thread of the process. */
251
252static bool
253is_leader (lwp_info *lp)
254{
255 return lp->ptid.pid () == lp->ptid.lwp ();
256}
257
cff068da
GB
258\f
259/* LWP accessors. */
260
261/* See nat/linux-nat.h. */
262
263ptid_t
264ptid_of_lwp (struct lwp_info *lwp)
265{
266 return lwp->ptid;
267}
268
269/* See nat/linux-nat.h. */
270
4b134ca1
GB
271void
272lwp_set_arch_private_info (struct lwp_info *lwp,
273 struct arch_lwp_info *info)
274{
275 lwp->arch_private = info;
276}
277
278/* See nat/linux-nat.h. */
279
280struct arch_lwp_info *
281lwp_arch_private_info (struct lwp_info *lwp)
282{
283 return lwp->arch_private;
284}
285
286/* See nat/linux-nat.h. */
287
cff068da
GB
288int
289lwp_is_stopped (struct lwp_info *lwp)
290{
291 return lwp->stopped;
292}
293
294/* See nat/linux-nat.h. */
295
296enum target_stop_reason
297lwp_stop_reason (struct lwp_info *lwp)
298{
299 return lwp->stop_reason;
300}
301
0e00e962
AA
302/* See nat/linux-nat.h. */
303
304int
305lwp_is_stepping (struct lwp_info *lwp)
306{
307 return lwp->step;
308}
309
ae087d01
DJ
310\f
311/* Trivial list manipulation functions to keep track of a list of
312 new stopped processes. */
313static void
3d799a95 314add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
ae087d01 315{
8d749320 316 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
e0881a8e 317
ae087d01 318 new_pid->pid = pid;
3d799a95 319 new_pid->status = status;
ae087d01
DJ
320 new_pid->next = *listp;
321 *listp = new_pid;
322}
323
324static int
46a96992 325pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
ae087d01
DJ
326{
327 struct simple_pid_list **p;
328
329 for (p = listp; *p != NULL; p = &(*p)->next)
330 if ((*p)->pid == pid)
331 {
332 struct simple_pid_list *next = (*p)->next;
e0881a8e 333
46a96992 334 *statusp = (*p)->status;
ae087d01
DJ
335 xfree (*p);
336 *p = next;
337 return 1;
338 }
339 return 0;
340}
341
de0d863e
DB
342/* Return the ptrace options that we want to try to enable. */
343
344static int
345linux_nat_ptrace_options (int attached)
346{
347 int options = 0;
348
349 if (!attached)
350 options |= PTRACE_O_EXITKILL;
351
352 options |= (PTRACE_O_TRACESYSGOOD
353 | PTRACE_O_TRACEVFORKDONE
354 | PTRACE_O_TRACEVFORK
355 | PTRACE_O_TRACEFORK
356 | PTRACE_O_TRACEEXEC);
357
358 return options;
359}
360
1b919490
VB
361/* Initialize ptrace and procfs warnings and check for supported
362 ptrace features given PID.
beed38b8
JB
363
364 ATTACHED should be nonzero iff we attached to the inferior. */
3993f6b1
DJ
365
366static void
1b919490 367linux_init_ptrace_procfs (pid_t pid, int attached)
3993f6b1 368{
de0d863e
DB
369 int options = linux_nat_ptrace_options (attached);
370
371 linux_enable_event_reporting (pid, options);
96d7229d 372 linux_ptrace_init_warnings ();
1b919490 373 linux_proc_init_warnings ();
4de4c07c
DJ
374}
375
f6ac5f3d
PA
376linux_nat_target::~linux_nat_target ()
377{}
378
379void
380linux_nat_target::post_attach (int pid)
4de4c07c 381{
1b919490 382 linux_init_ptrace_procfs (pid, 1);
4de4c07c
DJ
383}
384
200fd287
AB
385/* Implement the virtual inf_ptrace_target::post_startup_inferior method. */
386
f6ac5f3d
PA
387void
388linux_nat_target::post_startup_inferior (ptid_t ptid)
4de4c07c 389{
1b919490 390 linux_init_ptrace_procfs (ptid.pid (), 0);
4de4c07c
DJ
391}
392
4403d8e9
JK
393/* Return the number of known LWPs in the tgid given by PID. */
394
395static int
396num_lwps (int pid)
397{
398 int count = 0;
4403d8e9 399
901b9821 400 for (const lwp_info *lp ATTRIBUTE_UNUSED : all_lwps ())
e99b03dc 401 if (lp->ptid.pid () == pid)
4403d8e9
JK
402 count++;
403
404 return count;
405}
406
169bb27b 407/* Deleter for lwp_info unique_ptr specialisation. */
4403d8e9 408
169bb27b 409struct lwp_deleter
4403d8e9 410{
169bb27b
AB
411 void operator() (struct lwp_info *lwp) const
412 {
413 delete_lwp (lwp->ptid);
414 }
415};
4403d8e9 416
169bb27b
AB
417/* A unique_ptr specialisation for lwp_info. */
418
419typedef std::unique_ptr<struct lwp_info, lwp_deleter> lwp_info_up;
4403d8e9 420
82d1f134 421/* Target hook for follow_fork. */
d83ad864 422
e97007b6 423void
82d1f134
SM
424linux_nat_target::follow_fork (inferior *child_inf, ptid_t child_ptid,
425 target_waitkind fork_kind, bool follow_child,
426 bool detach_fork)
3993f6b1 427{
82d1f134
SM
428 inf_ptrace_target::follow_fork (child_inf, child_ptid, fork_kind,
429 follow_child, detach_fork);
430
d83ad864 431 if (!follow_child)
4de4c07c 432 {
3a849a34
SM
433 bool has_vforked = fork_kind == TARGET_WAITKIND_VFORKED;
434 ptid_t parent_ptid = inferior_ptid;
3a849a34
SM
435 int parent_pid = parent_ptid.lwp ();
436 int child_pid = child_ptid.lwp ();
4de4c07c 437
1777feb0 438 /* We're already attached to the parent, by default. */
3a849a34 439 lwp_info *child_lp = add_lwp (child_ptid);
d83ad864
DB
440 child_lp->stopped = 1;
441 child_lp->last_resume_kind = resume_stop;
4de4c07c 442
ac264b3b
MS
443 /* Detach new forked process? */
444 if (detach_fork)
f75c00e4 445 {
95347337
AB
446 int child_stop_signal = 0;
447 bool detach_child = true;
4403d8e9 448
169bb27b
AB
449 /* Move CHILD_LP into a unique_ptr and clear the source pointer
450 to prevent us doing anything stupid with it. */
451 lwp_info_up child_lp_ptr (child_lp);
452 child_lp = nullptr;
453
454 linux_target->low_prepare_to_resume (child_lp_ptr.get ());
c077881a
HZ
455
456 /* When debugging an inferior in an architecture that supports
457 hardware single stepping on a kernel without commit
458 6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
459 process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
460 set if the parent process had them set.
461 To work around this, single step the child process
462 once before detaching to clear the flags. */
463
2fd9d7ca
PA
464 /* Note that we consult the parent's architecture instead of
465 the child's because there's no inferior for the child at
466 this point. */
c077881a 467 if (!gdbarch_software_single_step_p (target_thread_architecture
2fd9d7ca 468 (parent_ptid)))
c077881a 469 {
95347337
AB
470 int status;
471
c077881a
HZ
472 linux_disable_event_reporting (child_pid);
473 if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
474 perror_with_name (_("Couldn't do single step"));
475 if (my_waitpid (child_pid, &status, 0) < 0)
476 perror_with_name (_("Couldn't wait vfork process"));
95347337
AB
477 else
478 {
479 detach_child = WIFSTOPPED (status);
480 child_stop_signal = WSTOPSIG (status);
481 }
c077881a
HZ
482 }
483
95347337 484 if (detach_child)
9caaaa83 485 {
95347337 486 int signo = child_stop_signal;
9caaaa83 487
9caaaa83
PA
488 if (signo != 0
489 && !signal_pass_state (gdb_signal_from_host (signo)))
490 signo = 0;
491 ptrace (PTRACE_DETACH, child_pid, 0, signo);
8a89ddbd
PA
492
493 close_proc_mem_file (child_pid);
9caaaa83 494 }
ac264b3b 495 }
9016a515
DJ
496
497 if (has_vforked)
498 {
a2885186
SM
499 lwp_info *parent_lp = find_lwp_pid (parent_ptid);
500 linux_nat_debug_printf ("waiting for VFORK_DONE on %d", parent_pid);
501 parent_lp->stopped = 1;
6c95b8df 502
a2885186
SM
503 /* We'll handle the VFORK_DONE event like any other
504 event, in target_wait. */
9016a515 505 }
4de4c07c 506 }
3993f6b1 507 else
4de4c07c 508 {
3ced3da4 509 struct lwp_info *child_lp;
4de4c07c 510
82d1f134 511 child_lp = add_lwp (child_ptid);
3ced3da4 512 child_lp->stopped = 1;
25289eb2 513 child_lp->last_resume_kind = resume_stop;
4de4c07c 514 }
4de4c07c
DJ
515}
516
4de4c07c 517\f
f6ac5f3d
PA
518int
519linux_nat_target::insert_fork_catchpoint (int pid)
4de4c07c 520{
a2885186 521 return 0;
3993f6b1
DJ
522}
523
f6ac5f3d
PA
524int
525linux_nat_target::remove_fork_catchpoint (int pid)
eb73ad13
PA
526{
527 return 0;
528}
529
f6ac5f3d
PA
530int
531linux_nat_target::insert_vfork_catchpoint (int pid)
3993f6b1 532{
a2885186 533 return 0;
3993f6b1
DJ
534}
535
f6ac5f3d
PA
536int
537linux_nat_target::remove_vfork_catchpoint (int pid)
eb73ad13
PA
538{
539 return 0;
540}
541
f6ac5f3d
PA
542int
543linux_nat_target::insert_exec_catchpoint (int pid)
3993f6b1 544{
a2885186 545 return 0;
3993f6b1
DJ
546}
547
f6ac5f3d
PA
548int
549linux_nat_target::remove_exec_catchpoint (int pid)
eb73ad13
PA
550{
551 return 0;
552}
553
f6ac5f3d
PA
554int
555linux_nat_target::set_syscall_catchpoint (int pid, bool needed, int any_count,
556 gdb::array_view<const int> syscall_counts)
a96d9b2e 557{
a96d9b2e
SDJ
558 /* On GNU/Linux, we ignore the arguments. It means that we only
559 enable the syscall catchpoints, but do not disable them.
77b06cd7 560
649a140c 561 Also, we do not use the `syscall_counts' information because we do not
a96d9b2e
SDJ
562 filter system calls here. We let GDB do the logic for us. */
563 return 0;
564}
565
774113b0
PA
566/* List of known LWPs, keyed by LWP PID. This speeds up the common
567 case of mapping a PID returned from the kernel to our corresponding
568 lwp_info data structure. */
569static htab_t lwp_lwpid_htab;
570
571/* Calculate a hash from a lwp_info's LWP PID. */
572
573static hashval_t
574lwp_info_hash (const void *ap)
575{
576 const struct lwp_info *lp = (struct lwp_info *) ap;
e38504b3 577 pid_t pid = lp->ptid.lwp ();
774113b0
PA
578
579 return iterative_hash_object (pid, 0);
580}
581
582/* Equality function for the lwp_info hash table. Compares the LWP's
583 PID. */
584
585static int
586lwp_lwpid_htab_eq (const void *a, const void *b)
587{
588 const struct lwp_info *entry = (const struct lwp_info *) a;
589 const struct lwp_info *element = (const struct lwp_info *) b;
590
e38504b3 591 return entry->ptid.lwp () == element->ptid.lwp ();
774113b0
PA
592}
593
594/* Create the lwp_lwpid_htab hash table. */
595
596static void
597lwp_lwpid_htab_create (void)
598{
599 lwp_lwpid_htab = htab_create (100, lwp_info_hash, lwp_lwpid_htab_eq, NULL);
600}
601
602/* Add LP to the hash table. */
603
604static void
605lwp_lwpid_htab_add_lwp (struct lwp_info *lp)
606{
607 void **slot;
608
609 slot = htab_find_slot (lwp_lwpid_htab, lp, INSERT);
610 gdb_assert (slot != NULL && *slot == NULL);
611 *slot = lp;
612}
613
614/* Head of doubly-linked list of known LWPs. Sorted by reverse
615 creation order. This order is assumed in some cases. E.g.,
616 reaping status after killing alls lwps of a process: the leader LWP
617 must be reaped last. */
901b9821
SM
618
619static intrusive_list<lwp_info> lwp_list;
620
621/* See linux-nat.h. */
622
623lwp_info_range
624all_lwps ()
625{
626 return lwp_info_range (lwp_list.begin ());
627}
628
629/* See linux-nat.h. */
630
631lwp_info_safe_range
632all_lwps_safe ()
633{
634 return lwp_info_safe_range (lwp_list.begin ());
635}
774113b0
PA
636
637/* Add LP to sorted-by-reverse-creation-order doubly-linked list. */
638
639static void
640lwp_list_add (struct lwp_info *lp)
641{
901b9821 642 lwp_list.push_front (*lp);
774113b0
PA
643}
644
645/* Remove LP from sorted-by-reverse-creation-order doubly-linked
646 list. */
647
648static void
649lwp_list_remove (struct lwp_info *lp)
650{
651 /* Remove from sorted-by-creation-order list. */
901b9821 652 lwp_list.erase (lwp_list.iterator_to (*lp));
774113b0
PA
653}
654
d6b0e80f
AC
655\f
656
d6b0e80f
AC
657/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
658 _initialize_linux_nat. */
659static sigset_t suspend_mask;
660
7feb7d06
PA
661/* Signals to block to make that sigsuspend work. */
662static sigset_t blocked_mask;
663
664/* SIGCHLD action. */
6bd434d6 665static struct sigaction sigchld_action;
b84876c2 666
7feb7d06
PA
667/* Block child signals (SIGCHLD and linux threads signals), and store
668 the previous mask in PREV_MASK. */
84e46146 669
7feb7d06
PA
670static void
671block_child_signals (sigset_t *prev_mask)
672{
673 /* Make sure SIGCHLD is blocked. */
674 if (!sigismember (&blocked_mask, SIGCHLD))
675 sigaddset (&blocked_mask, SIGCHLD);
676
21987b9c 677 gdb_sigmask (SIG_BLOCK, &blocked_mask, prev_mask);
7feb7d06
PA
678}
679
680/* Restore child signals mask, previously returned by
681 block_child_signals. */
682
683static void
684restore_child_signals_mask (sigset_t *prev_mask)
685{
21987b9c 686 gdb_sigmask (SIG_SETMASK, prev_mask, NULL);
7feb7d06 687}
2455069d
UW
688
689/* Mask of signals to pass directly to the inferior. */
690static sigset_t pass_mask;
691
692/* Update signals to pass to the inferior. */
f6ac5f3d 693void
adc6a863
PA
694linux_nat_target::pass_signals
695 (gdb::array_view<const unsigned char> pass_signals)
2455069d
UW
696{
697 int signo;
698
699 sigemptyset (&pass_mask);
700
701 for (signo = 1; signo < NSIG; signo++)
702 {
2ea28649 703 int target_signo = gdb_signal_from_host (signo);
adc6a863 704 if (target_signo < pass_signals.size () && pass_signals[target_signo])
dda83cd7 705 sigaddset (&pass_mask, signo);
2455069d
UW
706 }
707}
708
d6b0e80f
AC
709\f
710
711/* Prototypes for local functions. */
d3a70e03
TT
712static int stop_wait_callback (struct lwp_info *lp);
713static int resume_stopped_resumed_lwps (struct lwp_info *lp, const ptid_t wait_ptid);
ced2dffb 714static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
710151dd 715
d6b0e80f 716\f
d6b0e80f 717
7b50312a
PA
718/* Destroy and free LP. */
719
676362df 720lwp_info::~lwp_info ()
7b50312a 721{
466eecee 722 /* Let the arch specific bits release arch_lwp_info. */
676362df 723 linux_target->low_delete_thread (this->arch_private);
7b50312a
PA
724}
725
774113b0 726/* Traversal function for purge_lwp_list. */
d90e17a7 727
774113b0
PA
728static int
729lwp_lwpid_htab_remove_pid (void **slot, void *info)
d90e17a7 730{
774113b0
PA
731 struct lwp_info *lp = (struct lwp_info *) *slot;
732 int pid = *(int *) info;
d90e17a7 733
e99b03dc 734 if (lp->ptid.pid () == pid)
d90e17a7 735 {
774113b0
PA
736 htab_clear_slot (lwp_lwpid_htab, slot);
737 lwp_list_remove (lp);
676362df 738 delete lp;
774113b0 739 }
d90e17a7 740
774113b0
PA
741 return 1;
742}
d90e17a7 743
774113b0
PA
744/* Remove all LWPs belong to PID from the lwp list. */
745
746static void
747purge_lwp_list (int pid)
748{
749 htab_traverse_noresize (lwp_lwpid_htab, lwp_lwpid_htab_remove_pid, &pid);
d90e17a7
PA
750}
751
26cb8b7c
PA
752/* Add the LWP specified by PTID to the list. PTID is the first LWP
753 in the process. Return a pointer to the structure describing the
754 new LWP.
755
756 This differs from add_lwp in that we don't let the arch specific
757 bits know about this new thread. Current clients of this callback
758 take the opportunity to install watchpoints in the new thread, and
759 we shouldn't do that for the first thread. If we're spawning a
760 child ("run"), the thread executes the shell wrapper first, and we
761 shouldn't touch it until it execs the program we want to debug.
762 For "attach", it'd be okay to call the callback, but it's not
763 necessary, because watchpoints can't yet have been inserted into
764 the inferior. */
d6b0e80f
AC
765
766static struct lwp_info *
26cb8b7c 767add_initial_lwp (ptid_t ptid)
d6b0e80f 768{
15a9e13e 769 gdb_assert (ptid.lwp_p ());
d6b0e80f 770
b0f6c8d2 771 lwp_info *lp = new lwp_info (ptid);
d6b0e80f 772
d6b0e80f 773
774113b0
PA
774 /* Add to sorted-by-reverse-creation-order list. */
775 lwp_list_add (lp);
776
777 /* Add to keyed-by-pid htab. */
778 lwp_lwpid_htab_add_lwp (lp);
d6b0e80f 779
26cb8b7c
PA
780 return lp;
781}
782
783/* Add the LWP specified by PID to the list. Return a pointer to the
784 structure describing the new LWP. The LWP should already be
785 stopped. */
786
787static struct lwp_info *
788add_lwp (ptid_t ptid)
789{
790 struct lwp_info *lp;
791
792 lp = add_initial_lwp (ptid);
793
6e012a6c
PA
794 /* Let the arch specific bits know about this new thread. Current
795 clients of this callback take the opportunity to install
26cb8b7c
PA
796 watchpoints in the new thread. We don't do this for the first
797 thread though. See add_initial_lwp. */
135340af 798 linux_target->low_new_thread (lp);
9f0bdab8 799
d6b0e80f
AC
800 return lp;
801}
802
803/* Remove the LWP specified by PID from the list. */
804
805static void
806delete_lwp (ptid_t ptid)
807{
b0f6c8d2 808 lwp_info dummy (ptid);
d6b0e80f 809
b0f6c8d2 810 void **slot = htab_find_slot (lwp_lwpid_htab, &dummy, NO_INSERT);
774113b0
PA
811 if (slot == NULL)
812 return;
d6b0e80f 813
b0f6c8d2 814 lwp_info *lp = *(struct lwp_info **) slot;
774113b0 815 gdb_assert (lp != NULL);
d6b0e80f 816
774113b0 817 htab_clear_slot (lwp_lwpid_htab, slot);
d6b0e80f 818
774113b0
PA
819 /* Remove from sorted-by-creation-order list. */
820 lwp_list_remove (lp);
d6b0e80f 821
774113b0 822 /* Release. */
676362df 823 delete lp;
d6b0e80f
AC
824}
825
826/* Return a pointer to the structure describing the LWP corresponding
827 to PID. If no corresponding LWP could be found, return NULL. */
828
829static struct lwp_info *
830find_lwp_pid (ptid_t ptid)
831{
d6b0e80f
AC
832 int lwp;
833
15a9e13e 834 if (ptid.lwp_p ())
e38504b3 835 lwp = ptid.lwp ();
d6b0e80f 836 else
e99b03dc 837 lwp = ptid.pid ();
d6b0e80f 838
b0f6c8d2
SM
839 lwp_info dummy (ptid_t (0, lwp));
840 return (struct lwp_info *) htab_find (lwp_lwpid_htab, &dummy);
d6b0e80f
AC
841}
842
6d4ee8c6 843/* See nat/linux-nat.h. */
d6b0e80f
AC
844
845struct lwp_info *
d90e17a7 846iterate_over_lwps (ptid_t filter,
d3a70e03 847 gdb::function_view<iterate_over_lwps_ftype> callback)
d6b0e80f 848{
901b9821 849 for (lwp_info *lp : all_lwps_safe ())
d6b0e80f 850 {
26a57c92 851 if (lp->ptid.matches (filter))
d90e17a7 852 {
d3a70e03 853 if (callback (lp) != 0)
d90e17a7
PA
854 return lp;
855 }
d6b0e80f
AC
856 }
857
858 return NULL;
859}
860
2277426b
PA
861/* Update our internal state when changing from one checkpoint to
862 another indicated by NEW_PTID. We can only switch single-threaded
863 applications, so we only create one new LWP, and the previous list
864 is discarded. */
f973ed9c
DJ
865
866void
867linux_nat_switch_fork (ptid_t new_ptid)
868{
869 struct lwp_info *lp;
870
e99b03dc 871 purge_lwp_list (inferior_ptid.pid ());
2277426b 872
f973ed9c
DJ
873 lp = add_lwp (new_ptid);
874 lp->stopped = 1;
e26af52f 875
2277426b
PA
876 /* This changes the thread's ptid while preserving the gdb thread
877 num. Also changes the inferior pid, while preserving the
878 inferior num. */
5b6d1e4f 879 thread_change_ptid (linux_target, inferior_ptid, new_ptid);
2277426b
PA
880
881 /* We've just told GDB core that the thread changed target id, but,
882 in fact, it really is a different thread, with different register
883 contents. */
884 registers_changed ();
e26af52f
DJ
885}
886
e26af52f
DJ
887/* Handle the exit of a single thread LP. */
888
889static void
890exit_lwp (struct lwp_info *lp)
891{
5b6d1e4f 892 struct thread_info *th = find_thread_ptid (linux_target, lp->ptid);
063bfe2e
VP
893
894 if (th)
e26af52f 895 {
17faa917 896 if (print_thread_events)
6cb06a8c
TT
897 gdb_printf (_("[%s exited]\n"),
898 target_pid_to_str (lp->ptid).c_str ());
17faa917 899
00431a78 900 delete_thread (th);
e26af52f
DJ
901 }
902
903 delete_lwp (lp->ptid);
904}
905
a0ef4274
DJ
906/* Wait for the LWP specified by LP, which we have just attached to.
907 Returns a wait status for that LWP, to cache. */
908
909static int
22827c51 910linux_nat_post_attach_wait (ptid_t ptid, int *signalled)
a0ef4274 911{
e38504b3 912 pid_t new_pid, pid = ptid.lwp ();
a0ef4274
DJ
913 int status;
914
644cebc9 915 if (linux_proc_pid_is_stopped (pid))
a0ef4274 916 {
9327494e 917 linux_nat_debug_printf ("Attaching to a stopped process");
a0ef4274
DJ
918
919 /* The process is definitely stopped. It is in a job control
920 stop, unless the kernel predates the TASK_STOPPED /
921 TASK_TRACED distinction, in which case it might be in a
922 ptrace stop. Make sure it is in a ptrace stop; from there we
923 can kill it, signal it, et cetera.
924
dda83cd7 925 First make sure there is a pending SIGSTOP. Since we are
a0ef4274
DJ
926 already attached, the process can not transition from stopped
927 to running without a PTRACE_CONT; so we know this signal will
928 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
929 probably already in the queue (unless this kernel is old
930 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
931 is not an RT signal, it can only be queued once. */
932 kill_lwp (pid, SIGSTOP);
933
934 /* Finally, resume the stopped process. This will deliver the SIGSTOP
935 (or a higher priority signal, just like normal PTRACE_ATTACH). */
936 ptrace (PTRACE_CONT, pid, 0, 0);
937 }
938
939 /* Make sure the initial process is stopped. The user-level threads
940 layer might want to poke around in the inferior, and that won't
941 work if things haven't stabilized yet. */
4a6ed09b 942 new_pid = my_waitpid (pid, &status, __WALL);
dacc9cb2
PP
943 gdb_assert (pid == new_pid);
944
945 if (!WIFSTOPPED (status))
946 {
947 /* The pid we tried to attach has apparently just exited. */
9327494e 948 linux_nat_debug_printf ("Failed to stop %d: %s", pid,
8d06918f 949 status_to_str (status).c_str ());
dacc9cb2
PP
950 return status;
951 }
a0ef4274
DJ
952
953 if (WSTOPSIG (status) != SIGSTOP)
954 {
955 *signalled = 1;
9327494e 956 linux_nat_debug_printf ("Received %s after attaching",
8d06918f 957 status_to_str (status).c_str ());
a0ef4274
DJ
958 }
959
960 return status;
961}
962
f6ac5f3d
PA
963void
964linux_nat_target::create_inferior (const char *exec_file,
965 const std::string &allargs,
966 char **env, int from_tty)
b84876c2 967{
41272101
TT
968 maybe_disable_address_space_randomization restore_personality
969 (disable_randomization);
b84876c2
PA
970
971 /* The fork_child mechanism is synchronous and calls target_wait, so
972 we have to mask the async mode. */
973
2455069d 974 /* Make sure we report all signals during startup. */
adc6a863 975 pass_signals ({});
2455069d 976
f6ac5f3d 977 inf_ptrace_target::create_inferior (exec_file, allargs, env, from_tty);
8a89ddbd
PA
978
979 open_proc_mem_file (inferior_ptid);
b84876c2
PA
980}
981
8784d563
PA
982/* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
983 already attached. Returns true if a new LWP is found, false
984 otherwise. */
985
986static int
987attach_proc_task_lwp_callback (ptid_t ptid)
988{
989 struct lwp_info *lp;
990
991 /* Ignore LWPs we're already attached to. */
992 lp = find_lwp_pid (ptid);
993 if (lp == NULL)
994 {
e38504b3 995 int lwpid = ptid.lwp ();
8784d563
PA
996
997 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
998 {
999 int err = errno;
1000
1001 /* Be quiet if we simply raced with the thread exiting.
1002 EPERM is returned if the thread's task still exists, and
1003 is marked as exited or zombie, as well as other
1004 conditions, so in that case, confirm the status in
1005 /proc/PID/status. */
1006 if (err == ESRCH
1007 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1008 {
9327494e
SM
1009 linux_nat_debug_printf
1010 ("Cannot attach to lwp %d: thread is gone (%d: %s)",
1011 lwpid, err, safe_strerror (err));
1012
8784d563
PA
1013 }
1014 else
1015 {
4d9b86e1 1016 std::string reason
50fa3001 1017 = linux_ptrace_attach_fail_reason_string (ptid, err);
4d9b86e1 1018
f71f0b0d 1019 warning (_("Cannot attach to lwp %d: %s"),
4d9b86e1 1020 lwpid, reason.c_str ());
8784d563
PA
1021 }
1022 }
1023 else
1024 {
9327494e 1025 linux_nat_debug_printf ("PTRACE_ATTACH %s, 0, 0 (OK)",
e53c95d4 1026 ptid.to_string ().c_str ());
8784d563
PA
1027
1028 lp = add_lwp (ptid);
8784d563
PA
1029
1030 /* The next time we wait for this LWP we'll see a SIGSTOP as
1031 PTRACE_ATTACH brings it to a halt. */
1032 lp->signalled = 1;
1033
1034 /* We need to wait for a stop before being able to make the
1035 next ptrace call on this LWP. */
1036 lp->must_set_ptrace_flags = 1;
026a9174
PA
1037
1038 /* So that wait collects the SIGSTOP. */
1039 lp->resumed = 1;
1040
1041 /* Also add the LWP to gdb's thread list, in case a
1042 matching libthread_db is not found (or the process uses
1043 raw clone). */
5b6d1e4f 1044 add_thread (linux_target, lp->ptid);
719546c4
SM
1045 set_running (linux_target, lp->ptid, true);
1046 set_executing (linux_target, lp->ptid, true);
8784d563
PA
1047 }
1048
1049 return 1;
1050 }
1051 return 0;
1052}
1053
f6ac5f3d
PA
1054void
1055linux_nat_target::attach (const char *args, int from_tty)
d6b0e80f
AC
1056{
1057 struct lwp_info *lp;
d6b0e80f 1058 int status;
af990527 1059 ptid_t ptid;
d6b0e80f 1060
2455069d 1061 /* Make sure we report all signals during attach. */
adc6a863 1062 pass_signals ({});
2455069d 1063
a70b8144 1064 try
87b0bb13 1065 {
f6ac5f3d 1066 inf_ptrace_target::attach (args, from_tty);
87b0bb13 1067 }
230d2906 1068 catch (const gdb_exception_error &ex)
87b0bb13
JK
1069 {
1070 pid_t pid = parse_pid_to_attach (args);
50fa3001 1071 std::string reason = linux_ptrace_attach_fail_reason (pid);
87b0bb13 1072
4d9b86e1 1073 if (!reason.empty ())
3d6e9d23
TT
1074 throw_error (ex.error, "warning: %s\n%s", reason.c_str (),
1075 ex.what ());
7ae1a6a6 1076 else
3d6e9d23 1077 throw_error (ex.error, "%s", ex.what ());
87b0bb13 1078 }
d6b0e80f 1079
af990527
PA
1080 /* The ptrace base target adds the main thread with (pid,0,0)
1081 format. Decorate it with lwp info. */
e99b03dc 1082 ptid = ptid_t (inferior_ptid.pid (),
184ea2f7 1083 inferior_ptid.pid ());
5b6d1e4f 1084 thread_change_ptid (linux_target, inferior_ptid, ptid);
af990527 1085
9f0bdab8 1086 /* Add the initial process as the first LWP to the list. */
26cb8b7c 1087 lp = add_initial_lwp (ptid);
a0ef4274 1088
22827c51 1089 status = linux_nat_post_attach_wait (lp->ptid, &lp->signalled);
dacc9cb2
PP
1090 if (!WIFSTOPPED (status))
1091 {
1092 if (WIFEXITED (status))
1093 {
1094 int exit_code = WEXITSTATUS (status);
1095
223ffa71 1096 target_terminal::ours ();
bc1e6c81 1097 target_mourn_inferior (inferior_ptid);
dacc9cb2
PP
1098 if (exit_code == 0)
1099 error (_("Unable to attach: program exited normally."));
1100 else
1101 error (_("Unable to attach: program exited with code %d."),
1102 exit_code);
1103 }
1104 else if (WIFSIGNALED (status))
1105 {
2ea28649 1106 enum gdb_signal signo;
dacc9cb2 1107
223ffa71 1108 target_terminal::ours ();
bc1e6c81 1109 target_mourn_inferior (inferior_ptid);
dacc9cb2 1110
2ea28649 1111 signo = gdb_signal_from_host (WTERMSIG (status));
dacc9cb2
PP
1112 error (_("Unable to attach: program terminated with signal "
1113 "%s, %s."),
2ea28649
PA
1114 gdb_signal_to_name (signo),
1115 gdb_signal_to_string (signo));
dacc9cb2
PP
1116 }
1117
1118 internal_error (__FILE__, __LINE__,
1119 _("unexpected status %d for PID %ld"),
e38504b3 1120 status, (long) ptid.lwp ());
dacc9cb2
PP
1121 }
1122
a0ef4274 1123 lp->stopped = 1;
9f0bdab8 1124
8a89ddbd
PA
1125 open_proc_mem_file (lp->ptid);
1126
a0ef4274 1127 /* Save the wait status to report later. */
d6b0e80f 1128 lp->resumed = 1;
9327494e 1129 linux_nat_debug_printf ("waitpid %ld, saving status %s",
8d06918f
SM
1130 (long) lp->ptid.pid (),
1131 status_to_str (status).c_str ());
710151dd 1132
7feb7d06
PA
1133 lp->status = status;
1134
8784d563
PA
1135 /* We must attach to every LWP. If /proc is mounted, use that to
1136 find them now. The inferior may be using raw clone instead of
1137 using pthreads. But even if it is using pthreads, thread_db
1138 walks structures in the inferior's address space to find the list
1139 of threads/LWPs, and those structures may well be corrupted.
1140 Note that once thread_db is loaded, we'll still use it to list
1141 threads and associate pthread info with each LWP. */
e99b03dc 1142 linux_proc_attach_tgid_threads (lp->ptid.pid (),
8784d563 1143 attach_proc_task_lwp_callback);
d6b0e80f
AC
1144}
1145
4a3ee32a
SM
1146/* Ptrace-detach the thread with pid PID. */
1147
1148static void
1149detach_one_pid (int pid, int signo)
1150{
1151 if (ptrace (PTRACE_DETACH, pid, 0, signo) < 0)
1152 {
1153 int save_errno = errno;
1154
1155 /* We know the thread exists, so ESRCH must mean the lwp is
1156 zombie. This can happen if one of the already-detached
1157 threads exits the whole thread group. In that case we're
1158 still attached, and must reap the lwp. */
1159 if (save_errno == ESRCH)
1160 {
1161 int ret, status;
1162
1163 ret = my_waitpid (pid, &status, __WALL);
1164 if (ret == -1)
1165 {
1166 warning (_("Couldn't reap LWP %d while detaching: %s"),
1167 pid, safe_strerror (errno));
1168 }
1169 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1170 {
1171 warning (_("Reaping LWP %d while detaching "
1172 "returned unexpected status 0x%x"),
1173 pid, status);
1174 }
1175 }
1176 else
1177 error (_("Can't detach %d: %s"),
1178 pid, safe_strerror (save_errno));
1179 }
1180 else
1181 linux_nat_debug_printf ("PTRACE_DETACH (%d, %s, 0) (OK)",
1182 pid, strsignal (signo));
1183}
1184
ced2dffb
PA
1185/* Get pending signal of THREAD as a host signal number, for detaching
1186 purposes. This is the signal the thread last stopped for, which we
1187 need to deliver to the thread when detaching, otherwise, it'd be
1188 suppressed/lost. */
1189
a0ef4274 1190static int
ced2dffb 1191get_detach_signal (struct lwp_info *lp)
a0ef4274 1192{
a493e3e2 1193 enum gdb_signal signo = GDB_SIGNAL_0;
ca2163eb
PA
1194
1195 /* If we paused threads momentarily, we may have stored pending
1196 events in lp->status or lp->waitstatus (see stop_wait_callback),
1197 and GDB core hasn't seen any signal for those threads.
1198 Otherwise, the last signal reported to the core is found in the
1199 thread object's stop_signal.
1200
1201 There's a corner case that isn't handled here at present. Only
1202 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1203 stop_signal make sense as a real signal to pass to the inferior.
1204 Some catchpoint related events, like
1205 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
a493e3e2 1206 to GDB_SIGNAL_SIGTRAP when the catchpoint triggers. But,
ca2163eb
PA
1207 those traps are debug API (ptrace in our case) related and
1208 induced; the inferior wouldn't see them if it wasn't being
1209 traced. Hence, we should never pass them to the inferior, even
1210 when set to pass state. Since this corner case isn't handled by
1211 infrun.c when proceeding with a signal, for consistency, neither
1212 do we handle it here (or elsewhere in the file we check for
1213 signal pass state). Normally SIGTRAP isn't set to pass state, so
1214 this is really a corner case. */
1215
183be222 1216 if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
a493e3e2 1217 signo = GDB_SIGNAL_0; /* a pending ptrace event, not a real signal. */
ca2163eb 1218 else if (lp->status)
2ea28649 1219 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
00431a78 1220 else
ca2163eb 1221 {
5b6d1e4f 1222 struct thread_info *tp = find_thread_ptid (linux_target, lp->ptid);
e0881a8e 1223
611841bb 1224 if (target_is_non_stop_p () && !tp->executing ())
ca2163eb 1225 {
1edb66d8 1226 if (tp->has_pending_waitstatus ())
df5ad102
SM
1227 {
1228 /* If the thread has a pending event, and it was stopped with a
1229 signal, use that signal to resume it. If it has a pending
1230 event of another kind, it was not stopped with a signal, so
1231 resume it without a signal. */
1232 if (tp->pending_waitstatus ().kind () == TARGET_WAITKIND_STOPPED)
1233 signo = tp->pending_waitstatus ().sig ();
1234 else
1235 signo = GDB_SIGNAL_0;
1236 }
00431a78 1237 else
1edb66d8 1238 signo = tp->stop_signal ();
00431a78
PA
1239 }
1240 else if (!target_is_non_stop_p ())
1241 {
00431a78 1242 ptid_t last_ptid;
5b6d1e4f 1243 process_stratum_target *last_target;
00431a78 1244
5b6d1e4f 1245 get_last_target_status (&last_target, &last_ptid, nullptr);
e0881a8e 1246
5b6d1e4f
PA
1247 if (last_target == linux_target
1248 && lp->ptid.lwp () == last_ptid.lwp ())
1edb66d8 1249 signo = tp->stop_signal ();
4c28f408 1250 }
ca2163eb 1251 }
4c28f408 1252
a493e3e2 1253 if (signo == GDB_SIGNAL_0)
ca2163eb 1254 {
9327494e 1255 linux_nat_debug_printf ("lwp %s has no pending signal",
e53c95d4 1256 lp->ptid.to_string ().c_str ());
ca2163eb
PA
1257 }
1258 else if (!signal_pass_state (signo))
1259 {
9327494e
SM
1260 linux_nat_debug_printf
1261 ("lwp %s had signal %s but it is in no pass state",
e53c95d4 1262 lp->ptid.to_string ().c_str (), gdb_signal_to_string (signo));
a0ef4274 1263 }
a0ef4274 1264 else
4c28f408 1265 {
9327494e 1266 linux_nat_debug_printf ("lwp %s has pending signal %s",
e53c95d4 1267 lp->ptid.to_string ().c_str (),
9327494e 1268 gdb_signal_to_string (signo));
ced2dffb
PA
1269
1270 return gdb_signal_to_host (signo);
4c28f408 1271 }
a0ef4274
DJ
1272
1273 return 0;
1274}
1275
ced2dffb
PA
1276/* Detach from LP. If SIGNO_P is non-NULL, then it points to the
1277 signal number that should be passed to the LWP when detaching.
1278 Otherwise pass any pending signal the LWP may have, if any. */
1279
1280static void
1281detach_one_lwp (struct lwp_info *lp, int *signo_p)
d6b0e80f 1282{
e38504b3 1283 int lwpid = lp->ptid.lwp ();
ced2dffb
PA
1284 int signo;
1285
d6b0e80f
AC
1286 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1287
df5ad102
SM
1288 /* If the lwp/thread we are about to detach has a pending fork event,
1289 there is a process GDB is attached to that the core of GDB doesn't know
1290 about. Detach from it. */
1291
1292 /* Check in lwp_info::status. */
1293 if (WIFSTOPPED (lp->status) && linux_is_extended_waitstatus (lp->status))
1294 {
1295 int event = linux_ptrace_get_extended_event (lp->status);
1296
1297 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
1298 {
1299 unsigned long child_pid;
1300 int ret = ptrace (PTRACE_GETEVENTMSG, lp->ptid.lwp (), 0, &child_pid);
1301 if (ret == 0)
1302 detach_one_pid (child_pid, 0);
1303 else
1304 perror_warning_with_name (_("Failed to detach fork child"));
1305 }
1306 }
1307
1308 /* Check in lwp_info::waitstatus. */
1309 if (lp->waitstatus.kind () == TARGET_WAITKIND_VFORKED
1310 || lp->waitstatus.kind () == TARGET_WAITKIND_FORKED)
1311 detach_one_pid (lp->waitstatus.child_ptid ().pid (), 0);
1312
1313
1314 /* Check in thread_info::pending_waitstatus. */
1315 thread_info *tp = find_thread_ptid (linux_target, lp->ptid);
1316 if (tp->has_pending_waitstatus ())
1317 {
1318 const target_waitstatus &ws = tp->pending_waitstatus ();
1319
1320 if (ws.kind () == TARGET_WAITKIND_VFORKED
1321 || ws.kind () == TARGET_WAITKIND_FORKED)
1322 detach_one_pid (ws.child_ptid ().pid (), 0);
1323 }
1324
1325 /* Check in thread_info::pending_follow. */
1326 if (tp->pending_follow.kind () == TARGET_WAITKIND_VFORKED
1327 || tp->pending_follow.kind () == TARGET_WAITKIND_FORKED)
1328 detach_one_pid (tp->pending_follow.child_ptid ().pid (), 0);
1329
9327494e
SM
1330 if (lp->status != 0)
1331 linux_nat_debug_printf ("Pending %s for %s on detach.",
1332 strsignal (WSTOPSIG (lp->status)),
e53c95d4 1333 lp->ptid.to_string ().c_str ());
d6b0e80f 1334
a0ef4274
DJ
1335 /* If there is a pending SIGSTOP, get rid of it. */
1336 if (lp->signalled)
d6b0e80f 1337 {
9327494e 1338 linux_nat_debug_printf ("Sending SIGCONT to %s",
e53c95d4 1339 lp->ptid.to_string ().c_str ());
d6b0e80f 1340
ced2dffb 1341 kill_lwp (lwpid, SIGCONT);
d6b0e80f 1342 lp->signalled = 0;
d6b0e80f
AC
1343 }
1344
ced2dffb 1345 if (signo_p == NULL)
d6b0e80f 1346 {
a0ef4274 1347 /* Pass on any pending signal for this LWP. */
ced2dffb
PA
1348 signo = get_detach_signal (lp);
1349 }
1350 else
1351 signo = *signo_p;
a0ef4274 1352
ced2dffb
PA
1353 /* Preparing to resume may try to write registers, and fail if the
1354 lwp is zombie. If that happens, ignore the error. We'll handle
1355 it below, when detach fails with ESRCH. */
a70b8144 1356 try
ced2dffb 1357 {
135340af 1358 linux_target->low_prepare_to_resume (lp);
ced2dffb 1359 }
230d2906 1360 catch (const gdb_exception_error &ex)
ced2dffb
PA
1361 {
1362 if (!check_ptrace_stopped_lwp_gone (lp))
eedc3f4f 1363 throw;
ced2dffb 1364 }
d6b0e80f 1365
4a3ee32a 1366 detach_one_pid (lwpid, signo);
ced2dffb
PA
1367
1368 delete_lwp (lp->ptid);
1369}
d6b0e80f 1370
ced2dffb 1371static int
d3a70e03 1372detach_callback (struct lwp_info *lp)
ced2dffb
PA
1373{
1374 /* We don't actually detach from the thread group leader just yet.
1375 If the thread group exits, we must reap the zombie clone lwps
1376 before we're able to reap the leader. */
e38504b3 1377 if (lp->ptid.lwp () != lp->ptid.pid ())
ced2dffb 1378 detach_one_lwp (lp, NULL);
d6b0e80f
AC
1379 return 0;
1380}
1381
f6ac5f3d
PA
1382void
1383linux_nat_target::detach (inferior *inf, int from_tty)
d6b0e80f 1384{
d90e17a7 1385 struct lwp_info *main_lwp;
bc09b0c1 1386 int pid = inf->pid;
a0ef4274 1387
ae5e0686
MK
1388 /* Don't unregister from the event loop, as there may be other
1389 inferiors running. */
b84876c2 1390
4c28f408 1391 /* Stop all threads before detaching. ptrace requires that the
30baf67b 1392 thread is stopped to successfully detach. */
d3a70e03 1393 iterate_over_lwps (ptid_t (pid), stop_callback);
4c28f408
PA
1394 /* ... and wait until all of them have reported back that
1395 they're no longer running. */
d3a70e03 1396 iterate_over_lwps (ptid_t (pid), stop_wait_callback);
4c28f408 1397
e87f0fe8
PA
1398 /* We can now safely remove breakpoints. We don't this in earlier
1399 in common code because this target doesn't currently support
1400 writing memory while the inferior is running. */
1401 remove_breakpoints_inf (current_inferior ());
1402
d3a70e03 1403 iterate_over_lwps (ptid_t (pid), detach_callback);
d6b0e80f
AC
1404
1405 /* Only the initial process should be left right now. */
bc09b0c1 1406 gdb_assert (num_lwps (pid) == 1);
d90e17a7 1407
f2907e49 1408 main_lwp = find_lwp_pid (ptid_t (pid));
d6b0e80f 1409
7a7d3353
PA
1410 if (forks_exist_p ())
1411 {
1412 /* Multi-fork case. The current inferior_ptid is being detached
1413 from, but there are other viable forks to debug. Detach from
1414 the current fork, and context-switch to the first
1415 available. */
6bd6f3b6 1416 linux_fork_detach (from_tty);
7a7d3353
PA
1417 }
1418 else
ced2dffb 1419 {
ced2dffb
PA
1420 target_announce_detach (from_tty);
1421
6bd6f3b6
SM
1422 /* Pass on any pending signal for the last LWP. */
1423 int signo = get_detach_signal (main_lwp);
ced2dffb
PA
1424
1425 detach_one_lwp (main_lwp, &signo);
1426
f6ac5f3d 1427 detach_success (inf);
ced2dffb 1428 }
05c06f31 1429
8a89ddbd 1430 close_proc_mem_file (pid);
d6b0e80f
AC
1431}
1432
8a99810d
PA
1433/* Resume execution of the inferior process. If STEP is nonzero,
1434 single-step it. If SIGNAL is nonzero, give it that signal. */
1435
1436static void
23f238d3
PA
1437linux_resume_one_lwp_throw (struct lwp_info *lp, int step,
1438 enum gdb_signal signo)
8a99810d 1439{
8a99810d 1440 lp->step = step;
9c02b525
PA
1441
1442 /* stop_pc doubles as the PC the LWP had when it was last resumed.
1443 We only presently need that if the LWP is stepped though (to
1444 handle the case of stepping a breakpoint instruction). */
1445 if (step)
1446 {
5b6d1e4f 1447 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
9c02b525
PA
1448
1449 lp->stop_pc = regcache_read_pc (regcache);
1450 }
1451 else
1452 lp->stop_pc = 0;
1453
135340af 1454 linux_target->low_prepare_to_resume (lp);
f6ac5f3d 1455 linux_target->low_resume (lp->ptid, step, signo);
23f238d3
PA
1456
1457 /* Successfully resumed. Clear state that no longer makes sense,
1458 and mark the LWP as running. Must not do this before resuming
1459 otherwise if that fails other code will be confused. E.g., we'd
1460 later try to stop the LWP and hang forever waiting for a stop
1461 status. Note that we must not throw after this is cleared,
1462 otherwise handle_zombie_lwp_error would get confused. */
8a99810d 1463 lp->stopped = 0;
1ad3de98 1464 lp->core = -1;
23f238d3 1465 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
5b6d1e4f 1466 registers_changed_ptid (linux_target, lp->ptid);
8a99810d
PA
1467}
1468
23f238d3
PA
1469/* Called when we try to resume a stopped LWP and that errors out. If
1470 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
1471 or about to become), discard the error, clear any pending status
1472 the LWP may have, and return true (we'll collect the exit status
1473 soon enough). Otherwise, return false. */
1474
1475static int
1476check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
1477{
1478 /* If we get an error after resuming the LWP successfully, we'd
1479 confuse !T state for the LWP being gone. */
1480 gdb_assert (lp->stopped);
1481
1482 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
1483 because even if ptrace failed with ESRCH, the tracee may be "not
1484 yet fully dead", but already refusing ptrace requests. In that
1485 case the tracee has 'R (Running)' state for a little bit
1486 (observed in Linux 3.18). See also the note on ESRCH in the
1487 ptrace(2) man page. Instead, check whether the LWP has any state
1488 other than ptrace-stopped. */
1489
1490 /* Don't assume anything if /proc/PID/status can't be read. */
e38504b3 1491 if (linux_proc_pid_is_trace_stopped_nowarn (lp->ptid.lwp ()) == 0)
23f238d3
PA
1492 {
1493 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
1494 lp->status = 0;
183be222 1495 lp->waitstatus.set_ignore ();
23f238d3
PA
1496 return 1;
1497 }
1498 return 0;
1499}
1500
1501/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
1502 disappears while we try to resume it. */
1503
1504static void
1505linux_resume_one_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1506{
a70b8144 1507 try
23f238d3
PA
1508 {
1509 linux_resume_one_lwp_throw (lp, step, signo);
1510 }
230d2906 1511 catch (const gdb_exception_error &ex)
23f238d3
PA
1512 {
1513 if (!check_ptrace_stopped_lwp_gone (lp))
eedc3f4f 1514 throw;
23f238d3 1515 }
23f238d3
PA
1516}
1517
d6b0e80f
AC
1518/* Resume LP. */
1519
25289eb2 1520static void
e5ef252a 1521resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
d6b0e80f 1522{
25289eb2 1523 if (lp->stopped)
6c95b8df 1524 {
5b6d1e4f 1525 struct inferior *inf = find_inferior_ptid (linux_target, lp->ptid);
25289eb2
PA
1526
1527 if (inf->vfork_child != NULL)
1528 {
9327494e 1529 linux_nat_debug_printf ("Not resuming %s (vfork parent)",
e53c95d4 1530 lp->ptid.to_string ().c_str ());
25289eb2 1531 }
8a99810d 1532 else if (!lwp_status_pending_p (lp))
25289eb2 1533 {
9327494e 1534 linux_nat_debug_printf ("Resuming sibling %s, %s, %s",
e53c95d4 1535 lp->ptid.to_string ().c_str (),
9327494e
SM
1536 (signo != GDB_SIGNAL_0
1537 ? strsignal (gdb_signal_to_host (signo))
1538 : "0"),
1539 step ? "step" : "resume");
25289eb2 1540
8a99810d 1541 linux_resume_one_lwp (lp, step, signo);
25289eb2
PA
1542 }
1543 else
1544 {
9327494e 1545 linux_nat_debug_printf ("Not resuming sibling %s (has pending)",
e53c95d4 1546 lp->ptid.to_string ().c_str ());
25289eb2 1547 }
6c95b8df 1548 }
25289eb2 1549 else
9327494e 1550 linux_nat_debug_printf ("Not resuming sibling %s (not stopped)",
e53c95d4 1551 lp->ptid.to_string ().c_str ());
25289eb2 1552}
d6b0e80f 1553
8817a6f2
PA
1554/* Callback for iterate_over_lwps. If LWP is EXCEPT, do nothing.
1555 Resume LWP with the last stop signal, if it is in pass state. */
e5ef252a 1556
25289eb2 1557static int
d3a70e03 1558linux_nat_resume_callback (struct lwp_info *lp, struct lwp_info *except)
25289eb2 1559{
e5ef252a
PA
1560 enum gdb_signal signo = GDB_SIGNAL_0;
1561
8817a6f2
PA
1562 if (lp == except)
1563 return 0;
1564
e5ef252a
PA
1565 if (lp->stopped)
1566 {
1567 struct thread_info *thread;
1568
5b6d1e4f 1569 thread = find_thread_ptid (linux_target, lp->ptid);
e5ef252a
PA
1570 if (thread != NULL)
1571 {
1edb66d8
SM
1572 signo = thread->stop_signal ();
1573 thread->set_stop_signal (GDB_SIGNAL_0);
e5ef252a
PA
1574 }
1575 }
1576
1577 resume_lwp (lp, 0, signo);
d6b0e80f
AC
1578 return 0;
1579}
1580
1581static int
d3a70e03 1582resume_clear_callback (struct lwp_info *lp)
d6b0e80f
AC
1583{
1584 lp->resumed = 0;
25289eb2 1585 lp->last_resume_kind = resume_stop;
d6b0e80f
AC
1586 return 0;
1587}
1588
1589static int
d3a70e03 1590resume_set_callback (struct lwp_info *lp)
d6b0e80f
AC
1591{
1592 lp->resumed = 1;
25289eb2 1593 lp->last_resume_kind = resume_continue;
d6b0e80f
AC
1594 return 0;
1595}
1596
f6ac5f3d
PA
1597void
1598linux_nat_target::resume (ptid_t ptid, int step, enum gdb_signal signo)
d6b0e80f
AC
1599{
1600 struct lwp_info *lp;
d90e17a7 1601 int resume_many;
d6b0e80f 1602
9327494e
SM
1603 linux_nat_debug_printf ("Preparing to %s %s, %s, inferior_ptid %s",
1604 step ? "step" : "resume",
e53c95d4 1605 ptid.to_string ().c_str (),
9327494e
SM
1606 (signo != GDB_SIGNAL_0
1607 ? strsignal (gdb_signal_to_host (signo)) : "0"),
e53c95d4 1608 inferior_ptid.to_string ().c_str ());
76f50ad1 1609
d6b0e80f 1610 /* A specific PTID means `step only this process id'. */
d7e15655 1611 resume_many = (minus_one_ptid == ptid
0e998d96 1612 || ptid.is_pid ());
4c28f408 1613
7da6a5b9
LM
1614 /* Mark the lwps we're resuming as resumed and update their
1615 last_resume_kind to resume_continue. */
d3a70e03 1616 iterate_over_lwps (ptid, resume_set_callback);
d6b0e80f 1617
d90e17a7
PA
1618 /* See if it's the current inferior that should be handled
1619 specially. */
1620 if (resume_many)
1621 lp = find_lwp_pid (inferior_ptid);
1622 else
1623 lp = find_lwp_pid (ptid);
9f0bdab8 1624 gdb_assert (lp != NULL);
d6b0e80f 1625
9f0bdab8 1626 /* Remember if we're stepping. */
25289eb2 1627 lp->last_resume_kind = step ? resume_step : resume_continue;
d6b0e80f 1628
9f0bdab8
DJ
1629 /* If we have a pending wait status for this thread, there is no
1630 point in resuming the process. But first make sure that
1631 linux_nat_wait won't preemptively handle the event - we
1632 should never take this short-circuit if we are going to
1633 leave LP running, since we have skipped resuming all the
1634 other threads. This bit of code needs to be synchronized
1635 with linux_nat_wait. */
76f50ad1 1636
9f0bdab8
DJ
1637 if (lp->status && WIFSTOPPED (lp->status))
1638 {
2455069d
UW
1639 if (!lp->step
1640 && WSTOPSIG (lp->status)
1641 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
d6b0e80f 1642 {
9327494e
SM
1643 linux_nat_debug_printf
1644 ("Not short circuiting for ignored status 0x%x", lp->status);
9f0bdab8 1645
d6b0e80f
AC
1646 /* FIXME: What should we do if we are supposed to continue
1647 this thread with a signal? */
a493e3e2 1648 gdb_assert (signo == GDB_SIGNAL_0);
2ea28649 1649 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
9f0bdab8
DJ
1650 lp->status = 0;
1651 }
1652 }
76f50ad1 1653
8a99810d 1654 if (lwp_status_pending_p (lp))
9f0bdab8
DJ
1655 {
1656 /* FIXME: What should we do if we are supposed to continue
1657 this thread with a signal? */
a493e3e2 1658 gdb_assert (signo == GDB_SIGNAL_0);
76f50ad1 1659
9327494e
SM
1660 linux_nat_debug_printf ("Short circuiting for status 0x%x",
1661 lp->status);
d6b0e80f 1662
7feb7d06
PA
1663 if (target_can_async_p ())
1664 {
6a3753b3 1665 target_async (1);
7feb7d06
PA
1666 /* Tell the event loop we have something to process. */
1667 async_file_mark ();
1668 }
9f0bdab8 1669 return;
d6b0e80f
AC
1670 }
1671
d90e17a7 1672 if (resume_many)
d3a70e03
TT
1673 iterate_over_lwps (ptid, [=] (struct lwp_info *info)
1674 {
1675 return linux_nat_resume_callback (info, lp);
1676 });
d90e17a7 1677
9327494e
SM
1678 linux_nat_debug_printf ("%s %s, %s (resume event thread)",
1679 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
e53c95d4 1680 lp->ptid.to_string ().c_str (),
9327494e
SM
1681 (signo != GDB_SIGNAL_0
1682 ? strsignal (gdb_signal_to_host (signo)) : "0"));
b84876c2 1683
2bf6fb9d 1684 linux_resume_one_lwp (lp, step, signo);
d6b0e80f
AC
1685}
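
/* Illustrative sketch, not part of linux-nat.c: at the ptrace level, the
   final linux_resume_one_lwp call above boils down to choosing between
   PTRACE_CONT and PTRACE_SINGLESTEP and passing the signal to deliver
   (0 for none) as the ptrace data argument.  The function name is made
   up.  */

#include <sys/ptrace.h>
#include <sys/types.h>

static void
sketch_resume_lwp (pid_t lwpid, int step, int signo)
{
  /* The data argument is the signal to deliver on resume.  */
  ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid, 0,
	  (void *) (long) signo);
}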
1686
c5f62d5f 1687/* Send a signal to an LWP. */
d6b0e80f
AC
1688
1689static int
1690kill_lwp (int lwpid, int signo)
1691{
4a6ed09b 1692 int ret;
d6b0e80f 1693
4a6ed09b
PA
1694 errno = 0;
1695 ret = syscall (__NR_tkill, lwpid, signo);
1696 if (errno == ENOSYS)
1697 {
1698 /* If tkill fails, then we are not using nptl threads, a
1699 configuration we no longer support. */
1700 perror_with_name (("tkill"));
1701 }
1702 return ret;
d6b0e80f
AC
1703}
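
/* Illustrative sketch, not part of linux-nat.c: a close relative of the
   tkill call above is tgkill(2), which also takes the thread-group id
   and therefore cannot hit a recycled thread id in an unrelated
   process.  linux-nat.c keeps using tkill; this is shown only for
   comparison, and the function name is made up.  */

#include <sys/syscall.h>
#include <unistd.h>
#include <errno.h>

static int
sketch_kill_lwp_tgkill (int tgid, int lwpid, int signo)
{
  errno = 0;
  /* Like tkill, but only matches LWPID inside thread group TGID.  */
  return syscall (__NR_tgkill, tgid, lwpid, signo);
}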
1704
ca2163eb
PA
1705/* Handle a GNU/Linux syscall trap wait response. If we see a syscall
1706 event, check if the core is interested in it: if not, ignore the
1707 event, and keep waiting; otherwise, we need to toggle the LWP's
1708 syscall entry/exit status, since the ptrace event itself doesn't
1709 indicate it, and report the trap to higher layers. */
1710
1711static int
1712linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
1713{
1714 struct target_waitstatus *ourstatus = &lp->waitstatus;
1715 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
5b6d1e4f 1716 thread_info *thread = find_thread_ptid (linux_target, lp->ptid);
00431a78 1717 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, thread);
ca2163eb
PA
1718
1719 if (stopping)
1720 {
1721 /* If we're stopping threads, there's a SIGSTOP pending, which
1722 makes it so that the LWP reports an immediate syscall return,
1723 followed by the SIGSTOP. Skip seeing that "return" using
1724 PTRACE_CONT directly, and let stop_wait_callback collect the
1725 SIGSTOP. Later, when the thread is resumed, it will report a new
1726 syscall entry event. If we didn't do this (and returned 0), we'd
1727 leave a syscall entry pending, and our caller, by using
1728 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
1729 itself. Later, when the user re-resumes this LWP, we'd see
1730 another syscall entry event and we'd mistake it for a return.
1731
1732 If stop_wait_callback didn't force the SIGSTOP out of the LWP
1733 (leaving immediately with LWP->signalled set, without issuing
1734 a PTRACE_CONT), it would still be problematic to leave this
1735 syscall enter pending, as later when the thread is resumed,
1736 it would then see the same syscall exit mentioned above,
1737 followed by the delayed SIGSTOP, while the syscall didn't
1738 actually get to execute. It seems it would be even more
1739 confusing to the user. */
1740
9327494e
SM
1741 linux_nat_debug_printf
1742 ("ignoring syscall %d for LWP %ld (stopping threads), resuming with "
1743 "PTRACE_CONT for SIGSTOP", syscall_number, lp->ptid.lwp ());
ca2163eb
PA
1744
1745 lp->syscall_state = TARGET_WAITKIND_IGNORE;
e38504b3 1746 ptrace (PTRACE_CONT, lp->ptid.lwp (), 0, 0);
8817a6f2 1747 lp->stopped = 0;
ca2163eb
PA
1748 return 1;
1749 }
1750
bfd09d20
JS
1751 /* Always update the entry/return state, even if this particular
1752 syscall isn't interesting to the core now. In async mode,
1753 the user could install a new catchpoint for this syscall
1754 between syscall enter/return, and we'll need to know to
1755 report a syscall return if that happens. */
1756 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1757 ? TARGET_WAITKIND_SYSCALL_RETURN
1758 : TARGET_WAITKIND_SYSCALL_ENTRY);
1759
ca2163eb
PA
1760 if (catch_syscall_enabled ())
1761 {
ca2163eb
PA
1762 if (catching_syscall_number (syscall_number))
1763 {
1764 /* Alright, an event to report. */
183be222
SM
1765 if (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
1766 ourstatus->set_syscall_entry (syscall_number);
1767 else if (lp->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
1768 ourstatus->set_syscall_return (syscall_number);
1769 else
1770 gdb_assert_not_reached ("unexpected syscall state");
ca2163eb 1771
9327494e
SM
1772 linux_nat_debug_printf
1773 ("stopping for %s of syscall %d for LWP %ld",
1774 (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1775 ? "entry" : "return"), syscall_number, lp->ptid.lwp ());
1776
ca2163eb
PA
1777 return 0;
1778 }
1779
9327494e
SM
1780 linux_nat_debug_printf
1781 ("ignoring %s of syscall %d for LWP %ld",
1782 (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1783 ? "entry" : "return"), syscall_number, lp->ptid.lwp ());
ca2163eb
PA
1784 }
1785 else
1786 {
1787 /* If we had been syscall tracing, and hence used PT_SYSCALL
1788 before on this LWP, it could happen that the user removes all
1789 syscall catchpoints before we get to process this event.
1790 There are two noteworthy issues here:
1791
1792 - When stopped at a syscall entry event, resuming with
1793 PT_STEP still resumes executing the syscall and reports a
1794 syscall return.
1795
1796 - Only PT_SYSCALL catches syscall enters. If we last
1797 single-stepped this thread, then this event can't be a
1798 syscall enter; having just single-stepped, this stop has
1799 to be a syscall exit.
1800
1801 The points above mean that the next resume, be it PT_STEP or
1802 PT_CONTINUE, can not trigger a syscall trace event. */
9327494e
SM
1803 linux_nat_debug_printf
1804 ("caught syscall event with no syscall catchpoints. %d for LWP %ld, "
1805 "ignoring", syscall_number, lp->ptid.lwp ());
ca2163eb
PA
1806 lp->syscall_state = TARGET_WAITKIND_IGNORE;
1807 }
1808
1809 /* The core isn't interested in this event. For efficiency, avoid
1810 stopping all threads only to have the core resume them all again.
1811 Since we're not stopping threads, if we're still syscall tracing
1812 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
1813 subsequent syscall. Simply resume using the inf-ptrace layer,
1814 which knows when to use PT_SYSCALL or PT_CONTINUE. */
1815
8a99810d 1816 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
ca2163eb
PA
1817 return 1;
1818}
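
/* Illustrative sketch, not part of linux-nat.c: the raw ptrace mechanics
   behind the syscall entry/return toggling above.  With
   PTRACE_O_TRACESYSGOOD set, each PTRACE_SYSCALL resume stops the tracee
   with (SIGTRAP | 0x80), alternating between syscall entry and syscall
   exit; the kernel does not say which side a given stop is, so the
   tracer keeps the toggle itself, exactly as lp->syscall_state does.
   The function name is made up.  */

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>

static void
sketch_trace_one_syscall (pid_t pid)
{
  int status;
  int in_syscall = 0;

  for (;;)
    {
      ptrace (PTRACE_SYSCALL, pid, 0, 0);
      waitpid (pid, &status, 0);

      if (!WIFSTOPPED (status) || WSTOPSIG (status) != (SIGTRAP | 0x80))
	break;			/* Not a syscall stop; give up.  */

      /* First such stop is the entry, the next one the exit.  */
      in_syscall = !in_syscall;
      if (!in_syscall)
	break;			/* Matching syscall exit seen.  */
    }
}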
1819
3d799a95
DJ
1820/* Handle a GNU/Linux extended wait response. If we see a clone
1821 event, we need to add the new LWP to our list (and not report the
1822 trap to higher layers). This function returns non-zero if the
1823 event should be ignored and we should wait again. If STOPPING is
1824 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
1825
1826static int
4dd63d48 1827linux_handle_extended_wait (struct lwp_info *lp, int status)
d6b0e80f 1828{
e38504b3 1829 int pid = lp->ptid.lwp ();
3d799a95 1830 struct target_waitstatus *ourstatus = &lp->waitstatus;
89a5711c 1831 int event = linux_ptrace_get_extended_event (status);
d6b0e80f 1832
bfd09d20
JS
1833 /* All extended events we currently use are mid-syscall. Only
1834 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
1835 you have to be using PTRACE_SEIZE to get that. */
1836 lp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
1837
3d799a95
DJ
1838 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1839 || event == PTRACE_EVENT_CLONE)
d6b0e80f 1840 {
3d799a95
DJ
1841 unsigned long new_pid;
1842 int ret;
1843
1844 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 1845
3d799a95
DJ
1846 /* If we haven't already seen the new PID stop, wait for it now. */
1847 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1848 {
1849 /* The new child has a pending SIGSTOP. We can't affect it until it
1850 hits the SIGSTOP, but we're already attached. */
4a6ed09b 1851 ret = my_waitpid (new_pid, &status, __WALL);
3d799a95
DJ
1852 if (ret == -1)
1853 perror_with_name (_("waiting for new child"));
1854 else if (ret != new_pid)
1855 internal_error (__FILE__, __LINE__,
1856 _("wait returned unexpected PID %d"), ret);
1857 else if (!WIFSTOPPED (status))
1858 internal_error (__FILE__, __LINE__,
1859 _("wait returned unexpected status 0x%x"), status);
1860 }
1861
183be222 1862 ptid_t child_ptid (new_pid, new_pid);
3d799a95 1863
26cb8b7c
PA
1864 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
1865 {
8a89ddbd
PA
1866 open_proc_mem_file (child_ptid);
1867
26cb8b7c
PA
1868 /* The arch-specific native code may need to know about new
1869 forks even if those end up never mapped to an
1870 inferior. */
135340af 1871 linux_target->low_new_fork (lp, new_pid);
26cb8b7c 1872 }
1310c1b0
PFC
1873 else if (event == PTRACE_EVENT_CLONE)
1874 {
1875 linux_target->low_new_clone (lp, new_pid);
1876 }
26cb8b7c 1877
2277426b 1878 if (event == PTRACE_EVENT_FORK
e99b03dc 1879 && linux_fork_checkpointing_p (lp->ptid.pid ()))
2277426b 1880 {
2277426b
PA
1881 /* Handle checkpointing by linux-fork.c here as a special
1882 case. We don't want the follow-fork-mode or 'catch fork'
1883 to interfere with this. */
1884
1885 /* This won't actually modify the breakpoint list, but will
1886 physically remove the breakpoints from the child. */
184ea2f7 1887 detach_breakpoints (ptid_t (new_pid, new_pid));
2277426b
PA
1888
1889 /* Retain child fork in ptrace (stopped) state. */
14571dad
MS
1890 if (!find_fork_pid (new_pid))
1891 add_fork (new_pid);
2277426b
PA
1892
1893 /* Report as spurious, so that infrun doesn't want to follow
1894 this fork. We're actually doing an infcall in
1895 linux-fork.c. */
183be222 1896 ourstatus->set_spurious ();
2277426b
PA
1897
1898 /* Report the stop to the core. */
1899 return 0;
1900 }
1901
3d799a95 1902 if (event == PTRACE_EVENT_FORK)
183be222 1903 ourstatus->set_forked (child_ptid);
3d799a95 1904 else if (event == PTRACE_EVENT_VFORK)
183be222 1905 ourstatus->set_vforked (child_ptid);
4dd63d48 1906 else if (event == PTRACE_EVENT_CLONE)
3d799a95 1907 {
78768c4a
JK
1908 struct lwp_info *new_lp;
1909
183be222 1910 ourstatus->set_ignore ();
78768c4a 1911
9327494e
SM
1912 linux_nat_debug_printf
1913 ("Got clone event from LWP %d, new child is LWP %ld", pid, new_pid);
3c4d7e12 1914
184ea2f7 1915 new_lp = add_lwp (ptid_t (lp->ptid.pid (), new_pid));
4c28f408 1916 new_lp->stopped = 1;
4dd63d48 1917 new_lp->resumed = 1;
d6b0e80f 1918
2db9a427
PA
1919 /* If the thread_db layer is active, let it record the user
1920 level thread id and status, and add the thread to GDB's
1921 list. */
1922 if (!thread_db_notice_clone (lp->ptid, new_lp->ptid))
3d799a95 1923 {
2db9a427
PA
1924 /* The process is not using thread_db. Add the LWP to
1925 GDB's list. */
e38504b3 1926 target_post_attach (new_lp->ptid.lwp ());
5b6d1e4f 1927 add_thread (linux_target, new_lp->ptid);
2db9a427 1928 }
4c28f408 1929
2ee52aa4 1930 /* Even if we're stopping the thread for some reason
4dd63d48
PA
1931 internal to this module, from the perspective of infrun
1932 and the user/frontend, this new thread is running until
1933 it next reports a stop. */
719546c4
SM
1934 set_running (linux_target, new_lp->ptid, true);
1935 set_executing (linux_target, new_lp->ptid, true);
4c28f408 1936
4dd63d48 1937 if (WSTOPSIG (status) != SIGSTOP)
79395f92 1938 {
4dd63d48
PA
1939 /* This can happen if someone starts sending signals with a
1940 lower number than SIGSTOP (e.g. SIGUSR1) to the new thread
1941 before it gets a chance to run.
1942 This is an unlikely case, and harder to handle for
1943 fork / vfork than for clone, so we do not try - but
1944 we handle it for clone events here. */
1945
1946 new_lp->signalled = 1;
1947
79395f92
PA
1948 /* We created NEW_LP so it cannot yet contain STATUS. */
1949 gdb_assert (new_lp->status == 0);
1950
1951 /* Save the wait status to report later. */
9327494e
SM
1952 linux_nat_debug_printf
1953 ("waitpid of new LWP %ld, saving status %s",
8d06918f 1954 (long) new_lp->ptid.lwp (), status_to_str (status).c_str ());
79395f92
PA
1955 new_lp->status = status;
1956 }
aa01bd36
PA
1957 else if (report_thread_events)
1958 {
183be222 1959 new_lp->waitstatus.set_thread_created ();
aa01bd36
PA
1960 new_lp->status = status;
1961 }
79395f92 1962
3d799a95
DJ
1963 return 1;
1964 }
1965
1966 return 0;
d6b0e80f
AC
1967 }
1968
3d799a95
DJ
1969 if (event == PTRACE_EVENT_EXEC)
1970 {
9327494e 1971 linux_nat_debug_printf ("Got exec event from LWP %ld", lp->ptid.lwp ());
a75724bc 1972
8a89ddbd
PA
1973 /* Close the previous /proc/PID/mem file for this inferior,
1974 which referred to the address space that is now gone.
1975 Reading from or writing to this file would return 0/EOF. */
1976 close_proc_mem_file (lp->ptid.pid ());
1977
1978 /* Open a new file for the new address space. */
1979 open_proc_mem_file (lp->ptid);
05c06f31 1980
183be222
SM
1981 ourstatus->set_execd
1982 (make_unique_xstrdup (linux_proc_pid_to_exec_file (pid)));
3d799a95 1983
8af756ef
PA
1984 /* The thread that execed must have been resumed, but, when a
1985 thread execs, it changes its tid to the tgid, and the old
1986 tgid thread might not have been resumed. */
1987 lp->resumed = 1;
6c95b8df
PA
1988 return 0;
1989 }
1990
1991 if (event == PTRACE_EVENT_VFORK_DONE)
1992 {
1993 if (current_inferior ()->waiting_for_vfork_done)
3d799a95 1994 {
9327494e
SM
1995 linux_nat_debug_printf
1996 ("Got expected PTRACE_EVENT_VFORK_DONE from LWP %ld: stopping",
1997 lp->ptid.lwp ());
3d799a95 1998
183be222 1999 ourstatus->set_vfork_done ();
6c95b8df 2000 return 0;
3d799a95
DJ
2001 }
2002
9327494e
SM
2003 linux_nat_debug_printf
2004 ("Got PTRACE_EVENT_VFORK_DONE from LWP %ld: ignoring", lp->ptid.lwp ());
2005
6c95b8df 2006 return 1;
3d799a95
DJ
2007 }
2008
2009 internal_error (__FILE__, __LINE__,
2010 _("unknown ptrace event %d"), event);
d6b0e80f
AC
2011}
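
/* Illustrative sketch, not part of linux-nat.c: the raw protocol the
   fork/vfork/clone handling above is built on.  Once the tracee has
   PTRACE_O_TRACEFORK (and friends) set, the event arrives as a SIGTRAP
   stop whose upper status bits carry the event code, and
   PTRACE_GETEVENTMSG retrieves the new child's LWP id.  The function
   name is made up.  */

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>

static long
sketch_get_forked_child (pid_t pid)
{
  int status;
  unsigned long new_pid = 0;

  waitpid (pid, &status, 0);

  if (WIFSTOPPED (status)
      && WSTOPSIG (status) == SIGTRAP
      && (status >> 16) == PTRACE_EVENT_FORK)
    {
      /* The event message is the thread id of the new child, which at
	 this point is stopped with a pending SIGSTOP of its own.  */
      ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
    }

  return (long) new_pid;
}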
2012
9c3a5d93
PA
2013/* Suspend waiting for a signal. We're mostly interested in
2014 SIGCHLD/SIGINT. */
2015
2016static void
2017wait_for_signal ()
2018{
9327494e 2019 linux_nat_debug_printf ("about to sigsuspend");
9c3a5d93
PA
2020 sigsuspend (&suspend_mask);
2021
2022 /* If the quit flag is set, it means that the user pressed Ctrl-C
2023 and we're debugging a process that is running on a separate
2024 terminal, so we must forward the Ctrl-C to the inferior. (If the
2025 inferior is sharing GDB's terminal, then the Ctrl-C reaches the
2026 inferior directly.) We must do this here because functions that
2027 need to block waiting for a signal loop forever until there's an
2028 event to report before returning to the event loop. */
2029 if (!target_terminal::is_ours ())
2030 {
2031 if (check_quit_flag ())
2032 target_pass_ctrlc ();
2033 }
2034}
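
/* Illustrative sketch, not part of linux-nat.c: the race-free wait
   pattern that suspend_mask/sigsuspend implement.  SIGCHLD stays
   blocked while we poll with WNOHANG; sigsuspend then atomically
   unblocks it and sleeps, so a SIGCHLD delivered in between cannot be
   lost.  This assumes SIGCHLD was not blocked on entry and that a
   SIGCHLD handler is installed (as linux-nat.c does), since the default
   disposition would discard the signal.  The function name is made
   up.  */

#include <signal.h>
#include <sys/wait.h>

static void
sketch_wait_for_child_event (void)
{
  sigset_t chld, unblocked;
  int status;

  sigemptyset (&chld);
  sigaddset (&chld, SIGCHLD);
  sigprocmask (SIG_BLOCK, &chld, &unblocked);

  /* Poll without blocking; if nothing is pending yet, sleep until a
     signal (e.g. SIGCHLD) arrives, then poll again.  */
  while (waitpid (-1, &status, WNOHANG) == 0)
    sigsuspend (&unblocked);

  sigprocmask (SIG_SETMASK, &unblocked, NULL);
}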
2035
d6b0e80f
AC
2036/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2037 exited. */
2038
2039static int
2040wait_lwp (struct lwp_info *lp)
2041{
2042 pid_t pid;
432b4d03 2043 int status = 0;
d6b0e80f 2044 int thread_dead = 0;
432b4d03 2045 sigset_t prev_mask;
d6b0e80f
AC
2046
2047 gdb_assert (!lp->stopped);
2048 gdb_assert (lp->status == 0);
2049
432b4d03
JK
2050 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2051 block_child_signals (&prev_mask);
2052
2053 for (;;)
d6b0e80f 2054 {
e38504b3 2055 pid = my_waitpid (lp->ptid.lwp (), &status, __WALL | WNOHANG);
a9f4bb21
PA
2056 if (pid == -1 && errno == ECHILD)
2057 {
2058 /* The thread has previously exited. We need to delete it
4a6ed09b
PA
2059 now because if this was a non-leader thread execing, we
2060 won't get an exit event. See comments on exec events at
2061 the top of the file. */
a9f4bb21 2062 thread_dead = 1;
9327494e 2063 linux_nat_debug_printf ("%s vanished.",
e53c95d4 2064 lp->ptid.to_string ().c_str ());
a9f4bb21 2065 }
432b4d03
JK
2066 if (pid != 0)
2067 break;
2068
2069 /* Bugs 10970, 12702.
2070 Thread group leader may have exited in which case we'll lock up in
2071 waitpid if there are other threads, even if they are all zombies too.
2072 Basically, we're not supposed to use waitpid this way.
4a6ed09b
PA
2073 tkill(pid,0) cannot be used here as it gets ESRCH for both
2074 zombie and running processes.
432b4d03
JK
2075
2076 As a workaround, check if we're waiting for the thread group leader and
2077 if it's a zombie, and avoid calling waitpid if it is.
2078
2079 This is racy, what if the tgl becomes a zombie right after we check?
2080 Therefore always use WNOHANG with sigsuspend - it is equivalent to
5f572dec 2081 a blocking waitpid, but linux_proc_pid_is_zombie is safe this way. */
432b4d03 2082
e38504b3
TT
2083 if (lp->ptid.pid () == lp->ptid.lwp ()
2084 && linux_proc_pid_is_zombie (lp->ptid.lwp ()))
d6b0e80f 2085 {
d6b0e80f 2086 thread_dead = 1;
9327494e 2087 linux_nat_debug_printf ("Thread group leader %s vanished.",
e53c95d4 2088 lp->ptid.to_string ().c_str ());
432b4d03 2089 break;
d6b0e80f 2090 }
432b4d03
JK
2091
2092 /* Wait for the next SIGCHLD and try again. This may let SIGCHLD
2093 handlers run even though our caller intentionally blocked them with
2094 block_child_signals. This only matters for the loop in
2095 linux_nat_wait_1, and there, if we get called, my_waitpid is called
2096 again before reaching sigsuspend, so we can safely let the handlers
2097 run here. */
9c3a5d93 2098 wait_for_signal ();
432b4d03
JK
2099 }
2100
2101 restore_child_signals_mask (&prev_mask);
2102
d6b0e80f
AC
2103 if (!thread_dead)
2104 {
e38504b3 2105 gdb_assert (pid == lp->ptid.lwp ());
d6b0e80f 2106
9327494e 2107 linux_nat_debug_printf ("waitpid %s received %s",
e53c95d4 2108 lp->ptid.to_string ().c_str (),
8d06918f 2109 status_to_str (status).c_str ());
d6b0e80f 2110
a9f4bb21
PA
2111 /* Check if the thread has exited. */
2112 if (WIFEXITED (status) || WIFSIGNALED (status))
2113 {
aa01bd36 2114 if (report_thread_events
e38504b3 2115 || lp->ptid.pid () == lp->ptid.lwp ())
69dde7dc 2116 {
9327494e 2117 linux_nat_debug_printf ("LWP %d exited.", lp->ptid.pid ());
69dde7dc 2118
aa01bd36 2119 /* If this is the leader exiting, it means the whole
69dde7dc
PA
2120 process is gone. Store the status to report to the
2121 core. Store it in lp->waitstatus, because lp->status
2122 would be ambiguous (W_EXITCODE(0,0) == 0). */
7509b829 2123 lp->waitstatus = host_status_to_waitstatus (status);
69dde7dc
PA
2124 return 0;
2125 }
2126
a9f4bb21 2127 thread_dead = 1;
9327494e 2128 linux_nat_debug_printf ("%s exited.",
e53c95d4 2129 lp->ptid.to_string ().c_str ());
a9f4bb21 2130 }
d6b0e80f
AC
2131 }
2132
2133 if (thread_dead)
2134 {
e26af52f 2135 exit_lwp (lp);
d6b0e80f
AC
2136 return 0;
2137 }
2138
2139 gdb_assert (WIFSTOPPED (status));
8817a6f2 2140 lp->stopped = 1;
d6b0e80f 2141
8784d563
PA
2142 if (lp->must_set_ptrace_flags)
2143 {
5b6d1e4f 2144 inferior *inf = find_inferior_pid (linux_target, lp->ptid.pid ());
de0d863e 2145 int options = linux_nat_ptrace_options (inf->attach_flag);
8784d563 2146
e38504b3 2147 linux_enable_event_reporting (lp->ptid.lwp (), options);
8784d563
PA
2148 lp->must_set_ptrace_flags = 0;
2149 }
2150
ca2163eb
PA
2151 /* Handle GNU/Linux's syscall SIGTRAPs. */
2152 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2153 {
2154 /* No longer need the sysgood bit. The ptrace event ends up
2155 recorded in lp->waitstatus if we care for it. We can carry
2156 on handling the event like a regular SIGTRAP from here
2157 on. */
2158 status = W_STOPCODE (SIGTRAP);
2159 if (linux_handle_syscall_trap (lp, 1))
2160 return wait_lwp (lp);
2161 }
bfd09d20
JS
2162 else
2163 {
2164 /* Almost all other ptrace-stops are known to be outside of system
2165 calls, with further exceptions in linux_handle_extended_wait. */
2166 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2167 }
ca2163eb 2168
d6b0e80f 2169 /* Handle GNU/Linux's extended waitstatus for trace events. */
89a5711c
DB
2170 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2171 && linux_is_extended_waitstatus (status))
d6b0e80f 2172 {
9327494e 2173 linux_nat_debug_printf ("Handling extended status 0x%06x", status);
4dd63d48 2174 linux_handle_extended_wait (lp, status);
20ba1ce6 2175 return 0;
d6b0e80f
AC
2176 }
2177
2178 return status;
2179}
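
/* Illustrative sketch, not part of linux-nat.c: one way the zombie test
   referred to above (the real helper is linux_proc_pid_is_zombie) can
   be implemented, assuming it is enough to look at the "State:" line of
   /proc/PID/status.  The function name is made up.  */

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static int
sketch_pid_is_zombie (pid_t pid)
{
  char path[64], line[256];
  FILE *f;
  int zombie = 0;

  snprintf (path, sizeof (path), "/proc/%d/status", (int) pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;

  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	/* The field looks like "State:\tZ (zombie)".  */
	zombie = (strstr (line, "Z (zombie)") != NULL);
	break;
      }

  fclose (f);
  return zombie;
}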
2180
2181/* Send a SIGSTOP to LP. */
2182
2183static int
d3a70e03 2184stop_callback (struct lwp_info *lp)
d6b0e80f
AC
2185{
2186 if (!lp->stopped && !lp->signalled)
2187 {
2188 int ret;
2189
9327494e 2190 linux_nat_debug_printf ("kill %s **<SIGSTOP>**",
e53c95d4 2191 lp->ptid.to_string ().c_str ());
9327494e 2192
d6b0e80f 2193 errno = 0;
e38504b3 2194 ret = kill_lwp (lp->ptid.lwp (), SIGSTOP);
9327494e 2195 linux_nat_debug_printf ("lwp kill %d %s", ret,
d6b0e80f 2196 errno ? safe_strerror (errno) : "ERRNO-OK");
d6b0e80f
AC
2197
2198 lp->signalled = 1;
2199 gdb_assert (lp->status == 0);
2200 }
2201
2202 return 0;
2203}
2204
7b50312a
PA
2205/* Request a stop on LWP. */
2206
2207void
2208linux_stop_lwp (struct lwp_info *lwp)
2209{
d3a70e03 2210 stop_callback (lwp);
7b50312a
PA
2211}
2212
2db9a427
PA
2213/* See linux-nat.h */
2214
2215void
2216linux_stop_and_wait_all_lwps (void)
2217{
2218 /* Stop all LWP's ... */
d3a70e03 2219 iterate_over_lwps (minus_one_ptid, stop_callback);
2db9a427
PA
2220
2221 /* ... and wait until all of them have reported back that
2222 they're no longer running. */
d3a70e03 2223 iterate_over_lwps (minus_one_ptid, stop_wait_callback);
2db9a427
PA
2224}
2225
2226/* See linux-nat.h */
2227
2228void
2229linux_unstop_all_lwps (void)
2230{
2231 iterate_over_lwps (minus_one_ptid,
d3a70e03
TT
2232 [] (struct lwp_info *info)
2233 {
2234 return resume_stopped_resumed_lwps (info, minus_one_ptid);
2235 });
2db9a427
PA
2236}
2237
57380f4e 2238/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2239
2240static int
57380f4e
DJ
2241linux_nat_has_pending_sigint (int pid)
2242{
2243 sigset_t pending, blocked, ignored;
57380f4e
DJ
2244
2245 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2246
2247 if (sigismember (&pending, SIGINT)
2248 && !sigismember (&ignored, SIGINT))
2249 return 1;
2250
2251 return 0;
2252}
2253
2254/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2255
2256static int
d3a70e03 2257set_ignore_sigint (struct lwp_info *lp)
d6b0e80f 2258{
57380f4e
DJ
2259 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2260 flag to consume the next one. */
2261 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2262 && WSTOPSIG (lp->status) == SIGINT)
2263 lp->status = 0;
2264 else
2265 lp->ignore_sigint = 1;
2266
2267 return 0;
2268}
2269
2270/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2271 This function is called after we know the LWP has stopped; if the LWP
2272 stopped before the expected SIGINT was delivered, then it will never have
2273 arrived. Also, if the signal was delivered to a shared queue and consumed
2274 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2275
57380f4e
DJ
2276static void
2277maybe_clear_ignore_sigint (struct lwp_info *lp)
2278{
2279 if (!lp->ignore_sigint)
2280 return;
2281
e38504b3 2282 if (!linux_nat_has_pending_sigint (lp->ptid.lwp ()))
57380f4e 2283 {
9327494e 2284 linux_nat_debug_printf ("Clearing bogus flag for %s",
e53c95d4 2285 lp->ptid.to_string ().c_str ());
57380f4e
DJ
2286 lp->ignore_sigint = 0;
2287 }
2288}
2289
ebec9a0f
PA
2290/* Fetch the possible triggered data watchpoint info and store it in
2291 LP.
2292
2293 On some archs, like x86, that use debug registers to set
2294 watchpoints, it's possible that the way to know which watched
2295 address trapped, is to check the register that is used to select
2296 which address to watch. Problem is, between setting the watchpoint
2297 and reading back which data address trapped, the user may change
2298 the set of watchpoints, and, as a consequence, GDB changes the
2299 debug registers in the inferior. To avoid reading back a stale
2300 stopped-data-address when that happens, we cache in LP the fact
2301 that a watchpoint trapped, and the corresponding data address, as
2302 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2303 registers meanwhile, we have the cached data we can rely on. */
2304
9c02b525
PA
2305static int
2306check_stopped_by_watchpoint (struct lwp_info *lp)
ebec9a0f 2307{
2989a365 2308 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
ebec9a0f
PA
2309 inferior_ptid = lp->ptid;
2310
f6ac5f3d 2311 if (linux_target->low_stopped_by_watchpoint ())
ebec9a0f 2312 {
15c66dd6 2313 lp->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
f6ac5f3d
PA
2314 lp->stopped_data_address_p
2315 = linux_target->low_stopped_data_address (&lp->stopped_data_address);
ebec9a0f
PA
2316 }
2317
15c66dd6 2318 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
9c02b525
PA
2319}
2320
9c02b525 2321/* Returns true if the LWP had stopped for a watchpoint. */
ebec9a0f 2322
57810aa7 2323bool
f6ac5f3d 2324linux_nat_target::stopped_by_watchpoint ()
ebec9a0f
PA
2325{
2326 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2327
2328 gdb_assert (lp != NULL);
2329
15c66dd6 2330 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
ebec9a0f
PA
2331}
2332
57810aa7 2333bool
f6ac5f3d 2334linux_nat_target::stopped_data_address (CORE_ADDR *addr_p)
ebec9a0f
PA
2335{
2336 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2337
2338 gdb_assert (lp != NULL);
2339
2340 *addr_p = lp->stopped_data_address;
2341
2342 return lp->stopped_data_address_p;
2343}
2344
26ab7092
JK
2345/* Commonly any breakpoint / watchpoint generates only SIGTRAP. */
2346
135340af
PA
2347bool
2348linux_nat_target::low_status_is_event (int status)
26ab7092
JK
2349{
2350 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2351}
2352
57380f4e
DJ
2353/* Wait until LP is stopped. */
2354
2355static int
d3a70e03 2356stop_wait_callback (struct lwp_info *lp)
57380f4e 2357{
5b6d1e4f 2358 inferior *inf = find_inferior_ptid (linux_target, lp->ptid);
6c95b8df
PA
2359
2360 /* If this is a vfork parent, bail out, it is not going to report
2361 any SIGSTOP until the vfork is done with. */
2362 if (inf->vfork_child != NULL)
2363 return 0;
2364
d6b0e80f
AC
2365 if (!lp->stopped)
2366 {
2367 int status;
2368
2369 status = wait_lwp (lp);
2370 if (status == 0)
2371 return 0;
2372
57380f4e
DJ
2373 if (lp->ignore_sigint && WIFSTOPPED (status)
2374 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2375 {
57380f4e 2376 lp->ignore_sigint = 0;
d6b0e80f
AC
2377
2378 errno = 0;
e38504b3 2379 ptrace (PTRACE_CONT, lp->ptid.lwp (), 0, 0);
8817a6f2 2380 lp->stopped = 0;
9327494e
SM
2381 linux_nat_debug_printf
2382 ("PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)",
e53c95d4 2383 lp->ptid.to_string ().c_str (),
9327494e 2384 errno ? safe_strerror (errno) : "OK");
d6b0e80f 2385
d3a70e03 2386 return stop_wait_callback (lp);
d6b0e80f
AC
2387 }
2388
57380f4e
DJ
2389 maybe_clear_ignore_sigint (lp);
2390
d6b0e80f
AC
2391 if (WSTOPSIG (status) != SIGSTOP)
2392 {
e5ef252a 2393 /* The thread was stopped with a signal other than SIGSTOP. */
7feb7d06 2394
9327494e 2395 linux_nat_debug_printf ("Pending event %s in %s",
8d06918f 2396 status_to_str ((int) status).c_str (),
e53c95d4 2397 lp->ptid.to_string ().c_str ());
e5ef252a
PA
2398
2399 /* Save the sigtrap event. */
2400 lp->status = status;
e5ef252a 2401 gdb_assert (lp->signalled);
e7ad2f14 2402 save_stop_reason (lp);
d6b0e80f
AC
2403 }
2404 else
2405 {
7010835a 2406 /* We caught the SIGSTOP that we intended to catch. */
e5ef252a 2407
9327494e 2408 linux_nat_debug_printf ("Expected SIGSTOP caught for %s.",
e53c95d4 2409 lp->ptid.to_string ().c_str ());
e5ef252a 2410
d6b0e80f 2411 lp->signalled = 0;
7010835a
AB
2412
2413 /* If we are waiting for this stop so we can report the thread
2414 stopped then we need to record this status. Otherwise, we can
2415 now discard this stop event. */
2416 if (lp->last_resume_kind == resume_stop)
2417 {
2418 lp->status = status;
2419 save_stop_reason (lp);
2420 }
d6b0e80f
AC
2421 }
2422 }
2423
2424 return 0;
2425}
2426
9c02b525
PA
2427/* Return non-zero if LP has a wait status pending. Discard the
2428 pending event and resume the LWP if the event that originally
2429 caused the stop became uninteresting. */
d6b0e80f
AC
2430
2431static int
d3a70e03 2432status_callback (struct lwp_info *lp)
d6b0e80f
AC
2433{
2434 /* Only report a pending wait status if we pretend that this has
2435 indeed been resumed. */
ca2163eb
PA
2436 if (!lp->resumed)
2437 return 0;
2438
eb54c8bf
PA
2439 if (!lwp_status_pending_p (lp))
2440 return 0;
2441
15c66dd6
PA
2442 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
2443 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
9c02b525 2444 {
5b6d1e4f 2445 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
9c02b525
PA
2446 CORE_ADDR pc;
2447 int discard = 0;
2448
9c02b525
PA
2449 pc = regcache_read_pc (regcache);
2450
2451 if (pc != lp->stop_pc)
2452 {
9327494e 2453 linux_nat_debug_printf ("PC of %s changed. was=%s, now=%s",
e53c95d4 2454 lp->ptid.to_string ().c_str (),
9327494e
SM
2455 paddress (target_gdbarch (), lp->stop_pc),
2456 paddress (target_gdbarch (), pc));
9c02b525
PA
2457 discard = 1;
2458 }
faf09f01
PA
2459
2460#if !USE_SIGTRAP_SIGINFO
a01bda52 2461 else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
9c02b525 2462 {
9327494e 2463 linux_nat_debug_printf ("previous breakpoint of %s, at %s gone",
e53c95d4 2464 lp->ptid.to_string ().c_str (),
9327494e 2465 paddress (target_gdbarch (), lp->stop_pc));
9c02b525
PA
2466
2467 discard = 1;
2468 }
faf09f01 2469#endif
9c02b525
PA
2470
2471 if (discard)
2472 {
9327494e 2473 linux_nat_debug_printf ("pending event of %s cancelled.",
e53c95d4 2474 lp->ptid.to_string ().c_str ());
9c02b525
PA
2475
2476 lp->status = 0;
2477 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
2478 return 0;
2479 }
9c02b525
PA
2480 }
2481
eb54c8bf 2482 return 1;
d6b0e80f
AC
2483}
2484
d6b0e80f
AC
2485/* Count the LWP's that have had events. */
2486
2487static int
d3a70e03 2488count_events_callback (struct lwp_info *lp, int *count)
d6b0e80f 2489{
d6b0e80f
AC
2490 gdb_assert (count != NULL);
2491
9c02b525
PA
2492 /* Select only resumed LWPs that have an event pending. */
2493 if (lp->resumed && lwp_status_pending_p (lp))
d6b0e80f
AC
2494 (*count)++;
2495
2496 return 0;
2497}
2498
2499/* Select the LWP (if any) that is currently being single-stepped. */
2500
2501static int
d3a70e03 2502select_singlestep_lwp_callback (struct lwp_info *lp)
d6b0e80f 2503{
25289eb2
PA
2504 if (lp->last_resume_kind == resume_step
2505 && lp->status != 0)
d6b0e80f
AC
2506 return 1;
2507 else
2508 return 0;
2509}
2510
8a99810d
PA
2511/* Returns true if LP has a status pending. */
2512
2513static int
2514lwp_status_pending_p (struct lwp_info *lp)
2515{
2516 /* We check for lp->waitstatus in addition to lp->status, because we
2517 can have pending process exits recorded in lp->status and
2518 W_EXITCODE(0,0) happens to be 0. */
183be222 2519 return lp->status != 0 || lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE;
8a99810d
PA
2520}
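
/* Illustrative check, not part of linux-nat.c, of the ambiguity the
   comment above describes: on Linux a normal exit with code 0 encodes
   to a raw wait status of 0, which is also the value used here to mean
   "no pending status", hence the extra lp->waitstatus check.  */

#include <sys/wait.h>
#include <assert.h>

static void
sketch_w_exitcode_ambiguity (void)
{
  /* What W_EXITCODE (0, 0) expands to: exit code in bits 8-15,
     terminating signal in the low bits.  */
  int status = (0 << 8) | 0;

  assert (status == 0);
  assert (WIFEXITED (status) && WEXITSTATUS (status) == 0);
}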
2521
b90fc188 2522/* Select the Nth LWP that has had an event. */
d6b0e80f
AC
2523
2524static int
d3a70e03 2525select_event_lwp_callback (struct lwp_info *lp, int *selector)
d6b0e80f 2526{
d6b0e80f
AC
2527 gdb_assert (selector != NULL);
2528
9c02b525
PA
2529 /* Select only resumed LWPs that have an event pending. */
2530 if (lp->resumed && lwp_status_pending_p (lp))
d6b0e80f
AC
2531 if ((*selector)-- == 0)
2532 return 1;
2533
2534 return 0;
2535}
2536
e7ad2f14
PA
2537/* Called when the LWP stopped for a signal/trap. If it stopped for a
2538 trap check what caused it (breakpoint, watchpoint, trace, etc.),
2539 and save the result in the LWP's stop_reason field. If it stopped
2540 for a breakpoint, decrement the PC if necessary on the lwp's
2541 architecture. */
9c02b525 2542
e7ad2f14
PA
2543static void
2544save_stop_reason (struct lwp_info *lp)
710151dd 2545{
e7ad2f14
PA
2546 struct regcache *regcache;
2547 struct gdbarch *gdbarch;
515630c5 2548 CORE_ADDR pc;
9c02b525 2549 CORE_ADDR sw_bp_pc;
faf09f01
PA
2550#if USE_SIGTRAP_SIGINFO
2551 siginfo_t siginfo;
2552#endif
9c02b525 2553
e7ad2f14
PA
2554 gdb_assert (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON);
2555 gdb_assert (lp->status != 0);
2556
135340af 2557 if (!linux_target->low_status_is_event (lp->status))
e7ad2f14
PA
2558 return;
2559
5b6d1e4f 2560 regcache = get_thread_regcache (linux_target, lp->ptid);
ac7936df 2561 gdbarch = regcache->arch ();
e7ad2f14 2562
9c02b525 2563 pc = regcache_read_pc (regcache);
527a273a 2564 sw_bp_pc = pc - gdbarch_decr_pc_after_break (gdbarch);
515630c5 2565
faf09f01
PA
2566#if USE_SIGTRAP_SIGINFO
2567 if (linux_nat_get_siginfo (lp->ptid, &siginfo))
2568 {
2569 if (siginfo.si_signo == SIGTRAP)
2570 {
e7ad2f14
PA
2571 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
2572 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
faf09f01 2573 {
e7ad2f14
PA
2574 /* The si_code is ambiguous on this arch -- check debug
2575 registers. */
2576 if (!check_stopped_by_watchpoint (lp))
2577 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
2578 }
2579 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
2580 {
2581 /* If we determine the LWP stopped for a SW breakpoint,
2582 trust it. Particularly don't check watchpoint
7da6a5b9 2583 registers, because, at least on s390, we'd find
e7ad2f14
PA
2584 stopped-by-watchpoint as long as there's a watchpoint
2585 set. */
faf09f01 2586 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
faf09f01 2587 }
e7ad2f14 2588 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
faf09f01 2589 {
e7ad2f14
PA
2590 /* This can indicate either a hardware breakpoint or
2591 hardware watchpoint. Check debug registers. */
2592 if (!check_stopped_by_watchpoint (lp))
2593 lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
faf09f01 2594 }
2bf6fb9d
PA
2595 else if (siginfo.si_code == TRAP_TRACE)
2596 {
9327494e 2597 linux_nat_debug_printf ("%s stopped by trace",
e53c95d4 2598 lp->ptid.to_string ().c_str ());
e7ad2f14
PA
2599
2600 /* We may have single stepped an instruction that
2601 triggered a watchpoint. In that case, on some
2602 architectures (such as x86), instead of TRAP_HWBKPT,
2603 si_code indicates TRAP_TRACE, and we need to check
2604 the debug registers separately. */
2605 check_stopped_by_watchpoint (lp);
2bf6fb9d 2606 }
faf09f01
PA
2607 }
2608 }
2609#else
9c02b525 2610 if ((!lp->step || lp->stop_pc == sw_bp_pc)
a01bda52 2611 && software_breakpoint_inserted_here_p (regcache->aspace (),
9c02b525 2612 sw_bp_pc))
710151dd 2613 {
9c02b525
PA
2614 /* The LWP was either continued, or stepped a software
2615 breakpoint instruction. */
e7ad2f14
PA
2616 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
2617 }
2618
a01bda52 2619 if (hardware_breakpoint_inserted_here_p (regcache->aspace (), pc))
e7ad2f14
PA
2620 lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
2621
2622 if (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
2623 check_stopped_by_watchpoint (lp);
2624#endif
2625
2626 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2627 {
9327494e 2628 linux_nat_debug_printf ("%s stopped by software breakpoint",
e53c95d4 2629 lp->ptid.to_string ().c_str ());
710151dd
PA
2630
2631 /* Back up the PC if necessary. */
9c02b525
PA
2632 if (pc != sw_bp_pc)
2633 regcache_write_pc (regcache, sw_bp_pc);
515630c5 2634
e7ad2f14
PA
2635 /* Update this so we record the correct stop PC below. */
2636 pc = sw_bp_pc;
710151dd 2637 }
e7ad2f14 2638 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
9c02b525 2639 {
9327494e 2640 linux_nat_debug_printf ("%s stopped by hardware breakpoint",
e53c95d4 2641 lp->ptid.to_string ().c_str ());
e7ad2f14
PA
2642 }
2643 else if (lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
2644 {
9327494e 2645 linux_nat_debug_printf ("%s stopped by hardware watchpoint",
e53c95d4 2646 lp->ptid.to_string ().c_str ());
9c02b525 2647 }
d6b0e80f 2648
e7ad2f14 2649 lp->stop_pc = pc;
d6b0e80f
AC
2650}
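
/* Illustrative sketch, not part of linux-nat.c: why the PC is backed up
   above.  On x86-64 a software breakpoint is the one-byte int3 (0xcc)
   and the trap reports a PC one byte past it, so
   gdbarch_decr_pc_after_break is 1 there (and 0 on architectures whose
   trap leaves the PC at the breakpoint).  A raw ptrace version of the
   same adjustment, assuming an x86-64 tracee, could be: */

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>

static void
sketch_rewind_x86_64_breakpoint_pc (pid_t pid)
{
  struct user_regs_struct regs;

  ptrace (PTRACE_GETREGS, pid, 0, &regs);
  /* Point rip back at the 0xcc so the original instruction can be
     re-executed once the breakpoint is removed.  */
  regs.rip -= 1;
  ptrace (PTRACE_SETREGS, pid, 0, &regs);
}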
2651
faf09f01
PA
2652
2653/* Returns true if the LWP had stopped for a software breakpoint. */
2654
57810aa7 2655bool
f6ac5f3d 2656linux_nat_target::stopped_by_sw_breakpoint ()
faf09f01
PA
2657{
2658 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2659
2660 gdb_assert (lp != NULL);
2661
2662 return lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2663}
2664
2665/* Implement the supports_stopped_by_sw_breakpoint method. */
2666
57810aa7 2667bool
f6ac5f3d 2668linux_nat_target::supports_stopped_by_sw_breakpoint ()
faf09f01
PA
2669{
2670 return USE_SIGTRAP_SIGINFO;
2671}
2672
2673/* Returns true if the LWP had stopped for a hardware
2674 breakpoint/watchpoint. */
2675
57810aa7 2676bool
f6ac5f3d 2677linux_nat_target::stopped_by_hw_breakpoint ()
faf09f01
PA
2678{
2679 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2680
2681 gdb_assert (lp != NULL);
2682
2683 return lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2684}
2685
2686/* Implement the supports_stopped_by_hw_breakpoint method. */
2687
57810aa7 2688bool
f6ac5f3d 2689linux_nat_target::supports_stopped_by_hw_breakpoint ()
faf09f01
PA
2690{
2691 return USE_SIGTRAP_SIGINFO;
2692}
2693
d6b0e80f
AC
2694/* Select one LWP out of those that have events pending. */
2695
2696static void
d90e17a7 2697select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
d6b0e80f
AC
2698{
2699 int num_events = 0;
2700 int random_selector;
9c02b525 2701 struct lwp_info *event_lp = NULL;
d6b0e80f 2702
ac264b3b 2703 /* Record the wait status for the original LWP. */
d6b0e80f
AC
2704 (*orig_lp)->status = *status;
2705
9c02b525
PA
2706 /* In all-stop, give preference to the LWP that is being
2707 single-stepped. There will be at most one, and it will be the
2708 LWP that the core is most interested in. If we didn't do this,
2709 then we'd have to handle pending step SIGTRAPs somehow in case
2710 the core later continues the previously-stepped thread, as
2711 otherwise we'd report the pending SIGTRAP then, and the core, not
2712 having stepped the thread, wouldn't understand what the trap was
2713 for, and therefore would report it to the user as a random
2714 signal. */
fbea99ea 2715 if (!target_is_non_stop_p ())
d6b0e80f 2716 {
d3a70e03 2717 event_lp = iterate_over_lwps (filter, select_singlestep_lwp_callback);
9c02b525
PA
2718 if (event_lp != NULL)
2719 {
9327494e 2720 linux_nat_debug_printf ("Select single-step %s",
e53c95d4 2721 event_lp->ptid.to_string ().c_str ());
9c02b525 2722 }
d6b0e80f 2723 }
9c02b525
PA
2724
2725 if (event_lp == NULL)
d6b0e80f 2726 {
9c02b525 2727 /* Pick one at random, out of those which have had events. */
d6b0e80f 2728
9c02b525 2729 /* First see how many events we have. */
d3a70e03
TT
2730 iterate_over_lwps (filter,
2731 [&] (struct lwp_info *info)
2732 {
2733 return count_events_callback (info, &num_events);
2734 });
8bf3b159 2735 gdb_assert (num_events > 0);
d6b0e80f 2736
9c02b525
PA
2737 /* Now randomly pick a LWP out of those that have had
2738 events. */
d6b0e80f
AC
2739 random_selector = (int)
2740 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2741
9327494e
SM
2742 if (num_events > 1)
2743 linux_nat_debug_printf ("Found %d events, selecting #%d",
2744 num_events, random_selector);
d6b0e80f 2745
d3a70e03
TT
2746 event_lp
2747 = (iterate_over_lwps
2748 (filter,
2749 [&] (struct lwp_info *info)
2750 {
2751 return select_event_lwp_callback (info,
2752 &random_selector);
2753 }));
d6b0e80f
AC
2754 }
2755
2756 if (event_lp != NULL)
2757 {
2758 /* Switch the event LWP. */
2759 *orig_lp = event_lp;
2760 *status = event_lp->status;
2761 }
2762
2763 /* Flush the wait status for the event LWP. */
2764 (*orig_lp)->status = 0;
2765}
2766
2767/* Return non-zero if LP has been resumed. */
2768
2769static int
d3a70e03 2770resumed_callback (struct lwp_info *lp)
d6b0e80f
AC
2771{
2772 return lp->resumed;
2773}
2774
02f3fc28 2775/* Check if we should go on and pass this event to common code.
12d9289a 2776
897608ed
SM
2777 If so, save the status to the lwp_info structure associated to LWPID. */
2778
2779static void
9c02b525 2780linux_nat_filter_event (int lwpid, int status)
02f3fc28
PA
2781{
2782 struct lwp_info *lp;
89a5711c 2783 int event = linux_ptrace_get_extended_event (status);
02f3fc28 2784
f2907e49 2785 lp = find_lwp_pid (ptid_t (lwpid));
02f3fc28 2786
1abeb1e9
PA
2787 /* Check for events reported by anything not in our LWP list. */
2788 if (lp == nullptr)
0e5bf2a8 2789 {
1abeb1e9
PA
2790 if (WIFSTOPPED (status))
2791 {
2792 if (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC)
2793 {
2794 /* A non-leader thread exec'ed after we've seen the
2795 leader zombie, and removed it from our lists (in
2796 check_zombie_leaders). The non-leader thread changes
2797 its tid to the tgid. */
2798 linux_nat_debug_printf
2799 ("Re-adding thread group leader LWP %d after exec.",
2800 lwpid);
0e5bf2a8 2801
1abeb1e9
PA
2802 lp = add_lwp (ptid_t (lwpid, lwpid));
2803 lp->stopped = 1;
2804 lp->resumed = 1;
2805 add_thread (linux_target, lp->ptid);
2806 }
2807 else
2808 {
2809 /* A process we are controlling has forked and the new
2810 child's stop was reported to us by the kernel. Save
2811 its PID and go back to waiting for the fork event to
2812 be reported - the stopped process might be returned
2813 from waitpid before or after the fork event is. */
2814 linux_nat_debug_printf
2815 ("Saving LWP %d status %s in stopped_pids list",
2816 lwpid, status_to_str (status).c_str ());
2817 add_to_pid_list (&stopped_pids, lwpid, status);
2818 }
2819 }
2820 else
2821 {
2822 /* Don't report an event for the exit of an LWP not in our
2823 list, i.e. not part of any inferior we're debugging.
2824 This can happen if we detach from a program we originally
6cf20c46
PA
2825 forked and then it exits. However, note that we may have
2826 earlier deleted a leader of an inferior we're debugging,
2827 in check_zombie_leaders. Re-add it back here if so. */
2828 for (inferior *inf : all_inferiors (linux_target))
2829 {
2830 if (inf->pid == lwpid)
2831 {
2832 linux_nat_debug_printf
2833 ("Re-adding thread group leader LWP %d after exit.",
2834 lwpid);
2835
2836 lp = add_lwp (ptid_t (lwpid, lwpid));
2837 lp->resumed = 1;
2838 add_thread (linux_target, lp->ptid);
2839 break;
2840 }
2841 }
1abeb1e9 2842 }
0e5bf2a8 2843
1abeb1e9
PA
2844 if (lp == nullptr)
2845 return;
02f3fc28
PA
2846 }
2847
8817a6f2
PA
2848 /* This LWP is stopped now. (And if dead, this prevents it from
2849 ever being continued.) */
2850 lp->stopped = 1;
2851
8784d563
PA
2852 if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
2853 {
5b6d1e4f 2854 inferior *inf = find_inferior_pid (linux_target, lp->ptid.pid ());
de0d863e 2855 int options = linux_nat_ptrace_options (inf->attach_flag);
8784d563 2856
e38504b3 2857 linux_enable_event_reporting (lp->ptid.lwp (), options);
8784d563
PA
2858 lp->must_set_ptrace_flags = 0;
2859 }
2860
ca2163eb
PA
2861 /* Handle GNU/Linux's syscall SIGTRAPs. */
2862 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2863 {
2864 /* No longer need the sysgood bit. The ptrace event ends up
2865 recorded in lp->waitstatus if we care for it. We can carry
2866 on handling the event like a regular SIGTRAP from here
2867 on. */
2868 status = W_STOPCODE (SIGTRAP);
2869 if (linux_handle_syscall_trap (lp, 0))
897608ed 2870 return;
ca2163eb 2871 }
bfd09d20
JS
2872 else
2873 {
2874 /* Almost all other ptrace-stops are known to be outside of system
2875 calls, with further exceptions in linux_handle_extended_wait. */
2876 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2877 }
02f3fc28 2878
ca2163eb 2879 /* Handle GNU/Linux's extended waitstatus for trace events. */
89a5711c
DB
2880 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2881 && linux_is_extended_waitstatus (status))
02f3fc28 2882 {
9327494e
SM
2883 linux_nat_debug_printf ("Handling extended status 0x%06x", status);
2884
4dd63d48 2885 if (linux_handle_extended_wait (lp, status))
897608ed 2886 return;
02f3fc28
PA
2887 }
2888
2889 /* Check if the thread has exited. */
9c02b525
PA
2890 if (WIFEXITED (status) || WIFSIGNALED (status))
2891 {
6cf20c46 2892 if (!report_thread_events && !is_leader (lp))
02f3fc28 2893 {
9327494e 2894 linux_nat_debug_printf ("%s exited.",
e53c95d4 2895 lp->ptid.to_string ().c_str ());
9c02b525 2896
6cf20c46 2897 /* If this was not the leader exiting, then the exit signal
4a6ed09b
PA
2898 was not the end of the debugged application and should be
2899 ignored. */
2900 exit_lwp (lp);
897608ed 2901 return;
02f3fc28
PA
2902 }
2903
77598427
PA
2904 /* Note that even if the leader was ptrace-stopped, it can still
2905 exit, if e.g., some other thread brings down the whole
2906 process (calls `exit'). So don't assert that the lwp is
2907 resumed. */
9327494e
SM
2908 linux_nat_debug_printf ("LWP %ld exited (resumed=%d)",
2909 lp->ptid.lwp (), lp->resumed);
02f3fc28 2910
9c02b525
PA
2911 /* Dead LWPs aren't expected to report a pending SIGSTOP. */
2912 lp->signalled = 0;
2913
2914 /* Store the pending event in the waitstatus, because
2915 W_EXITCODE(0,0) == 0. */
7509b829 2916 lp->waitstatus = host_status_to_waitstatus (status);
897608ed 2917 return;
02f3fc28
PA
2918 }
2919
02f3fc28
PA
2920 /* Make sure we don't report a SIGSTOP that we sent ourselves in
2921 an attempt to stop an LWP. */
2922 if (lp->signalled
2923 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2924 {
02f3fc28
PA
2925 lp->signalled = 0;
2926
2bf6fb9d 2927 if (lp->last_resume_kind == resume_stop)
25289eb2 2928 {
9327494e 2929 linux_nat_debug_printf ("resume_stop SIGSTOP caught for %s.",
e53c95d4 2930 lp->ptid.to_string ().c_str ());
2bf6fb9d
PA
2931 }
2932 else
2933 {
2934 /* This is a delayed SIGSTOP. Filter out the event. */
02f3fc28 2935
9327494e
SM
2936 linux_nat_debug_printf
2937 ("%s %s, 0, 0 (discard delayed SIGSTOP)",
2938 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
e53c95d4 2939 lp->ptid.to_string ().c_str ());
02f3fc28 2940
2bf6fb9d 2941 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
25289eb2 2942 gdb_assert (lp->resumed);
897608ed 2943 return;
25289eb2 2944 }
02f3fc28
PA
2945 }
2946
57380f4e
DJ
2947 /* Make sure we don't report a SIGINT that we have already displayed
2948 for another thread. */
2949 if (lp->ignore_sigint
2950 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
2951 {
9327494e 2952 linux_nat_debug_printf ("Delayed SIGINT caught for %s.",
e53c95d4 2953 lp->ptid.to_string ().c_str ());
57380f4e
DJ
2954
2955 /* This is a delayed SIGINT. */
2956 lp->ignore_sigint = 0;
2957
8a99810d 2958 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
9327494e
SM
2959 linux_nat_debug_printf ("%s %s, 0, 0 (discard SIGINT)",
2960 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
e53c95d4 2961 lp->ptid.to_string ().c_str ());
57380f4e
DJ
2962 gdb_assert (lp->resumed);
2963
2964 /* Discard the event. */
897608ed 2965 return;
57380f4e
DJ
2966 }
2967
9c02b525
PA
2968 /* Don't report signals that GDB isn't interested in, such as
2969 signals that are neither printed nor stopped upon. Stopping all
7da6a5b9 2970 threads can be a bit time-consuming, so if we want decent
9c02b525
PA
2971 performance with heavily multi-threaded programs, especially when
2972 they're using a high frequency timer, we'd better avoid it if we
2973 can. */
2974 if (WIFSTOPPED (status))
2975 {
2976 enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
2977
fbea99ea 2978 if (!target_is_non_stop_p ())
9c02b525
PA
2979 {
2980 /* Only do the below in all-stop, as we currently use SIGSTOP
2981 to implement target_stop (see linux_nat_stop) in
2982 non-stop. */
2983 if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
2984 {
2985 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
2986 forwarded to the entire process group, that is, all LWPs
2987 will receive it - unless they're using CLONE_THREAD to
2988 share signals. Since we only want to report it once, we
2989 mark it as ignored for all LWPs except this one. */
d3a70e03 2990 iterate_over_lwps (ptid_t (lp->ptid.pid ()), set_ignore_sigint);
9c02b525
PA
2991 lp->ignore_sigint = 0;
2992 }
2993 else
2994 maybe_clear_ignore_sigint (lp);
2995 }
2996
2997 /* When using hardware single-step, we need to report every signal.
c9587f88 2998 Otherwise, signals in pass_mask may be short-circuited,
d8c06f22
AB
2999 except for signals that might be caused by a breakpoint, or SIGSTOP
3000 if we sent the SIGSTOP and are waiting for it to arrive. */
9c02b525 3001 if (!lp->step
c9587f88 3002 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status))
d8c06f22 3003 && (WSTOPSIG (status) != SIGSTOP
5b6d1e4f 3004 || !find_thread_ptid (linux_target, lp->ptid)->stop_requested)
c9587f88 3005 && !linux_wstatus_maybe_breakpoint (status))
9c02b525
PA
3006 {
3007 linux_resume_one_lwp (lp, lp->step, signo);
9327494e
SM
3008 linux_nat_debug_printf
3009 ("%s %s, %s (preempt 'handle')",
3010 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
e53c95d4 3011 lp->ptid.to_string ().c_str (),
9327494e
SM
3012 (signo != GDB_SIGNAL_0
3013 ? strsignal (gdb_signal_to_host (signo)) : "0"));
897608ed 3014 return;
9c02b525
PA
3015 }
3016 }
3017
02f3fc28
PA
3018 /* An interesting event. */
3019 gdb_assert (lp);
ca2163eb 3020 lp->status = status;
e7ad2f14 3021 save_stop_reason (lp);
02f3fc28
PA
3022}
3023
0e5bf2a8
PA
3024/* Detect zombie thread group leaders, and "exit" them. We can't reap
3025 their exits until all other threads in the group have exited. */
3026
3027static void
3028check_zombie_leaders (void)
3029{
08036331 3030 for (inferior *inf : all_inferiors ())
0e5bf2a8
PA
3031 {
3032 struct lwp_info *leader_lp;
3033
3034 if (inf->pid == 0)
3035 continue;
3036
f2907e49 3037 leader_lp = find_lwp_pid (ptid_t (inf->pid));
0e5bf2a8
PA
3038 if (leader_lp != NULL
3039 /* Check if there are other threads in the group, as we may
6cf20c46
PA
3040 have raced with the inferior simply exiting. Note this
3041 isn't a watertight check. If the inferior is
3042 multi-threaded and is exiting, it may be we see the
3043 leader as zombie before we reap all the non-leader
3044 threads. See comments below. */
0e5bf2a8 3045 && num_lwps (inf->pid) > 1
5f572dec 3046 && linux_proc_pid_is_zombie (inf->pid))
0e5bf2a8 3047 {
6cf20c46
PA
3048 /* A zombie leader in a multi-threaded program can mean one
3049 of three things:
3050
3051 #1 - Only the leader exited, not the whole program, e.g.,
3052 with pthread_exit. Since we can't reap the leader's exit
3053 status until all other threads are gone and reaped too,
3054 we want to delete the zombie leader right away, as it
3055 can't be debugged, we can't read its registers, etc.
3056 This is the main reason we check for zombie leaders
3057 disappearing.
3058
3059 #2 - The whole thread-group/process exited (a group exit,
3060 via e.g. exit(3)), and there is (or will be shortly) an
3061 exit reported for each thread in the process, and then
3062 finally an exit for the leader once the non-leaders are
3063 reaped.
3064
3065 #3 - There are 3 or more threads in the group, and a
3066 thread other than the leader exec'd. See comments on
3067 exec events at the top of the file.
3068
3069 Ideally we would never delete the leader for case #2.
3070 Instead, we want to collect the exit status of each
3071 non-leader thread, and then finally collect the exit
3072 status of the leader as normal and use its exit code as
3073 whole-process exit code. Unfortunately, there's no
3074 race-free way to distinguish cases #1 and #2. We can't
3075 assume the exit events for the non-leader threads are
3076 already pending in the kernel, nor can we assume the
3077 non-leader threads are in zombie state already. Between
3078 the leader becoming zombie and the non-leaders exiting
3079 and becoming zombie themselves, there's a small time
3080 window, so such a check would be racy. Temporarily
3081 pausing all threads and checking to see if all threads
3082 exit or not before re-resuming them would work in the
3083 case that all threads are running right now, but it
3084 wouldn't work if some thread is currently already
3085 ptrace-stopped, e.g., due to scheduler-locking.
3086
3087 So what we do is we delete the leader anyhow, and then
3088 later on when we see its exit status, we re-add it back.
3089 We also make sure that we only report a whole-process
3090 exit when we see the leader exiting, as opposed to when
3091 the last LWP in the LWP list exits, which can be a
3092 non-leader if we deleted the leader here. */
9327494e 3093 linux_nat_debug_printf ("Thread group leader %d zombie "
6cf20c46
PA
3094 "(it exited, or another thread execd), "
3095 "deleting it.",
9327494e 3096 inf->pid);
0e5bf2a8
PA
3097 exit_lwp (leader_lp);
3098 }
3099 }
3100}
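
/* Illustrative sketch, not part of linux-nat.c: num_lwps above counts
   the LWPs GDB already knows about; an alternative way to see how many
   threads the kernel still has in a thread group is to list
   /proc/PID/task.  Shown only to make the "other threads in the group"
   check above concrete; the function name is made up.  */

#include <dirent.h>
#include <stdio.h>
#include <sys/types.h>

static int
sketch_count_kernel_lwps (pid_t pid)
{
  char path[64];
  DIR *dir;
  struct dirent *ent;
  int count = 0;

  snprintf (path, sizeof (path), "/proc/%d/task", (int) pid);
  dir = opendir (path);
  if (dir == NULL)
    return 0;

  /* Each directory entry other than "." and ".." is one LWP.  */
  while ((ent = readdir (dir)) != NULL)
    if (ent->d_name[0] != '.')
      count++;

  closedir (dir);
  return count;
}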
3101
aa01bd36
PA
3102/* Convenience function that is called when the kernel reports an exit
3103 event. This decides whether to report the event to GDB as a
3104 process exit event, a thread exit event, or to suppress the
3105 event. */
3106
3107static ptid_t
3108filter_exit_event (struct lwp_info *event_child,
3109 struct target_waitstatus *ourstatus)
3110{
3111 ptid_t ptid = event_child->ptid;
3112
6cf20c46 3113 if (!is_leader (event_child))
aa01bd36
PA
3114 {
3115 if (report_thread_events)
183be222 3116 ourstatus->set_thread_exited (0);
aa01bd36 3117 else
183be222 3118 ourstatus->set_ignore ();
aa01bd36
PA
3119
3120 exit_lwp (event_child);
3121 }
3122
3123 return ptid;
3124}
3125
d6b0e80f 3126static ptid_t
f6ac5f3d 3127linux_nat_wait_1 (ptid_t ptid, struct target_waitstatus *ourstatus,
b60cea74 3128 target_wait_flags target_options)
d6b0e80f 3129{
fc9b8e47 3130 sigset_t prev_mask;
4b60df3d 3131 enum resume_kind last_resume_kind;
12d9289a 3132 struct lwp_info *lp;
12d9289a 3133 int status;
d6b0e80f 3134
9327494e 3135 linux_nat_debug_printf ("enter");
b84876c2 3136
f973ed9c
DJ
3137 /* The first time we get here after starting a new inferior, we may
3138 not have added it to the LWP list yet - this is the earliest
3139 moment at which we know its PID. */
677c92fe 3140 if (ptid.is_pid () && find_lwp_pid (ptid) == nullptr)
f973ed9c 3141 {
677c92fe 3142 ptid_t lwp_ptid (ptid.pid (), ptid.pid ());
27c9d204 3143
677c92fe
SM
3144 /* Upgrade the main thread's ptid. */
3145 thread_change_ptid (linux_target, ptid, lwp_ptid);
3146 lp = add_initial_lwp (lwp_ptid);
f973ed9c
DJ
3147 lp->resumed = 1;
3148 }
3149
12696c10 3150 /* Make sure SIGCHLD is blocked until the sigsuspend below. */
7feb7d06 3151 block_child_signals (&prev_mask);
d6b0e80f 3152
d6b0e80f 3153 /* First check if there is a LWP with a wait status pending. */
d3a70e03 3154 lp = iterate_over_lwps (ptid, status_callback);
8a99810d 3155 if (lp != NULL)
d6b0e80f 3156 {
9327494e 3157 linux_nat_debug_printf ("Using pending wait status %s for %s.",
8d06918f 3158 status_to_str (lp->status).c_str (),
e53c95d4 3159 lp->ptid.to_string ().c_str ());
d6b0e80f
AC
3160 }
3161
9c02b525
PA
3162 /* But if we don't find a pending event, we'll have to wait. Always
3163 pull all events out of the kernel. We'll randomly select an
3164 event LWP out of all that have events, to prevent starvation. */
7feb7d06 3165
d90e17a7 3166 while (lp == NULL)
d6b0e80f
AC
3167 {
3168 pid_t lwpid;
3169
0e5bf2a8
PA
3170 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
3171 quirks:
3172
3173 - If the thread group leader exits while other threads in the
3174 thread group still exist, waitpid(TGID, ...) hangs. That
3175 waitpid won't return an exit status until the other threads
85102364 3176 in the group are reaped.
0e5bf2a8
PA
3177
3178 - When a non-leader thread execs, that thread just vanishes
3179 without reporting an exit (so we'd hang if we waited for it
3180 explicitly in that case). The exec event is reported to
3181 the TGID pid. */
3182
3183 errno = 0;
4a6ed09b 3184 lwpid = my_waitpid (-1, &status, __WALL | WNOHANG);
0e5bf2a8 3185
9327494e
SM
3186 linux_nat_debug_printf ("waitpid(-1, ...) returned %d, %s",
3187 lwpid,
3188 errno ? safe_strerror (errno) : "ERRNO-OK");
b84876c2 3189
d6b0e80f
AC
3190 if (lwpid > 0)
3191 {
9327494e 3192 linux_nat_debug_printf ("waitpid %ld received %s",
8d06918f
SM
3193 (long) lwpid,
3194 status_to_str (status).c_str ());
d6b0e80f 3195
9c02b525 3196 linux_nat_filter_event (lwpid, status);
0e5bf2a8
PA
3197 /* Retry until nothing comes out of waitpid. A single
3198 SIGCHLD can indicate more than one child stopped. */
3199 continue;
d6b0e80f
AC
3200 }
3201
20ba1ce6
PA
3202 /* Now that we've pulled all events out of the kernel, resume
3203 LWPs that don't have an interesting event to report. */
3204 iterate_over_lwps (minus_one_ptid,
d3a70e03
TT
3205 [] (struct lwp_info *info)
3206 {
3207 return resume_stopped_resumed_lwps (info, minus_one_ptid);
3208 });
20ba1ce6
PA
3209
3210 /* ... and find an LWP with a status to report to the core, if
3211 any. */
d3a70e03 3212 lp = iterate_over_lwps (ptid, status_callback);
9c02b525
PA
3213 if (lp != NULL)
3214 break;
3215
0e5bf2a8
PA
3216 /* Check for zombie thread group leaders. Those can't be reaped
3217 until all other threads in the thread group are. */
3218 check_zombie_leaders ();
d6b0e80f 3219
0e5bf2a8
PA
3220 /* If there are no resumed children left, bail. We'd be stuck
3221 forever in the sigsuspend call below otherwise. */
d3a70e03 3222 if (iterate_over_lwps (ptid, resumed_callback) == NULL)
0e5bf2a8 3223 {
9327494e 3224 linux_nat_debug_printf ("exit (no resumed LWP)");
b84876c2 3225
183be222 3226 ourstatus->set_no_resumed ();
b84876c2 3227
0e5bf2a8
PA
3228 restore_child_signals_mask (&prev_mask);
3229 return minus_one_ptid;
d6b0e80f 3230 }
28736962 3231
0e5bf2a8
PA
3232 /* No interesting event to report to the core. */
3233
3234 if (target_options & TARGET_WNOHANG)
3235 {
9327494e 3236 linux_nat_debug_printf ("exit (ignore)");
28736962 3237
183be222 3238 ourstatus->set_ignore ();
28736962
PA
3239 restore_child_signals_mask (&prev_mask);
3240 return minus_one_ptid;
3241 }
d6b0e80f
AC
3242
3243 /* We shouldn't end up here unless we want to try again. */
d90e17a7 3244 gdb_assert (lp == NULL);
0e5bf2a8
PA
3245
3246 /* Block until we get an event reported with SIGCHLD. */
9c3a5d93 3247 wait_for_signal ();
d6b0e80f
AC
3248 }
3249
d6b0e80f
AC
3250 gdb_assert (lp);
3251
ca2163eb
PA
3252 status = lp->status;
3253 lp->status = 0;
3254
fbea99ea 3255 if (!target_is_non_stop_p ())
4c28f408
PA
3256 {
3257 /* Now stop all other LWP's ... */
d3a70e03 3258 iterate_over_lwps (minus_one_ptid, stop_callback);
4c28f408
PA
3259
3260 /* ... and wait until all of them have reported back that
3261 they're no longer running. */
d3a70e03 3262 iterate_over_lwps (minus_one_ptid, stop_wait_callback);
9c02b525
PA
3263 }
3264
3265 /* If we're not waiting for a specific LWP, choose an event LWP from
3266 among those that have had events. Giving equal priority to all
3267 LWPs that have had events helps prevent starvation. */
d7e15655 3268 if (ptid == minus_one_ptid || ptid.is_pid ())
9c02b525
PA
3269 select_event_lwp (ptid, &lp, &status);
3270
3271 gdb_assert (lp != NULL);
3272
3273 /* Now that we've selected our final event LWP, un-adjust its PC if
faf09f01
PA
3274 it was a software breakpoint, and we can't reliably support the
3275 "stopped by software breakpoint" stop reason. */
3276 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3277 && !USE_SIGTRAP_SIGINFO)
9c02b525 3278 {
5b6d1e4f 3279 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
ac7936df 3280 struct gdbarch *gdbarch = regcache->arch ();
527a273a 3281 int decr_pc = gdbarch_decr_pc_after_break (gdbarch);
4c28f408 3282
9c02b525
PA
3283 if (decr_pc != 0)
3284 {
3285 CORE_ADDR pc;
d6b0e80f 3286
9c02b525
PA
3287 pc = regcache_read_pc (regcache);
3288 regcache_write_pc (regcache, pc + decr_pc);
3289 }
3290 }
e3e9f5a2 3291
9c02b525
PA
3292 /* We'll need this to determine whether to report a SIGSTOP as
3293 GDB_SIGNAL_0. Need to take a copy because resume_clear_callback
3294 clears it. */
3295 last_resume_kind = lp->last_resume_kind;
4b60df3d 3296
fbea99ea 3297 if (!target_is_non_stop_p ())
9c02b525 3298 {
e3e9f5a2
PA
3299 /* In all-stop, from the core's perspective, all LWPs are now
3300 stopped until a new resume action is sent over. */
d3a70e03 3301 iterate_over_lwps (minus_one_ptid, resume_clear_callback);
e3e9f5a2
PA
3302 }
3303 else
25289eb2 3304 {
d3a70e03 3305 resume_clear_callback (lp);
25289eb2 3306 }
d6b0e80f 3307
135340af 3308 if (linux_target->low_status_is_event (status))
d6b0e80f 3309 {
9327494e 3310 linux_nat_debug_printf ("trap ptid is %s.",
e53c95d4 3311 lp->ptid.to_string ().c_str ());
d6b0e80f 3312 }
d6b0e80f 3313
183be222 3314 if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
d6b0e80f
AC
3315 {
3316 *ourstatus = lp->waitstatus;
183be222 3317 lp->waitstatus.set_ignore ();
d6b0e80f
AC
3318 }
3319 else
7509b829 3320 *ourstatus = host_status_to_waitstatus (status);
d6b0e80f 3321
9327494e 3322 linux_nat_debug_printf ("exit");
b84876c2 3323
7feb7d06 3324 restore_child_signals_mask (&prev_mask);
1e225492 3325
4b60df3d 3326 if (last_resume_kind == resume_stop
183be222 3327 && ourstatus->kind () == TARGET_WAITKIND_STOPPED
25289eb2
PA
3328 && WSTOPSIG (status) == SIGSTOP)
3329 {
3330 /* This thread was requested to stop by GDB with target_stop,
3331 and it stopped cleanly, so report it as SIG0. The
3332 use of SIGSTOP is an implementation detail. */
183be222 3333 ourstatus->set_stopped (GDB_SIGNAL_0);
25289eb2
PA
3334 }
3335
183be222
SM
3336 if (ourstatus->kind () == TARGET_WAITKIND_EXITED
3337 || ourstatus->kind () == TARGET_WAITKIND_SIGNALLED)
1e225492
JK
3338 lp->core = -1;
3339 else
2e794194 3340 lp->core = linux_common_core_of_thread (lp->ptid);
1e225492 3341
183be222 3342 if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
aa01bd36
PA
3343 return filter_exit_event (lp, ourstatus);
3344
f973ed9c 3345 return lp->ptid;
d6b0e80f
AC
3346}
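
/* A minimal sketch of the event-drain pattern used by
   linux_nat_wait_1 above: a single SIGCHLD may stand for several
   stopped children, so waitpid (-1, ..., WNOHANG) is called in a
   loop until the kernel has nothing more to report.  The
   HANDLE_ONE_EVENT callback is hypothetical; in the real code the
   work is done by linux_nat_filter_event.  */

static void
drain_child_events_sketch (void (*handle_one_event) (pid_t, int))
{
  for (;;)
    {
      int status;
      pid_t lwpid = waitpid (-1, &status, __WALL | WNOHANG);

      if (lwpid <= 0)
	break;		/* No more stopped or exited children right now.  */

      handle_one_event (lwpid, status);
    }
}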
3347
e3e9f5a2
PA
3348/* Resume LWPs that are currently stopped without any pending status
3349 to report, but are resumed from the core's perspective. */
3350
3351static int
d3a70e03 3352resume_stopped_resumed_lwps (struct lwp_info *lp, const ptid_t wait_ptid)
e3e9f5a2 3353{
4dd63d48
PA
3354 if (!lp->stopped)
3355 {
9327494e 3356 linux_nat_debug_printf ("NOT resuming LWP %s, not stopped",
e53c95d4 3357 lp->ptid.to_string ().c_str ());
4dd63d48
PA
3358 }
3359 else if (!lp->resumed)
3360 {
9327494e 3361 linux_nat_debug_printf ("NOT resuming LWP %s, not resumed",
e53c95d4 3362 lp->ptid.to_string ().c_str ());
4dd63d48
PA
3363 }
3364 else if (lwp_status_pending_p (lp))
3365 {
9327494e 3366 linux_nat_debug_printf ("NOT resuming LWP %s, has pending status",
e53c95d4 3367 lp->ptid.to_string ().c_str ());
4dd63d48
PA
3368 }
3369 else
e3e9f5a2 3370 {
5b6d1e4f 3371 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
ac7936df 3372 struct gdbarch *gdbarch = regcache->arch ();
336060f3 3373
a70b8144 3374 try
e3e9f5a2 3375 {
23f238d3
PA
3376 CORE_ADDR pc = regcache_read_pc (regcache);
3377 int leave_stopped = 0;
e3e9f5a2 3378
23f238d3
PA
3379 /* Don't bother if there's a breakpoint at PC that we'd hit
3380 immediately, and we're not waiting for this LWP. */
d3a70e03 3381 if (!lp->ptid.matches (wait_ptid))
23f238d3 3382 {
a01bda52 3383 if (breakpoint_inserted_here_p (regcache->aspace (), pc))
23f238d3
PA
3384 leave_stopped = 1;
3385 }
e3e9f5a2 3386
23f238d3
PA
3387 if (!leave_stopped)
3388 {
9327494e
SM
3389 linux_nat_debug_printf
3390 ("resuming stopped-resumed LWP %s at %s: step=%d",
e53c95d4 3391 lp->ptid.to_string ().c_str (), paddress (gdbarch, pc),
9327494e 3392 lp->step);
23f238d3
PA
3393
3394 linux_resume_one_lwp_throw (lp, lp->step, GDB_SIGNAL_0);
3395 }
3396 }
230d2906 3397 catch (const gdb_exception_error &ex)
23f238d3
PA
3398 {
3399 if (!check_ptrace_stopped_lwp_gone (lp))
eedc3f4f 3400 throw;
23f238d3 3401 }
e3e9f5a2
PA
3402 }
3403
3404 return 0;
3405}
3406
f6ac5f3d
PA
3407ptid_t
3408linux_nat_target::wait (ptid_t ptid, struct target_waitstatus *ourstatus,
b60cea74 3409 target_wait_flags target_options)
7feb7d06
PA
3410{
3411 ptid_t event_ptid;
3412
e53c95d4 3413 linux_nat_debug_printf ("[%s], [%s]", ptid.to_string ().c_str (),
9327494e 3414 target_options_to_string (target_options).c_str ());
7feb7d06
PA
3415
3416 /* Flush the async file first. */
d9d41e78 3417 if (target_is_async_p ())
7feb7d06
PA
3418 async_file_flush ();
3419
e3e9f5a2
PA
3420 /* Resume LWPs that are currently stopped without any pending status
3421 to report, but are resumed from the core's perspective. LWPs get
3422 in this state if we find them stopping at a time we're not
3423 interested in reporting the event (target_wait on a
3424 specific_process, for example, see linux_nat_wait_1), and
3425 meanwhile the event became uninteresting. Don't bother resuming
3426 LWPs we're not going to wait for if they'd stop immediately. */
fbea99ea 3427 if (target_is_non_stop_p ())
d3a70e03
TT
3428 iterate_over_lwps (minus_one_ptid,
3429 [=] (struct lwp_info *info)
3430 {
3431 return resume_stopped_resumed_lwps (info, ptid);
3432 });
e3e9f5a2 3433
f6ac5f3d 3434 event_ptid = linux_nat_wait_1 (ptid, ourstatus, target_options);
7feb7d06
PA
3435
3436 /* If we requested any event, and something came out, assume there
3437 may be more. If we requested a specific lwp or process, also
3438 assume there may be more. */
d9d41e78 3439 if (target_is_async_p ()
183be222
SM
3440 && ((ourstatus->kind () != TARGET_WAITKIND_IGNORE
3441 && ourstatus->kind () != TARGET_WAITKIND_NO_RESUMED)
d7e15655 3442 || ptid != minus_one_ptid))
7feb7d06
PA
3443 async_file_mark ();
3444
7feb7d06
PA
3445 return event_ptid;
3446}
3447
1d2736d4
PA
3448/* Kill one LWP. */
3449
3450static void
3451kill_one_lwp (pid_t pid)
d6b0e80f 3452{
ed731959
JK
3453 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3454
3455 errno = 0;
1d2736d4 3456 kill_lwp (pid, SIGKILL);
9327494e 3457
ed731959 3458 if (debug_linux_nat)
57745c90
PA
3459 {
3460 int save_errno = errno;
3461
9327494e
SM
3462 linux_nat_debug_printf
3463 ("kill (SIGKILL) %ld, 0, 0 (%s)", (long) pid,
3464 save_errno != 0 ? safe_strerror (save_errno) : "OK");
57745c90 3465 }
ed731959
JK
3466
3467 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3468
d6b0e80f 3469 errno = 0;
1d2736d4 3470 ptrace (PTRACE_KILL, pid, 0, 0);
d6b0e80f 3471 if (debug_linux_nat)
57745c90
PA
3472 {
3473 int save_errno = errno;
3474
9327494e
SM
3475 linux_nat_debug_printf
3476 ("PTRACE_KILL %ld, 0, 0 (%s)", (long) pid,
3477 save_errno ? safe_strerror (save_errno) : "OK");
57745c90 3478 }
d6b0e80f
AC
3479}
3480
1d2736d4
PA
3481/* Wait for an LWP to die. */
3482
3483static void
3484kill_wait_one_lwp (pid_t pid)
d6b0e80f 3485{
1d2736d4 3486 pid_t res;
d6b0e80f
AC
3487
3488 /* We must make sure that there are no pending events (delayed
3489 SIGSTOPs, pending SIGTRAPs, etc.) so that the current
3490 program doesn't interfere with any following debugging session. */
3491
d6b0e80f
AC
3492 do
3493 {
1d2736d4
PA
3494 res = my_waitpid (pid, NULL, __WALL);
3495 if (res != (pid_t) -1)
d6b0e80f 3496 {
9327494e
SM
3497 linux_nat_debug_printf ("wait %ld received unknown.", (long) pid);
3498
4a6ed09b
PA
3499 /* The Linux kernel sometimes fails to kill a thread
3500 completely after PTRACE_KILL; the thread goes from the stop
3501 point in do_fork out to the one in get_signal_to_deliver
3502 and waits again. So kill it again. */
1d2736d4 3503 kill_one_lwp (pid);
d6b0e80f
AC
3504 }
3505 }
1d2736d4
PA
3506 while (res == pid);
3507
3508 gdb_assert (res == -1 && errno == ECHILD);
3509}
3510
3511/* Callback for iterate_over_lwps. */
d6b0e80f 3512
1d2736d4 3513static int
d3a70e03 3514kill_callback (struct lwp_info *lp)
1d2736d4 3515{
e38504b3 3516 kill_one_lwp (lp->ptid.lwp ());
d6b0e80f
AC
3517 return 0;
3518}
3519
1d2736d4
PA
3520/* Callback for iterate_over_lwps. */
3521
3522static int
d3a70e03 3523kill_wait_callback (struct lwp_info *lp)
1d2736d4 3524{
e38504b3 3525 kill_wait_one_lwp (lp->ptid.lwp ());
1d2736d4
PA
3526 return 0;
3527}
3528
3529/* Kill the fork children of any threads of inferior INF that are
3530 stopped at a fork event. */
3531
3532static void
3533kill_unfollowed_fork_children (struct inferior *inf)
3534{
08036331
PA
3535 for (thread_info *thread : inf->non_exited_threads ())
3536 {
3537 struct target_waitstatus *ws = &thread->pending_follow;
1d2736d4 3538
183be222
SM
3539 if (ws->kind () == TARGET_WAITKIND_FORKED
3540 || ws->kind () == TARGET_WAITKIND_VFORKED)
08036331 3541 {
183be222 3542 ptid_t child_ptid = ws->child_ptid ();
08036331
PA
3543 int child_pid = child_ptid.pid ();
3544 int child_lwp = child_ptid.lwp ();
3545
3546 kill_one_lwp (child_lwp);
3547 kill_wait_one_lwp (child_lwp);
3548
3549 /* Let the arch-specific native code know this process is
3550 gone. */
3551 linux_target->low_forget_process (child_pid);
3552 }
3553 }
1d2736d4
PA
3554}
3555
f6ac5f3d
PA
3556void
3557linux_nat_target::kill ()
d6b0e80f 3558{
f973ed9c
DJ
3559 /* If we're stopped while forking and we haven't followed yet,
3560 kill the other task. We need to do this first because the
3561 parent will be sleeping if this is a vfork. */
1d2736d4 3562 kill_unfollowed_fork_children (current_inferior ());
f973ed9c
DJ
3563
3564 if (forks_exist_p ())
7feb7d06 3565 linux_fork_killall ();
f973ed9c
DJ
3566 else
3567 {
e99b03dc 3568 ptid_t ptid = ptid_t (inferior_ptid.pid ());
e0881a8e 3569
4c28f408 3570 /* Stop all threads before killing them, since ptrace requires
30baf67b 3571 that the thread be stopped for PTRACE_KILL to succeed. */
d3a70e03 3572 iterate_over_lwps (ptid, stop_callback);
4c28f408
PA
3573 /* ... and wait until all of them have reported back that
3574 they're no longer running. */
d3a70e03 3575 iterate_over_lwps (ptid, stop_wait_callback);
4c28f408 3576
f973ed9c 3577 /* Kill all LWP's ... */
d3a70e03 3578 iterate_over_lwps (ptid, kill_callback);
f973ed9c
DJ
3579
3580 /* ... and wait until we've flushed all events. */
d3a70e03 3581 iterate_over_lwps (ptid, kill_wait_callback);
f973ed9c
DJ
3582 }
3583
bc1e6c81 3584 target_mourn_inferior (inferior_ptid);
d6b0e80f
AC
3585}
3586
f6ac5f3d
PA
3587void
3588linux_nat_target::mourn_inferior ()
d6b0e80f 3589{
e99b03dc 3590 int pid = inferior_ptid.pid ();
26cb8b7c
PA
3591
3592 purge_lwp_list (pid);
d6b0e80f 3593
8a89ddbd 3594 close_proc_mem_file (pid);
05c06f31 3595
f973ed9c 3596 if (! forks_exist_p ())
d90e17a7 3597 /* Normal case, no other forks available. */
f6ac5f3d 3598 inf_ptrace_target::mourn_inferior ();
f973ed9c
DJ
3599 else
3600 /* Multi-fork case. The current inferior_ptid has exited, but
3601 there are other viable forks to debug. Delete the exiting
3602 one and context-switch to the first available. */
3603 linux_fork_mourn_inferior ();
26cb8b7c
PA
3604
3605 /* Let the arch-specific native code know this process is gone. */
135340af 3606 linux_target->low_forget_process (pid);
d6b0e80f
AC
3607}
3608
5b009018
PA
3609/* Convert a native/host siginfo object, into/from the siginfo in the
3610 layout of the inferiors' architecture. */
3611
3612static void
a5362b9a 3613siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
5b009018 3614{
135340af
PA
3615 /* If the low target didn't do anything, then just do a straight
3616 memcpy. */
3617 if (!linux_target->low_siginfo_fixup (siginfo, inf_siginfo, direction))
5b009018
PA
3618 {
3619 if (direction == 1)
a5362b9a 3620 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5b009018 3621 else
a5362b9a 3622 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5b009018
PA
3623 }
3624}
3625
9b409511 3626static enum target_xfer_status
f6ac5f3d 3627linux_xfer_siginfo (enum target_object object,
dda83cd7 3628 const char *annex, gdb_byte *readbuf,
9b409511
YQ
3629 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3630 ULONGEST *xfered_len)
4aa995e1 3631{
4aa995e1 3632 int pid;
a5362b9a
TS
3633 siginfo_t siginfo;
3634 gdb_byte inf_siginfo[sizeof (siginfo_t)];
4aa995e1
PA
3635
3636 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
3637 gdb_assert (readbuf || writebuf);
3638
e38504b3 3639 pid = inferior_ptid.lwp ();
4aa995e1 3640 if (pid == 0)
e99b03dc 3641 pid = inferior_ptid.pid ();
4aa995e1
PA
3642
3643 if (offset > sizeof (siginfo))
2ed4b548 3644 return TARGET_XFER_E_IO;
4aa995e1
PA
3645
3646 errno = 0;
3647 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3648 if (errno != 0)
2ed4b548 3649 return TARGET_XFER_E_IO;
4aa995e1 3650
5b009018
PA
3651 /* When GDB is built as a 64-bit application, ptrace writes into
3652 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3653 inferior with a 64-bit GDB should look the same as debugging it
3654 with a 32-bit GDB, we need to convert it. GDB core always sees
3655 the converted layout, so any read/write will have to be done
3656 post-conversion. */
3657 siginfo_fixup (&siginfo, inf_siginfo, 0);
3658
4aa995e1
PA
3659 if (offset + len > sizeof (siginfo))
3660 len = sizeof (siginfo) - offset;
3661
3662 if (readbuf != NULL)
5b009018 3663 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
3664 else
3665 {
5b009018
PA
3666 memcpy (inf_siginfo + offset, writebuf, len);
3667
3668 /* Convert back to ptrace layout before flushing it out. */
3669 siginfo_fixup (&siginfo, inf_siginfo, 1);
3670
4aa995e1
PA
3671 errno = 0;
3672 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3673 if (errno != 0)
2ed4b548 3674 return TARGET_XFER_E_IO;
4aa995e1
PA
3675 }
3676
9b409511
YQ
3677 *xfered_len = len;
3678 return TARGET_XFER_OK;
4aa995e1
PA
3679}
3680
9b409511 3681static enum target_xfer_status
f6ac5f3d
PA
3682linux_nat_xfer_osdata (enum target_object object,
3683 const char *annex, gdb_byte *readbuf,
3684 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3685 ULONGEST *xfered_len);
3686
f6ac5f3d 3687static enum target_xfer_status
05c06f31
PA
3688linux_proc_xfer_memory_partial (gdb_byte *readbuf, const gdb_byte *writebuf,
3689 ULONGEST offset, LONGEST len, ULONGEST *xfered_len);
f6ac5f3d
PA
3690
3691enum target_xfer_status
3692linux_nat_target::xfer_partial (enum target_object object,
3693 const char *annex, gdb_byte *readbuf,
3694 const gdb_byte *writebuf,
3695 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
d6b0e80f 3696{
4aa995e1 3697 if (object == TARGET_OBJECT_SIGNAL_INFO)
f6ac5f3d 3698 return linux_xfer_siginfo (object, annex, readbuf, writebuf,
9b409511 3699 offset, len, xfered_len);
4aa995e1 3700
c35b1492
PA
3701 /* The target is connected but no live inferior is selected. Pass
3702 this request down to a lower stratum (e.g., the executable
3703 file). */
d7e15655 3704 if (object == TARGET_OBJECT_MEMORY && inferior_ptid == null_ptid)
9b409511 3705 return TARGET_XFER_EOF;
c35b1492 3706
f6ac5f3d
PA
3707 if (object == TARGET_OBJECT_AUXV)
3708 return memory_xfer_auxv (this, object, annex, readbuf, writebuf,
3709 offset, len, xfered_len);
3710
3711 if (object == TARGET_OBJECT_OSDATA)
3712 return linux_nat_xfer_osdata (object, annex, readbuf, writebuf,
3713 offset, len, xfered_len);
d6b0e80f 3714
f6ac5f3d
PA
3715 if (object == TARGET_OBJECT_MEMORY)
3716 {
05c06f31
PA
3717 /* GDB calculates all addresses in the largest possible address
3718 width. The address must be masked to the inferior's address
3719 width before its final use by linux_proc_xfer_memory_partial.
3720
3721 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
f6ac5f3d
PA
3722 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
3723
3724 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
3725 offset &= ((ULONGEST) 1 << addr_bit) - 1;
f6ac5f3d 3726
05c06f31
PA
3727 return linux_proc_xfer_memory_partial (readbuf, writebuf,
3728 offset, len, xfered_len);
3729 }
f6ac5f3d
PA
3730
3731 return inf_ptrace_target::xfer_partial (object, annex, readbuf, writebuf,
3732 offset, len, xfered_len);
d6b0e80f
AC
3733}
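
/* Worked example of the address masking in xfer_partial above,
   assuming a 64-bit ULONGEST: for a 32-bit inferior (addr_bit == 32)
   a sign-extended address such as 0xffffffff08048000 is reduced to
   0x08048000 before reaching linux_proc_xfer_memory_partial, while
   for addr_bit == 64 the mask is skipped entirely, which also avoids
   an undefined 64-bit shift.  */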
3734
57810aa7 3735bool
f6ac5f3d 3736linux_nat_target::thread_alive (ptid_t ptid)
28439f5e 3737{
4a6ed09b
PA
3738 /* As long as a PTID is in lwp list, consider it alive. */
3739 return find_lwp_pid (ptid) != NULL;
28439f5e
PA
3740}
3741
8a06aea7
PA
3742/* Implement the to_update_thread_list target method for this
3743 target. */
3744
f6ac5f3d
PA
3745void
3746linux_nat_target::update_thread_list ()
8a06aea7 3747{
4a6ed09b
PA
3748 /* We add/delete threads from the list as clone/exit events are
3749 processed, so just try deleting exited threads still in the
3750 thread list. */
3751 delete_exited_threads ();
a6904d5a
PA
3752
3753 /* Update the processor core that each lwp/thread was last seen
3754 running on. */
901b9821 3755 for (lwp_info *lwp : all_lwps ())
1ad3de98
PA
3756 {
3757 /* Avoid accessing /proc if the thread hasn't run since we last
3758 time we fetched the thread's core. Accessing /proc becomes
3759 noticeably expensive when we have thousands of LWPs. */
3760 if (lwp->core == -1)
3761 lwp->core = linux_common_core_of_thread (lwp->ptid);
3762 }
8a06aea7
PA
3763}
3764
a068643d 3765std::string
f6ac5f3d 3766linux_nat_target::pid_to_str (ptid_t ptid)
d6b0e80f 3767{
15a9e13e 3768 if (ptid.lwp_p ()
e38504b3 3769 && (ptid.pid () != ptid.lwp ()
e99b03dc 3770 || num_lwps (ptid.pid ()) > 1))
a068643d 3771 return string_printf ("LWP %ld", ptid.lwp ());
d6b0e80f
AC
3772
3773 return normal_pid_to_str (ptid);
3774}
3775
f6ac5f3d
PA
3776const char *
3777linux_nat_target::thread_name (struct thread_info *thr)
4694da01 3778{
79efa585 3779 return linux_proc_tid_get_name (thr->ptid);
4694da01
TT
3780}
3781
dba24537
AC
3782/* Accepts an integer PID; returns a string representing a file that
3783 can be opened to get the symbols for the child process. */
3784
f6ac5f3d
PA
3785char *
3786linux_nat_target::pid_to_exec_file (int pid)
dba24537 3787{
e0d86d2c 3788 return linux_proc_pid_to_exec_file (pid);
dba24537
AC
3789}
3790
8a89ddbd
PA
3791/* Object representing an open /proc/PID/mem file. We keep one such
3792 file open per inferior.
3793
3794 It might be tempting to think about only ever opening one file at
3795 most for all inferiors, closing/reopening the file as we access
3796 memory of different inferiors, to minimize number of file
3797 descriptors open, which can otherwise run into resource limits.
3798 However, that does not work correctly -- if the inferior execs and
3799 we haven't processed the exec event yet, and we opened a
3800 /proc/PID/mem file, we will get a mem file accessing the post-exec
3801 address space, thinking we're opening it for the pre-exec address
3802 space. That is dangerous as we can poke memory (e.g. clearing
3803 breakpoints) in the post-exec memory by mistake, corrupting the
3804 inferior. For that reason, we open the mem file as early as
3805 possible, right after spawning, forking or attaching to the
3806 inferior, when the inferior is stopped and thus before it has a
3807 chance of execing.
3808
3809 Note that after opening the file, even if the thread we opened it
3810 for subsequently exits, the open file is still usable for accessing
3811 memory. It's only when the whole process exits or execs that the
3812 file becomes invalid, at which point reads/writes return EOF. */
3813
3814class proc_mem_file
3815{
3816public:
3817 proc_mem_file (ptid_t ptid, int fd)
3818 : m_ptid (ptid), m_fd (fd)
3819 {
3820 gdb_assert (m_fd != -1);
3821 }
05c06f31 3822
8a89ddbd 3823 ~proc_mem_file ()
05c06f31 3824 {
89662f69 3825 linux_nat_debug_printf ("closing fd %d for /proc/%d/task/%ld/mem",
8a89ddbd
PA
3826 m_fd, m_ptid.pid (), m_ptid.lwp ());
3827 close (m_fd);
05c06f31 3828 }
05c06f31 3829
8a89ddbd
PA
3830 DISABLE_COPY_AND_ASSIGN (proc_mem_file);
3831
3832 int fd ()
3833 {
3834 return m_fd;
3835 }
3836
3837private:
3838 /* The LWP this file was opened for. Just for debugging
3839 purposes. */
3840 ptid_t m_ptid;
3841
3842 /* The file descriptor. */
3843 int m_fd = -1;
3844};
3845
3846/* The map between an inferior process id, and the open /proc/PID/mem
3847 file. This is stored in a map instead of in a per-inferior
3848 structure because we need to be able to access memory of processes
3849 which don't have a corresponding struct inferior object. E.g.,
3850 with "detach-on-fork on" (the default), and "follow-fork parent"
3851 (also default), we don't create an inferior for the fork child, but
3852 we still need to remove breakpoints from the fork child's
3853 memory. */
3854static std::unordered_map<int, proc_mem_file> proc_mem_file_map;
3855
3856/* Close the /proc/PID/mem file for PID. */
05c06f31
PA
3857
3858static void
8a89ddbd 3859close_proc_mem_file (pid_t pid)
dba24537 3860{
8a89ddbd 3861 proc_mem_file_map.erase (pid);
05c06f31 3862}
dba24537 3863
8a89ddbd
PA
3864/* Open the /proc/PID/mem file for the process (thread group) of PTID.
3865 We actually open /proc/PID/task/LWP/mem, as that's the LWP we know
3866 exists and is stopped right now. We prefer the
3867 /proc/PID/task/LWP/mem form over /proc/LWP/mem to avoid tid-reuse
3868 races, just in case this is ever called on an already-waited
3869 LWP. */
dba24537 3870
8a89ddbd
PA
3871static void
3872open_proc_mem_file (ptid_t ptid)
05c06f31 3873{
8a89ddbd
PA
3874 auto iter = proc_mem_file_map.find (ptid.pid ());
3875 gdb_assert (iter == proc_mem_file_map.end ());
dba24537 3876
8a89ddbd
PA
3877 char filename[64];
3878 xsnprintf (filename, sizeof filename,
3879 "/proc/%d/task/%ld/mem", ptid.pid (), ptid.lwp ());
3880
3881 int fd = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();
05c06f31 3882
8a89ddbd
PA
3883 if (fd == -1)
3884 {
3885 warning (_("opening /proc/PID/mem file for lwp %d.%ld failed: %s (%d)"),
3886 ptid.pid (), ptid.lwp (),
3887 safe_strerror (errno), errno);
3888 return;
05c06f31
PA
3889 }
3890
8a89ddbd
PA
3891 proc_mem_file_map.emplace (std::piecewise_construct,
3892 std::forward_as_tuple (ptid.pid ()),
3893 std::forward_as_tuple (ptid, fd));
3894
9221923c 3895 linux_nat_debug_printf ("opened fd %d for lwp %d.%ld",
8a89ddbd
PA
3896 fd, ptid.pid (), ptid.lwp ());
3897}
3898
3899/* Implement the to_xfer_partial target method using /proc/PID/mem.
3900 Because we can use a single read/write call, this can be much more
3901 efficient than banging away at PTRACE_PEEKTEXT. Also, unlike
3902 PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running
3903 threads. */
3904
3905static enum target_xfer_status
3906linux_proc_xfer_memory_partial (gdb_byte *readbuf, const gdb_byte *writebuf,
3907 ULONGEST offset, LONGEST len,
3908 ULONGEST *xfered_len)
3909{
3910 ssize_t ret;
3911
3912 auto iter = proc_mem_file_map.find (inferior_ptid.pid ());
3913 if (iter == proc_mem_file_map.end ())
3914 return TARGET_XFER_EOF;
3915
3916 int fd = iter->second.fd ();
3917
3918 gdb_assert (fd != -1);
dba24537 3919
a379284a
AA
3920 /* Use pread64/pwrite64 if available, since they save a syscall and can
3921 handle 64-bit offsets even on 32-bit platforms (for instance, SPARC
3922 debugging a SPARC64 application). */
dba24537 3923#ifdef HAVE_PREAD64
a379284a
AA
3924 ret = (readbuf ? pread64 (fd, readbuf, len, offset)
3925 : pwrite64 (fd, writebuf, len, offset));
dba24537 3926#else
a379284a
AA
3927 ret = lseek (fd, offset, SEEK_SET);
3928 if (ret != -1)
3929 ret = (readbuf ? read (fd, readbuf, len)
3930 : write (fd, writebuf, len));
dba24537 3931#endif
dba24537 3932
05c06f31
PA
3933 if (ret == -1)
3934 {
9221923c 3935 linux_nat_debug_printf ("accessing fd %d for pid %d failed: %s (%d)",
8a89ddbd 3936 fd, inferior_ptid.pid (),
05c06f31 3937 safe_strerror (errno), errno);
8a89ddbd 3938 return TARGET_XFER_EOF;
05c06f31
PA
3939 }
3940 else if (ret == 0)
3941 {
8a89ddbd
PA
3942 /* EOF means the address space is gone, the whole process exited
3943 or execed. */
9221923c 3944 linux_nat_debug_printf ("accessing fd %d for pid %d got EOF",
8a89ddbd 3945 fd, inferior_ptid.pid ());
05c06f31
PA
3946 return TARGET_XFER_EOF;
3947 }
9b409511
YQ
3948 else
3949 {
8a89ddbd 3950 *xfered_len = ret;
9b409511
YQ
3951 return TARGET_XFER_OK;
3952 }
05c06f31 3953}
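
/* Illustrative sketch (not part of GDB) of the mechanism that
   linux_proc_xfer_memory_partial relies on: a single pread from
   /proc/PID/mem reads another process's memory, provided the caller
   is allowed to trace that process.  For 64-bit offsets on 32-bit
   hosts the real code prefers pread64/O_LARGEFILE, as noted above.  */

static int
peek_proc_mem_sketch (pid_t pid, unsigned long addr, void *buf, size_t len)
{
  char path[64];
  snprintf (path, sizeof path, "/proc/%d/mem", (int) pid);

  int fd = open (path, O_RDONLY | O_CLOEXEC);
  if (fd < 0)
    return -1;

  ssize_t n = pread (fd, buf, len, (off_t) addr);
  close (fd);

  return n == (ssize_t) len ? 0 : -1;
}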
efcbbd14 3954
dba24537
AC
3955/* Parse LINE as a signal set and add its set bits to SIGS. */
3956
3957static void
3958add_line_to_sigset (const char *line, sigset_t *sigs)
3959{
3960 int len = strlen (line) - 1;
3961 const char *p;
3962 int signum;
3963
3964 if (line[len] != '\n')
8a3fe4f8 3965 error (_("Could not parse signal set: %s"), line);
dba24537
AC
3966
3967 p = line;
3968 signum = len * 4;
3969 while (len-- > 0)
3970 {
3971 int digit;
3972
3973 if (*p >= '0' && *p <= '9')
3974 digit = *p - '0';
3975 else if (*p >= 'a' && *p <= 'f')
3976 digit = *p - 'a' + 10;
3977 else
8a3fe4f8 3978 error (_("Could not parse signal set: %s"), line);
dba24537
AC
3979
3980 signum -= 4;
3981
3982 if (digit & 1)
3983 sigaddset (sigs, signum + 1);
3984 if (digit & 2)
3985 sigaddset (sigs, signum + 2);
3986 if (digit & 4)
3987 sigaddset (sigs, signum + 3);
3988 if (digit & 8)
3989 sigaddset (sigs, signum + 4);
3990
3991 p++;
3992 }
3993}
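
/* Worked example for add_line_to_sigset, assuming the 64-bit masks
   printed by current kernels: for the status line
   "SigPnd:\t0000000000010002\n" the argument is the hex part, which
   decodes to signal 2 (SIGINT, the 0x2 bit of the rightmost digit)
   and signal 17 (SIGCHLD on x86-64, the 0x1 bit of the fifth digit
   from the right); the leftmost digit covers the highest-numbered
   signals, 61-64.  */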
3994
3995/* Find process PID's pending, blocked and ignored signals from
3996 /proc/pid/status and set PENDING, BLOCKED and IGNORED to match. */
3997
3998void
3e43a32a
MS
3999linux_proc_pending_signals (int pid, sigset_t *pending,
4000 sigset_t *blocked, sigset_t *ignored)
dba24537 4001{
d8d2a3ee 4002 char buffer[PATH_MAX], fname[PATH_MAX];
dba24537
AC
4003
4004 sigemptyset (pending);
4005 sigemptyset (blocked);
4006 sigemptyset (ignored);
cde33bf1 4007 xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
d419f42d 4008 gdb_file_up procfile = gdb_fopen_cloexec (fname, "r");
dba24537 4009 if (procfile == NULL)
8a3fe4f8 4010 error (_("Could not open %s"), fname);
dba24537 4011
d419f42d 4012 while (fgets (buffer, PATH_MAX, procfile.get ()) != NULL)
dba24537
AC
4013 {
4014 /* Normal queued signals are on the SigPnd line in the status
4015 file. However, 2.6 kernels also have a "shared" pending
4016 queue for delivering signals to a thread group, so check for
4017 a ShdPnd line also.
4018
4019 Unfortunately some Red Hat kernels include the shared pending
4020 queue but not the ShdPnd status field. */
4021
61012eef 4022 if (startswith (buffer, "SigPnd:\t"))
dba24537 4023 add_line_to_sigset (buffer + 8, pending);
61012eef 4024 else if (startswith (buffer, "ShdPnd:\t"))
dba24537 4025 add_line_to_sigset (buffer + 8, pending);
61012eef 4026 else if (startswith (buffer, "SigBlk:\t"))
dba24537 4027 add_line_to_sigset (buffer + 8, blocked);
61012eef 4028 else if (startswith (buffer, "SigIgn:\t"))
dba24537
AC
4029 add_line_to_sigset (buffer + 8, ignored);
4030 }
dba24537
AC
4031}
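
/* Usage sketch for linux_proc_pending_signals: query the three
   signal sets of process PID and test whether a particular signal
   (SIGINT here, purely as an example) is queued for it.  */

static int
has_pending_sigint_sketch (int pid)
{
  sigset_t pending, blocked, ignored;

  linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
  return sigismember (&pending, SIGINT);
}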
4032
9b409511 4033static enum target_xfer_status
f6ac5f3d 4034linux_nat_xfer_osdata (enum target_object object,
e0881a8e 4035 const char *annex, gdb_byte *readbuf,
9b409511
YQ
4036 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4037 ULONGEST *xfered_len)
07e059b5 4038{
07e059b5
VP
4039 gdb_assert (object == TARGET_OBJECT_OSDATA);
4040
9b409511
YQ
4041 *xfered_len = linux_common_xfer_osdata (annex, readbuf, offset, len);
4042 if (*xfered_len == 0)
4043 return TARGET_XFER_EOF;
4044 else
4045 return TARGET_XFER_OK;
07e059b5
VP
4046}
4047
f6ac5f3d
PA
4048std::vector<static_tracepoint_marker>
4049linux_nat_target::static_tracepoint_markers_by_strid (const char *strid)
5808517f
YQ
4050{
4051 char s[IPA_CMD_BUF_SIZE];
e99b03dc 4052 int pid = inferior_ptid.pid ();
5d9310c4 4053 std::vector<static_tracepoint_marker> markers;
256642e8 4054 const char *p = s;
184ea2f7 4055 ptid_t ptid = ptid_t (pid, 0);
5d9310c4 4056 static_tracepoint_marker marker;
5808517f
YQ
4057
4058 /* Pause all */
4059 target_stop (ptid);
4060
4061 memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
4062 s[sizeof ("qTfSTM")] = 0;
4063
42476b70 4064 agent_run_command (pid, s, strlen (s) + 1);
5808517f 4065
1db93f14
TT
4066 /* Unpause all. */
4067 SCOPE_EXIT { target_continue_no_signal (ptid); };
5808517f
YQ
4068
4069 while (*p++ == 'm')
4070 {
5808517f
YQ
4071 do
4072 {
5d9310c4 4073 parse_static_tracepoint_marker_definition (p, &p, &marker);
5808517f 4074
5d9310c4
SM
4075 if (strid == NULL || marker.str_id == strid)
4076 markers.push_back (std::move (marker));
5808517f
YQ
4077 }
4078 while (*p++ == ','); /* comma-separated list */
4079
4080 memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
4081 s[sizeof ("qTsSTM")] = 0;
42476b70 4082 agent_run_command (pid, s, strlen (s) + 1);
5808517f
YQ
4083 p = s;
4084 }
4085
5808517f
YQ
4086 return markers;
4087}
4088
b84876c2
PA
4089/* target_can_async_p implementation. */
4090
57810aa7 4091bool
f6ac5f3d 4092linux_nat_target::can_async_p ()
b84876c2 4093{
fce6cd34
AB
4094 /* This flag should be checked in the common target.c code. */
4095 gdb_assert (target_async_permitted);
4096
4097 /* Otherwise, this target is always able to support async mode. */
4098 return true;
b84876c2
PA
4099}
4100
57810aa7 4101bool
f6ac5f3d 4102linux_nat_target::supports_non_stop ()
9908b566 4103{
f80c8ec4 4104 return true;
9908b566
VP
4105}
4106
fbea99ea
PA
4107/* to_always_non_stop_p implementation. */
4108
57810aa7 4109bool
f6ac5f3d 4110linux_nat_target::always_non_stop_p ()
fbea99ea 4111{
f80c8ec4 4112 return true;
fbea99ea
PA
4113}
4114
57810aa7 4115bool
f6ac5f3d 4116linux_nat_target::supports_multi_process ()
d90e17a7 4117{
aee91db3 4118 return true;
d90e17a7
PA
4119}
4120
57810aa7 4121bool
f6ac5f3d 4122linux_nat_target::supports_disable_randomization ()
03583c20 4123{
f80c8ec4 4124 return true;
03583c20
UW
4125}
4126
7feb7d06
PA
4127/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4128 so we notice when any child changes state, and notify the
4129 event-loop; it allows us to use sigsuspend in linux_nat_wait_1
4130 above to wait for the arrival of a SIGCHLD. */
4131
b84876c2 4132static void
7feb7d06 4133sigchld_handler (int signo)
b84876c2 4134{
7feb7d06
PA
4135 int old_errno = errno;
4136
01124a23 4137 if (debug_linux_nat)
da5bd37e 4138 gdb_stdlog->write_async_safe ("sigchld\n", sizeof ("sigchld\n") - 1);
7feb7d06 4139
b146ba14
JB
4140 if (signo == SIGCHLD)
4141 {
4142 /* Let the event loop know that there are events to handle. */
4143 linux_nat_target::async_file_mark_if_open ();
4144 }
7feb7d06
PA
4145
4146 errno = old_errno;
4147}
4148
4149/* Callback registered with the target events file descriptor. */
4150
4151static void
4152handle_target_event (int error, gdb_client_data client_data)
4153{
b1a35af2 4154 inferior_event_handler (INF_REG_EVENT);
7feb7d06
PA
4155}
4156
b84876c2
PA
4157/* target_async implementation. */
4158
f6ac5f3d
PA
4159void
4160linux_nat_target::async (int enable)
b84876c2 4161{
b146ba14
JB
4162 if ((enable != 0) == is_async_p ())
4163 return;
4164
4165 /* Block child signals while we create/destroy the pipe, as their
4166 handler writes to it. */
4167 gdb::block_signals blocker;
4168
6a3753b3 4169 if (enable)
b84876c2 4170 {
b146ba14
JB
4171 if (!async_file_open ())
4172 internal_error (__FILE__, __LINE__, "creating event pipe failed.");
4173
4174 add_file_handler (async_wait_fd (), handle_target_event, NULL,
4175 "linux-nat");
4176
4177 /* There may be pending events to handle. Tell the event loop
4178 to poll them. */
4179 async_file_mark ();
b84876c2
PA
4180 }
4181 else
4182 {
b146ba14
JB
4183 delete_file_handler (async_wait_fd ());
4184 async_file_close ();
b84876c2 4185 }
b84876c2
PA
4186}
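
/* A generic illustration of the self-pipe technique behind the async
   support above (a sketch, not GDB's actual async_file_* machinery):
   the SIGCHLD handler writes one byte to a non-blocking pipe, and the
   event loop watches the read end to learn that child events are
   pending.  */

static int sketch_event_pipe[2] = { -1, -1 };

static void
sketch_sigchld_handler (int signo)
{
  int saved_errno = errno;
  char byte = 0;

  /* write is async-signal-safe.  A full pipe already means the event
     loop will wake up, so the result is ignored.  */
  ssize_t n = write (sketch_event_pipe[1], &byte, 1);
  (void) n;

  errno = saved_errno;
}

static void
sketch_enable_async (void)
{
  if (pipe (sketch_event_pipe) < 0)
    return;

  for (int i = 0; i < 2; i++)
    fcntl (sketch_event_pipe[i], F_SETFL,
	   fcntl (sketch_event_pipe[i], F_GETFL) | O_NONBLOCK);

  struct sigaction sa;
  memset (&sa, 0, sizeof sa);
  sa.sa_handler = sketch_sigchld_handler;
  sigemptyset (&sa.sa_mask);
  sa.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sa, NULL);
}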
4187
a493e3e2 4188/* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
252fbfc8
PA
4189 event came out. */
4190
4c28f408 4191static int
d3a70e03 4192linux_nat_stop_lwp (struct lwp_info *lwp)
4c28f408 4193{
d90e17a7 4194 if (!lwp->stopped)
252fbfc8 4195 {
9327494e 4196 linux_nat_debug_printf ("running -> suspending %s",
e53c95d4 4197 lwp->ptid.to_string ().c_str ());
252fbfc8 4198
252fbfc8 4199
25289eb2
PA
4200 if (lwp->last_resume_kind == resume_stop)
4201 {
9327494e
SM
4202 linux_nat_debug_printf ("already stopping LWP %ld at GDB's request",
4203 lwp->ptid.lwp ());
25289eb2
PA
4204 return 0;
4205 }
252fbfc8 4206
d3a70e03 4207 stop_callback (lwp);
25289eb2 4208 lwp->last_resume_kind = resume_stop;
d90e17a7
PA
4209 }
4210 else
4211 {
4212 /* Already known to be stopped; do nothing. */
252fbfc8 4213
d90e17a7
PA
4214 if (debug_linux_nat)
4215 {
5b6d1e4f 4216 if (find_thread_ptid (linux_target, lwp->ptid)->stop_requested)
9327494e 4217 linux_nat_debug_printf ("already stopped/stop_requested %s",
e53c95d4 4218 lwp->ptid.to_string ().c_str ());
d90e17a7 4219 else
9327494e 4220 linux_nat_debug_printf ("already stopped/no stop_requested yet %s",
e53c95d4 4221 lwp->ptid.to_string ().c_str ());
252fbfc8
PA
4222 }
4223 }
4c28f408
PA
4224 return 0;
4225}
4226
f6ac5f3d
PA
4227void
4228linux_nat_target::stop (ptid_t ptid)
4c28f408 4229{
b6e52a0b 4230 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
d3a70e03 4231 iterate_over_lwps (ptid, linux_nat_stop_lwp);
bfedc46a
PA
4232}
4233
c0694254
PA
4234/* When requests are passed down from the linux-nat layer to the
4235 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
4236 used. The address space pointer is stored in the inferior object,
4237 but the common code that is passed such ptid can't tell whether
4238 lwpid is a "main" process id or not (it assumes so). We reverse
4239 look up the "main" process id from the lwp here. */
4240
f6ac5f3d
PA
4241struct address_space *
4242linux_nat_target::thread_address_space (ptid_t ptid)
c0694254
PA
4243{
4244 struct lwp_info *lwp;
4245 struct inferior *inf;
4246 int pid;
4247
e38504b3 4248 if (ptid.lwp () == 0)
c0694254
PA
4249 {
4250 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
4251 tgid. */
4252 lwp = find_lwp_pid (ptid);
e99b03dc 4253 pid = lwp->ptid.pid ();
c0694254
PA
4254 }
4255 else
4256 {
4257 /* A (pid,lwpid,0) ptid. */
e99b03dc 4258 pid = ptid.pid ();
c0694254
PA
4259 }
4260
5b6d1e4f 4261 inf = find_inferior_pid (this, pid);
c0694254
PA
4262 gdb_assert (inf != NULL);
4263 return inf->aspace;
4264}
4265
dc146f7c
VP
4266/* Return the cached value of the processor core for thread PTID. */
4267
f6ac5f3d
PA
4268int
4269linux_nat_target::core_of_thread (ptid_t ptid)
dc146f7c
VP
4270{
4271 struct lwp_info *info = find_lwp_pid (ptid);
e0881a8e 4272
dc146f7c
VP
4273 if (info)
4274 return info->core;
4275 return -1;
4276}
4277
7a6a1731
GB
4278/* Implementation of to_filesystem_is_local. */
4279
57810aa7 4280bool
f6ac5f3d 4281linux_nat_target::filesystem_is_local ()
7a6a1731
GB
4282{
4283 struct inferior *inf = current_inferior ();
4284
4285 if (inf->fake_pid_p || inf->pid == 0)
57810aa7 4286 return true;
7a6a1731
GB
4287
4288 return linux_ns_same (inf->pid, LINUX_NS_MNT);
4289}
4290
4291/* Convert the INF argument passed to a to_fileio_* method
4292 to a process ID suitable for passing to its corresponding
4293 linux_mntns_* function. If INF is non-NULL then the
4294 caller is requesting the filesystem seen by INF. If INF
4295 is NULL then the caller is requesting the filesystem seen
4296 by GDB itself. We fall back to GDB's filesystem in the case
4297 that INF is non-NULL but its PID is unknown. */
4298
4299static pid_t
4300linux_nat_fileio_pid_of (struct inferior *inf)
4301{
4302 if (inf == NULL || inf->fake_pid_p || inf->pid == 0)
4303 return getpid ();
4304 else
4305 return inf->pid;
4306}
4307
4308/* Implementation of to_fileio_open. */
4309
f6ac5f3d
PA
4310int
4311linux_nat_target::fileio_open (struct inferior *inf, const char *filename,
4312 int flags, int mode, int warn_if_slow,
4313 int *target_errno)
7a6a1731
GB
4314{
4315 int nat_flags;
4316 mode_t nat_mode;
4317 int fd;
4318
4319 if (fileio_to_host_openflags (flags, &nat_flags) == -1
4320 || fileio_to_host_mode (mode, &nat_mode) == -1)
4321 {
4322 *target_errno = FILEIO_EINVAL;
4323 return -1;
4324 }
4325
4326 fd = linux_mntns_open_cloexec (linux_nat_fileio_pid_of (inf),
4327 filename, nat_flags, nat_mode);
4328 if (fd == -1)
4329 *target_errno = host_to_fileio_error (errno);
4330
4331 return fd;
4332}
4333
4334/* Implementation of to_fileio_readlink. */
4335
f6ac5f3d
PA
4336gdb::optional<std::string>
4337linux_nat_target::fileio_readlink (struct inferior *inf, const char *filename,
4338 int *target_errno)
7a6a1731
GB
4339{
4340 char buf[PATH_MAX];
4341 int len;
7a6a1731
GB
4342
4343 len = linux_mntns_readlink (linux_nat_fileio_pid_of (inf),
4344 filename, buf, sizeof (buf));
4345 if (len < 0)
4346 {
4347 *target_errno = host_to_fileio_error (errno);
e0d3522b 4348 return {};
7a6a1731
GB
4349 }
4350
e0d3522b 4351 return std::string (buf, len);
7a6a1731
GB
4352}
4353
4354/* Implementation of to_fileio_unlink. */
4355
f6ac5f3d
PA
4356int
4357linux_nat_target::fileio_unlink (struct inferior *inf, const char *filename,
4358 int *target_errno)
7a6a1731
GB
4359{
4360 int ret;
4361
4362 ret = linux_mntns_unlink (linux_nat_fileio_pid_of (inf),
4363 filename);
4364 if (ret == -1)
4365 *target_errno = host_to_fileio_error (errno);
4366
4367 return ret;
4368}
4369
aa01bd36
PA
4370/* Implementation of the to_thread_events method. */
4371
f6ac5f3d
PA
4372void
4373linux_nat_target::thread_events (int enable)
aa01bd36
PA
4374{
4375 report_thread_events = enable;
4376}
4377
f6ac5f3d
PA
4378linux_nat_target::linux_nat_target ()
4379{
f973ed9c
DJ
4380 /* We don't change the stratum; this target will sit at
4381 process_stratum and thread_db will sit at thread_stratum. This
4382 is a little strange, since this is a multi-threaded-capable
4383 target, but we want to be on the stack below thread_db, and we
4384 also want to be used for single-threaded processes. */
f973ed9c
DJ
4385}
4386
f865ee35
JK
4387/* See linux-nat.h. */
4388
4389int
4390linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
9f0bdab8 4391{
da559b09 4392 int pid;
9f0bdab8 4393
e38504b3 4394 pid = ptid.lwp ();
da559b09 4395 if (pid == 0)
e99b03dc 4396 pid = ptid.pid ();
f865ee35 4397
da559b09
JK
4398 errno = 0;
4399 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo);
4400 if (errno != 0)
4401 {
4402 memset (siginfo, 0, sizeof (*siginfo));
4403 return 0;
4404 }
f865ee35 4405 return 1;
9f0bdab8
DJ
4406}
4407
7b669087
GB
4408/* See nat/linux-nat.h. */
4409
4410ptid_t
4411current_lwp_ptid (void)
4412{
15a9e13e 4413 gdb_assert (inferior_ptid.lwp_p ());
7b669087
GB
4414 return inferior_ptid;
4415}
4416
6c265988 4417void _initialize_linux_nat ();
d6b0e80f 4418void
6c265988 4419_initialize_linux_nat ()
d6b0e80f 4420{
8864ef42 4421 add_setshow_boolean_cmd ("linux-nat", class_maintenance,
b6e52a0b
AB
4422 &debug_linux_nat, _("\
4423Set debugging of GNU/Linux native target."), _("\
4424Show debugging of GNU/Linux native target."), _("\
4425When on, print debug messages relating to the GNU/Linux native target."),
4426 nullptr,
4427 show_debug_linux_nat,
4428 &setdebuglist, &showdebuglist);
b84876c2 4429
7a6a1731
GB
4430 add_setshow_boolean_cmd ("linux-namespaces", class_maintenance,
4431 &debug_linux_namespaces, _("\
4432Set debugging of GNU/Linux namespaces module."), _("\
4433Show debugging of GNU/Linux namespaces module."), _("\
4434Enables printf debugging output."),
4435 NULL,
4436 NULL,
4437 &setdebuglist, &showdebuglist);
4438
7feb7d06
PA
4439 /* Install a SIGCHLD handler. */
4440 sigchld_action.sa_handler = sigchld_handler;
4441 sigemptyset (&sigchld_action.sa_mask);
4442 sigchld_action.sa_flags = SA_RESTART;
b84876c2
PA
4443
4444 /* Make it the default. */
7feb7d06 4445 sigaction (SIGCHLD, &sigchld_action, NULL);
d6b0e80f
AC
4446
4447 /* Make sure we don't block SIGCHLD during a sigsuspend. */
21987b9c 4448 gdb_sigmask (SIG_SETMASK, NULL, &suspend_mask);
d6b0e80f
AC
4449 sigdelset (&suspend_mask, SIGCHLD);
4450
7feb7d06 4451 sigemptyset (&blocked_mask);
774113b0
PA
4452
4453 lwp_lwpid_htab_create ();
d6b0e80f
AC
4454}
4455\f
4456
4457/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4458 the GNU/Linux Threads library and therefore doesn't really belong
4459 here. */
4460
089436f7
TV
4461/* NPTL reserves the first two RT signals, but does not provide any
4462 way for the debugger to query the signal numbers - fortunately
4463 they don't change. */
4464static int lin_thread_signals[] = { __SIGRTMIN, __SIGRTMIN + 1 };
d6b0e80f 4465
089436f7
TV
4466/* See linux-nat.h. */
4467
4468unsigned int
4469lin_thread_get_thread_signal_num (void)
d6b0e80f 4470{
089436f7
TV
4471 return sizeof (lin_thread_signals) / sizeof (lin_thread_signals[0]);
4472}
d6b0e80f 4473
089436f7
TV
4474/* See linux-nat.h. */
4475
4476int
4477lin_thread_get_thread_signal (unsigned int i)
4478{
4479 gdb_assert (i < lin_thread_get_thread_signal_num ());
4480 return lin_thread_signals[i];
d6b0e80f 4481}
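
/* Usage sketch for the two helpers above: collect the signals NPTL
   reserves for itself into a set, so a debugger can pass them
   through to the inferior silently.  */

static void
collect_nptl_signals_sketch (sigset_t *set)
{
  sigemptyset (set);
  for (unsigned int i = 0; i < lin_thread_get_thread_signal_num (); i++)
    sigaddset (set, lin_thread_get_thread_signal (i));
}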