]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gdb/linux-nat.c
3ba072bc8d702b4998007e691ab653cef3683e7c
[thirdparty/binutils-gdb.git] / gdb / linux-nat.c
1 /* GNU/Linux native-dependent code common to multiple platforms.
2
3 Copyright (C) 2001-2024 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "inferior.h"
22 #include "infrun.h"
23 #include "target.h"
24 #include "nat/linux-nat.h"
25 #include "nat/linux-waitpid.h"
26 #include "gdbsupport/gdb_wait.h"
27 #include <unistd.h>
28 #include <sys/syscall.h>
29 #include "nat/gdb_ptrace.h"
30 #include "linux-nat.h"
31 #include "nat/linux-ptrace.h"
32 #include "nat/linux-procfs.h"
33 #include "nat/linux-personality.h"
34 #include "linux-fork.h"
35 #include "gdbthread.h"
36 #include "gdbcmd.h"
37 #include "regcache.h"
38 #include "regset.h"
39 #include "inf-child.h"
40 #include "inf-ptrace.h"
41 #include "auxv.h"
42 #include <sys/procfs.h>
43 #include "elf-bfd.h"
44 #include "gregset.h"
45 #include "gdbcore.h"
46 #include <ctype.h>
47 #include <sys/stat.h>
48 #include <fcntl.h>
49 #include "inf-loop.h"
50 #include "gdbsupport/event-loop.h"
51 #include "event-top.h"
52 #include <pwd.h>
53 #include <sys/types.h>
54 #include <dirent.h>
55 #include "xml-support.h"
56 #include <sys/vfs.h>
57 #include "solib.h"
58 #include "nat/linux-osdata.h"
59 #include "linux-tdep.h"
60 #include "symfile.h"
61 #include "gdbsupport/agent.h"
62 #include "tracepoint.h"
63 #include "target-descriptions.h"
64 #include "gdbsupport/filestuff.h"
65 #include "objfiles.h"
66 #include "nat/linux-namespaces.h"
67 #include "gdbsupport/block-signals.h"
68 #include "gdbsupport/fileio.h"
69 #include "gdbsupport/scope-exit.h"
70 #include "gdbsupport/gdb-sigmask.h"
71 #include "gdbsupport/common-debug.h"
72 #include <unordered_map>
73
74 /* This comment documents high-level logic of this file.
75
76 Waiting for events in sync mode
77 ===============================
78
79 When waiting for an event in a specific thread, we just use waitpid,
80 passing the specific pid, and not passing WNOHANG.
81
82 When waiting for an event in all threads, waitpid is not quite good:
83
84 - If the thread group leader exits while other threads in the thread
85 group still exist, waitpid(TGID, ...) hangs. That waitpid won't
86 return an exit status until the other threads in the group are
87 reaped.
88
89 - When a non-leader thread execs, that thread just vanishes without
90 reporting an exit (so we'd hang if we waited for it explicitly in
91 that case). The exec event is instead reported to the TGID pid.
92
93 The solution is to always use -1 and WNOHANG, together with
94 sigsuspend.
95
96 First, we use non-blocking waitpid to check for events. If nothing is
97 found, we use sigsuspend to wait for SIGCHLD. When SIGCHLD arrives,
98 it means something happened to a child process. As soon as we know
99 there's an event, we get back to calling nonblocking waitpid.
100
101 Note that SIGCHLD should be blocked between waitpid and sigsuspend
102 calls, so that we don't miss a signal. If SIGCHLD arrives in between,
103 when it's blocked, the signal becomes pending and sigsuspend
104 immediately notices it and returns.
105
106 Waiting for events in async mode (TARGET_WNOHANG)
107 =================================================
108
109 In async mode, GDB should always be ready to handle both user input
110 and target events, so neither blocking waitpid nor sigsuspend are
111 viable options. Instead, we should asynchronously notify the GDB main
112 event loop whenever there's an unprocessed event from the target. We
113 detect asynchronous target events by handling SIGCHLD signals. To
114 notify the event loop about target events, an event pipe is used
115 --- the pipe is registered as waitable event source in the event loop,
116 the event loop select/poll's on the read end of this pipe (as well on
117 other event sources, e.g., stdin), and the SIGCHLD handler marks the
118 event pipe to raise an event. This is more portable than relying on
119 pselect/ppoll, since on kernels that lack those syscalls, libc
120 emulates them with select/poll+sigprocmask, and that is racy
121 (a.k.a. plain broken).
122
123 Obviously, if we fail to notify the event loop if there's a target
124 event, it's bad. OTOH, if we notify the event loop when there's no
125 event from the target, linux_nat_wait will detect that there's no real
126 event to report, and return event of type TARGET_WAITKIND_IGNORE.
127 This is mostly harmless, but it will waste time and is better avoided.
128
129 The main design point is that every time GDB is outside linux-nat.c,
130 we have a SIGCHLD handler installed that is called when something
131 happens to the target and notifies the GDB event loop. Whenever GDB
132 core decides to handle the event, and calls into linux-nat.c, we
   process things as in sync mode, except that we never block in
134 sigsuspend.
135
136 While processing an event, we may end up momentarily blocked in
   waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
138 return quickly. E.g., in all-stop mode, before reporting to the core
139 that an LWP hit a breakpoint, all LWPs are stopped by sending them
140 SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
141 Note that this is different from blocking indefinitely waiting for the
142 next event --- here, we're already handling an event.
143
144 Use of signals
145 ==============
146
147 We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
148 signal is not entirely significant; we just need for a signal to be delivered,
149 so that we can intercept it. SIGSTOP's advantage is that it can not be
150 blocked. A disadvantage is that it is not a real-time signal, so it can only
151 be queued once; we do not keep track of other sources of SIGSTOP.
152
153 Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
154 use them, because they have special behavior when the signal is generated -
155 not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
156 kills the entire thread group.
157
158 A delivered SIGSTOP would stop the entire thread group, not just the thread we
159 tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
160 cancel it (by PTRACE_CONT without passing SIGSTOP).
161
162 We could use a real-time signal instead. This would solve those problems; we
163 could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
164 But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
165 generates it, and there are races with trying to find a signal that is not
166 blocked.
167
168 Exec events
169 ===========
170
171 The case of a thread group (process) with 3 or more threads, and a
172 thread other than the leader execs is worth detailing:
173
174 On an exec, the Linux kernel destroys all threads except the execing
175 one in the thread group, and resets the execing thread's tid to the
176 tgid. No exit notification is sent for the execing thread -- from the
177 ptracer's perspective, it appears as though the execing thread just
178 vanishes. Until we reap all other threads except the leader and the
179 execing thread, the leader will be zombie, and the execing thread will
180 be in `D (disc sleep)' state. As soon as all other threads are
181 reaped, the execing thread changes its tid to the tgid, and the
182 previous (zombie) leader vanishes, giving place to the "new"
183 leader.
184
185 Accessing inferior memory
186 =========================
187
188 To access inferior memory, we strongly prefer /proc/PID/mem. We
189 fallback to ptrace if and only if /proc/PID/mem is not writable, as a
190 concession for obsolescent kernels (such as found in RHEL6). For
191 modern kernels, the fallback shouldn't trigger. GDBserver does not
192 have the ptrace fallback already, and at some point, we'll consider
193 removing it from native GDB too.
194
195 /proc/PID/mem has a few advantages over alternatives like
196 PTRACE_PEEKTEXT/PTRACE_POKETEXT or process_vm_readv/process_vm_writev:
197
198 - Because we can use a single read/write call, /proc/PID/mem can be
199 much more efficient than banging away at
200 PTRACE_PEEKTEXT/PTRACE_POKETEXT, one word at a time.
201
202 - /proc/PID/mem allows writing to read-only pages, which we need to
203 e.g., plant breakpoint instructions. process_vm_writev does not
204 allow this.
205
206 - /proc/PID/mem allows memory access even if all threads are running.
207 OTOH, PTRACE_PEEKTEXT/PTRACE_POKETEXT require passing down the tid
208 of a stopped task. This lets us e.g., install breakpoints while the
209 inferior is running, clear a displaced stepping scratch pad when the
210 thread that was displaced stepping exits, print inferior globals,
211 etc., all without having to worry about temporarily pausing some
212 thread.
213
214 - /proc/PID/mem does not suffer from a race that could cause us to
215 access memory of the wrong address space when the inferior execs.
216
217 process_vm_readv/process_vm_writev have this problem.
218
219 E.g., say GDB decides to write to memory just while the inferior
220 execs. In this scenario, GDB could write memory to the post-exec
221 address space thinking it was writing to the pre-exec address space,
222 with high probability of corrupting the inferior. Or if GDB decides
223 instead to read memory just while the inferior execs, it could read
224 bogus contents out of the wrong address space.
225
226 ptrace used to have this problem too, but no longer has since Linux
227 commit dbb5afad100a ("ptrace: make ptrace() fail if the tracee
228 changed its pid unexpectedly"), in Linux 5.13. (And if ptrace were
229 ever changed to allow access memory via zombie or running threads,
230 it would better not forget to consider this scenario.)
231
232 We avoid this race with /proc/PID/mem, by opening the file as soon
233 as we start debugging the inferior, when it is known the inferior is
234 stopped, and holding on to the open file descriptor, to be used
235 whenever we need to access inferior memory. If the inferior execs
236 or exits, reading/writing from/to the file returns 0 (EOF),
237 indicating the address space is gone, and so we return
238 TARGET_XFER_EOF to the core. We close the old file and open a new
239 one when we finally see the PTRACE_EVENT_EXEC event. */
240
241 #ifndef O_LARGEFILE
242 #define O_LARGEFILE 0
243 #endif
244
245 struct linux_nat_target *linux_target;
246
247 /* Does the current host support PTRACE_GETREGSET? */
248 enum tribool have_ptrace_getregset = TRIBOOL_UNKNOWN;
249
250 /* When true, print debug messages relating to the linux native target. */
251
252 static bool debug_linux_nat;
253
254 /* Implement 'show debug linux-nat'. */
255
256 static void
257 show_debug_linux_nat (struct ui_file *file, int from_tty,
258 struct cmd_list_element *c, const char *value)
259 {
260 gdb_printf (file, _("Debugging of GNU/Linux native targets is %s.\n"),
261 value);
262 }
263
264 /* Print a linux-nat debug statement. */
265
266 #define linux_nat_debug_printf(fmt, ...) \
267 debug_prefixed_printf_cond (debug_linux_nat, "linux-nat", fmt, ##__VA_ARGS__)
268
269 /* Print "linux-nat" enter/exit debug statements. */
270
271 #define LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT \
272 scoped_debug_enter_exit (debug_linux_nat, "linux-nat")
273
274 struct simple_pid_list
275 {
276 int pid;
277 int status;
278 struct simple_pid_list *next;
279 };
280 static struct simple_pid_list *stopped_pids;
281
282 /* Whether target_thread_events is in effect. */
283 static int report_thread_events;
284
285 static int kill_lwp (int lwpid, int signo);
286
287 static int stop_callback (struct lwp_info *lp);
288
289 static void block_child_signals (sigset_t *prev_mask);
290 static void restore_child_signals_mask (sigset_t *prev_mask);
291
292 struct lwp_info;
293 static struct lwp_info *add_lwp (ptid_t ptid);
294 static void purge_lwp_list (int pid);
295 static void delete_lwp (ptid_t ptid);
296 static struct lwp_info *find_lwp_pid (ptid_t ptid);
297
298 static int lwp_status_pending_p (struct lwp_info *lp);
299
300 static void save_stop_reason (struct lwp_info *lp);
301
302 static bool proc_mem_file_is_writable ();
303 static void close_proc_mem_file (pid_t pid);
304 static void open_proc_mem_file (ptid_t ptid);
305
/* Return TRUE if LWP is the leader thread of its process, i.e., if
   its lwpid equals its process id (tgid).  */

static bool
is_leader (lwp_info *lp)
{
  return lp->ptid.pid () == lp->ptid.lwp ();
}
313
314 /* Convert an LWP's pending status to a std::string. */
315
316 static std::string
317 pending_status_str (lwp_info *lp)
318 {
319 gdb_assert (lwp_status_pending_p (lp));
320
321 if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
322 return lp->waitstatus.to_string ();
323 else
324 return status_to_str (lp->status);
325 }
326
327 /* Return true if we should report exit events for LP. */
328
329 static bool
330 report_exit_events_for (lwp_info *lp)
331 {
332 thread_info *thr = linux_target->find_thread (lp->ptid);
333 gdb_assert (thr != nullptr);
334
335 return (report_thread_events
336 || (thr->thread_options () & GDB_THREAD_OPTION_EXIT) != 0);
337 }
338
339 \f
340 /* LWP accessors. */
341
/* See nat/linux-nat.h.  Return LWP's ptid.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return lwp->ptid;
}

/* See nat/linux-nat.h.  Store INFO as LWP's arch-specific private
   data; ownership stays with the arch code (released via
   low_delete_thread, see ~lwp_info).  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  Return LWP's arch-specific private data.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  Return nonzero if LWP is stopped.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  Return the reason LWP last stopped.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  Return nonzero if LWP is single-stepping.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->step;
}
390
391 \f
392 /* Trivial list manipulation functions to keep track of a list of
393 new stopped processes. */
394 static void
395 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
396 {
397 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
398
399 new_pid->pid = pid;
400 new_pid->status = status;
401 new_pid->next = *listp;
402 *listp = new_pid;
403 }
404
405 static int
406 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
407 {
408 struct simple_pid_list **p;
409
410 for (p = listp; *p != NULL; p = &(*p)->next)
411 if ((*p)->pid == pid)
412 {
413 struct simple_pid_list *next = (*p)->next;
414
415 *statusp = (*p)->status;
416 xfree (*p);
417 *p = next;
418 return 1;
419 }
420 return 0;
421 }
422
/* Return the ptrace options that we want to try to enable.  ATTACHED
   is nonzero if we attached to an existing process (in which case we
   must not ask the kernel to kill it when GDB exits).  */

static int
linux_nat_ptrace_options (int attached)
{
  /* Events we always want reported: syscall stops, and the full set
     of fork/vfork/exec child events.  */
  int options = (PTRACE_O_TRACESYSGOOD
		 | PTRACE_O_TRACEVFORKDONE
		 | PTRACE_O_TRACEVFORK
		 | PTRACE_O_TRACEFORK
		 | PTRACE_O_TRACEEXEC);

  /* If we spawned the inferior ourselves, have the kernel kill it
     when the tracer (GDB) exits.  */
  if (!attached)
    options |= PTRACE_O_EXITKILL;

  return options;
}
441
/* Initialize ptrace and procfs warnings and check for supported
   ptrace features given PID.

   ATTACHED should be nonzero iff we attached to the inferior.  */

static void
linux_init_ptrace_procfs (pid_t pid, int attached)
{
  int options = linux_nat_ptrace_options (attached);

  /* Turn on the ptrace event-reporting options we want for PID.  */
  linux_enable_event_reporting (pid, options);
  linux_ptrace_init_warnings ();
  linux_proc_init_warnings ();
  /* Probe /proc/PID/mem writability once, up front.  */
  proc_mem_file_is_writable ();
}
457
linux_nat_target::~linux_nat_target ()
{}

/* Target hook called after attaching to PID: enable ptrace event
   reporting and emit any ptrace/procfs warnings.  */

void
linux_nat_target::post_attach (int pid)
{
  linux_init_ptrace_procfs (pid, 1);
}

/* Implement the virtual inf_ptrace_target::post_startup_inferior
   method: same as post_attach, but for an inferior we spawned
   ourselves (ATTACHED == 0).  */

void
linux_nat_target::post_startup_inferior (ptid_t ptid)
{
  linux_init_ptrace_procfs (ptid.pid (), 0);
}
474
475 /* Return the number of known LWPs in the tgid given by PID. */
476
477 static int
478 num_lwps (int pid)
479 {
480 int count = 0;
481
482 for (const lwp_info *lp ATTRIBUTE_UNUSED : all_lwps ())
483 if (lp->ptid.pid () == pid)
484 count++;
485
486 return count;
487 }
488
/* Deleter for lwp_info unique_ptr specialisation.  Removes the LWP
   from the global LWP structures, which also destroys it.  */

struct lwp_deleter
{
  void operator() (struct lwp_info *lwp) const
  {
    delete_lwp (lwp->ptid);
  }
};

/* A unique_ptr specialisation for lwp_info.  */

typedef std::unique_ptr<struct lwp_info, lwp_deleter> lwp_info_up;
502
/* Target hook for follow_fork.  Called after GDB core has decided to
   follow either the parent or the child of a fork/vfork, indicated by
   FOLLOW_CHILD.  CHILD_PTID identifies the new child; FORK_KIND is
   TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED.  If DETACH_FORK,
   the process we are not following is detached.  */

void
linux_nat_target::follow_fork (inferior *child_inf, ptid_t child_ptid,
			       target_waitkind fork_kind, bool follow_child,
			       bool detach_fork)
{
  inf_ptrace_target::follow_fork (child_inf, child_ptid, fork_kind,
				  follow_child, detach_fork);

  if (!follow_child)
    {
      bool has_vforked = fork_kind == TARGET_WAITKIND_VFORKED;
      ptid_t parent_ptid = inferior_ptid;
      int parent_pid = parent_ptid.lwp ();
      int child_pid = child_ptid.lwp ();

      /* We're already attached to the parent, by default.  */
      lwp_info *child_lp = add_lwp (child_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;

      /* Detach new forked process?  */
      if (detach_fork)
	{
	  int child_stop_signal = 0;
	  bool detach_child = true;

	  /* Move CHILD_LP into a unique_ptr and clear the source pointer
	     to prevent us doing anything stupid with it.  */
	  lwp_info_up child_lp_ptr (child_lp);
	  child_lp = nullptr;

	  linux_target->low_prepare_to_resume (child_lp_ptr.get ());

	  /* When debugging an inferior in an architecture that supports
	     hardware single stepping on a kernel without commit
	     6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
	     process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
	     set if the parent process had them set.
	     To work around this, single step the child process
	     once before detaching to clear the flags.  */

	  /* Note that we consult the parent's architecture instead of
	     the child's because there's no inferior for the child at
	     this point.  */
	  if (!gdbarch_software_single_step_p (target_thread_architecture
					       (parent_ptid)))
	    {
	      int status;

	      linux_disable_event_reporting (child_pid);
	      if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
		perror_with_name (_("Couldn't do single step"));
	      if (my_waitpid (child_pid, &status, 0) < 0)
		perror_with_name (_("Couldn't wait vfork process"));
	      else
		{
		  /* Only detach if the step left the child in a signal
		     stop; forward that stop signal on detach.  */
		  detach_child = WIFSTOPPED (status);
		  child_stop_signal = WSTOPSIG (status);
		}
	    }

	  if (detach_child)
	    {
	      int signo = child_stop_signal;

	      /* Deliver the pending stop signal on detach only if the
		 user has arranged for it to be passed through.  */
	      if (signo != 0
		  && !signal_pass_state (gdb_signal_from_host (signo)))
		signo = 0;
	      ptrace (PTRACE_DETACH, child_pid, 0, signo);

	      close_proc_mem_file (child_pid);
	    }
	}

      if (has_vforked)
	{
	  lwp_info *parent_lp = find_lwp_pid (parent_ptid);
	  linux_nat_debug_printf ("waiting for VFORK_DONE on %d", parent_pid);
	  parent_lp->stopped = 1;

	  /* We'll handle the VFORK_DONE event like any other
	     event, in target_wait.  */
	}
    }
  else
    {
      struct lwp_info *child_lp;

      /* Following the child: start tracking its main LWP, which is
	 ptrace-stopped at this point.  */
      child_lp = add_lwp (child_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;
    }
}
598
599 \f
/* On GNU/Linux, fork/vfork/exec events are always reported via the
   PTRACE_O_TRACE* options (see linux_nat_ptrace_options), so the
   insert/remove catchpoint hooks below have nothing to do; they
   return 0 (success) unconditionally.  */

int
linux_nat_target::insert_fork_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::remove_fork_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::insert_vfork_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::remove_vfork_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::insert_exec_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::remove_exec_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::set_syscall_catchpoint (int pid, bool needed, int any_count,
					  gdb::array_view<const int> syscall_counts)
{
  /* On GNU/Linux, we ignore the arguments.  It means that we only
     enable the syscall catchpoints, but do not disable them.

     Also, we do not use the `syscall_counts' information because we do not
     filter system calls here.  We let GDB do the logic for us.  */
  return 0;
}
647
648 /* List of known LWPs, keyed by LWP PID. This speeds up the common
649 case of mapping a PID returned from the kernel to our corresponding
650 lwp_info data structure. */
651 static htab_t lwp_lwpid_htab;
652
653 /* Calculate a hash from a lwp_info's LWP PID. */
654
655 static hashval_t
656 lwp_info_hash (const void *ap)
657 {
658 const struct lwp_info *lp = (struct lwp_info *) ap;
659 pid_t pid = lp->ptid.lwp ();
660
661 return iterative_hash_object (pid, 0);
662 }
663
664 /* Equality function for the lwp_info hash table. Compares the LWP's
665 PID. */
666
667 static int
668 lwp_lwpid_htab_eq (const void *a, const void *b)
669 {
670 const struct lwp_info *entry = (const struct lwp_info *) a;
671 const struct lwp_info *element = (const struct lwp_info *) b;
672
673 return entry->ptid.lwp () == element->ptid.lwp ();
674 }
675
/* Create the lwp_lwpid_htab hash table.  The initial size of 100 is
   just a hint; the table grows as needed.  */

static void
lwp_lwpid_htab_create (void)
{
  lwp_lwpid_htab = htab_create (100, lwp_info_hash, lwp_lwpid_htab_eq, NULL);
}
683
684 /* Add LP to the hash table. */
685
686 static void
687 lwp_lwpid_htab_add_lwp (struct lwp_info *lp)
688 {
689 void **slot;
690
691 slot = htab_find_slot (lwp_lwpid_htab, lp, INSERT);
692 gdb_assert (slot != NULL && *slot == NULL);
693 *slot = lp;
694 }
695
696 /* Head of doubly-linked list of known LWPs. Sorted by reverse
697 creation order. This order is assumed in some cases. E.g.,
   reaping status after killing all lwps of a process: the leader LWP
699 must be reaped last. */
700
701 static intrusive_list<lwp_info> lwp_list;
702
/* See linux-nat.h.  */

lwp_info_range
all_lwps ()
{
  return lwp_info_range (lwp_list.begin ());
}

/* See linux-nat.h.  NOTE(review): presumably the "safe" range
   tolerates deletion of the current element while iterating (see
   iterate_over_lwps) -- confirm against linux-nat.h.  */

lwp_info_safe_range
all_lwps_safe ()
{
  return lwp_info_safe_range (lwp_list.begin ());
}

/* Add LP to sorted-by-reverse-creation-order doubly-linked list.  */

static void
lwp_list_add (struct lwp_info *lp)
{
  /* Newest first, hence push at the front.  */
  lwp_list.push_front (*lp);
}

/* Remove LP from sorted-by-reverse-creation-order doubly-linked
   list.  */

static void
lwp_list_remove (struct lwp_info *lp)
{
  /* Remove from sorted-by-creation-order list.  */
  lwp_list.erase (lwp_list.iterator_to (*lp));
}
736
737 \f
738
739 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
740 _initialize_linux_nat. */
741 static sigset_t suspend_mask;
742
743 /* Signals to block to make that sigsuspend work. */
744 static sigset_t blocked_mask;
745
746 /* SIGCHLD action. */
747 static struct sigaction sigchld_action;
748
/* Block child signals (SIGCHLD and linux threads signals), and store
   the previous mask in PREV_MASK.  Pair with
   restore_child_signals_mask.  */

static void
block_child_signals (sigset_t *prev_mask)
{
  /* Make sure SIGCHLD is blocked.  BLOCKED_MASK is built up lazily;
     add SIGCHLD the first time through.  */
  if (!sigismember (&blocked_mask, SIGCHLD))
    sigaddset (&blocked_mask, SIGCHLD);

  gdb_sigmask (SIG_BLOCK, &blocked_mask, prev_mask);
}
761
/* Restore the child signals mask to PREV_MASK, previously returned by
   block_child_signals.  */

static void
restore_child_signals_mask (sigset_t *prev_mask)
{
  gdb_sigmask (SIG_SETMASK, prev_mask, NULL);
}
770
771 /* Mask of signals to pass directly to the inferior. */
772 static sigset_t pass_mask;
773
774 /* Update signals to pass to the inferior. */
775 void
776 linux_nat_target::pass_signals
777 (gdb::array_view<const unsigned char> pass_signals)
778 {
779 int signo;
780
781 sigemptyset (&pass_mask);
782
783 for (signo = 1; signo < NSIG; signo++)
784 {
785 int target_signo = gdb_signal_from_host (signo);
786 if (target_signo < pass_signals.size () && pass_signals[target_signo])
787 sigaddset (&pass_mask, signo);
788 }
789 }
790
791 \f
792
793 /* Prototypes for local functions. */
794 static int stop_wait_callback (struct lwp_info *lp);
795 static int resume_stopped_resumed_lwps (struct lwp_info *lp, const ptid_t wait_ptid);
796 static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
797
798 \f
799
/* Destroy and free LP.  */

lwp_info::~lwp_info ()
{
  /* Let the arch specific bits release arch_lwp_info (set via
     lwp_set_arch_private_info).  */
  linux_target->low_delete_thread (this->arch_private);
}
807
808 /* Traversal function for purge_lwp_list. */
809
810 static int
811 lwp_lwpid_htab_remove_pid (void **slot, void *info)
812 {
813 struct lwp_info *lp = (struct lwp_info *) *slot;
814 int pid = *(int *) info;
815
816 if (lp->ptid.pid () == pid)
817 {
818 htab_clear_slot (lwp_lwpid_htab, slot);
819 lwp_list_remove (lp);
820 delete lp;
821 }
822
823 return 1;
824 }
825
/* Remove all LWPs belonging to tgid PID from the LWP list and hash
   table, destroying them.  */

static void
purge_lwp_list (int pid)
{
  htab_traverse_noresize (lwp_lwpid_htab, lwp_lwpid_htab_remove_pid, &pid);
}
833
834 /* Add the LWP specified by PTID to the list. PTID is the first LWP
835 in the process. Return a pointer to the structure describing the
836 new LWP.
837
838 This differs from add_lwp in that we don't let the arch specific
839 bits know about this new thread. Current clients of this callback
840 take the opportunity to install watchpoints in the new thread, and
841 we shouldn't do that for the first thread. If we're spawning a
842 child ("run"), the thread executes the shell wrapper first, and we
843 shouldn't touch it until it execs the program we want to debug.
844 For "attach", it'd be okay to call the callback, but it's not
845 necessary, because watchpoints can't yet have been inserted into
846 the inferior. */
847
848 static struct lwp_info *
849 add_initial_lwp (ptid_t ptid)
850 {
851 gdb_assert (ptid.lwp_p ());
852
853 lwp_info *lp = new lwp_info (ptid);
854
855
856 /* Add to sorted-by-reverse-creation-order list. */
857 lwp_list_add (lp);
858
859 /* Add to keyed-by-pid htab. */
860 lwp_lwpid_htab_add_lwp (lp);
861
862 return lp;
863 }
864
865 /* Add the LWP specified by PID to the list. Return a pointer to the
866 structure describing the new LWP. The LWP should already be
867 stopped. */
868
869 static struct lwp_info *
870 add_lwp (ptid_t ptid)
871 {
872 struct lwp_info *lp;
873
874 lp = add_initial_lwp (ptid);
875
876 /* Let the arch specific bits know about this new thread. Current
877 clients of this callback take the opportunity to install
878 watchpoints in the new thread. We don't do this for the first
879 thread though. See add_initial_lwp. */
880 linux_target->low_new_thread (lp);
881
882 return lp;
883 }
884
885 /* Remove the LWP specified by PID from the list. */
886
887 static void
888 delete_lwp (ptid_t ptid)
889 {
890 lwp_info dummy (ptid);
891
892 void **slot = htab_find_slot (lwp_lwpid_htab, &dummy, NO_INSERT);
893 if (slot == NULL)
894 return;
895
896 lwp_info *lp = *(struct lwp_info **) slot;
897 gdb_assert (lp != NULL);
898
899 htab_clear_slot (lwp_lwpid_htab, slot);
900
901 /* Remove from sorted-by-creation-order list. */
902 lwp_list_remove (lp);
903
904 /* Release. */
905 delete lp;
906 }
907
908 /* Return a pointer to the structure describing the LWP corresponding
909 to PID. If no corresponding LWP could be found, return NULL. */
910
911 static struct lwp_info *
912 find_lwp_pid (ptid_t ptid)
913 {
914 int lwp;
915
916 if (ptid.lwp_p ())
917 lwp = ptid.lwp ();
918 else
919 lwp = ptid.pid ();
920
921 lwp_info dummy (ptid_t (0, lwp));
922 return (struct lwp_info *) htab_find (lwp_lwpid_htab, &dummy);
923 }
924
925 /* See nat/linux-nat.h. */
926
927 struct lwp_info *
928 iterate_over_lwps (ptid_t filter,
929 gdb::function_view<iterate_over_lwps_ftype> callback)
930 {
931 for (lwp_info *lp : all_lwps_safe ())
932 {
933 if (lp->ptid.matches (filter))
934 {
935 if (callback (lp) != 0)
936 return lp;
937 }
938 }
939
940 return NULL;
941 }
942
/* Update our internal state when changing from one checkpoint to
   another indicated by NEW_PTID.  We can only switch single-threaded
   applications, so we only create one new LWP, and the previous list
   is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  /* Drop all LWPs of the current inferior; the checkpoint is a
     different (single-threaded) process.  */
  purge_lwp_list (inferior_ptid.pid ());

  lp = add_lwp (new_ptid);
  lp->stopped = 1;

  /* This changes the thread's ptid while preserving the gdb thread
     num.  Also changes the inferior pid, while preserving the
     inferior num.  */
  thread_change_ptid (linux_target, inferior_ptid, new_ptid);

  /* We've just told GDB core that the thread changed target id, but,
     in fact, it really is a different thread, with different register
     contents.  */
  registers_changed ();
}
968
969 /* Handle the exit of a single thread LP. If DEL_THREAD is true,
970 delete the thread_info associated to LP, if it exists. */
971
972 static void
973 exit_lwp (struct lwp_info *lp, bool del_thread = true)
974 {
975 struct thread_info *th = linux_target->find_thread (lp->ptid);
976
977 if (th != nullptr && del_thread)
978 delete_thread (th);
979
980 delete_lwp (lp->ptid);
981 }
982
/* Wait for the LWP specified by PTID, which we have just attached to.
   Returns a wait status for that LWP, to cache.  Sets *SIGNALLED to 1
   if the LWP stopped with a signal other than SIGSTOP.  */

static int
linux_nat_post_attach_wait (ptid_t ptid, int *signalled)
{
  pid_t new_pid, pid = ptid.lwp ();
  int status;

  if (linux_proc_pid_is_stopped (pid))
    {
      linux_nat_debug_printf ("Attaching to a stopped process");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
	 is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the SIGSTOP
	 (or a higher priority signal, just like normal PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, __WALL);
  gdb_assert (pid == new_pid);

  if (!WIFSTOPPED (status))
    {
      /* The pid we tried to attach has apparently just exited.  */
      linux_nat_debug_printf ("Failed to stop %d: %s", pid,
			      status_to_str (status).c_str ());
      return status;
    }

  if (WSTOPSIG (status) != SIGSTOP)
    {
      /* Stopped with some other signal; let the caller know so it can
	 be re-delivered later.  */
      *signalled = 1;
      linux_nat_debug_printf ("Received %s after attaching",
			      status_to_str (status).c_str ());
    }

  return status;
}
1039
1040 void
1041 linux_nat_target::create_inferior (const char *exec_file,
1042 const std::string &allargs,
1043 char **env, int from_tty)
1044 {
1045 maybe_disable_address_space_randomization restore_personality
1046 (disable_randomization);
1047
1048 /* The fork_child mechanism is synchronous and calls target_wait, so
1049 we have to mask the async mode. */
1050
1051 /* Make sure we report all signals during startup. */
1052 pass_signals ({});
1053
1054 inf_ptrace_target::create_inferior (exec_file, allargs, env, from_tty);
1055
1056 open_proc_mem_file (inferior_ptid);
1057 }
1058
/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  Throws an error if PTRACE_ATTACH fails for any reason
   other than the thread having exited in the meantime.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  struct lwp_info *lp;

  /* Ignore LWPs we're already attached to.  */
  lp = find_lwp_pid (ptid);
  if (lp == NULL)
    {
      int lwpid = ptid.lwp ();

      if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
	{
	  /* Save errno immediately; later calls may clobber it.  */
	  int err = errno;

	  /* Be quiet if we simply raced with the thread exiting.
	     EPERM is returned if the thread's task still exists, and
	     is marked as exited or zombie, as well as other
	     conditions, so in that case, confirm the status in
	     /proc/PID/status.  */
	  if (err == ESRCH
	      || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	    {
	      linux_nat_debug_printf
		("Cannot attach to lwp %d: thread is gone (%d: %s)",
		 lwpid, err, safe_strerror (err));

	    }
	  else
	    {
	      /* Any other failure is fatal -- report a detailed
		 reason to the user.  */
	      std::string reason
		= linux_ptrace_attach_fail_reason_string (ptid, err);

	      error (_("Cannot attach to lwp %d: %s"),
		     lwpid, reason.c_str ());
	    }
	}
      else
	{
	  linux_nat_debug_printf ("PTRACE_ATTACH %s, 0, 0 (OK)",
				  ptid.to_string ().c_str ());

	  lp = add_lwp (ptid);

	  /* The next time we wait for this LWP we'll see a SIGSTOP as
	     PTRACE_ATTACH brings it to a halt.  */
	  lp->signalled = 1;

	  /* We need to wait for a stop before being able to make the
	     next ptrace call on this LWP.  */
	  lp->must_set_ptrace_flags = 1;

	  /* So that wait collects the SIGSTOP.  */
	  lp->resumed = 1;
	}

      /* Report "new LWP seen" even when the attach raced with thread
	 exit -- presumably so the caller re-scans /proc for further
	 new threads (see linux_proc_attach_tgid_threads).  */
      return 1;
    }
  return 0;
}
1123
/* Attach to the process specified by ARGS.  On success, the main
   thread and all its LWPs are attached, stopped, and added to GDB's
   thread list; the initial wait status is cached for later
   reporting.  On failure, everything already attached is detached and
   the error is propagated.  */

void
linux_nat_target::attach (const char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;
  ptid_t ptid;

  /* Make sure we report all signals during attach.  */
  pass_signals ({});

  try
    {
      inf_ptrace_target::attach (args, from_tty);
    }
  catch (const gdb_exception_error &ex)
    {
      /* Augment the error with a Linux-specific explanation (e.g.,
	 ptrace scope restrictions) when one is available.  */
      pid_t pid = parse_pid_to_attach (args);
      std::string reason = linux_ptrace_attach_fail_reason (pid);

      if (!reason.empty ())
	throw_error (ex.error, "warning: %s\n%s", reason.c_str (),
		     ex.what ());
      else
	throw_error (ex.error, "%s", ex.what ());
    }

  /* The ptrace base target adds the main thread with (pid,0,0)
     format.  Decorate it with lwp info.  */
  ptid = ptid_t (inferior_ptid.pid (),
		 inferior_ptid.pid ());
  thread_change_ptid (linux_target, inferior_ptid, ptid);

  /* Add the initial process as the first LWP to the list.  */
  lp = add_initial_lwp (ptid);

  status = linux_nat_post_attach_wait (lp->ptid, &lp->signalled);
  if (!WIFSTOPPED (status))
    {
      /* The process died before we could stop it; mourn it and tell
	 the user how it went away.  */
      if (WIFEXITED (status))
	{
	  int exit_code = WEXITSTATUS (status);

	  target_terminal::ours ();
	  target_mourn_inferior (inferior_ptid);
	  if (exit_code == 0)
	    error (_("Unable to attach: program exited normally."));
	  else
	    error (_("Unable to attach: program exited with code %d."),
		   exit_code);
	}
      else if (WIFSIGNALED (status))
	{
	  enum gdb_signal signo;

	  target_terminal::ours ();
	  target_mourn_inferior (inferior_ptid);

	  signo = gdb_signal_from_host (WTERMSIG (status));
	  error (_("Unable to attach: program terminated with signal "
		   "%s, %s."),
		 gdb_signal_to_name (signo),
		 gdb_signal_to_string (signo));
	}

      internal_error (_("unexpected status %d for PID %ld"),
		      status, (long) ptid.lwp ());
    }

  lp->stopped = 1;

  open_proc_mem_file (lp->ptid);

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  linux_nat_debug_printf ("waitpid %ld, saving status %s",
			  (long) lp->ptid.pid (),
			  status_to_str (status).c_str ());

  lp->status = status;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  The inferior may be using raw clone instead of
     using pthreads.  But even if it is using pthreads, thread_db
     walks structures in the inferior's address space to find the list
     of threads/LWPs, and those structures may well be corrupted.
     Note that once thread_db is loaded, we'll still use it to list
     threads and associate pthread info with each LWP.  */
  try
    {
      linux_proc_attach_tgid_threads (lp->ptid.pid (),
				      attach_proc_task_lwp_callback);
    }
  catch (const gdb_exception_error &)
    {
      /* Failed to attach to some LWP.  Detach any we've already
	 attached to.  */
      iterate_over_lwps (ptid_t (ptid.pid ()),
			 [] (struct lwp_info *lwp) -> int
			 {
			   /* Ignore errors when detaching.  */
			   ptrace (PTRACE_DETACH, lwp->ptid.lwp (), 0, 0);
			   delete_lwp (lwp->ptid);
			   return 0;
			 });

      target_terminal::ours ();
      target_mourn_inferior (inferior_ptid);

      throw;
    }

  /* Add all the LWPs to gdb's thread list.  The leader was already
     added above, so only the non-leader LWPs are added here.  */
  iterate_over_lwps (ptid_t (ptid.pid ()),
		     [] (struct lwp_info *lwp) -> int
		     {
		       if (lwp->ptid.pid () != lwp->ptid.lwp ())
			 {
			   add_thread (linux_target, lwp->ptid);
			   set_running (linux_target, lwp->ptid, true);
			   set_executing (linux_target, lwp->ptid, true);
			 }
		       return 0;
		     });
}
1248
/* Ptrace-detach the thread with pid PID, delivering SIGNO (0 for no
   signal).  If the lwp turns out to be a zombie, reap it instead of
   erroring out; any other detach failure is a hard error.  */

static void
detach_one_pid (int pid, int signo)
{
  if (ptrace (PTRACE_DETACH, pid, 0, signo) < 0)
    {
      /* Save errno before my_waitpid below can clobber it.  */
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (pid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       pid, safe_strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       pid, status);
	    }
	}
      else
	error (_("Can't detach %d: %s"),
	       pid, safe_strerror (save_errno));
    }
  else
    linux_nat_debug_printf ("PTRACE_DETACH (%d, %s, 0) (OK)",
			    pid, strsignal (signo));
}
1287
/* Get pending signal of THREAD as a host signal number, for detaching
   purposes.  This is the signal the thread last stopped for, which we
   need to deliver to the thread when detaching, otherwise, it'd be
   suppressed/lost.  Returns 0 when there is no signal to deliver, or
   when the signal is in no-pass state.  */

static int
get_detach_signal (struct lwp_info *lp)
{
  enum gdb_signal signo = GDB_SIGNAL_0;

  /* If we paused threads momentarily, we may have stored pending
     events in lp->status or lp->waitstatus (see stop_wait_callback),
     and GDB core hasn't seen any signal for those threads.
     Otherwise, the last signal reported to the core is found in the
     thread object's stop_signal.

     There's a corner case that isn't handled here at present.  Only
     if the thread stopped with a TARGET_WAITKIND_STOPPED does
     stop_signal make sense as a real signal to pass to the inferior.
     Some catchpoint related events, like
     TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
     to GDB_SIGNAL_SIGTRAP when the catchpoint triggers.  But,
     those traps are debug API (ptrace in our case) related and
     induced; the inferior wouldn't see them if it wasn't being
     traced.  Hence, we should never pass them to the inferior, even
     when set to pass state.  Since this corner case isn't handled by
     infrun.c when proceeding with a signal, for consistency, neither
     do we handle it here (or elsewhere in the file we check for
     signal pass state).  Normally SIGTRAP isn't set to pass state, so
     this is really a corner case.  */

  if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
    signo = GDB_SIGNAL_0; /* a pending ptrace event, not a real signal.  */
  else if (lp->status)
    signo = gdb_signal_from_host (WSTOPSIG (lp->status));
  else
    {
      /* NOTE(review): TP is dereferenced below without a null check;
	 presumably an lwp reaching this point always has a
	 corresponding thread_info -- confirm against callers.  */
      thread_info *tp = linux_target->find_thread (lp->ptid);

      if (target_is_non_stop_p () && !tp->executing ())
	{
	  if (tp->has_pending_waitstatus ())
	    {
	      /* If the thread has a pending event, and it was stopped with a
		 signal, use that signal to resume it.  If it has a pending
		 event of another kind, it was not stopped with a signal, so
		 resume it without a signal.  */
	      if (tp->pending_waitstatus ().kind () == TARGET_WAITKIND_STOPPED)
		signo = tp->pending_waitstatus ().sig ();
	      else
		signo = GDB_SIGNAL_0;
	    }
	  else
	    signo = tp->stop_signal ();
	}
      else if (!target_is_non_stop_p ())
	{
	  ptid_t last_ptid;
	  process_stratum_target *last_target;

	  get_last_target_status (&last_target, &last_ptid, nullptr);

	  /* In all-stop, only the thread that last reported a stop
	     has a meaningful stop_signal.  */
	  if (last_target == linux_target
	      && lp->ptid.lwp () == last_ptid.lwp ())
	    signo = tp->stop_signal ();
	}
    }

  if (signo == GDB_SIGNAL_0)
    {
      linux_nat_debug_printf ("lwp %s has no pending signal",
			      lp->ptid.to_string ().c_str ());
    }
  else if (!signal_pass_state (signo))
    {
      linux_nat_debug_printf
	("lwp %s had signal %s but it is in no pass state",
	 lp->ptid.to_string ().c_str (), gdb_signal_to_string (signo));
    }
  else
    {
      linux_nat_debug_printf ("lwp %s has pending signal %s",
			      lp->ptid.to_string ().c_str (),
			      gdb_signal_to_string (signo));

      return gdb_signal_to_host (signo);
    }

  return 0;
}
1378
/* If LP has a pending fork/vfork/clone status, return it.  The
   pending event may live in any of four places, checked in order:
   lwp_info::status (raw waitpid status), lwp_info::waitstatus, the
   thread's pending_waitstatus, or the thread's pending_follow.
   Returns an empty optional if there is no such pending event.  */

static std::optional<target_waitstatus>
get_pending_child_status (lwp_info *lp)
{
  LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;

  linux_nat_debug_printf ("lwp %s (stopped = %d)",
			  lp->ptid.to_string ().c_str (), lp->stopped);

  /* Check in lwp_info::status.  */
  if (WIFSTOPPED (lp->status) && linux_is_extended_waitstatus (lp->status))
    {
      int event = linux_ptrace_get_extended_event (lp->status);

      if (event == PTRACE_EVENT_FORK
	  || event == PTRACE_EVENT_VFORK
	  || event == PTRACE_EVENT_CLONE)
	{
	  /* The new child's pid must be fetched from the kernel with
	     PTRACE_GETEVENTMSG; it is not encoded in the status.  */
	  unsigned long child_pid;
	  int ret = ptrace (PTRACE_GETEVENTMSG, lp->ptid.lwp (), 0, &child_pid);
	  if (ret == 0)
	    {
	      target_waitstatus ws;

	      if (event == PTRACE_EVENT_FORK)
		ws.set_forked (ptid_t (child_pid, child_pid));
	      else if (event == PTRACE_EVENT_VFORK)
		ws.set_vforked (ptid_t (child_pid, child_pid));
	      else if (event == PTRACE_EVENT_CLONE)
		ws.set_thread_cloned (ptid_t (lp->ptid.pid (), child_pid));
	      else
		gdb_assert_not_reached ("unhandled");

	      return ws;
	    }
	  else
	    {
	      perror_warning_with_name (_("Failed to retrieve event msg"));
	      return {};
	    }
	}
    }

  /* Check in lwp_info::waitstatus.  */
  if (is_new_child_status (lp->waitstatus.kind ()))
    return lp->waitstatus;

  /* NOTE(review): TP is dereferenced without a null check below --
     presumably the lwp always has a thread here; confirm.  */
  thread_info *tp = linux_target->find_thread (lp->ptid);

  /* Check in thread_info::pending_waitstatus.  */
  if (tp->has_pending_waitstatus ()
      && is_new_child_status (tp->pending_waitstatus ().kind ()))
    return tp->pending_waitstatus ();

  /* Check in thread_info::pending_follow.  */
  if (is_new_child_status (tp->pending_follow.kind ()))
    return tp->pending_follow;

  return {};
}
1440
/* Detach from LP.  If SIGNO_P is non-NULL, then it points to the
   signal number that should be passed to the LWP when detaching.
   Otherwise pass any pending signal the LWP may have, if any.  The
   lwp_info is deleted on all paths out of this function.  */

static void
detach_one_lwp (struct lwp_info *lp, int *signo_p)
{
  int lwpid = lp->ptid.lwp ();
  int signo;

  /* If the lwp/thread we are about to detach has a pending fork/clone
     event, there is a process/thread GDB is attached to that the core
     of GDB doesn't know about.  Detach from it.  */

  std::optional<target_waitstatus> ws = get_pending_child_status (lp);
  if (ws.has_value ())
    detach_one_pid (ws->child_ptid ().lwp (), 0);

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lp->signalled)
    {
      linux_nat_debug_printf ("Sending SIGCONT to %s",
			      lp->ptid.to_string ().c_str ());

      /* SIGCONT cancels the queued SIGSTOP so the lwp doesn't stop
	 spuriously right after we detach.  */
      kill_lwp (lwpid, SIGCONT);
      lp->signalled = 0;
    }

  /* If the lwp has exited or was terminated due to a signal, there's
     nothing left to do.  */
  if (lp->waitstatus.kind () == TARGET_WAITKIND_EXITED
      || lp->waitstatus.kind () == TARGET_WAITKIND_THREAD_EXITED
      || lp->waitstatus.kind () == TARGET_WAITKIND_SIGNALLED)
    {
      linux_nat_debug_printf
	("Can't detach %s - it has exited or was terminated: %s.",
	 lp->ptid.to_string ().c_str (),
	 lp->waitstatus.to_string ().c_str ());
      delete_lwp (lp->ptid);
      return;
    }

  if (signo_p == NULL)
    {
      /* Pass on any pending signal for this LWP.  */
      signo = get_detach_signal (lp);
    }
  else
    signo = *signo_p;

  linux_nat_debug_printf ("preparing to resume lwp %s (stopped = %d)",
			  lp->ptid.to_string ().c_str (),
			  lp->stopped);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      linux_target->low_prepare_to_resume (lp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lp))
	throw;
    }

  detach_one_pid (lwpid, signo);

  delete_lwp (lp->ptid);
}
1512
1513 static int
1514 detach_callback (struct lwp_info *lp)
1515 {
1516 /* We don't actually detach from the thread group leader just yet.
1517 If the thread group exits, we must reap the zombie clone lwps
1518 before we're able to reap the leader. */
1519 if (lp->ptid.lwp () != lp->ptid.pid ())
1520 detach_one_lwp (lp, NULL);
1521 return 0;
1522 }
1523
/* Detach from inferior INF: stop all its lwps, remove breakpoints,
   detach every non-leader lwp, then the leader, passing along any
   pending signals so they aren't lost.  */

void
linux_nat_target::detach (inferior *inf, int from_tty)
{
  LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;

  struct lwp_info *main_lwp;
  int pid = inf->pid;

  /* Don't unregister from the event loop, as there may be other
     inferiors running.  */

  /* Stop all threads before detaching.  ptrace requires that the
     thread is stopped to successfully detach.  */
  iterate_over_lwps (ptid_t (pid), stop_callback);
  /* ... and wait until all of them have reported back that
     they're no longer running.  */
  iterate_over_lwps (ptid_t (pid), stop_wait_callback);

  /* We can now safely remove breakpoints.  We don't do this earlier
     in common code because this target doesn't currently support
     writing memory while the inferior is running.

     NOTE(review): this uses current_inferior () rather than the INF
     parameter -- presumably they are the same inferior here; confirm
     against callers.  */
  remove_breakpoints_inf (current_inferior ());

  iterate_over_lwps (ptid_t (pid), detach_callback);

  /* We have detached from everything except the main thread now, so
     should only have one thread left.  However, in non-stop mode the
     main thread might have exited, in which case we'll have no threads
     left.  */
  gdb_assert (num_lwps (pid) == 1
	      || (target_is_non_stop_p () && num_lwps (pid) == 0));

  if (pid == inferior_ptid.pid () && forks_exist_p ())
    {
      /* Multi-fork case.  The current inferior_ptid is being detached
	 from, but there are other viable forks to debug.  Detach from
	 the current fork, and context-switch to the first
	 available.  */
      linux_fork_detach (from_tty, find_lwp_pid (ptid_t (pid)));
    }
  else
    {
      target_announce_detach (from_tty);

      /* In non-stop mode it is possible that the main thread has exited,
	 in which case we don't try to detach.  */
      main_lwp = find_lwp_pid (ptid_t (pid));
      if (main_lwp != nullptr)
	{
	  /* Pass on any pending signal for the last LWP.  */
	  int signo = get_detach_signal (main_lwp);

	  detach_one_lwp (main_lwp, &signo);
	}
      else
	gdb_assert (target_is_non_stop_p ());

      detach_success (inf);
    }

  close_proc_mem_file (pid);
}
1586
/* Resume execution of the inferior process LP.  If STEP is nonzero,
   single-step it.  If SIGNO is not GDB_SIGNAL_0, deliver that signal.
   Throws on failure; see linux_resume_one_lwp for the non-throwing
   variant.  */

static void
linux_resume_one_lwp_throw (struct lwp_info *lp, int step,
			    enum gdb_signal signo)
{
  lp->step = step;

  /* stop_pc doubles as the PC the LWP had when it was last resumed.
     We only presently need that if the LWP is stepped though (to
     handle the case of stepping a breakpoint instruction).  */
  if (step)
    {
      struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);

      lp->stop_pc = regcache_read_pc (regcache);
    }
  else
    lp->stop_pc = 0;

  linux_target->low_prepare_to_resume (lp);
  linux_target->low_resume (lp->ptid, step, signo);

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lp->stopped = 0;
  lp->core = -1;
  lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
  registers_changed_ptid (linux_target, lp->ptid);
}
1622
/* Called when we try to resume a stopped LWP and that errors out.  If
   the LWP is no longer in ptrace-stopped state (meaning it's zombie,
   or about to become), discard the error, clear any pending status
   the LWP may have, and return true (we'll collect the exit status
   soon enough).  Otherwise, return false.  */

static int
check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
{
  /* If we get an error after resuming the LWP successfully, we'd
     confuse !T state for the LWP being gone.  */
  gdb_assert (lp->stopped);

  /* We can't just check whether the LWP is in 'Z (Zombie)' state,
     because even if ptrace failed with ESRCH, the tracee may be "not
     yet fully dead", but already refusing ptrace requests.  In that
     case the tracee has 'R (Running)' state for a little bit
     (observed in Linux 3.18).  See also the note on ESRCH in the
     ptrace(2) man page.  Instead, check whether the LWP has any state
     other than ptrace-stopped.  */

  /* Don't assume anything if /proc/PID/status can't be read.  */
  if (linux_proc_pid_is_trace_stopped_nowarn (lp->ptid.lwp ()) == 0)
    {
      /* Discard stale pending events; the lwp's real exit status
	 will be collected by a later wait.  */
      lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
      lp->status = 0;
      lp->waitstatus.set_ignore ();
      return 1;
    }
  return 0;
}
1654
/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
   disappears while we try to resume it (e.g., it turned zombie); in
   that case the error is swallowed and the lwp's pending state is
   cleared by check_ptrace_stopped_lwp_gone.  */

static void
linux_resume_one_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
{
  try
    {
      linux_resume_one_lwp_throw (lp, step, signo);
    }
  catch (const gdb_exception_error &ex)
    {
      /* Re-throw unless the lwp is known to be gone from its
	 ptrace-stop.  */
      if (!check_ptrace_stopped_lwp_gone (lp))
	throw;
    }
}
1671
1672 /* Resume LP. */
1673
1674 static void
1675 resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1676 {
1677 if (lp->stopped)
1678 {
1679 struct inferior *inf = find_inferior_ptid (linux_target, lp->ptid);
1680
1681 if (inf->vfork_child != NULL)
1682 {
1683 linux_nat_debug_printf ("Not resuming sibling %s (vfork parent)",
1684 lp->ptid.to_string ().c_str ());
1685 }
1686 else if (!lwp_status_pending_p (lp))
1687 {
1688 linux_nat_debug_printf ("Resuming sibling %s, %s, %s",
1689 lp->ptid.to_string ().c_str (),
1690 (signo != GDB_SIGNAL_0
1691 ? strsignal (gdb_signal_to_host (signo))
1692 : "0"),
1693 step ? "step" : "resume");
1694
1695 linux_resume_one_lwp (lp, step, signo);
1696 }
1697 else
1698 {
1699 linux_nat_debug_printf ("Not resuming sibling %s (has pending)",
1700 lp->ptid.to_string ().c_str ());
1701 }
1702 }
1703 else
1704 linux_nat_debug_printf ("Not resuming sibling %s (not stopped)",
1705 lp->ptid.to_string ().c_str ());
1706 }
1707
1708 /* Callback for iterate_over_lwps. If LWP is EXCEPT, do nothing.
1709 Resume LWP with the last stop signal, if it is in pass state. */
1710
1711 static int
1712 linux_nat_resume_callback (struct lwp_info *lp, struct lwp_info *except)
1713 {
1714 enum gdb_signal signo = GDB_SIGNAL_0;
1715
1716 if (lp == except)
1717 return 0;
1718
1719 if (lp->stopped)
1720 {
1721 struct thread_info *thread;
1722
1723 thread = linux_target->find_thread (lp->ptid);
1724 if (thread != NULL)
1725 {
1726 signo = thread->stop_signal ();
1727 thread->set_stop_signal (GDB_SIGNAL_0);
1728 }
1729 }
1730
1731 resume_lwp (lp, 0, signo);
1732 return 0;
1733 }
1734
1735 static int
1736 resume_clear_callback (struct lwp_info *lp)
1737 {
1738 lp->resumed = 0;
1739 lp->last_resume_kind = resume_stop;
1740 return 0;
1741 }
1742
1743 static int
1744 resume_set_callback (struct lwp_info *lp)
1745 {
1746 lp->resumed = 1;
1747 lp->last_resume_kind = resume_continue;
1748 return 0;
1749 }
1750
/* Resume the lwps matching SCOPE_PTID.  The event thread
   (inferior_ptid) is resumed with STEP/SIGNO; sibling lwps are
   resumed with their own pending stop signals.  If the event thread
   already has a status pending, no lwp is resumed at all and the
   pending event is left for linux_nat_wait to report.  */

void
linux_nat_target::resume (ptid_t scope_ptid, int step, enum gdb_signal signo)
{
  struct lwp_info *lp;

  linux_nat_debug_printf ("Preparing to %s %s, %s, inferior_ptid %s",
			  step ? "step" : "resume",
			  scope_ptid.to_string ().c_str (),
			  (signo != GDB_SIGNAL_0
			   ? strsignal (gdb_signal_to_host (signo)) : "0"),
			  inferior_ptid.to_string ().c_str ());

  /* Mark the lwps we're resuming as resumed and update their
     last_resume_kind to resume_continue.  */
  iterate_over_lwps (scope_ptid, resume_set_callback);

  lp = find_lwp_pid (inferior_ptid);
  gdb_assert (lp != NULL);

  /* Remember if we're stepping.  */
  lp->last_resume_kind = step ? resume_step : resume_continue;

  /* If we have a pending wait status for this thread, there is no
     point in resuming the process.  But first make sure that
     linux_nat_wait won't preemptively handle the event - we
     should never take this short-circuit if we are going to
     leave LP running, since we have skipped resuming all the
     other threads.  This bit of code needs to be synchronized
     with linux_nat_wait.  */

  if (lp->status && WIFSTOPPED (lp->status))
    {
      if (!lp->step
	  && WSTOPSIG (lp->status)
	  && sigismember (&pass_mask, WSTOPSIG (lp->status)))
	{
	  linux_nat_debug_printf
	    ("Not short circuiting for ignored status 0x%x", lp->status);

	  /* FIXME: What should we do if we are supposed to continue
	     this thread with a signal?  */
	  gdb_assert (signo == GDB_SIGNAL_0);
	  /* Deliver the pending pass-state signal instead of
	     reporting it; the status is consumed here.  */
	  signo = gdb_signal_from_host (WSTOPSIG (lp->status));
	  lp->status = 0;
	}
    }

  if (lwp_status_pending_p (lp))
    {
      /* FIXME: What should we do if we are supposed to continue
	 this thread with a signal?  */
      gdb_assert (signo == GDB_SIGNAL_0);

      linux_nat_debug_printf ("Short circuiting for status %s",
			      pending_status_str (lp).c_str ());

      if (target_can_async_p ())
	{
	  target_async (true);
	  /* Tell the event loop we have something to process.  */
	  async_file_mark ();
	}
      return;
    }

  /* No use iterating unless we're resuming other threads.  */
  if (scope_ptid != lp->ptid)
    iterate_over_lwps (scope_ptid, [=] (struct lwp_info *info)
		       {
			 return linux_nat_resume_callback (info, lp);
		       });

  linux_nat_debug_printf ("%s %s, %s (resume event thread)",
			  step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			  lp->ptid.to_string ().c_str (),
			  (signo != GDB_SIGNAL_0
			   ? strsignal (gdb_signal_to_host (signo)) : "0"));

  linux_resume_one_lwp (lp, step, signo);
}
1831
1832 /* Send a signal to an LWP. */
1833
1834 static int
1835 kill_lwp (int lwpid, int signo)
1836 {
1837 int ret;
1838
1839 errno = 0;
1840 ret = syscall (__NR_tkill, lwpid, signo);
1841 if (errno == ENOSYS)
1842 {
1843 /* If tkill fails, then we are not using nptl threads, a
1844 configuration we no longer support. */
1845 perror_with_name (("tkill"));
1846 }
1847 return ret;
1848 }
1849
/* Handle a GNU/Linux syscall trap wait response.  If we see a syscall
   event, check if the core is interested in it: if not, ignore the
   event, and keep waiting; otherwise, we need to toggle the LWP's
   syscall entry/exit status, since the ptrace event itself doesn't
   indicate it, and report the trap to higher layers.  Returns nonzero
   if the event was consumed here (caller should keep waiting), zero
   if it should be reported to the core.  */

static int
linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
{
  struct target_waitstatus *ourstatus = &lp->waitstatus;
  struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
  thread_info *thread = linux_target->find_thread (lp->ptid);
  int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, thread);

  if (stopping)
    {
      /* If we're stopping threads, there's a SIGSTOP pending, which
	 makes it so that the LWP reports an immediate syscall return,
	 followed by the SIGSTOP.  Skip seeing that "return" using
	 PTRACE_CONT directly, and let stop_wait_callback collect the
	 SIGSTOP.  Later when the thread is resumed, a new syscall
	 entry event will be reported.  If we didn't do this (and
	 returned 0), we'd leave a syscall entry pending, and our
	 caller, by using PTRACE_CONT to collect the SIGSTOP, skips
	 the syscall return itself.  Later, when the user re-resumes
	 this LWP, we'd see another syscall entry event and we'd
	 mistake it for a return.

	 If stop_wait_callback didn't force the SIGSTOP out of the LWP
	 (leaving immediately with LWP->signalled set, without issuing
	 a PTRACE_CONT), it would still be problematic to leave this
	 syscall enter pending, as later when the thread is resumed,
	 it would then see the same syscall exit mentioned above,
	 followed by the delayed SIGSTOP, while the syscall didn't
	 actually get to execute.  It seems it would be even more
	 confusing to the user.  */

      linux_nat_debug_printf
	("ignoring syscall %d for LWP %ld (stopping threads), resuming with "
	 "PTRACE_CONT for SIGSTOP", syscall_number, lp->ptid.lwp ());

      lp->syscall_state = TARGET_WAITKIND_IGNORE;
      ptrace (PTRACE_CONT, lp->ptid.lwp (), 0, 0);
      lp->stopped = 0;
      return 1;
    }

  /* Always update the entry/return state, even if this particular
     syscall isn't interesting to the core now.  In async mode,
     the user could install a new catchpoint for this syscall
     between syscall enter/return, and we'll need to know to
     report a syscall return if that happens.  */
  lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
		       ? TARGET_WAITKIND_SYSCALL_RETURN
		       : TARGET_WAITKIND_SYSCALL_ENTRY);

  if (catch_syscall_enabled ())
    {
      if (catching_syscall_number (syscall_number))
	{
	  /* Alright, an event to report.  */
	  if (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
	    ourstatus->set_syscall_entry (syscall_number);
	  else if (lp->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
	    ourstatus->set_syscall_return (syscall_number);
	  else
	    gdb_assert_not_reached ("unexpected syscall state");

	  linux_nat_debug_printf
	    ("stopping for %s of syscall %d for LWP %ld",
	     (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	      ? "entry" : "return"), syscall_number, lp->ptid.lwp ());

	  return 0;
	}

      linux_nat_debug_printf
	("ignoring %s of syscall %d for LWP %ld",
	 (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	  ? "entry" : "return"), syscall_number, lp->ptid.lwp ());
    }
  else
    {
      /* If we had been syscall tracing, and hence used PT_SYSCALL
	 before on this LWP, it could happen that the user removes all
	 syscall catchpoints before we get to process this event.
	 There are two noteworthy issues here:

	 - When stopped at a syscall entry event, resuming with
	   PT_STEP still resumes executing the syscall and reports a
	   syscall return.

	 - Only PT_SYSCALL catches syscall enters.  If we last
	   single-stepped this thread, then this event can't be a
	   syscall enter.  If we last single-stepped this thread, this
	   has to be a syscall exit.

	 The points above mean that the next resume, be it PT_STEP or
	 PT_CONTINUE, can not trigger a syscall trace event.  */
      linux_nat_debug_printf
	("caught syscall event with no syscall catchpoints. %d for LWP %ld, "
	 "ignoring", syscall_number, lp->ptid.lwp ());
      lp->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* The core isn't interested in this event.  For efficiency, avoid
     stopping all threads only to have the core resume them all again.
     Since we're not stopping threads, if we're still syscall tracing
     and not stepping, we can't use PTRACE_CONT here, as we'd miss any
     subsequent syscall.  Simply resume using the inf-ptrace layer,
     which knows when to use PT_SYSCALL or PT_CONTINUE.  */

  linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
  return 1;
}
1964
/* See target.h.  Record the newly-cloned lwp CHILD_PTID, fetch its
   already-collected initial stop status from stopped_pids, and add it
   to GDB's thread list (directly, or via the thread_db layer).  */

void
linux_nat_target::follow_clone (ptid_t child_ptid)
{
  lwp_info *new_lp = add_lwp (child_ptid);
  new_lp->stopped = 1;

  /* If the thread_db layer is active, let it record the user
     level thread id and status, and add the thread to GDB's
     list.  */
  if (!thread_db_notice_clone (inferior_ptid, new_lp->ptid))
    {
      /* The process is not using thread_db.  Add the LWP to
	 GDB's list.  */
      add_thread (linux_target, new_lp->ptid);
    }

  /* We just created NEW_LP so it cannot yet contain STATUS.  */
  gdb_assert (new_lp->status == 0);

  /* The clone's initial stop was already collected by the wait
     machinery and parked in stopped_pids; retrieve it.  */
  if (!pull_pid_from_list (&stopped_pids, child_ptid.lwp (), &new_lp->status))
    internal_error (_("no saved status for clone lwp"));

  if (WSTOPSIG (new_lp->status) != SIGSTOP)
    {
      /* This can happen if someone starts sending signals to
	 the new thread before it gets a chance to run, which
	 have a lower number than SIGSTOP (e.g. SIGUSR1).
	 This is an unlikely case, and harder to handle for
	 fork / vfork than for clone, so we do not try - but
	 we handle it for clone events here.  */

      new_lp->signalled = 1;

      /* Save the wait status to report later.  */
      linux_nat_debug_printf
	("waitpid of new LWP %ld, saving status %s",
	 (long) new_lp->ptid.lwp (), status_to_str (new_lp->status).c_str ());
    }
  else
    {
      /* The expected SIGSTOP; nothing to report, unless the core
	 asked for thread-created events.  */
      new_lp->status = 0;

      if (report_thread_events)
	new_lp->waitstatus.set_thread_created ();
    }
}
2013
/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  This function returns non-zero if the
   event should be ignored and we should wait again.  Note: the
   comment in some older revisions mentioned a STOPPING parameter;
   there is none — new clone children are left stopped with their
   status saved in STOPPED_PIDS for follow_clone to consume.  */

static int
linux_handle_extended_wait (struct lwp_info *lp, int status)
{
  int pid = lp->ptid.lwp ();
  struct target_waitstatus *ourstatus = &lp->waitstatus;
  int event = linux_ptrace_get_extended_event (status);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  lp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
      || event == PTRACE_EVENT_CLONE)
    {
      unsigned long new_pid;
      int ret;

      /* The LWP id of the new child is carried in the event message.  */
      ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */
	  ret = my_waitpid (new_pid, &status, __WALL);
	  if (ret == -1)
	    perror_with_name (_("waiting for new child"));
	  else if (ret != new_pid)
	    internal_error (_("wait returned unexpected PID %d"), ret);
	  else if (!WIFSTOPPED (status))
	    internal_error (_("wait returned unexpected status 0x%x"), status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  /* The fork child gets a fresh address space, so it needs
	     its own /proc/PID/mem file.  */
	  open_proc_mem_file (ptid_t (new_pid, new_pid));

	  /* The arch-specific native code may need to know about new
	     forks even if those end up never mapped to an
	     inferior.  */
	  linux_target->low_new_fork (lp, new_pid);
	}
      else if (event == PTRACE_EVENT_CLONE)
	{
	  /* Likewise, let the arch code track the new clone (e.g. to
	     copy debug register state).  */
	  linux_target->low_new_clone (lp, new_pid);
	}

      if (event == PTRACE_EVENT_FORK
	  && linux_fork_checkpointing_p (lp->ptid.pid ()))
	{
	  /* Handle checkpointing by linux-fork.c here as a special
	     case.  We don't want the follow-fork-mode or 'catch fork'
	     to interfere with this.  */

	  /* This won't actually modify the breakpoint list, but will
	     physically remove the breakpoints from the child.  */
	  detach_breakpoints (ptid_t (new_pid, new_pid));

	  /* Retain child fork in ptrace (stopped) state.  */
	  if (!find_fork_pid (new_pid))
	    add_fork (new_pid);

	  /* Report as spurious, so that infrun doesn't want to follow
	     this fork.  We're actually doing an infcall in
	     linux-fork.c.  */
	  ourstatus->set_spurious ();

	  /* Report the stop to the core.  */
	  return 0;
	}

      if (event == PTRACE_EVENT_FORK)
	ourstatus->set_forked (ptid_t (new_pid, new_pid));
      else if (event == PTRACE_EVENT_VFORK)
	ourstatus->set_vforked (ptid_t (new_pid, new_pid));
      else if (event == PTRACE_EVENT_CLONE)
	{
	  linux_nat_debug_printf
	    ("Got clone event from LWP %d, new child is LWP %ld", pid, new_pid);

	  /* Save the status again, we'll use it in follow_clone.  */
	  add_to_pid_list (&stopped_pids, new_pid, status);

	  ourstatus->set_thread_cloned (ptid_t (lp->ptid.pid (), new_pid));
	}

      return 0;
    }

  if (event == PTRACE_EVENT_EXEC)
    {
      linux_nat_debug_printf ("Got exec event from LWP %ld", lp->ptid.lwp ());

      /* Close the previous /proc/PID/mem file for this inferior,
	 which was using the address space which is now gone.
	 Reading/writing from this file would return 0/EOF.  */
      close_proc_mem_file (lp->ptid.pid ());

      /* Open a new file for the new address space.  */
      open_proc_mem_file (lp->ptid);

      ourstatus->set_execd
	(make_unique_xstrdup (linux_proc_pid_to_exec_file (pid)));

      /* The thread that execed must have been resumed, but, when a
	 thread execs, it changes its tid to the tgid, and the old
	 tgid thread might have not been resumed.  */
      lp->resumed = 1;

      /* All other LWPs are gone now.  We'll have received a thread
	 exit notification for all threads other the execing one.
	 That one, if it wasn't the leader, just silently changes its
	 tid to the tgid, and the previous leader vanishes.  Since
	 Linux 3.0, the former thread ID can be retrieved with
	 PTRACE_GETEVENTMSG, but since we support older kernels, don't
	 bother with it, and just walk the LWP list.  Even with
	 PTRACE_GETEVENTMSG, we'd still need to lookup the
	 corresponding LWP object, and it would be an extra ptrace
	 syscall, so this way may even be more efficient.  */
      for (lwp_info *other_lp : all_lwps_safe ())
	if (other_lp != lp && other_lp->ptid.pid () == lp->ptid.pid ())
	  exit_lwp (other_lp);

      return 0;
    }

  if (event == PTRACE_EVENT_VFORK_DONE)
    {
      linux_nat_debug_printf
	("Got PTRACE_EVENT_VFORK_DONE from LWP %ld",
	 lp->ptid.lwp ());
      ourstatus->set_vfork_done ();
      return 0;
    }

  /* Any other extended event is unexpected; bail out loudly.  */
  internal_error (_("unknown ptrace event %d"), event);
}
2158
2159 /* Suspend waiting for a signal. We're mostly interested in
2160 SIGCHLD/SIGINT. */
2161
2162 static void
2163 wait_for_signal ()
2164 {
2165 linux_nat_debug_printf ("about to sigsuspend");
2166 sigsuspend (&suspend_mask);
2167
2168 /* If the quit flag is set, it means that the user pressed Ctrl-C
2169 and we're debugging a process that is running on a separate
2170 terminal, so we must forward the Ctrl-C to the inferior. (If the
2171 inferior is sharing GDB's terminal, then the Ctrl-C reaches the
2172 inferior directly.) We must do this here because functions that
2173 need to block waiting for a signal loop forever until there's an
2174 event to report before returning back to the event loop. */
2175 if (!target_terminal::is_ours ())
2176 {
2177 if (check_quit_flag ())
2178 target_pass_ctrlc ();
2179 }
2180 }
2181
2182 /* Mark LWP dead, with STATUS as exit status pending to report
2183 later. */
2184
2185 static void
2186 mark_lwp_dead (lwp_info *lp, int status)
2187 {
2188 /* Store the exit status lp->waitstatus, because lp->status would be
2189 ambiguous (W_EXITCODE(0,0) == 0). */
2190 lp->waitstatus = host_status_to_waitstatus (status);
2191
2192 /* If we're processing LP's status, there should be no other event
2193 already recorded as pending. */
2194 gdb_assert (lp->status == 0);
2195
2196 /* Dead LWPs aren't expected to report a pending sigstop. */
2197 lp->signalled = 0;
2198
2199 /* Prevent trying to stop it. */
2200 lp->stopped = 1;
2201 }
2202
/* Wait for LP to stop.  Returns the wait status, or 0 if the LWP has
   exited (in which case LP has either been marked dead with a
   pending exit status, or deleted from the LWP list).  */

static int
wait_lwp (struct lwp_info *lp)
{
  pid_t pid;
  int status = 0;
  int thread_dead = 0;
  sigset_t prev_mask;

  /* Caller must only hand us an LWP that is running with no event
     already recorded.  */
  gdb_assert (!lp->stopped);
  gdb_assert (lp->status == 0);

  /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below.  */
  block_child_signals (&prev_mask);

  for (;;)
    {
      /* WNOHANG so that we never block in waitpid; see the zombie
	 leader workaround below for why blocking is dangerous.  */
      pid = my_waitpid (lp->ptid.lwp (), &status, __WALL | WNOHANG);
      if (pid == -1 && errno == ECHILD)
	{
	  /* The thread has previously exited.  We need to delete it
	     now because if this was a non-leader thread execing, we
	     won't get an exit event.  See comments on exec events at
	     the top of the file.  */
	  thread_dead = 1;
	  linux_nat_debug_printf ("%s vanished.",
				  lp->ptid.to_string ().c_str ());
	}
      if (pid != 0)
	break;

      /* Bugs 10970, 12702.
	 Thread group leader may have exited in which case we'll lock up in
	 waitpid if there are other threads, even if they are all zombies too.
	 Basically, we're not supposed to use waitpid this way.
	 tkill(pid,0) cannot be used here as it gets ESRCH for both
	 for zombie and running processes.

	 As a workaround, check if we're waiting for the thread group leader and
	 if it's a zombie, and avoid calling waitpid if it is.

	 This is racy, what if the tgl becomes a zombie right after we check?
	 Therefore always use WNOHANG with sigsuspend - it is equivalent to
	 waiting waitpid but linux_proc_pid_is_zombie is safe this way.  */

      if (lp->ptid.pid () == lp->ptid.lwp ()
	  && linux_proc_pid_is_zombie (lp->ptid.lwp ()))
	{
	  thread_dead = 1;
	  linux_nat_debug_printf ("Thread group leader %s vanished.",
				  lp->ptid.to_string ().c_str ());
	  break;
	}

      /* Wait for next SIGCHLD and try again.  This may let SIGCHLD handlers
	 get invoked despite our caller had them intentionally blocked by
	 block_child_signals.  This is sensitive only to the loop of
	 linux_nat_wait_1 and there if we get called my_waitpid gets called
	 again before it gets to sigsuspend so we can safely let the handlers
	 get executed here.  */
      wait_for_signal ();
    }

  restore_child_signals_mask (&prev_mask);

  if (!thread_dead)
    {
      gdb_assert (pid == lp->ptid.lwp ());

      linux_nat_debug_printf ("waitpid %s received %s",
			      lp->ptid.to_string ().c_str (),
			      status_to_str (status).c_str ());

      /* Check if the thread has exited.  */
      if (WIFEXITED (status) || WIFSIGNALED (status))
	{
	  if (report_exit_events_for (lp) || is_leader (lp))
	    {
	      linux_nat_debug_printf ("LWP %d exited.", lp->ptid.pid ());

	      /* If this is the leader exiting, it means the whole
		 process is gone.  Store the status to report to the
		 core.  */
	      mark_lwp_dead (lp, status);
	      return 0;
	    }

	  thread_dead = 1;
	  linux_nat_debug_printf ("%s exited.",
				  lp->ptid.to_string ().c_str ());
	}
    }

  if (thread_dead)
    {
      /* Delete the LWP from our list; nothing more to report.  */
      exit_lwp (lp);
      return 0;
    }

  gdb_assert (WIFSTOPPED (status));
  lp->stopped = 1;

  /* If we deferred enabling the ptrace event options for this LWP
     (e.g., it hadn't stopped yet when we attached), do it now.  */
  if (lp->must_set_ptrace_flags)
    {
      inferior *inf = find_inferior_pid (linux_target, lp->ptid.pid ());
      int options = linux_nat_ptrace_options (inf->attach_flag);

      linux_enable_event_reporting (lp->ptid.lwp (), options);
      lp->must_set_ptrace_flags = 0;
    }

  /* Handle GNU/Linux's syscall SIGTRAPs.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
    {
      /* No longer need the sysgood bit.  The ptrace event ends up
	 recorded in lp->waitstatus if we care for it.  We can carry
	 on handling the event like a regular SIGTRAP from here
	 on.  */
      status = W_STOPCODE (SIGTRAP);
      if (linux_handle_syscall_trap (lp, 1))
	/* The syscall trap was uninteresting; wait for the next
	   stop.  */
	return wait_lwp (lp);
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in linux_handle_extended_wait.  */
      lp->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Handle GNU/Linux's extended waitstatus for trace events.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
      && linux_is_extended_waitstatus (status))
    {
      linux_nat_debug_printf ("Handling extended status 0x%06x", status);
      linux_handle_extended_wait (lp, status);
      return 0;
    }

  return status;
}
2345
2346 /* Send a SIGSTOP to LP. */
2347
2348 static int
2349 stop_callback (struct lwp_info *lp)
2350 {
2351 if (!lp->stopped && !lp->signalled)
2352 {
2353 int ret;
2354
2355 linux_nat_debug_printf ("kill %s **<SIGSTOP>**",
2356 lp->ptid.to_string ().c_str ());
2357
2358 errno = 0;
2359 ret = kill_lwp (lp->ptid.lwp (), SIGSTOP);
2360 linux_nat_debug_printf ("lwp kill %d %s", ret,
2361 errno ? safe_strerror (errno) : "ERRNO-OK");
2362
2363 lp->signalled = 1;
2364 gdb_assert (lp->status == 0);
2365 }
2366
2367 return 0;
2368 }
2369
2370 /* Request a stop on LWP. */
2371
2372 void
2373 linux_stop_lwp (struct lwp_info *lwp)
2374 {
2375 stop_callback (lwp);
2376 }
2377
2378 /* See linux-nat.h */
2379
2380 void
2381 linux_stop_and_wait_all_lwps (void)
2382 {
2383 /* Stop all LWP's ... */
2384 iterate_over_lwps (minus_one_ptid, stop_callback);
2385
2386 /* ... and wait until all of them have reported back that
2387 they're no longer running. */
2388 iterate_over_lwps (minus_one_ptid, stop_wait_callback);
2389 }
2390
2391 /* See linux-nat.h */
2392
2393 void
2394 linux_unstop_all_lwps (void)
2395 {
2396 iterate_over_lwps (minus_one_ptid,
2397 [] (struct lwp_info *info)
2398 {
2399 return resume_stopped_resumed_lwps (info, minus_one_ptid);
2400 });
2401 }
2402
2403 /* Return non-zero if LWP PID has a pending SIGINT. */
2404
2405 static int
2406 linux_nat_has_pending_sigint (int pid)
2407 {
2408 sigset_t pending, blocked, ignored;
2409
2410 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2411
2412 if (sigismember (&pending, SIGINT)
2413 && !sigismember (&ignored, SIGINT))
2414 return 1;
2415
2416 return 0;
2417 }
2418
2419 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
2420
2421 static int
2422 set_ignore_sigint (struct lwp_info *lp)
2423 {
2424 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2425 flag to consume the next one. */
2426 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2427 && WSTOPSIG (lp->status) == SIGINT)
2428 lp->status = 0;
2429 else
2430 lp->ignore_sigint = 1;
2431
2432 return 0;
2433 }
2434
2435 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2436 This function is called after we know the LWP has stopped; if the LWP
2437 stopped before the expected SIGINT was delivered, then it will never have
2438 arrived. Also, if the signal was delivered to a shared queue and consumed
2439 by a different thread, it will never be delivered to this LWP. */
2440
2441 static void
2442 maybe_clear_ignore_sigint (struct lwp_info *lp)
2443 {
2444 if (!lp->ignore_sigint)
2445 return;
2446
2447 if (!linux_nat_has_pending_sigint (lp->ptid.lwp ()))
2448 {
2449 linux_nat_debug_printf ("Clearing bogus flag for %s",
2450 lp->ptid.to_string ().c_str ());
2451 lp->ignore_sigint = 0;
2452 }
2453 }
2454
2455 /* Fetch the possible triggered data watchpoint info and store it in
2456 LP.
2457
2458 On some archs, like x86, that use debug registers to set
2459 watchpoints, it's possible that the way to know which watched
2460 address trapped, is to check the register that is used to select
2461 which address to watch. Problem is, between setting the watchpoint
2462 and reading back which data address trapped, the user may change
2463 the set of watchpoints, and, as a consequence, GDB changes the
2464 debug registers in the inferior. To avoid reading back a stale
2465 stopped-data-address when that happens, we cache in LP the fact
2466 that a watchpoint trapped, and the corresponding data address, as
2467 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2468 registers meanwhile, we have the cached data we can rely on. */
2469
2470 static int
2471 check_stopped_by_watchpoint (struct lwp_info *lp)
2472 {
2473 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
2474 inferior_ptid = lp->ptid;
2475
2476 if (linux_target->low_stopped_by_watchpoint ())
2477 {
2478 lp->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2479 lp->stopped_data_address_p
2480 = linux_target->low_stopped_data_address (&lp->stopped_data_address);
2481 }
2482
2483 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2484 }
2485
2486 /* Returns true if the LWP had stopped for a watchpoint. */
2487
2488 bool
2489 linux_nat_target::stopped_by_watchpoint ()
2490 {
2491 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2492
2493 gdb_assert (lp != NULL);
2494
2495 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2496 }
2497
2498 bool
2499 linux_nat_target::stopped_data_address (CORE_ADDR *addr_p)
2500 {
2501 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2502
2503 gdb_assert (lp != NULL);
2504
2505 *addr_p = lp->stopped_data_address;
2506
2507 return lp->stopped_data_address_p;
2508 }
2509
2510 /* Commonly any breakpoint / watchpoint generate only SIGTRAP. */
2511
2512 bool
2513 linux_nat_target::low_status_is_event (int status)
2514 {
2515 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2516 }
2517
/* Wait until LP is stopped.  Callback for iterate_over_lwps; always
   returns 0 so iteration continues over all LWPs.  Any interesting
   event observed while waiting is recorded in LP as pending.  */

static int
stop_wait_callback (struct lwp_info *lp)
{
  inferior *inf = find_inferior_ptid (linux_target, lp->ptid);

  /* If this is a vfork parent, bail out, it is not going to report
     any SIGSTOP until the vfork is done with.  */
  if (inf->vfork_child != NULL)
    return 0;

  if (!lp->stopped)
    {
      int status;

      /* Block until the LWP reports a stop (0 means it exited and
	 has already been handled).  */
      status = wait_lwp (lp);
      if (status == 0)
	return 0;

      if (lp->ignore_sigint && WIFSTOPPED (status)
	  && WSTOPSIG (status) == SIGINT)
	{
	  /* This is the SIGINT we arranged to swallow (see
	     set_ignore_sigint); discard it, resume the LWP, and keep
	     waiting for the SIGSTOP.  */
	  lp->ignore_sigint = 0;

	  errno = 0;
	  ptrace (PTRACE_CONT, lp->ptid.lwp (), 0, 0);
	  lp->stopped = 0;
	  linux_nat_debug_printf
	    ("PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)",
	     lp->ptid.to_string ().c_str (),
	     errno ? safe_strerror (errno) : "OK");

	  return stop_wait_callback (lp);
	}

      maybe_clear_ignore_sigint (lp);

      if (WSTOPSIG (status) != SIGSTOP)
	{
	  /* The thread was stopped with a signal other than SIGSTOP.  */

	  linux_nat_debug_printf ("Pending event %s in %s",
				  status_to_str ((int) status).c_str (),
				  lp->ptid.to_string ().c_str ());

	  /* Save the sigtrap event.  */
	  lp->status = status;
	  gdb_assert (lp->signalled);
	  save_stop_reason (lp);
	}
      else
	{
	  /* We caught the SIGSTOP that we intended to catch.  */

	  linux_nat_debug_printf ("Expected SIGSTOP caught for %s.",
				  lp->ptid.to_string ().c_str ());

	  lp->signalled = 0;

	  /* If we are waiting for this stop so we can report the thread
	     stopped then we need to record this status.  Otherwise, we can
	     now discard this stop event.  */
	  if (lp->last_resume_kind == resume_stop)
	    {
	      lp->status = status;
	      save_stop_reason (lp);
	    }
	}
    }

  return 0;
}
2591
2592 /* Get the inferior associated to LWP. Must be called with an LWP that has
2593 an associated inferior. Always return non-nullptr. */
2594
2595 static inferior *
2596 lwp_inferior (const lwp_info *lwp)
2597 {
2598 inferior *inf = find_inferior_ptid (linux_target, lwp->ptid);
2599 gdb_assert (inf != nullptr);
2600 return inf;
2601 }
2602
/* Return non-zero if LP has a wait status pending.  Discard the
   pending event and resume the LWP if the event that originally
   caused the stop became uninteresting.  */

static int
status_callback (struct lwp_info *lp)
{
  /* Only report a pending wait status if we pretend that this has
     indeed been resumed.  */
  if (!lp->resumed)
    return 0;

  if (!lwp_status_pending_p (lp))
    return 0;

  if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      /* The stop was caused by a breakpoint.  If the thread's PC has
	 since moved away from the breakpoint address (or, without
	 siginfo support, if the breakpoint was removed), the pending
	 event is stale and must not be reported.  */
      struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
      CORE_ADDR pc;
      int discard = 0;

      pc = regcache_read_pc (regcache);

      if (pc != lp->stop_pc)
	{
	  linux_nat_debug_printf ("PC of %s changed.  was=%s, now=%s",
				  lp->ptid.to_string ().c_str (),
				  paddress (current_inferior ()->arch (),
					    lp->stop_pc),
				  paddress (current_inferior ()->arch (), pc));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (!breakpoint_inserted_here_p (lwp_inferior (lp)->aspace, pc))
	{
	  linux_nat_debug_printf ("previous breakpoint of %s, at %s gone",
				  lp->ptid.to_string ().c_str (),
				  paddress (current_inferior ()->arch (),
					    lp->stop_pc));

	  discard = 1;
	}
#endif

      if (discard)
	{
	  linux_nat_debug_printf ("pending event of %s cancelled.",
				  lp->ptid.to_string ().c_str ());

	  /* Drop the stale event and set the LWP running again.  */
	  lp->status = 0;
	  linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
	  return 0;
	}
    }

  return 1;
}
2662
2663 /* Count the LWP's that have had events. */
2664
2665 static int
2666 count_events_callback (struct lwp_info *lp, int *count)
2667 {
2668 gdb_assert (count != NULL);
2669
2670 /* Select only resumed LWPs that have an event pending. */
2671 if (lp->resumed && lwp_status_pending_p (lp))
2672 (*count)++;
2673
2674 return 0;
2675 }
2676
2677 /* Select the LWP (if any) that is currently being single-stepped. */
2678
2679 static int
2680 select_singlestep_lwp_callback (struct lwp_info *lp)
2681 {
2682 if (lp->last_resume_kind == resume_step
2683 && lp->status != 0)
2684 return 1;
2685 else
2686 return 0;
2687 }
2688
2689 /* Returns true if LP has a status pending. */
2690
2691 static int
2692 lwp_status_pending_p (struct lwp_info *lp)
2693 {
2694 /* We check for lp->waitstatus in addition to lp->status, because we
2695 can have pending process exits recorded in lp->status and
2696 W_EXITCODE(0,0) happens to be 0. */
2697 return lp->status != 0 || lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE;
2698 }
2699
2700 /* Select the Nth LWP that has had an event. */
2701
2702 static int
2703 select_event_lwp_callback (struct lwp_info *lp, int *selector)
2704 {
2705 gdb_assert (selector != NULL);
2706
2707 /* Select only resumed LWPs that have an event pending. */
2708 if (lp->resumed && lwp_status_pending_p (lp))
2709 if ((*selector)-- == 0)
2710 return 1;
2711
2712 return 0;
2713 }
2714
/* Called when the LWP stopped for a signal/trap.  If it stopped for a
   trap check what caused it (breakpoint, watchpoint, trace, etc.),
   and save the result in the LWP's stop_reason field.  If it stopped
   for a breakpoint, decrement the PC if necessary on the lwp's
   architecture.  */

static void
save_stop_reason (struct lwp_info *lp)
{
  struct regcache *regcache;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  CORE_ADDR sw_bp_pc;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  /* Must not be called twice for the same stop, and only with a
     pending status.  */
  gdb_assert (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON);
  gdb_assert (lp->status != 0);

  if (!linux_target->low_status_is_event (lp->status))
    return;

  /* While the inferior is starting up, breakpoint bookkeeping isn't
     meaningful yet; leave the stop reason unset.  */
  inferior *inf = lwp_inferior (lp);
  if (inf->starting_up)
    return;

  regcache = get_thread_regcache (linux_target, lp->ptid);
  gdbarch = regcache->arch ();

  /* SW_BP_PC is where a software breakpoint instruction would have
     been planted, accounting for architectures whose PC advances
     past the breakpoint instruction.  */
  pc = regcache_read_pc (regcache);
  sw_bp_pc = pc - gdbarch_decr_pc_after_break (gdbarch);

#if USE_SIGTRAP_SIGINFO
  if (linux_nat_get_siginfo (lp->ptid, &siginfo))
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lp))
		lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because, at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lp))
		lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      linux_nat_debug_printf ("%s stopped by trace",
				      lp->ptid.to_string ().c_str ());

	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      check_stopped_by_watchpoint (lp);
	    }
	}
    }
#else
  /* No usable siginfo: fall back to inspecting GDB's breakpoint
     tables and the debug registers.  */
  if ((!lp->step || lp->stop_pc == sw_bp_pc)
      && software_breakpoint_inserted_here_p (inf->aspace, sw_bp_pc))
    {
      /* The LWP was either continued, or stepped a software
	 breakpoint instruction.  */
      lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
    }

  if (hardware_breakpoint_inserted_here_p (inf->aspace, pc))
    lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lp);
#endif

  if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      linux_nat_debug_printf ("%s stopped by software breakpoint",
			      lp->ptid.to_string ().c_str ());

      /* Back up the PC if necessary.  */
      if (pc != sw_bp_pc)
	regcache_write_pc (regcache, sw_bp_pc);

      /* Update this so we record the correct stop PC below.  */
      pc = sw_bp_pc;
    }
  else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      linux_nat_debug_printf ("%s stopped by hardware breakpoint",
			      lp->ptid.to_string ().c_str ());
    }
  else if (lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      linux_nat_debug_printf ("%s stopped by hardware watchpoint",
			      lp->ptid.to_string ().c_str ());
    }

  lp->stop_pc = pc;
}
2832
2833
2834 /* Returns true if the LWP had stopped for a software breakpoint. */
2835
2836 bool
2837 linux_nat_target::stopped_by_sw_breakpoint ()
2838 {
2839 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2840
2841 gdb_assert (lp != NULL);
2842
2843 return lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2844 }
2845
2846 /* Implement the supports_stopped_by_sw_breakpoint method. */
2847
2848 bool
2849 linux_nat_target::supports_stopped_by_sw_breakpoint ()
2850 {
2851 return USE_SIGTRAP_SIGINFO;
2852 }
2853
2854 /* Returns true if the LWP had stopped for a hardware
2855 breakpoint/watchpoint. */
2856
2857 bool
2858 linux_nat_target::stopped_by_hw_breakpoint ()
2859 {
2860 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2861
2862 gdb_assert (lp != NULL);
2863
2864 return lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2865 }
2866
2867 /* Implement the supports_stopped_by_hw_breakpoint method. */
2868
2869 bool
2870 linux_nat_target::supports_stopped_by_hw_breakpoint ()
2871 {
2872 return USE_SIGTRAP_SIGINFO;
2873 }
2874
/* Select one LWP out of those that have events pending.  On return,
   *ORIG_LP/*STATUS designate the chosen LWP and its wait status, and
   the chosen LWP's recorded pending status has been flushed (it is
   now carried in *STATUS instead).  */

static void
select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
{
  int num_events = 0;
  int random_selector;
  struct lwp_info *event_lp = NULL;

  /* Record the wait status for the original LWP.  */
  (*orig_lp)->status = *status;

  /* In all-stop, give preference to the LWP that is being
     single-stepped.  There will be at most one, and it will be the
     LWP that the core is most interested in.  If we didn't do this,
     then we'd have to handle pending step SIGTRAPs somehow in case
     the core later continues the previously-stepped thread, as
     otherwise we'd report the pending SIGTRAP then, and the core, not
     having stepped the thread, wouldn't understand what the trap was
     for, and therefore would report it to the user as a random
     signal.  */
  if (!target_is_non_stop_p ())
    {
      event_lp = iterate_over_lwps (filter, select_singlestep_lwp_callback);
      if (event_lp != NULL)
	{
	  linux_nat_debug_printf ("Select single-step %s",
				  event_lp->ptid.to_string ().c_str ());
	}
    }

  if (event_lp == NULL)
    {
      /* Pick one at random, out of those which have had events.
	 Randomness avoids starving any particular thread when many
	 report events simultaneously.  */

      /* First see how many events we have.  */
      iterate_over_lwps (filter,
			 [&] (struct lwp_info *info)
			 {
			   return count_events_callback (info, &num_events);
			 });
      gdb_assert (num_events > 0);

      /* Now randomly pick a LWP out of those that have had
	 events.  */
      random_selector = (int)
	((num_events * (double) rand ()) / (RAND_MAX + 1.0));

      if (num_events > 1)
	linux_nat_debug_printf ("Found %d events, selecting #%d",
				num_events, random_selector);

      event_lp
	= (iterate_over_lwps
	   (filter,
	    [&] (struct lwp_info *info)
	    {
	      return select_event_lwp_callback (info,
						&random_selector);
	    }));
    }

  if (event_lp != NULL)
    {
      /* Switch the event LWP.  */
      *orig_lp = event_lp;
      *status = event_lp->status;
    }

  /* Flush the wait status for the event LWP.  Note *ORIG_LP may have
     been switched above, so this clears the status of whichever LWP
     was finally selected — its event is now reported via *STATUS.  */
  (*orig_lp)->status = 0;
}
2947
2948 /* Return non-zero if LP has been resumed. */
2949
2950 static int
2951 resumed_callback (struct lwp_info *lp)
2952 {
2953 return lp->resumed;
2954 }
2955
2956 /* Check if we should go on and pass this event to common code.
2957
2958 If so, save the status to the lwp_info structure associated to LWPID. */
2959
2960 static void
2961 linux_nat_filter_event (int lwpid, int status)
2962 {
2963 struct lwp_info *lp;
2964 int event = linux_ptrace_get_extended_event (status);
2965
2966 lp = find_lwp_pid (ptid_t (lwpid));
2967
2968 /* Check for events reported by anything not in our LWP list. */
2969 if (lp == nullptr)
2970 {
2971 if (WIFSTOPPED (status))
2972 {
2973 if (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC)
2974 {
2975 /* A non-leader thread exec'ed after we've seen the
2976 leader zombie, and removed it from our lists (in
2977 check_zombie_leaders). The non-leader thread changes
2978 its tid to the tgid. */
2979 linux_nat_debug_printf
2980 ("Re-adding thread group leader LWP %d after exec.",
2981 lwpid);
2982
2983 lp = add_lwp (ptid_t (lwpid, lwpid));
2984 lp->stopped = 1;
2985 lp->resumed = 1;
2986 add_thread (linux_target, lp->ptid);
2987 }
2988 else
2989 {
2990 /* A process we are controlling has forked and the new
2991 child's stop was reported to us by the kernel. Save
2992 its PID and go back to waiting for the fork event to
2993 be reported - the stopped process might be returned
2994 from waitpid before or after the fork event is. */
2995 linux_nat_debug_printf
2996 ("Saving LWP %d status %s in stopped_pids list",
2997 lwpid, status_to_str (status).c_str ());
2998 add_to_pid_list (&stopped_pids, lwpid, status);
2999 }
3000 }
3001 else
3002 {
3003 /* Don't report an event for the exit of an LWP not in our
3004 list, i.e. not part of any inferior we're debugging.
3005 This can happen if we detach from a program we originally
3006 forked and then it exits. However, note that we may have
3007 earlier deleted a leader of an inferior we're debugging,
3008 in check_zombie_leaders. Re-add it back here if so. */
3009 for (inferior *inf : all_inferiors (linux_target))
3010 {
3011 if (inf->pid == lwpid)
3012 {
3013 linux_nat_debug_printf
3014 ("Re-adding thread group leader LWP %d after exit.",
3015 lwpid);
3016
3017 lp = add_lwp (ptid_t (lwpid, lwpid));
3018 lp->resumed = 1;
3019 add_thread (linux_target, lp->ptid);
3020 break;
3021 }
3022 }
3023 }
3024
3025 if (lp == nullptr)
3026 return;
3027 }
3028
3029 /* This LWP is stopped now. (And if dead, this prevents it from
3030 ever being continued.) */
3031 lp->stopped = 1;
3032
3033 if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
3034 {
3035 inferior *inf = find_inferior_pid (linux_target, lp->ptid.pid ());
3036 int options = linux_nat_ptrace_options (inf->attach_flag);
3037
3038 linux_enable_event_reporting (lp->ptid.lwp (), options);
3039 lp->must_set_ptrace_flags = 0;
3040 }
3041
3042 /* Handle GNU/Linux's syscall SIGTRAPs. */
3043 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
3044 {
3045 /* No longer need the sysgood bit. The ptrace event ends up
3046 recorded in lp->waitstatus if we care for it. We can carry
3047 on handling the event like a regular SIGTRAP from here
3048 on. */
3049 status = W_STOPCODE (SIGTRAP);
3050 if (linux_handle_syscall_trap (lp, 0))
3051 return;
3052 }
3053 else
3054 {
3055 /* Almost all other ptrace-stops are known to be outside of system
3056 calls, with further exceptions in linux_handle_extended_wait. */
3057 lp->syscall_state = TARGET_WAITKIND_IGNORE;
3058 }
3059
3060 /* Handle GNU/Linux's extended waitstatus for trace events. */
3061 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
3062 && linux_is_extended_waitstatus (status))
3063 {
3064 linux_nat_debug_printf ("Handling extended status 0x%06x", status);
3065
3066 if (linux_handle_extended_wait (lp, status))
3067 return;
3068 }
3069
3070 /* Check if the thread has exited. */
3071 if (WIFEXITED (status) || WIFSIGNALED (status))
3072 {
3073 if (!report_exit_events_for (lp) && !is_leader (lp))
3074 {
3075 linux_nat_debug_printf ("%s exited.",
3076 lp->ptid.to_string ().c_str ());
3077
3078 /* If this was not the leader exiting, then the exit signal
3079 was not the end of the debugged application and should be
3080 ignored. */
3081 exit_lwp (lp);
3082 return;
3083 }
3084
3085 /* Note that even if the leader was ptrace-stopped, it can still
3086 exit, if e.g., some other thread brings down the whole
3087 process (calls `exit'). So don't assert that the lwp is
3088 resumed. */
3089 linux_nat_debug_printf ("LWP %ld exited (resumed=%d)",
3090 lp->ptid.lwp (), lp->resumed);
3091
3092 mark_lwp_dead (lp, status);
3093 return;
3094 }
3095
3096 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3097 an attempt to stop an LWP. */
3098 if (lp->signalled
3099 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3100 {
3101 lp->signalled = 0;
3102
3103 if (lp->last_resume_kind == resume_stop)
3104 {
3105 linux_nat_debug_printf ("resume_stop SIGSTOP caught for %s.",
3106 lp->ptid.to_string ().c_str ());
3107 }
3108 else
3109 {
3110 /* This is a delayed SIGSTOP. Filter out the event. */
3111
3112 linux_nat_debug_printf
3113 ("%s %s, 0, 0 (discard delayed SIGSTOP)",
3114 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3115 lp->ptid.to_string ().c_str ());
3116
3117 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
3118 gdb_assert (lp->resumed);
3119 return;
3120 }
3121 }
3122
3123 /* Make sure we don't report a SIGINT that we have already displayed
3124 for another thread. */
3125 if (lp->ignore_sigint
3126 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3127 {
3128 linux_nat_debug_printf ("Delayed SIGINT caught for %s.",
3129 lp->ptid.to_string ().c_str ());
3130
3131 /* This is a delayed SIGINT. */
3132 lp->ignore_sigint = 0;
3133
3134 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
3135 linux_nat_debug_printf ("%s %s, 0, 0 (discard SIGINT)",
3136 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3137 lp->ptid.to_string ().c_str ());
3138 gdb_assert (lp->resumed);
3139
3140 /* Discard the event. */
3141 return;
3142 }
3143
3144 /* Don't report signals that GDB isn't interested in, such as
3145 signals that are neither printed nor stopped upon. Stopping all
3146 threads can be a bit time-consuming, so if we want decent
3147 performance with heavily multi-threaded programs, especially when
3148 they're using a high frequency timer, we'd better avoid it if we
3149 can. */
3150 if (WIFSTOPPED (status))
3151 {
3152 enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
3153
3154 if (!target_is_non_stop_p ())
3155 {
3156 /* Only do the below in all-stop, as we currently use SIGSTOP
3157 to implement target_stop (see linux_nat_stop) in
3158 non-stop. */
3159 if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
3160 {
3161 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3162 forwarded to the entire process group, that is, all LWPs
3163 will receive it - unless they're using CLONE_THREAD to
3164 share signals. Since we only want to report it once, we
3165 mark it as ignored for all LWPs except this one. */
3166 iterate_over_lwps (ptid_t (lp->ptid.pid ()), set_ignore_sigint);
3167 lp->ignore_sigint = 0;
3168 }
3169 else
3170 maybe_clear_ignore_sigint (lp);
3171 }
3172
3173 /* When using hardware single-step, we need to report every signal.
3174 Otherwise, signals in pass_mask may be short-circuited
3175 except signals that might be caused by a breakpoint, or SIGSTOP
3176 if we sent the SIGSTOP and are waiting for it to arrive. */
3177 if (!lp->step
3178 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status))
3179 && (WSTOPSIG (status) != SIGSTOP
3180 || !linux_target->find_thread (lp->ptid)->stop_requested)
3181 && !linux_wstatus_maybe_breakpoint (status))
3182 {
3183 linux_resume_one_lwp (lp, lp->step, signo);
3184 linux_nat_debug_printf
3185 ("%s %s, %s (preempt 'handle')",
3186 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3187 lp->ptid.to_string ().c_str (),
3188 (signo != GDB_SIGNAL_0
3189 ? strsignal (gdb_signal_to_host (signo)) : "0"));
3190 return;
3191 }
3192 }
3193
3194 /* An interesting event. */
3195 gdb_assert (lp);
3196 lp->status = status;
3197 save_stop_reason (lp);
3198 }
3199
3200 /* Detect zombie thread group leaders, and "exit" them. We can't reap
3201 their exits until all other threads in the group have exited. */
3202
3203 static void
3204 check_zombie_leaders (void)
3205 {
3206 for (inferior *inf : all_inferiors ())
3207 {
3208 struct lwp_info *leader_lp;
3209
3210 if (inf->pid == 0)
3211 continue;
3212
3213 leader_lp = find_lwp_pid (ptid_t (inf->pid));
3214 if (leader_lp != NULL
3215 /* Check if there are other threads in the group, as we may
3216 have raced with the inferior simply exiting. Note this
3217 isn't a watertight check. If the inferior is
3218 multi-threaded and is exiting, it may be we see the
3219 leader as zombie before we reap all the non-leader
3220 threads. See comments below. */
3221 && num_lwps (inf->pid) > 1
3222 && linux_proc_pid_is_zombie (inf->pid))
3223 {
3224 /* A zombie leader in a multi-threaded program can mean one
3225 of three things:
3226
3227 #1 - Only the leader exited, not the whole program, e.g.,
3228 with pthread_exit. Since we can't reap the leader's exit
3229 status until all other threads are gone and reaped too,
3230 we want to delete the zombie leader right away, as it
3231 can't be debugged, we can't read its registers, etc.
3232 This is the main reason we check for zombie leaders
3233 disappearing.
3234
3235 #2 - The whole thread-group/process exited (a group exit,
3236 via e.g. exit(3), and there is (or will be shortly) an
3237 exit reported for each thread in the process, and then
3238 finally an exit for the leader once the non-leaders are
3239 reaped.
3240
3241 #3 - There are 3 or more threads in the group, and a
3242 thread other than the leader exec'd. See comments on
3243 exec events at the top of the file.
3244
3245 Ideally we would never delete the leader for case #2.
3246 Instead, we want to collect the exit status of each
3247 non-leader thread, and then finally collect the exit
3248 status of the leader as normal and use its exit code as
3249 whole-process exit code. Unfortunately, there's no
3250 race-free way to distinguish cases #1 and #2. We can't
3251 assume the exit events for the non-leaders threads are
3252 already pending in the kernel, nor can we assume the
3253 non-leader threads are in zombie state already. Between
3254 the leader becoming zombie and the non-leaders exiting
3255 and becoming zombie themselves, there's a small time
3256 window, so such a check would be racy. Temporarily
3257 pausing all threads and checking to see if all threads
3258 exit or not before re-resuming them would work in the
3259 case that all threads are running right now, but it
3260 wouldn't work if some thread is currently already
3261 ptrace-stopped, e.g., due to scheduler-locking.
3262
3263 So what we do is we delete the leader anyhow, and then
3264 later on when we see its exit status, we re-add it back.
3265 We also make sure that we only report a whole-process
3266 exit when we see the leader exiting, as opposed to when
3267 the last LWP in the LWP list exits, which can be a
3268 non-leader if we deleted the leader here. */
3269 linux_nat_debug_printf ("Thread group leader %d zombie "
3270 "(it exited, or another thread execd), "
3271 "deleting it.",
3272 inf->pid);
3273 exit_lwp (leader_lp);
3274 }
3275 }
3276 }
3277
3278 /* Convenience function that is called when we're about to return an
3279 event to the core. If the event is an exit or signalled event,
3280 then this decides whether to report it as process-wide event, as a
3281 thread exit event, or to suppress it. All other event kinds are
3282 passed through unmodified. */
3283
3284 static ptid_t
3285 filter_exit_event (struct lwp_info *event_child,
3286 struct target_waitstatus *ourstatus)
3287 {
3288 ptid_t ptid = event_child->ptid;
3289
3290 /* Note we must filter TARGET_WAITKIND_SIGNALLED as well, otherwise
3291 if a non-leader thread exits with a signal, we'd report it to the
3292 core which would interpret it as the whole-process exiting.
3293 There is no TARGET_WAITKIND_THREAD_SIGNALLED event kind. */
3294 if (ourstatus->kind () != TARGET_WAITKIND_EXITED
3295 && ourstatus->kind () != TARGET_WAITKIND_SIGNALLED)
3296 return ptid;
3297
3298 if (!is_leader (event_child))
3299 {
3300 if (report_exit_events_for (event_child))
3301 {
3302 ourstatus->set_thread_exited (0);
3303 /* Delete lwp, but not thread_info, infrun will need it to
3304 process the event. */
3305 exit_lwp (event_child, false);
3306 }
3307 else
3308 {
3309 ourstatus->set_ignore ();
3310 exit_lwp (event_child);
3311 }
3312 }
3313
3314 return ptid;
3315 }
3316
/* The worker for linux_nat_target::wait.  Wait for an event matching
   PTID, honoring TARGET_OPTIONS (e.g. TARGET_WNOHANG), store the
   event in *OURSTATUS, and return the ptid of the LWP the event
   belongs to.  */

static ptid_t
linux_nat_wait_1 (ptid_t ptid, struct target_waitstatus *ourstatus,
		  target_wait_flags target_options)
{
  LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;

  sigset_t prev_mask;
  enum resume_kind last_resume_kind;
  struct lwp_info *lp;
  int status;

  /* The first time we get here after starting a new inferior, we may
     not have added it to the LWP list yet - this is the earliest
     moment at which we know its PID.  */
  if (ptid.is_pid () && find_lwp_pid (ptid) == nullptr)
    {
      ptid_t lwp_ptid (ptid.pid (), ptid.pid ());

      /* Upgrade the main thread's ptid.  */
      thread_change_ptid (linux_target, ptid, lwp_ptid);
      lp = add_initial_lwp (lwp_ptid);
      lp->resumed = 1;
    }

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  */
  block_child_signals (&prev_mask);

  /* First check if there is a LWP with a wait status pending.  */
  lp = iterate_over_lwps (ptid, status_callback);
  if (lp != NULL)
    {
      linux_nat_debug_printf ("Using pending wait status %s for %s.",
			      pending_status_str (lp).c_str (),
			      lp->ptid.to_string ().c_str ());
    }

  /* But if we don't find a pending event, we'll have to wait.  Always
     pull all events out of the kernel.  We'll randomly select an
     event LWP out of all that have events, to prevent starvation.  */

  while (lp == NULL)
    {
      pid_t lwpid;

      /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid.  */

      errno = 0;
      lwpid = my_waitpid (-1, &status, __WALL | WNOHANG);

      linux_nat_debug_printf ("waitpid(-1, ...) returned %d, %s",
			      lwpid,
			      errno ? safe_strerror (errno) : "ERRNO-OK");

      if (lwpid > 0)
	{
	  linux_nat_debug_printf ("waitpid %ld received %s",
				  (long) lwpid,
				  status_to_str (status).c_str ());

	  linux_nat_filter_event (lwpid, status);
	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Now that we've pulled all events out of the kernel, resume
	 LWPs that don't have an interesting event to report.  */
      iterate_over_lwps (minus_one_ptid,
			 [] (struct lwp_info *info)
			 {
			   return resume_stopped_resumed_lwps (info, minus_one_ptid);
			 });

      /* ... and find an LWP with a status to report to the core, if
	 any.  */
      lp = iterate_over_lwps (ptid, status_callback);
      if (lp != NULL)
	break;

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      check_zombie_leaders ();

      /* If there are no resumed children left, bail.  We'd be stuck
	 forever in the sigsuspend call below otherwise.  */
      if (iterate_over_lwps (ptid, resumed_callback) == NULL)
	{
	  linux_nat_debug_printf ("exit (no resumed LWP)");

	  ourstatus->set_no_resumed ();

	  restore_child_signals_mask (&prev_mask);
	  return minus_one_ptid;
	}

      /* No interesting event to report to the core.  */

      if (target_options & TARGET_WNOHANG)
	{
	  linux_nat_debug_printf ("no interesting events found");

	  ourstatus->set_ignore ();
	  restore_child_signals_mask (&prev_mask);
	  return minus_one_ptid;
	}

      /* We shouldn't end up here unless we want to try again.  */
      gdb_assert (lp == NULL);

      /* Block until we get an event reported with SIGCHLD.  */
      wait_for_signal ();
    }

  gdb_assert (lp);
  gdb_assert (lp->stopped);

  status = lp->status;
  lp->status = 0;

  if (!target_is_non_stop_p ())
    {
      /* Now stop all other LWP's ...  */
      iterate_over_lwps (minus_one_ptid, stop_callback);

      /* ... and wait until all of them have reported back that
	 they're no longer running.  */
      iterate_over_lwps (minus_one_ptid, stop_wait_callback);
    }

  /* If we're not waiting for a specific LWP, choose an event LWP from
     among those that have had events.  Giving equal priority to all
     LWPs that have had events helps prevent starvation.  */
  if (ptid == minus_one_ptid || ptid.is_pid ())
    select_event_lwp (ptid, &lp, &status);

  gdb_assert (lp != NULL);

  /* Now that we've selected our final event LWP, un-adjust its PC if
     it was a software breakpoint, and we can't reliably support the
     "stopped by software breakpoint" stop reason.  */
  if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && !USE_SIGTRAP_SIGINFO)
    {
      struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
      struct gdbarch *gdbarch = regcache->arch ();
      int decr_pc = gdbarch_decr_pc_after_break (gdbarch);

      if (decr_pc != 0)
	{
	  CORE_ADDR pc;

	  pc = regcache_read_pc (regcache);
	  regcache_write_pc (regcache, pc + decr_pc);
	}
    }

  /* We'll need this to determine whether to report a SIGSTOP as
     GDB_SIGNAL_0.  Need to take a copy because resume_clear_callback
     clears it.  */
  last_resume_kind = lp->last_resume_kind;

  if (!target_is_non_stop_p ())
    {
      /* In all-stop, from the core's perspective, all LWPs are now
	 stopped until a new resume action is sent over.  */
      iterate_over_lwps (minus_one_ptid, resume_clear_callback);
    }
  else
    {
      resume_clear_callback (lp);
    }

  if (linux_target->low_status_is_event (status))
    {
      linux_nat_debug_printf ("trap ptid is %s.",
			      lp->ptid.to_string ().c_str ());
    }

  /* An extended event (fork, exec, etc.) recorded in LP->waitstatus
     takes precedence over the raw wait status.  */
  if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
    {
      *ourstatus = lp->waitstatus;
      lp->waitstatus.set_ignore ();
    }
  else
    *ourstatus = host_status_to_waitstatus (status);

  linux_nat_debug_printf ("event found");

  restore_child_signals_mask (&prev_mask);

  if (last_resume_kind == resume_stop
      && ourstatus->kind () == TARGET_WAITKIND_STOPPED
      && WSTOPSIG (status) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with
	 target_stop, and it stopped cleanly, so report as SIG0.  The
	 use of SIGSTOP is an implementation detail.  */
      ourstatus->set_stopped (GDB_SIGNAL_0);
    }

  /* Cache the core the LWP last ran on, except for exits, where
     there's no core to query anymore.  */
  if (ourstatus->kind () == TARGET_WAITKIND_EXITED
      || ourstatus->kind () == TARGET_WAITKIND_SIGNALLED)
    lp->core = -1;
  else
    lp->core = linux_common_core_of_thread (lp->ptid);

  return filter_exit_event (lp, ourstatus);
}
3536
3537 /* Resume LWPs that are currently stopped without any pending status
3538 to report, but are resumed from the core's perspective. */
3539
3540 static int
3541 resume_stopped_resumed_lwps (struct lwp_info *lp, const ptid_t wait_ptid)
3542 {
3543 inferior *inf = lwp_inferior (lp);
3544
3545 if (!lp->stopped)
3546 {
3547 linux_nat_debug_printf ("NOT resuming LWP %s, not stopped",
3548 lp->ptid.to_string ().c_str ());
3549 }
3550 else if (!lp->resumed)
3551 {
3552 linux_nat_debug_printf ("NOT resuming LWP %s, not resumed",
3553 lp->ptid.to_string ().c_str ());
3554 }
3555 else if (lwp_status_pending_p (lp))
3556 {
3557 linux_nat_debug_printf ("NOT resuming LWP %s, has pending status",
3558 lp->ptid.to_string ().c_str ());
3559 }
3560 else if (inf->vfork_child != nullptr)
3561 {
3562 linux_nat_debug_printf ("NOT resuming LWP %s (vfork parent)",
3563 lp->ptid.to_string ().c_str ());
3564 }
3565 else
3566 {
3567 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
3568 struct gdbarch *gdbarch = regcache->arch ();
3569
3570 try
3571 {
3572 CORE_ADDR pc = regcache_read_pc (regcache);
3573 int leave_stopped = 0;
3574
3575 /* Don't bother if there's a breakpoint at PC that we'd hit
3576 immediately, and we're not waiting for this LWP. */
3577 if (!lp->ptid.matches (wait_ptid))
3578 {
3579 if (breakpoint_inserted_here_p (inf->aspace.get (), pc))
3580 leave_stopped = 1;
3581 }
3582
3583 if (!leave_stopped)
3584 {
3585 linux_nat_debug_printf
3586 ("resuming stopped-resumed LWP %s at %s: step=%d",
3587 lp->ptid.to_string ().c_str (), paddress (gdbarch, pc),
3588 lp->step);
3589
3590 linux_resume_one_lwp_throw (lp, lp->step, GDB_SIGNAL_0);
3591 }
3592 }
3593 catch (const gdb_exception_error &ex)
3594 {
3595 if (!check_ptrace_stopped_lwp_gone (lp))
3596 throw;
3597 }
3598 }
3599
3600 return 0;
3601 }
3602
3603 ptid_t
3604 linux_nat_target::wait (ptid_t ptid, struct target_waitstatus *ourstatus,
3605 target_wait_flags target_options)
3606 {
3607 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
3608
3609 ptid_t event_ptid;
3610
3611 linux_nat_debug_printf ("[%s], [%s]", ptid.to_string ().c_str (),
3612 target_options_to_string (target_options).c_str ());
3613
3614 /* Flush the async file first. */
3615 if (target_is_async_p ())
3616 async_file_flush ();
3617
3618 /* Resume LWPs that are currently stopped without any pending status
3619 to report, but are resumed from the core's perspective. LWPs get
3620 in this state if we find them stopping at a time we're not
3621 interested in reporting the event (target_wait on a
3622 specific_process, for example, see linux_nat_wait_1), and
3623 meanwhile the event became uninteresting. Don't bother resuming
3624 LWPs we're not going to wait for if they'd stop immediately. */
3625 if (target_is_non_stop_p ())
3626 iterate_over_lwps (minus_one_ptid,
3627 [=] (struct lwp_info *info)
3628 {
3629 return resume_stopped_resumed_lwps (info, ptid);
3630 });
3631
3632 event_ptid = linux_nat_wait_1 (ptid, ourstatus, target_options);
3633
3634 /* If we requested any event, and something came out, assume there
3635 may be more. If we requested a specific lwp or process, also
3636 assume there may be more. */
3637 if (target_is_async_p ()
3638 && ((ourstatus->kind () != TARGET_WAITKIND_IGNORE
3639 && ourstatus->kind () != TARGET_WAITKIND_NO_RESUMED)
3640 || ptid != minus_one_ptid))
3641 async_file_mark ();
3642
3643 return event_ptid;
3644 }
3645
3646 /* Kill one LWP. */
3647
3648 static void
3649 kill_one_lwp (pid_t pid)
3650 {
3651 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3652
3653 errno = 0;
3654 kill_lwp (pid, SIGKILL);
3655
3656 if (debug_linux_nat)
3657 {
3658 int save_errno = errno;
3659
3660 linux_nat_debug_printf
3661 ("kill (SIGKILL) %ld, 0, 0 (%s)", (long) pid,
3662 save_errno != 0 ? safe_strerror (save_errno) : "OK");
3663 }
3664
3665 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3666
3667 errno = 0;
3668 ptrace (PTRACE_KILL, pid, 0, 0);
3669 if (debug_linux_nat)
3670 {
3671 int save_errno = errno;
3672
3673 linux_nat_debug_printf
3674 ("PTRACE_KILL %ld, 0, 0 (%s)", (long) pid,
3675 save_errno ? safe_strerror (save_errno) : "OK");
3676 }
3677 }
3678
3679 /* Wait for an LWP to die. */
3680
3681 static void
3682 kill_wait_one_lwp (pid_t pid)
3683 {
3684 pid_t res;
3685
3686 /* We must make sure that there are no pending events (delayed
3687 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
3688 program doesn't interfere with any following debugging session. */
3689
3690 do
3691 {
3692 res = my_waitpid (pid, NULL, __WALL);
3693 if (res != (pid_t) -1)
3694 {
3695 linux_nat_debug_printf ("wait %ld received unknown.", (long) pid);
3696
3697 /* The Linux kernel sometimes fails to kill a thread
3698 completely after PTRACE_KILL; that goes from the stop
3699 point in do_fork out to the one in get_signal_to_deliver
3700 and waits again. So kill it again. */
3701 kill_one_lwp (pid);
3702 }
3703 }
3704 while (res == pid);
3705
3706 gdb_assert (res == -1 && errno == ECHILD);
3707 }
3708
/* Callback for iterate_over_lwps; sends SIGKILL/PTRACE_KILL to LP.
   Always returns 0 so iteration continues over all LWPs.  */

static int
kill_callback (struct lwp_info *lp)
{
  kill_one_lwp (lp->ptid.lwp ());
  return 0;
}
3717
/* Callback for iterate_over_lwps; reaps the just-killed LP.  Always
   returns 0 so iteration continues over all LWPs.  */

static int
kill_wait_callback (struct lwp_info *lp)
{
  kill_wait_one_lwp (lp->ptid.lwp ());
  return 0;
}
3726
3727 /* Kill the fork/clone child of LP if it has an unfollowed child. */
3728
3729 static int
3730 kill_unfollowed_child_callback (lwp_info *lp)
3731 {
3732 std::optional<target_waitstatus> ws = get_pending_child_status (lp);
3733 if (ws.has_value ())
3734 {
3735 ptid_t child_ptid = ws->child_ptid ();
3736 int child_pid = child_ptid.pid ();
3737 int child_lwp = child_ptid.lwp ();
3738
3739 kill_one_lwp (child_lwp);
3740 kill_wait_one_lwp (child_lwp);
3741
3742 /* Let the arch-specific native code know this process is
3743 gone. */
3744 if (ws->kind () != TARGET_WAITKIND_THREAD_CLONED)
3745 linux_target->low_forget_process (child_pid);
3746 }
3747
3748 return 0;
3749 }
3750
/* Implementation of the "kill" target method.  Kills all LWPs of the
   current inferior (and any unfollowed fork/clone children) and
   mourns the inferior.  */

void
linux_nat_target::kill ()
{
  ptid_t pid_ptid (inferior_ptid.pid ());

  /* If we're stopped while forking/cloning and we haven't followed
     yet, kill the child task.  We need to do this first because the
     parent will be sleeping if this is a vfork.  */
  iterate_over_lwps (pid_ptid, kill_unfollowed_child_callback);

  if (forks_exist_p ())
    linux_fork_killall ();
  else
    {
      /* Stop all threads before killing them, since ptrace requires
	 that the thread is stopped to successfully PTRACE_KILL.  */
      iterate_over_lwps (pid_ptid, stop_callback);
      /* ... and wait until all of them have reported back that
	 they're no longer running.  */
      iterate_over_lwps (pid_ptid, stop_wait_callback);

      /* Kill all LWP's ...  */
      iterate_over_lwps (pid_ptid, kill_callback);

      /* ... and wait until we've flushed all events.  */
      iterate_over_lwps (pid_ptid, kill_wait_callback);
    }

  target_mourn_inferior (inferior_ptid);
}
3781
/* Implementation of the "mourn_inferior" target method.  Discards all
   per-process state for the current inferior and hands off to either
   the normal ptrace mourn path or the multi-fork one.  */

void
linux_nat_target::mourn_inferior ()
{
  LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;

  int pid = inferior_ptid.pid ();

  /* Drop all LWPs of this process from the LWP list.  */
  purge_lwp_list (pid);

  /* Also release the cached /proc/PID/mem file descriptor.  */
  close_proc_mem_file (pid);

  if (! forks_exist_p ())
    /* Normal case, no other forks available.  */
    inf_ptrace_target::mourn_inferior ();
  else
    /* Multi-fork case.  The current inferior_ptid has exited, but
       there are other viable forks to debug.  Delete the exiting
       one and context-switch to the first available.  */
    linux_fork_mourn_inferior ();

  /* Let the arch-specific native code know this process is gone.  */
  linux_target->low_forget_process (pid);
}
3805
3806 /* Convert a native/host siginfo object, into/from the siginfo in the
3807 layout of the inferiors' architecture. */
3808
3809 static void
3810 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
3811 {
3812 /* If the low target didn't do anything, then just do a straight
3813 memcpy. */
3814 if (!linux_target->low_siginfo_fixup (siginfo, inf_siginfo, direction))
3815 {
3816 if (direction == 1)
3817 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
3818 else
3819 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
3820 }
3821 }
3822
/* Read or write the siginfo of thread PTID, implementing the
   TARGET_OBJECT_SIGNAL_INFO transfer object.  Exactly one of READBUF
   and WRITEBUF is non-NULL.  On success, stores the number of bytes
   transferred in *XFERED_LEN.  */

static enum target_xfer_status
linux_xfer_siginfo (ptid_t ptid, enum target_object object,
		    const char *annex, gdb_byte *readbuf,
		    const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
		    ULONGEST *xfered_len)
{
  siginfo_t siginfo;
  gdb_byte inf_siginfo[sizeof (siginfo_t)];

  gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
  gdb_assert (readbuf || writebuf);

  /* Reject transfers starting past the end of the object.  */
  if (offset > sizeof (siginfo))
    return TARGET_XFER_E_IO;

  if (!linux_nat_get_siginfo (ptid, &siginfo))
    return TARGET_XFER_E_IO;

  /* When GDB is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDB should look the same as debugging it
     with a 32-bit GDB, we need to convert it.  GDB core always sees
     the converted layout, so any read/write will have to be done
     post-conversion.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the size of the object.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      int pid = get_ptrace_pid (ptid);
      errno = 0;
      ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
      if (errno != 0)
	return TARGET_XFER_E_IO;
    }

  *xfered_len = len;
  return TARGET_XFER_OK;
}
3871
3872 static enum target_xfer_status
3873 linux_nat_xfer_osdata (enum target_object object,
3874 const char *annex, gdb_byte *readbuf,
3875 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3876 ULONGEST *xfered_len);
3877
3878 static enum target_xfer_status
3879 linux_proc_xfer_memory_partial (int pid, gdb_byte *readbuf,
3880 const gdb_byte *writebuf, ULONGEST offset,
3881 LONGEST len, ULONGEST *xfered_len);
3882
/* Implementation of the "xfer_partial" target method.  Dispatches on
   OBJECT to the appropriate transfer helper, falling back to the
   inf-ptrace implementation for anything not handled here.  */

enum target_xfer_status
linux_nat_target::xfer_partial (enum target_object object,
				const char *annex, gdb_byte *readbuf,
				const gdb_byte *writebuf,
				ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
{
  if (object == TARGET_OBJECT_SIGNAL_INFO)
    return linux_xfer_siginfo (inferior_ptid, object, annex, readbuf, writebuf,
			       offset, len, xfered_len);

  /* The target is connected but no live inferior is selected.  Pass
     this request down to a lower stratum (e.g., the executable
     file).  */
  if (object == TARGET_OBJECT_MEMORY && inferior_ptid == null_ptid)
    return TARGET_XFER_EOF;

  if (object == TARGET_OBJECT_AUXV)
    return memory_xfer_auxv (this, object, annex, readbuf, writebuf,
			     offset, len, xfered_len);

  if (object == TARGET_OBJECT_OSDATA)
    return linux_nat_xfer_osdata (object, annex, readbuf, writebuf,
				  offset, len, xfered_len);

  if (object == TARGET_OBJECT_MEMORY)
    {
      /* GDB calculates all addresses in the largest possible address
	 width.  The address width must be masked before its final use
	 by linux_proc_xfer_partial.

	 Compare ADDR_BIT first to avoid a compiler warning on shift overflow.  */
      int addr_bit = gdbarch_addr_bit (current_inferior ()->arch ());

      if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
	offset &= ((ULONGEST) 1 << addr_bit) - 1;

      /* If /proc/pid/mem is writable, don't fallback to ptrace.  If
	 the write via /proc/pid/mem fails because the inferior execed
	 (and we haven't seen the exec event yet), a subsequent ptrace
	 poke would incorrectly write memory to the post-exec address
	 space, while the core was trying to write to the pre-exec
	 address space.  */
      if (proc_mem_file_is_writable ())
	return linux_proc_xfer_memory_partial (inferior_ptid.pid (), readbuf,
					       writebuf, offset, len,
					       xfered_len);
    }

  /* Anything else (including memory when /proc/pid/mem isn't
     writable) goes through the generic ptrace-based path.  */
  return inf_ptrace_target::xfer_partial (object, annex, readbuf, writebuf,
					  offset, len, xfered_len);
}
3934
3935 bool
3936 linux_nat_target::thread_alive (ptid_t ptid)
3937 {
3938 /* As long as a PTID is in lwp list, consider it alive. */
3939 return find_lwp_pid (ptid) != NULL;
3940 }
3941
3942 /* Implement the to_update_thread_list target method for this
3943 target. */
3944
3945 void
3946 linux_nat_target::update_thread_list ()
3947 {
3948 /* We add/delete threads from the list as clone/exit events are
3949 processed, so just try deleting exited threads still in the
3950 thread list. */
3951 delete_exited_threads ();
3952
3953 /* Update the processor core that each lwp/thread was last seen
3954 running on. */
3955 for (lwp_info *lwp : all_lwps ())
3956 {
3957 /* Avoid accessing /proc if the thread hasn't run since we last
3958 time we fetched the thread's core. Accessing /proc becomes
3959 noticeably expensive when we have thousands of LWPs. */
3960 if (lwp->core == -1)
3961 lwp->core = linux_common_core_of_thread (lwp->ptid);
3962 }
3963 }
3964
3965 std::string
3966 linux_nat_target::pid_to_str (ptid_t ptid)
3967 {
3968 if (ptid.lwp_p ()
3969 && (ptid.pid () != ptid.lwp ()
3970 || num_lwps (ptid.pid ()) > 1))
3971 return string_printf ("LWP %ld", ptid.lwp ());
3972
3973 return normal_pid_to_str (ptid);
3974 }
3975
/* Implementation of the "thread_name" target method.  Returns the
   name of thread THR as obtained from /proc (see
   linux_proc_tid_get_name).  */

const char *
linux_nat_target::thread_name (struct thread_info *thr)
{
  return linux_proc_tid_get_name (thr->ptid);
}
3981
3982 /* Accepts an integer PID; Returns a string representing a file that
3983 can be opened to get the symbols for the child process. */
3984
/* Implementation of the "pid_to_exec_file" target method.  Given an
   integer PID, returns a string representing a file that can be
   opened to get the symbols for the child process (delegating to the
   /proc-based lookup).  */

const char *
linux_nat_target::pid_to_exec_file (int pid)
{
  return linux_proc_pid_to_exec_file (pid);
}
3990
3991 /* Object representing an /proc/PID/mem open file. We keep one such
3992 file open per inferior.
3993
3994 It might be tempting to think about only ever opening one file at
3995 most for all inferiors, closing/reopening the file as we access
3996 memory of different inferiors, to minimize number of file
3997 descriptors open, which can otherwise run into resource limits.
3998 However, that does not work correctly -- if the inferior execs and
3999 we haven't processed the exec event yet, and, we opened a
4000 /proc/PID/mem file, we will get a mem file accessing the post-exec
4001 address space, thinking we're opening it for the pre-exec address
4002 space. That is dangerous as we can poke memory (e.g. clearing
4003 breakpoints) in the post-exec memory by mistake, corrupting the
4004 inferior. For that reason, we open the mem file as early as
4005 possible, right after spawning, forking or attaching to the
4006 inferior, when the inferior is stopped and thus before it has a
4007 chance of execing.
4008
4009 Note that after opening the file, even if the thread we opened it
4010 for subsequently exits, the open file is still usable for accessing
4011 memory. It's only when the whole process exits or execs that the
4012 file becomes invalid, at which point reads/writes return EOF. */
4013
class proc_mem_file
{
public:
  /* Take ownership of FD, an already-open /proc/PID/task/LWP/mem
     file descriptor for the LWP identified by PTID.  FD must be a
     valid descriptor (not -1).  */
  proc_mem_file (ptid_t ptid, int fd)
    : m_ptid (ptid), m_fd (fd)
  {
    gdb_assert (m_fd != -1);
  }

  /* Destroying the object closes the owned descriptor.  */
  ~proc_mem_file ()
  {
    linux_nat_debug_printf ("closing fd %d for /proc/%d/task/%ld/mem",
			    m_fd, m_ptid.pid (), m_ptid.lwp ());
    close (m_fd);
  }

  /* Non-copyable: the destructor closes M_FD, so copies would lead
     to a double close.  */
  DISABLE_COPY_AND_ASSIGN (proc_mem_file);

  /* Return the owned file descriptor.  */
  int fd ()
  {
    return m_fd;
  }

private:
  /* The LWP this file was opened for.  Just for debugging
     purposes.  */
  ptid_t m_ptid;

  /* The file descriptor.  */
  int m_fd = -1;
};
4045
/* The map between an inferior process id (the thread-group leader
   pid), and the open /proc/PID/mem file.  This is stored in a map
   instead of in a per-inferior structure because we need to be able
   to access memory of processes which don't have a corresponding
   struct inferior object.  E.g., with "detach-on-fork on" (the
   default), and "follow-fork parent" (also default), we don't create
   an inferior for the fork child, but we still need to remove
   breakpoints from the fork child's memory.  */
static std::unordered_map<int, proc_mem_file> proc_mem_file_map;
4055
4056 /* Close the /proc/PID/mem file for PID. */
4057
4058 static void
4059 close_proc_mem_file (pid_t pid)
4060 {
4061 proc_mem_file_map.erase (pid);
4062 }
4063
4064 /* Open the /proc/PID/mem file for the process (thread group) of PTID.
4065 We actually open /proc/PID/task/LWP/mem, as that's the LWP we know
4066 exists and is stopped right now. We prefer the
4067 /proc/PID/task/LWP/mem form over /proc/LWP/mem to avoid tid-reuse
4068 races, just in case this is ever called on an already-waited
4069 LWP. */
4070
4071 static void
4072 open_proc_mem_file (ptid_t ptid)
4073 {
4074 auto iter = proc_mem_file_map.find (ptid.pid ());
4075 gdb_assert (iter == proc_mem_file_map.end ());
4076
4077 char filename[64];
4078 xsnprintf (filename, sizeof filename,
4079 "/proc/%d/task/%ld/mem", ptid.pid (), ptid.lwp ());
4080
4081 int fd = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();
4082
4083 if (fd == -1)
4084 {
4085 warning (_("opening /proc/PID/mem file for lwp %d.%ld failed: %s (%d)"),
4086 ptid.pid (), ptid.lwp (),
4087 safe_strerror (errno), errno);
4088 return;
4089 }
4090
4091 proc_mem_file_map.emplace (std::piecewise_construct,
4092 std::forward_as_tuple (ptid.pid ()),
4093 std::forward_as_tuple (ptid, fd));
4094
4095 linux_nat_debug_printf ("opened fd %d for lwp %d.%ld",
4096 fd, ptid.pid (), ptid.lwp ());
4097 }
4098
/* Helper for linux_proc_xfer_memory_partial and
   proc_mem_file_is_writable.  FD is the already opened /proc/pid/mem
   file, and PID is the pid of the corresponding process (used only
   for debug messages).  The rest of the arguments are like
   linux_proc_xfer_memory_partial's.  Exactly one of READBUF /
   WRITEBUF is expected to be non-NULL.  */

static enum target_xfer_status
linux_proc_xfer_memory_partial_fd (int fd, int pid,
				   gdb_byte *readbuf, const gdb_byte *writebuf,
				   ULONGEST offset, LONGEST len,
				   ULONGEST *xfered_len)
{
  ssize_t ret;

  gdb_assert (fd != -1);

  /* Use pread64/pwrite64 if available, since they save a syscall and
     can handle 64-bit offsets even on 32-bit platforms (for instance,
     SPARC debugging a SPARC64 application).  But only use them if the
     offset isn't so high that when cast to off_t it'd be negative, as
     seen on SPARC64.  pread64/pwrite64 outright reject such offsets.
     lseek does not.  */
#ifdef HAVE_PREAD64
  if ((off_t) offset >= 0)
    ret = (readbuf != nullptr
	   ? pread64 (fd, readbuf, len, offset)
	   : pwrite64 (fd, writebuf, len, offset));
  else
#endif
    {
      /* Fallback: position the file first, then do a plain
	 read/write at that position.  */
      ret = lseek (fd, offset, SEEK_SET);
      if (ret != -1)
	ret = (readbuf != nullptr
	       ? read (fd, readbuf, len)
	       : write (fd, writebuf, len));
    }

  if (ret == -1)
    {
      linux_nat_debug_printf ("accessing fd %d for pid %d failed: %s (%d)",
			      fd, pid, safe_strerror (errno), errno);
      return TARGET_XFER_E_IO;
    }
  else if (ret == 0)
    {
      /* EOF means the address space is gone, the whole process exited
	 or execed.  */
      linux_nat_debug_printf ("accessing fd %d for pid %d got EOF",
			      fd, pid);
      return TARGET_XFER_EOF;
    }
  else
    {
      /* Partial transfers are fine; report how much was moved.  */
      *xfered_len = ret;
      return TARGET_XFER_OK;
    }
}
4155
4156 /* Implement the to_xfer_partial target method using /proc/PID/mem.
4157 Because we can use a single read/write call, this can be much more
4158 efficient than banging away at PTRACE_PEEKTEXT. Also, unlike
4159 PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running
4160 threads. */
4161
4162 static enum target_xfer_status
4163 linux_proc_xfer_memory_partial (int pid, gdb_byte *readbuf,
4164 const gdb_byte *writebuf, ULONGEST offset,
4165 LONGEST len, ULONGEST *xfered_len)
4166 {
4167 auto iter = proc_mem_file_map.find (pid);
4168 if (iter == proc_mem_file_map.end ())
4169 return TARGET_XFER_EOF;
4170
4171 int fd = iter->second.fd ();
4172
4173 return linux_proc_xfer_memory_partial_fd (fd, pid, readbuf, writebuf, offset,
4174 len, xfered_len);
4175 }
4176
/* Check whether /proc/pid/mem is writable in the current kernel, and
   return true if so.  It wasn't writable before Linux 2.6.39, but
   there's no way to know whether the feature was backported to older
   kernels.  So we check to see if it works.  The result is cached,
   and this is guaranteed to be called once early during inferior
   startup, so that any warning is printed out consistently between
   GDB invocations.  Note we don't call it during GDB startup instead
   though, because then we might warn with e.g. just "gdb --version"
   on sandboxed systems.  See PR gdb/29907.  */

static bool
proc_mem_file_is_writable ()
{
  /* Cached result; the probe runs at most once per GDB session.  */
  static std::optional<bool> writable;

  if (writable.has_value ())
    return *writable;

  /* Assume non-writable until the probe below proves otherwise.  */
  writable.emplace (false);

  /* We check whether /proc/pid/mem is writable by trying to write to
     one of our variables via /proc/self/mem.  */

  int fd = gdb_open_cloexec ("/proc/self/mem", O_RDWR | O_LARGEFILE, 0).release ();

  if (fd == -1)
    {
      warning (_("opening /proc/self/mem file failed: %s (%d)"),
	       safe_strerror (errno), errno);
      return *writable;
    }

  SCOPE_EXIT { close (fd); };

  /* This is the variable we try to write to.  Note OFFSET below.
     Volatile so the compiler can't assume it stays 0.  */
  volatile gdb_byte test_var = 0;

  gdb_byte writebuf[] = {0x55};
  ULONGEST offset = (uintptr_t) &test_var;
  ULONGEST xfered_len;

  enum target_xfer_status res
    = linux_proc_xfer_memory_partial_fd (fd, getpid (), nullptr, writebuf,
					 offset, 1, &xfered_len);

  if (res == TARGET_XFER_OK)
    {
      /* The write claimed success; double-check that the byte really
	 landed in TEST_VAR.  */
      gdb_assert (xfered_len == 1);
      gdb_assert (test_var == 0x55);
      /* Success.  */
      *writable = true;
    }

  return *writable;
}
4232
4233 /* Parse LINE as a signal set and add its set bits to SIGS. */
4234
4235 static void
4236 add_line_to_sigset (const char *line, sigset_t *sigs)
4237 {
4238 int len = strlen (line) - 1;
4239 const char *p;
4240 int signum;
4241
4242 if (line[len] != '\n')
4243 error (_("Could not parse signal set: %s"), line);
4244
4245 p = line;
4246 signum = len * 4;
4247 while (len-- > 0)
4248 {
4249 int digit;
4250
4251 if (*p >= '0' && *p <= '9')
4252 digit = *p - '0';
4253 else if (*p >= 'a' && *p <= 'f')
4254 digit = *p - 'a' + 10;
4255 else
4256 error (_("Could not parse signal set: %s"), line);
4257
4258 signum -= 4;
4259
4260 if (digit & 1)
4261 sigaddset (sigs, signum + 1);
4262 if (digit & 2)
4263 sigaddset (sigs, signum + 2);
4264 if (digit & 4)
4265 sigaddset (sigs, signum + 3);
4266 if (digit & 8)
4267 sigaddset (sigs, signum + 4);
4268
4269 p++;
4270 }
4271 }
4272
/* Find process PID's pending signals from /proc/pid/status and fill
   in PENDING, BLOCKED and IGNORED to match.  Errors out if the
   status file can't be opened.  */

void
linux_proc_pending_signals (int pid, sigset_t *pending,
			    sigset_t *blocked, sigset_t *ignored)
{
  char buffer[PATH_MAX], fname[PATH_MAX];

  /* Start from empty sets; bits are OR'ed in as matching lines are
     parsed.  */
  sigemptyset (pending);
  sigemptyset (blocked);
  sigemptyset (ignored);
  xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
  gdb_file_up procfile = gdb_fopen_cloexec (fname, "r");
  if (procfile == NULL)
    error (_("Could not open %s"), fname);

  while (fgets (buffer, PATH_MAX, procfile.get ()) != NULL)
    {
      /* Normal queued signals are on the SigPnd line in the status
	 file.  However, 2.6 kernels also have a "shared" pending
	 queue for delivering signals to a thread group, so check for
	 a ShdPnd line also.

	 Unfortunately some Red Hat kernels include the shared pending
	 queue but not the ShdPnd status field.  */

      if (startswith (buffer, "SigPnd:\t"))
	add_line_to_sigset (buffer + 8, pending);
      else if (startswith (buffer, "ShdPnd:\t"))
	add_line_to_sigset (buffer + 8, pending);
      else if (startswith (buffer, "SigBlk:\t"))
	add_line_to_sigset (buffer + 8, blocked);
      else if (startswith (buffer, "SigIgn:\t"))
	add_line_to_sigset (buffer + 8, ignored);
    }
}
4310
4311 static enum target_xfer_status
4312 linux_nat_xfer_osdata (enum target_object object,
4313 const char *annex, gdb_byte *readbuf,
4314 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4315 ULONGEST *xfered_len)
4316 {
4317 gdb_assert (object == TARGET_OBJECT_OSDATA);
4318
4319 *xfered_len = linux_common_xfer_osdata (annex, readbuf, offset, len);
4320 if (*xfered_len == 0)
4321 return TARGET_XFER_EOF;
4322 else
4323 return TARGET_XFER_OK;
4324 }
4325
/* Implement the "static_tracepoint_markers_by_strid" target_ops
   method: query the in-process agent for its static tracepoint
   markers via the qTfSTM/qTsSTM command pair, returning those whose
   string id matches STRID (or all of them if STRID is NULL).  */

std::vector<static_tracepoint_marker>
linux_nat_target::static_tracepoint_markers_by_strid (const char *strid)
{
  char s[IPA_CMD_BUF_SIZE];
  int pid = inferior_ptid.pid ();
  std::vector<static_tracepoint_marker> markers;
  const char *p = s;
  ptid_t ptid = ptid_t (pid, 0);
  static_tracepoint_marker marker;

  /* Pause all */
  target_stop (ptid);

  /* Ask the agent for the first chunk of the marker list.  */
  strcpy (s, "qTfSTM");
  agent_run_command (pid, s, strlen (s) + 1);

  /* Unpause all.  */
  SCOPE_EXIT { target_continue_no_signal (ptid); };

  /* An 'm' reply carries one or more marker definitions; any other
     reply means the list is exhausted.  */
  while (*p++ == 'm')
    {
      do
	{
	  parse_static_tracepoint_marker_definition (p, &p, &marker);

	  if (strid == NULL || marker.str_id == strid)
	    markers.push_back (std::move (marker));
	}
      while (*p++ == ',');	/* comma-separated list */

      /* Fetch the next chunk.  */
      strcpy (s, "qTsSTM");
      agent_run_command (pid, s, strlen (s) + 1);
      p = s;
    }

  return markers;
}
4363
/* target_can_async_p implementation.  */

bool
linux_nat_target::can_async_p ()
{
  /* This flag should be checked in the common target.c code.  */
  gdb_assert (target_async_permitted);

  /* Otherwise, this target is always able to support async mode.  */
  return true;
}
4375
/* Implement the "supports_non_stop" target_ops method: non-stop mode
   is always available on this target.  */

bool
linux_nat_target::supports_non_stop ()
{
  return true;
}
4381
/* to_always_non_stop_p implementation: this target always operates
   in non-stop mode internally.  */

bool
linux_nat_target::always_non_stop_p ()
{
  return true;
}
4389
/* Implement the "supports_multi_process" target_ops method:
   debugging multiple processes at once is supported.  */

bool
linux_nat_target::supports_multi_process ()
{
  return true;
}
4395
/* Implement the "supports_disable_randomization" target_ops method:
   address-space randomization can be disabled for the inferior.  */

bool
linux_nat_target::supports_disable_randomization ()
{
  return true;
}
4401
/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
   we notice when any child changes state, and notify the event-loop;
   it also allows us to use sigsuspend in linux_nat_wait_1 above to
   wait for the arrival of a SIGCHLD.  */

static void
sigchld_handler (int signo)
{
  /* Save and restore errno: a signal handler must not clobber the
     errno of the code it interrupted.  */
  int old_errno = errno;

  /* write_async_safe is usable from a signal handler, unlike the
     regular debug printing routines.  */
  if (debug_linux_nat)
    gdb_stdlog->write_async_safe ("sigchld\n", sizeof ("sigchld\n") - 1);

  if (signo == SIGCHLD)
    {
      /* Let the event loop know that there are events to handle.  */
      linux_nat_target::async_file_mark_if_open ();
    }

  errno = old_errno;
}
4423
/* Callback registered with the target events file descriptor;
   invoked by the event loop when the descriptor becomes readable.  */

static void
handle_target_event (int error, gdb_client_data client_data)
{
  inferior_event_handler (INF_REG_EVENT);
}
4431
4432 /* target_async implementation. */
4433
4434 void
4435 linux_nat_target::async (bool enable)
4436 {
4437 if (enable == is_async_p ())
4438 return;
4439
4440 /* Block child signals while we create/destroy the pipe, as their
4441 handler writes to it. */
4442 gdb::block_signals blocker;
4443
4444 if (enable)
4445 {
4446 if (!async_file_open ())
4447 internal_error ("creating event pipe failed.");
4448
4449 add_file_handler (async_wait_fd (), handle_target_event, NULL,
4450 "linux-nat");
4451
4452 /* There may be pending events to handle. Tell the event loop
4453 to poll them. */
4454 async_file_mark ();
4455 }
4456 else
4457 {
4458 delete_file_handler (async_wait_fd ());
4459 async_file_close ();
4460 }
4461 }
4462
/* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
   event came out.  Iteration callback for iterate_over_lwps; always
   returns 0 so that iteration visits every LWP.  */

static int
linux_nat_stop_lwp (struct lwp_info *lwp)
{
  if (!lwp->stopped)
    {
      linux_nat_debug_printf ("running -> suspending %s",
			      lwp->ptid.to_string ().c_str ());


      /* A stop was already requested for this LWP; don't send
	 another one.  */
      if (lwp->last_resume_kind == resume_stop)
	{
	  linux_nat_debug_printf ("already stopping LWP %ld at GDB's request",
				  lwp->ptid.lwp ());
	  return 0;
	}

      stop_callback (lwp);
      /* Record that this stop was requested by GDB, not caused by
	 the inferior itself.  */
      lwp->last_resume_kind = resume_stop;
    }
  else
    {
      /* Already known to be stopped; do nothing.  */

      if (debug_linux_nat)
	{
	  if (linux_target->find_thread (lwp->ptid)->stop_requested)
	    linux_nat_debug_printf ("already stopped/stop_requested %s",
				    lwp->ptid.to_string ().c_str ());
	  else
	    linux_nat_debug_printf ("already stopped/no stop_requested yet %s",
				    lwp->ptid.to_string ().c_str ());
	}
    }
  return 0;
}
4501
/* Implement the "stop" target_ops method: request a stop of every
   LWP matching PTID.  */

void
linux_nat_target::stop (ptid_t ptid)
{
  LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
  iterate_over_lwps (ptid, linux_nat_stop_lwp);
}
4508
4509 /* Return the cached value of the processor core for thread PTID. */
4510
4511 int
4512 linux_nat_target::core_of_thread (ptid_t ptid)
4513 {
4514 struct lwp_info *info = find_lwp_pid (ptid);
4515
4516 if (info)
4517 return info->core;
4518 return -1;
4519 }
4520
4521 /* Implementation of to_filesystem_is_local. */
4522
4523 bool
4524 linux_nat_target::filesystem_is_local ()
4525 {
4526 struct inferior *inf = current_inferior ();
4527
4528 if (inf->fake_pid_p || inf->pid == 0)
4529 return true;
4530
4531 return linux_ns_same (inf->pid, LINUX_NS_MNT);
4532 }
4533
4534 /* Convert the INF argument passed to a to_fileio_* method
4535 to a process ID suitable for passing to its corresponding
4536 linux_mntns_* function. If INF is non-NULL then the
4537 caller is requesting the filesystem seen by INF. If INF
4538 is NULL then the caller is requesting the filesystem seen
4539 by the GDB. We fall back to GDB's filesystem in the case
4540 that INF is non-NULL but its PID is unknown. */
4541
4542 static pid_t
4543 linux_nat_fileio_pid_of (struct inferior *inf)
4544 {
4545 if (inf == NULL || inf->fake_pid_p || inf->pid == 0)
4546 return getpid ();
4547 else
4548 return inf->pid;
4549 }
4550
4551 /* Implementation of to_fileio_open. */
4552
4553 int
4554 linux_nat_target::fileio_open (struct inferior *inf, const char *filename,
4555 int flags, int mode, int warn_if_slow,
4556 fileio_error *target_errno)
4557 {
4558 int nat_flags;
4559 mode_t nat_mode;
4560 int fd;
4561
4562 if (fileio_to_host_openflags (flags, &nat_flags) == -1
4563 || fileio_to_host_mode (mode, &nat_mode) == -1)
4564 {
4565 *target_errno = FILEIO_EINVAL;
4566 return -1;
4567 }
4568
4569 fd = linux_mntns_open_cloexec (linux_nat_fileio_pid_of (inf),
4570 filename, nat_flags, nat_mode);
4571 if (fd == -1)
4572 *target_errno = host_to_fileio_error (errno);
4573
4574 return fd;
4575 }
4576
4577 /* Implementation of to_fileio_readlink. */
4578
4579 std::optional<std::string>
4580 linux_nat_target::fileio_readlink (struct inferior *inf, const char *filename,
4581 fileio_error *target_errno)
4582 {
4583 char buf[PATH_MAX];
4584 int len;
4585
4586 len = linux_mntns_readlink (linux_nat_fileio_pid_of (inf),
4587 filename, buf, sizeof (buf));
4588 if (len < 0)
4589 {
4590 *target_errno = host_to_fileio_error (errno);
4591 return {};
4592 }
4593
4594 return std::string (buf, len);
4595 }
4596
4597 /* Implementation of to_fileio_unlink. */
4598
4599 int
4600 linux_nat_target::fileio_unlink (struct inferior *inf, const char *filename,
4601 fileio_error *target_errno)
4602 {
4603 int ret;
4604
4605 ret = linux_mntns_unlink (linux_nat_fileio_pid_of (inf),
4606 filename);
4607 if (ret == -1)
4608 *target_errno = host_to_fileio_error (errno);
4609
4610 return ret;
4611 }
4612
/* Implementation of the to_thread_events method.  Record whether the
   core asked for thread creation/exit events to be reported.  */

void
linux_nat_target::thread_events (int enable)
{
  report_thread_events = enable;
}
4620
4621 bool
4622 linux_nat_target::supports_set_thread_options (gdb_thread_options options)
4623 {
4624 constexpr gdb_thread_options supported_options
4625 = GDB_THREAD_OPTION_CLONE | GDB_THREAD_OPTION_EXIT;
4626 return ((options & supported_options) == options);
4627 }
4628
/* Default constructor.  */

linux_nat_target::linux_nat_target ()
{
  /* We don't change the stratum; this target will sit at
     process_stratum and thread_db will set at thread_stratum.  This
     is a little strange, since this is a multi-threaded-capable
     target, but we want to be on the stack below thread_db, and we
     also want to be used for single-threaded processes.  */
}
4637
4638 /* See linux-nat.h. */
4639
4640 bool
4641 linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
4642 {
4643 int pid = get_ptrace_pid (ptid);
4644 return ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo) == 0;
4645 }
4646
/* See nat/linux-nat.h.  */

ptid_t
current_lwp_ptid (void)
{
  /* The caller must have an LWP-bearing ptid selected.  */
  gdb_assert (inferior_ptid.lwp_p ());
  return inferior_ptid;
}
4655
/* Implement 'maintenance info linux-lwps'.  Displays some basic
   information about all the current lwp_info objects, one table row
   per LWP, showing its ptid and the corresponding GDB thread (if
   any).  */

static void
maintenance_info_lwps (const char *arg, int from_tty)
{
  if (all_lwps ().size () == 0)
    {
      gdb_printf ("No Linux LWPs\n");
      return;
    }

  /* Start the width at 8 to match the column heading below, then
     figure out the widest ptid string.  We'll use this to build our
     output table below.  */
  size_t ptid_width = 8;
  for (lwp_info *lp : all_lwps ())
    ptid_width = std::max (ptid_width, lp->ptid.to_string ().size ());

  /* Setup the table headers.  */
  struct ui_out *uiout = current_uiout;
  ui_out_emit_table table_emitter (uiout, 2, -1, "linux-lwps");
  uiout->table_header (ptid_width, ui_left, "lwp-ptid", _("LWP Ptid"));
  uiout->table_header (9, ui_left, "thread-info", _("Thread ID"));
  uiout->table_body ();

  /* Display one table row for each lwp_info.  */
  for (lwp_info *lp : all_lwps ())
    {
      ui_out_emit_tuple tuple_emitter (uiout, "lwp-entry");

      /* An LWP may exist without a corresponding GDB thread, e.g.
	 before thread_db has taken over.  */
      thread_info *th = linux_target->find_thread (lp->ptid);

      uiout->field_string ("lwp-ptid", lp->ptid.to_string ().c_str ());
      if (th == nullptr)
	uiout->field_string ("thread-info", "None");
      else
	uiout->field_string ("thread-info", print_full_thread_id (th));

      uiout->message ("\n");
    }
}
4698
4699 void _initialize_linux_nat ();
4700 void
4701 _initialize_linux_nat ()
4702 {
4703 add_setshow_boolean_cmd ("linux-nat", class_maintenance,
4704 &debug_linux_nat, _("\
4705 Set debugging of GNU/Linux native target."), _(" \
4706 Show debugging of GNU/Linux native target."), _(" \
4707 When on, print debug messages relating to the GNU/Linux native target."),
4708 nullptr,
4709 show_debug_linux_nat,
4710 &setdebuglist, &showdebuglist);
4711
4712 add_setshow_boolean_cmd ("linux-namespaces", class_maintenance,
4713 &debug_linux_namespaces, _("\
4714 Set debugging of GNU/Linux namespaces module."), _("\
4715 Show debugging of GNU/Linux namespaces module."), _("\
4716 Enables printf debugging output."),
4717 NULL,
4718 NULL,
4719 &setdebuglist, &showdebuglist);
4720
4721 /* Install a SIGCHLD handler. */
4722 sigchld_action.sa_handler = sigchld_handler;
4723 sigemptyset (&sigchld_action.sa_mask);
4724 sigchld_action.sa_flags = SA_RESTART;
4725
4726 /* Make it the default. */
4727 sigaction (SIGCHLD, &sigchld_action, NULL);
4728
4729 /* Make sure we don't block SIGCHLD during a sigsuspend. */
4730 gdb_sigmask (SIG_SETMASK, NULL, &suspend_mask);
4731 sigdelset (&suspend_mask, SIGCHLD);
4732
4733 sigemptyset (&blocked_mask);
4734
4735 lwp_lwpid_htab_create ();
4736
4737 add_cmd ("linux-lwps", class_maintenance, maintenance_info_lwps,
4738 _("List the Linux LWPS."), &maintenanceinfolist);
4739 }
4740 \f
4741
4742 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4743 the GNU/Linux Threads library and therefore doesn't really belong
4744 here. */
4745
/* NPTL reserves the first two RT signals, but does not provide any
   way for the debugger to query the signal numbers - fortunately
   they don't change.  Indexed by lin_thread_get_thread_signal.  */
static int lin_thread_signals[] = { __SIGRTMIN, __SIGRTMIN + 1 };
4750
/* See linux-nat.h.  */

unsigned int
lin_thread_get_thread_signal_num (void)
{
  /* Number of entries in LIN_THREAD_SIGNALS.  */
  return sizeof (lin_thread_signals) / sizeof (lin_thread_signals[0]);
}
4758
/* See linux-nat.h.  */

int
lin_thread_get_thread_signal (unsigned int i)
{
  /* I must be a valid index into LIN_THREAD_SIGNALS.  */
  gdb_assert (i < lin_thread_get_thread_signal_num ());
  return lin_thread_signals[i];
}