]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gdb/linux-nat.c
RISC-V: Fixed overwritten IRELATIVE relocs in the .rel.iplt for data reloc.
[thirdparty/binutils-gdb.git] / gdb / linux-nat.c
1 /* GNU/Linux native-dependent code common to multiple platforms.
2
3 Copyright (C) 2001-2024 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "inferior.h"
21 #include "infrun.h"
22 #include "target.h"
23 #include "nat/linux-nat.h"
24 #include "nat/linux-waitpid.h"
25 #include "gdbsupport/gdb_wait.h"
26 #include <unistd.h>
27 #include <sys/syscall.h>
28 #include "nat/gdb_ptrace.h"
29 #include "linux-nat.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
33 #include "linux-fork.h"
34 #include "gdbthread.h"
35 #include "cli/cli-cmds.h"
36 #include "regcache.h"
37 #include "regset.h"
38 #include "inf-child.h"
39 #include "inf-ptrace.h"
40 #include "auxv.h"
41 #include <sys/procfs.h>
42 #include "elf-bfd.h"
43 #include "gregset.h"
44 #include "gdbcore.h"
45 #include <ctype.h>
46 #include <sys/stat.h>
47 #include <fcntl.h>
48 #include "inf-loop.h"
49 #include "gdbsupport/event-loop.h"
50 #include "event-top.h"
51 #include <pwd.h>
52 #include <sys/types.h>
53 #include <dirent.h>
54 #include "xml-support.h"
55 #include <sys/vfs.h>
56 #include "solib.h"
57 #include "nat/linux-osdata.h"
58 #include "linux-tdep.h"
59 #include "symfile.h"
60 #include "gdbsupport/agent.h"
61 #include "tracepoint.h"
62 #include "target-descriptions.h"
63 #include "gdbsupport/filestuff.h"
64 #include "objfiles.h"
65 #include "nat/linux-namespaces.h"
66 #include "gdbsupport/block-signals.h"
67 #include "gdbsupport/fileio.h"
68 #include "gdbsupport/scope-exit.h"
69 #include "gdbsupport/gdb-sigmask.h"
70 #include "gdbsupport/common-debug.h"
71 #include <unordered_map>
72
73 /* This comment documents high-level logic of this file.
74
75 Waiting for events in sync mode
76 ===============================
77
78 When waiting for an event in a specific thread, we just use waitpid,
79 passing the specific pid, and not passing WNOHANG.
80
81 When waiting for an event in all threads, waitpid is not quite good:
82
83 - If the thread group leader exits while other threads in the thread
84 group still exist, waitpid(TGID, ...) hangs. That waitpid won't
85 return an exit status until the other threads in the group are
86 reaped.
87
88 - When a non-leader thread execs, that thread just vanishes without
89 reporting an exit (so we'd hang if we waited for it explicitly in
90 that case). The exec event is instead reported to the TGID pid.
91
92 The solution is to always use -1 and WNOHANG, together with
93 sigsuspend.
94
95 First, we use non-blocking waitpid to check for events. If nothing is
96 found, we use sigsuspend to wait for SIGCHLD. When SIGCHLD arrives,
97 it means something happened to a child process. As soon as we know
98 there's an event, we get back to calling nonblocking waitpid.
99
100 Note that SIGCHLD should be blocked between waitpid and sigsuspend
101 calls, so that we don't miss a signal. If SIGCHLD arrives in between,
102 when it's blocked, the signal becomes pending and sigsuspend
103 immediately notices it and returns.
104
105 Waiting for events in async mode (TARGET_WNOHANG)
106 =================================================
107
108 In async mode, GDB should always be ready to handle both user input
109 and target events, so neither blocking waitpid nor sigsuspend are
110 viable options. Instead, we should asynchronously notify the GDB main
111 event loop whenever there's an unprocessed event from the target. We
112 detect asynchronous target events by handling SIGCHLD signals. To
113 notify the event loop about target events, an event pipe is used
114 --- the pipe is registered as waitable event source in the event loop,
115 the event loop select/poll's on the read end of this pipe (as well on
116 other event sources, e.g., stdin), and the SIGCHLD handler marks the
117 event pipe to raise an event. This is more portable than relying on
118 pselect/ppoll, since on kernels that lack those syscalls, libc
119 emulates them with select/poll+sigprocmask, and that is racy
120 (a.k.a. plain broken).
121
122 Obviously, if we fail to notify the event loop if there's a target
123 event, it's bad. OTOH, if we notify the event loop when there's no
124 event from the target, linux_nat_wait will detect that there's no real
125 event to report, and return event of type TARGET_WAITKIND_IGNORE.
126 This is mostly harmless, but it will waste time and is better avoided.
127
128 The main design point is that every time GDB is outside linux-nat.c,
129 we have a SIGCHLD handler installed that is called when something
130 happens to the target and notifies the GDB event loop. Whenever GDB
131 core decides to handle the event, and calls into linux-nat.c, we
132 process things as in sync mode, except that the we never block in
133 sigsuspend.
134
135 While processing an event, we may end up momentarily blocked in
136 waitpid calls. Those waitpid calls, while blocking, are guarantied to
137 return quickly. E.g., in all-stop mode, before reporting to the core
138 that an LWP hit a breakpoint, all LWPs are stopped by sending them
139 SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
140 Note that this is different from blocking indefinitely waiting for the
141 next event --- here, we're already handling an event.
142
143 Use of signals
144 ==============
145
146 We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
147 signal is not entirely significant; we just need for a signal to be delivered,
148 so that we can intercept it. SIGSTOP's advantage is that it can not be
149 blocked. A disadvantage is that it is not a real-time signal, so it can only
150 be queued once; we do not keep track of other sources of SIGSTOP.
151
152 Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
153 use them, because they have special behavior when the signal is generated -
154 not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
155 kills the entire thread group.
156
157 A delivered SIGSTOP would stop the entire thread group, not just the thread we
158 tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
159 cancel it (by PTRACE_CONT without passing SIGSTOP).
160
161 We could use a real-time signal instead. This would solve those problems; we
162 could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
163 But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
164 generates it, and there are races with trying to find a signal that is not
165 blocked.
166
167 Exec events
168 ===========
169
170 The case of a thread group (process) with 3 or more threads, and a
171 thread other than the leader execs is worth detailing:
172
173 On an exec, the Linux kernel destroys all threads except the execing
174 one in the thread group, and resets the execing thread's tid to the
175 tgid. No exit notification is sent for the execing thread -- from the
176 ptracer's perspective, it appears as though the execing thread just
177 vanishes. Until we reap all other threads except the leader and the
178 execing thread, the leader will be zombie, and the execing thread will
179 be in `D (disc sleep)' state. As soon as all other threads are
180 reaped, the execing thread changes its tid to the tgid, and the
181 previous (zombie) leader vanishes, giving place to the "new"
182 leader.
183
184 Accessing inferior memory
185 =========================
186
187 To access inferior memory, we strongly prefer /proc/PID/mem. We
188 fallback to ptrace if and only if /proc/PID/mem is not writable, as a
189 concession for obsolescent kernels (such as found in RHEL6). For
190 modern kernels, the fallback shouldn't trigger. GDBserver does not
191 have the ptrace fallback already, and at some point, we'll consider
192 removing it from native GDB too.
193
194 /proc/PID/mem has a few advantages over alternatives like
195 PTRACE_PEEKTEXT/PTRACE_POKETEXT or process_vm_readv/process_vm_writev:
196
197 - Because we can use a single read/write call, /proc/PID/mem can be
198 much more efficient than banging away at
199 PTRACE_PEEKTEXT/PTRACE_POKETEXT, one word at a time.
200
201 - /proc/PID/mem allows writing to read-only pages, which we need to
202 e.g., plant breakpoint instructions. process_vm_writev does not
203 allow this.
204
205 - /proc/PID/mem allows memory access even if all threads are running.
206 OTOH, PTRACE_PEEKTEXT/PTRACE_POKETEXT require passing down the tid
207 of a stopped task. This lets us e.g., install breakpoints while the
208 inferior is running, clear a displaced stepping scratch pad when the
209 thread that was displaced stepping exits, print inferior globals,
210 etc., all without having to worry about temporarily pausing some
211 thread.
212
213 - /proc/PID/mem does not suffer from a race that could cause us to
214 access memory of the wrong address space when the inferior execs.
215
216 process_vm_readv/process_vm_writev have this problem.
217
218 E.g., say GDB decides to write to memory just while the inferior
219 execs. In this scenario, GDB could write memory to the post-exec
220 address space thinking it was writing to the pre-exec address space,
221 with high probability of corrupting the inferior. Or if GDB decides
222 instead to read memory just while the inferior execs, it could read
223 bogus contents out of the wrong address space.
224
225 ptrace used to have this problem too, but no longer has since Linux
226 commit dbb5afad100a ("ptrace: make ptrace() fail if the tracee
227 changed its pid unexpectedly"), in Linux 5.13. (And if ptrace were
228 ever changed to allow access memory via zombie or running threads,
229 it would better not forget to consider this scenario.)
230
231 We avoid this race with /proc/PID/mem, by opening the file as soon
232 as we start debugging the inferior, when it is known the inferior is
233 stopped, and holding on to the open file descriptor, to be used
234 whenever we need to access inferior memory. If the inferior execs
235 or exits, reading/writing from/to the file returns 0 (EOF),
236 indicating the address space is gone, and so we return
237 TARGET_XFER_EOF to the core. We close the old file and open a new
238 one when we finally see the PTRACE_EVENT_EXEC event. */
239
240 #ifndef O_LARGEFILE
241 #define O_LARGEFILE 0
242 #endif
243
244 struct linux_nat_target *linux_target;
245
246 /* Does the current host support PTRACE_GETREGSET? */
247 enum tribool have_ptrace_getregset = TRIBOOL_UNKNOWN;
248
249 /* When true, print debug messages relating to the linux native target. */
250
251 static bool debug_linux_nat;
252
/* Implement 'show debug linux-nat'.  */

static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  /* VALUE is the already-rendered setting string supplied by the CLI
     framework; just echo it in a full sentence.  */
  gdb_printf (file, _("Debugging of GNU/Linux native targets is %s.\n"),
	      value);
}
262
263 /* Print a linux-nat debug statement. */
264
265 #define linux_nat_debug_printf(fmt, ...) \
266 debug_prefixed_printf_cond (debug_linux_nat, "linux-nat", fmt, ##__VA_ARGS__)
267
268 /* Print "linux-nat" enter/exit debug statements. */
269
270 #define LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT \
271 scoped_debug_enter_exit (debug_linux_nat, "linux-nat")
272
/* Singly-linked list node pairing a process/LWP id with the raw
   waitpid status collected for it.  */
struct simple_pid_list
{
  int pid;			/* Process/LWP id.  */
  int status;			/* Raw waitpid status for PID.  */
  struct simple_pid_list *next;	/* Next node, or NULL.  */
};
/* New stopped processes whose initial stop has been collected but not
   yet consumed.  Manipulated via add_to_pid_list /
   pull_pid_from_list.  */
static struct simple_pid_list *stopped_pids;
280
281 /* Whether target_thread_events is in effect. */
282 static int report_thread_events;
283
284 static int kill_lwp (int lwpid, int signo);
285
286 static int stop_callback (struct lwp_info *lp);
287
288 static void block_child_signals (sigset_t *prev_mask);
289 static void restore_child_signals_mask (sigset_t *prev_mask);
290
291 struct lwp_info;
292 static struct lwp_info *add_lwp (ptid_t ptid);
293 static void purge_lwp_list (int pid);
294 static void delete_lwp (ptid_t ptid);
295 static struct lwp_info *find_lwp_pid (ptid_t ptid);
296
297 static int lwp_status_pending_p (struct lwp_info *lp);
298
299 static bool is_lwp_marked_dead (lwp_info *lp);
300
301 static void save_stop_reason (struct lwp_info *lp);
302
303 static bool proc_mem_file_is_writable ();
304 static void close_proc_mem_file (pid_t pid);
305 static void open_proc_mem_file (ptid_t ptid);
306
307 /* Return TRUE if LWP is the leader thread of the process. */
308
309 static bool
310 is_leader (lwp_info *lp)
311 {
312 return lp->ptid.pid () == lp->ptid.lwp ();
313 }
314
315 /* Convert an LWP's pending status to a std::string. */
316
317 static std::string
318 pending_status_str (lwp_info *lp)
319 {
320 gdb_assert (lwp_status_pending_p (lp));
321
322 if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
323 return lp->waitstatus.to_string ();
324 else
325 return status_to_str (lp->status);
326 }
327
/* Return true if we should report exit events for LP.  */

static bool
report_exit_events_for (lwp_info *lp)
{
  thread_info *thr = linux_target->find_thread (lp->ptid);
  gdb_assert (thr != nullptr);

  /* Exit events are wanted either globally (target_thread_events in
     effect) or per-thread via the GDB_THREAD_OPTION_EXIT option.  */
  return (report_thread_events
	  || (thr->thread_options () & GDB_THREAD_OPTION_EXIT) != 0);
}
339
340 \f
341 /* LWP accessors. */
342
/* See nat/linux-nat.h.  Accessor: return LWP's full ptid.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return lwp->ptid;
}
350
/* See nat/linux-nat.h.  Store the architecture-specific per-LWP data
   pointer INFO in LWP.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}
359
/* See nat/linux-nat.h.  Accessor: return LWP's architecture-specific
   per-LWP data pointer.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}
367
/* See nat/linux-nat.h.  Accessor: return LWP's `stopped' flag.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}
375
/* See nat/linux-nat.h.  Accessor: return the reason LWP last
   stopped.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}
383
/* See nat/linux-nat.h.  Accessor: return LWP's `step' flag.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->step;
}
391
392 \f
393 /* Trivial list manipulation functions to keep track of a list of
394 new stopped processes. */
395 static void
396 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
397 {
398 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
399
400 new_pid->pid = pid;
401 new_pid->status = status;
402 new_pid->next = *listp;
403 *listp = new_pid;
404 }
405
406 static int
407 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
408 {
409 struct simple_pid_list **p;
410
411 for (p = listp; *p != NULL; p = &(*p)->next)
412 if ((*p)->pid == pid)
413 {
414 struct simple_pid_list *next = (*p)->next;
415
416 *statusp = (*p)->status;
417 xfree (*p);
418 *p = next;
419 return 1;
420 }
421 return 0;
422 }
423
424 /* Return the ptrace options that we want to try to enable. */
425
426 static int
427 linux_nat_ptrace_options (int attached)
428 {
429 int options = 0;
430
431 if (!attached)
432 options |= PTRACE_O_EXITKILL;
433
434 options |= (PTRACE_O_TRACESYSGOOD
435 | PTRACE_O_TRACEVFORKDONE
436 | PTRACE_O_TRACEVFORK
437 | PTRACE_O_TRACEFORK
438 | PTRACE_O_TRACEEXEC);
439
440 return options;
441 }
442
/* Initialize ptrace and procfs warnings and check for supported
   ptrace features given PID.

   ATTACHED should be nonzero iff we attached to the inferior.  */

static void
linux_init_ptrace_procfs (pid_t pid, int attached)
{
  int options = linux_nat_ptrace_options (attached);

  /* Turn on the selected PTRACE_O_* event-reporting options on PID.  */
  linux_enable_event_reporting (pid, options);
  /* Emit any one-time warnings about ptrace/procfs limitations.  */
  linux_ptrace_init_warnings ();
  linux_proc_init_warnings ();
  /* Probe whether /proc/PID/mem is writable (memory-access path
     selection; see the "Accessing inferior memory" comment above).  */
  proc_mem_file_is_writable ();
}
458
/* Destructor.  No dynamic state of its own to release.  */
linux_nat_target::~linux_nat_target ()
{}
461
/* Target hook called after attaching to process PID.  */
void
linux_nat_target::post_attach (int pid)
{
  /* ATTACHED == 1: we attached to an already-running process.  */
  linux_init_ptrace_procfs (pid, 1);
}
467
/* Implement the virtual inf_ptrace_target::post_startup_inferior method.  */

void
linux_nat_target::post_startup_inferior (ptid_t ptid)
{
  /* ATTACHED == 0: we spawned this process ourselves.  */
  linux_init_ptrace_procfs (ptid.pid (), 0);
}
475
476 /* Return the number of known LWPs in the tgid given by PID. */
477
478 static int
479 num_lwps (int pid)
480 {
481 int count = 0;
482
483 for (const lwp_info *lp ATTRIBUTE_UNUSED : all_lwps ())
484 if (lp->ptid.pid () == pid)
485 count++;
486
487 return count;
488 }
489
/* Deleter for lwp_info unique_ptr specialisation.  */

struct lwp_deleter
{
  /* Dispose of LWP by removing it from the global LWP structures via
     delete_lwp, rather than calling operator delete directly.  */
  void operator() (struct lwp_info *lwp) const
  {
    delete_lwp (lwp->ptid);
  }
};

/* A unique_ptr specialisation for lwp_info.  */

typedef std::unique_ptr<struct lwp_info, lwp_deleter> lwp_info_up;
503
/* Target hook for follow_fork.  CHILD_INF is the (v)fork child's
   inferior, CHILD_PTID its ptid, and FORK_KIND the event kind
   (e.g. TARGET_WAITKIND_VFORKED).  FOLLOW_CHILD selects which process
   GDB stays attached to; DETACH_FORK says whether to detach the
   un-followed process.  */

void
linux_nat_target::follow_fork (inferior *child_inf, ptid_t child_ptid,
			       target_waitkind fork_kind, bool follow_child,
			       bool detach_fork)
{
  /* Let the common ptrace layer do its bookkeeping first.  */
  inf_ptrace_target::follow_fork (child_inf, child_ptid, fork_kind,
				  follow_child, detach_fork);

  if (!follow_child)
    {
      bool has_vforked = fork_kind == TARGET_WAITKIND_VFORKED;
      ptid_t parent_ptid = inferior_ptid;
      int parent_pid = parent_ptid.lwp ();
      int child_pid = child_ptid.lwp ();

      /* We're already attached to the parent, by default.  */
      lwp_info *child_lp = add_lwp (child_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;

      /* Detach new forked process?  */
      if (detach_fork)
	{
	  int child_stop_signal = 0;
	  bool detach_child = true;

	  /* Move CHILD_LP into a unique_ptr and clear the source pointer
	     to prevent us doing anything stupid with it.  */
	  lwp_info_up child_lp_ptr (child_lp);
	  child_lp = nullptr;

	  linux_target->low_prepare_to_resume (child_lp_ptr.get ());

	  /* When debugging an inferior in an architecture that supports
	     hardware single stepping on a kernel without commit
	     6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
	     process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
	     set if the parent process had them set.
	     To work around this, single step the child process
	     once before detaching to clear the flags.  */

	  /* Note that we consult the parent's architecture instead of
	     the child's because there's no inferior for the child at
	     this point.  */
	  if (!gdbarch_software_single_step_p (target_thread_architecture
					       (parent_ptid)))
	    {
	      int status;

	      linux_disable_event_reporting (child_pid);
	      if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
		perror_with_name (_("Couldn't do single step"));
	      if (my_waitpid (child_pid, &status, 0) < 0)
		perror_with_name (_("Couldn't wait vfork process"));
	      else
		{
		  /* If the single-step didn't leave the child in a
		     ptrace stop (e.g., it exited), don't try to
		     detach.  */
		  detach_child = WIFSTOPPED (status);
		  child_stop_signal = WSTOPSIG (status);
		}
	    }

	  if (detach_child)
	    {
	      int signo = child_stop_signal;

	      /* Only re-deliver the stop signal on detach if the user
		 configured it to be passed to the inferior.  */
	      if (signo != 0
		  && !signal_pass_state (gdb_signal_from_host (signo)))
		signo = 0;
	      ptrace (PTRACE_DETACH, child_pid, 0, signo);

	      close_proc_mem_file (child_pid);
	    }
	}

      if (has_vforked)
	{
	  lwp_info *parent_lp = find_lwp_pid (parent_ptid);
	  linux_nat_debug_printf ("waiting for VFORK_DONE on %d", parent_pid);
	  parent_lp->stopped = 1;

	  /* We'll handle the VFORK_DONE event like any other
	     event, in target_wait.  */
	}
    }
  else
    {
      /* Following the child: just record its main LWP as stopped.  */
      struct lwp_info *child_lp;

      child_lp = add_lwp (child_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;
    }
}
599
600 \f
int
linux_nat_target::insert_fork_catchpoint (int pid)
{
  /* Nothing to insert: fork events are already reported via the
     PTRACE_O_TRACEFORK option (see linux_nat_ptrace_options).
     Return 0 for success.  */
  return 0;
}
606
int
linux_nat_target::remove_fork_catchpoint (int pid)
{
  /* Nothing to remove: fork event reporting stays enabled.  Return 0
     for success.  */
  return 0;
}
612
int
linux_nat_target::insert_vfork_catchpoint (int pid)
{
  /* Nothing to insert: vfork events are already reported via the
     PTRACE_O_TRACEVFORK option (see linux_nat_ptrace_options).
     Return 0 for success.  */
  return 0;
}
618
int
linux_nat_target::remove_vfork_catchpoint (int pid)
{
  /* Nothing to remove: vfork event reporting stays enabled.  Return 0
     for success.  */
  return 0;
}
624
int
linux_nat_target::insert_exec_catchpoint (int pid)
{
  /* Nothing to insert: exec events are already reported via the
     PTRACE_O_TRACEEXEC option (see linux_nat_ptrace_options).
     Return 0 for success.  */
  return 0;
}
630
int
linux_nat_target::remove_exec_catchpoint (int pid)
{
  /* Nothing to remove: exec event reporting stays enabled.  Return 0
     for success.  */
  return 0;
}
636
int
linux_nat_target::set_syscall_catchpoint (int pid, bool needed, int any_count,
					  gdb::array_view<const int> syscall_counts)
{
  /* On GNU/Linux, we ignore the arguments.  It means that we only
     enable the syscall catchpoints, but do not disable them.

     Also, we do not use the `syscall_counts' information because we do not
     filter system calls here.  We let GDB do the logic for us.

     Return 0 for success.  */
  return 0;
}
648
649 /* List of known LWPs, keyed by LWP PID. This speeds up the common
650 case of mapping a PID returned from the kernel to our corresponding
651 lwp_info data structure. */
652 static htab_t lwp_lwpid_htab;
653
654 /* Calculate a hash from a lwp_info's LWP PID. */
655
656 static hashval_t
657 lwp_info_hash (const void *ap)
658 {
659 const struct lwp_info *lp = (struct lwp_info *) ap;
660 pid_t pid = lp->ptid.lwp ();
661
662 return iterative_hash_object (pid, 0);
663 }
664
665 /* Equality function for the lwp_info hash table. Compares the LWP's
666 PID. */
667
668 static int
669 lwp_lwpid_htab_eq (const void *a, const void *b)
670 {
671 const struct lwp_info *entry = (const struct lwp_info *) a;
672 const struct lwp_info *element = (const struct lwp_info *) b;
673
674 return entry->ptid.lwp () == element->ptid.lwp ();
675 }
676
/* Create the lwp_lwpid_htab hash table.  */

static void
lwp_lwpid_htab_create (void)
{
  /* 100 is only the initial size hint.  No per-entry free function is
     registered; LWP lifetime is managed explicitly (see
     delete_lwp).  */
  lwp_lwpid_htab = htab_create (100, lwp_info_hash, lwp_lwpid_htab_eq, NULL);
}
684
685 /* Add LP to the hash table. */
686
687 static void
688 lwp_lwpid_htab_add_lwp (struct lwp_info *lp)
689 {
690 void **slot;
691
692 slot = htab_find_slot (lwp_lwpid_htab, lp, INSERT);
693 gdb_assert (slot != NULL && *slot == NULL);
694 *slot = lp;
695 }
696
697 /* Head of doubly-linked list of known LWPs. Sorted by reverse
698 creation order. This order is assumed in some cases. E.g.,
699 reaping status after killing alls lwps of a process: the leader LWP
700 must be reaped last. */
701
702 static intrusive_list<lwp_info> lwp_list;
703
/* See linux-nat.h.  Return a range over all known LWPs, in reverse
   creation order (see lwp_list).  */

lwp_info_range
all_lwps ()
{
  return lwp_info_range (lwp_list.begin ());
}
711
/* See linux-nat.h.  Like all_lwps, but yields a "safe" range,
   presumably tolerating deletion of the current element during
   iteration (used by iterate_over_lwps).  */

lwp_info_safe_range
all_lwps_safe ()
{
  return lwp_info_safe_range (lwp_list.begin ());
}
719
/* Add LP to sorted-by-reverse-creation-order doubly-linked list.  */

static void
lwp_list_add (struct lwp_info *lp)
{
  /* Pushing at the front keeps the list in reverse creation order.  */
  lwp_list.push_front (*lp);
}
727
/* Remove LP from sorted-by-reverse-creation-order doubly-linked
   list.  LP itself is not destroyed here.  */

static void
lwp_list_remove (struct lwp_info *lp)
{
  /* Remove from sorted-by-creation-order list.  */
  lwp_list.erase (lwp_list.iterator_to (*lp));
}
737
738 \f
739
740 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
741 _initialize_linux_nat. */
742 static sigset_t suspend_mask;
743
744 /* Signals to block to make that sigsuspend work. */
745 static sigset_t blocked_mask;
746
747 /* SIGCHLD action. */
748 static struct sigaction sigchld_action;
749
/* Block child signals (SIGCHLD and linux threads signals), and store
   the previous mask in PREV_MASK.  Pair with
   restore_child_signals_mask.  */

static void
block_child_signals (sigset_t *prev_mask)
{
  /* Make sure SIGCHLD is blocked.  */
  if (!sigismember (&blocked_mask, SIGCHLD))
    sigaddset (&blocked_mask, SIGCHLD);

  gdb_sigmask (SIG_BLOCK, &blocked_mask, prev_mask);
}
762
/* Restore child signals mask, previously returned by
   block_child_signals.  */

static void
restore_child_signals_mask (sigset_t *prev_mask)
{
  /* SIG_SETMASK (not SIG_UNBLOCK): reinstate exactly the saved
     mask.  */
  gdb_sigmask (SIG_SETMASK, prev_mask, NULL);
}
771
/* Mask of signals to pass directly to the inferior.  */
static sigset_t pass_mask;

/* Update signals to pass to the inferior.  PASS_SIGNALS is indexed by
   target (GDB) signal number; a nonzero entry selects the signal for
   direct delivery.  */
void
linux_nat_target::pass_signals
  (gdb::array_view<const unsigned char> pass_signals)
{
  int signo;

  /* Rebuild PASS_MASK from scratch on every call.  */
  sigemptyset (&pass_mask);

  /* Iterate over host signal numbers, mapping each to the target
     signal number used to index PASS_SIGNALS.  */
  for (signo = 1; signo < NSIG; signo++)
    {
      int target_signo = gdb_signal_from_host (signo);
      if (target_signo < pass_signals.size () && pass_signals[target_signo])
	sigaddset (&pass_mask, signo);
    }
}
791
792 \f
793
794 /* Prototypes for local functions. */
795 static int stop_wait_callback (struct lwp_info *lp);
796 static int resume_stopped_resumed_lwps (struct lwp_info *lp, const ptid_t wait_ptid);
797 static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
798
799 \f
800
/* Destroy and free LP.  */

lwp_info::~lwp_info ()
{
  /* Let the arch specific bits release arch_lwp_info.  */
  linux_target->low_delete_thread (this->arch_private);
}
808
/* Traversal function for purge_lwp_list.  SLOT holds an lwp_info
   pointer; INFO points at the pid being purged.  */

static int
lwp_lwpid_htab_remove_pid (void **slot, void *info)
{
  struct lwp_info *lp = (struct lwp_info *) *slot;
  int pid = *(int *) info;

  if (lp->ptid.pid () == pid)
    {
      /* Clear the slot first, then unlink from the list, then
	 destroy.  */
      htab_clear_slot (lwp_lwpid_htab, slot);
      lwp_list_remove (lp);
      delete lp;
    }

  /* Nonzero return keeps the traversal going.  */
  return 1;
}
826
/* Remove all LWPs belong to PID from the lwp list.  */

static void
purge_lwp_list (int pid)
{
  /* The _noresize variant is required because the callback deletes
     entries while the table is being traversed.  */
  htab_traverse_noresize (lwp_lwpid_htab, lwp_lwpid_htab_remove_pid, &pid);
}
834
835 /* Add the LWP specified by PTID to the list. PTID is the first LWP
836 in the process. Return a pointer to the structure describing the
837 new LWP.
838
839 This differs from add_lwp in that we don't let the arch specific
840 bits know about this new thread. Current clients of this callback
841 take the opportunity to install watchpoints in the new thread, and
842 we shouldn't do that for the first thread. If we're spawning a
843 child ("run"), the thread executes the shell wrapper first, and we
844 shouldn't touch it until it execs the program we want to debug.
845 For "attach", it'd be okay to call the callback, but it's not
846 necessary, because watchpoints can't yet have been inserted into
847 the inferior. */
848
849 static struct lwp_info *
850 add_initial_lwp (ptid_t ptid)
851 {
852 gdb_assert (ptid.lwp_p ());
853
854 lwp_info *lp = new lwp_info (ptid);
855
856
857 /* Add to sorted-by-reverse-creation-order list. */
858 lwp_list_add (lp);
859
860 /* Add to keyed-by-pid htab. */
861 lwp_lwpid_htab_add_lwp (lp);
862
863 return lp;
864 }
865
866 /* Add the LWP specified by PID to the list. Return a pointer to the
867 structure describing the new LWP. The LWP should already be
868 stopped. */
869
870 static struct lwp_info *
871 add_lwp (ptid_t ptid)
872 {
873 struct lwp_info *lp;
874
875 lp = add_initial_lwp (ptid);
876
877 /* Let the arch specific bits know about this new thread. Current
878 clients of this callback take the opportunity to install
879 watchpoints in the new thread. We don't do this for the first
880 thread though. See add_initial_lwp. */
881 linux_target->low_new_thread (lp);
882
883 return lp;
884 }
885
/* Remove the LWP specified by PTID from the list and destroy it.
   No-op if no such LWP is known.  */

static void
delete_lwp (ptid_t ptid)
{
  /* Probe the hash table with a stack-allocated dummy keyed on
     PTID's lwpid.  */
  lwp_info dummy (ptid);

  void **slot = htab_find_slot (lwp_lwpid_htab, &dummy, NO_INSERT);
  if (slot == NULL)
    return;

  lwp_info *lp = *(struct lwp_info **) slot;
  gdb_assert (lp != NULL);

  htab_clear_slot (lwp_lwpid_htab, slot);

  /* Remove from sorted-by-creation-order list.  */
  lwp_list_remove (lp);

  /* Release.  */
  delete lp;
}
908
909 /* Return a pointer to the structure describing the LWP corresponding
910 to PID. If no corresponding LWP could be found, return NULL. */
911
912 static struct lwp_info *
913 find_lwp_pid (ptid_t ptid)
914 {
915 int lwp;
916
917 if (ptid.lwp_p ())
918 lwp = ptid.lwp ();
919 else
920 lwp = ptid.pid ();
921
922 lwp_info dummy (ptid_t (0, lwp));
923 return (struct lwp_info *) htab_find (lwp_lwpid_htab, &dummy);
924 }
925
926 /* See nat/linux-nat.h. */
927
928 struct lwp_info *
929 iterate_over_lwps (ptid_t filter,
930 gdb::function_view<iterate_over_lwps_ftype> callback)
931 {
932 for (lwp_info *lp : all_lwps_safe ())
933 {
934 if (lp->ptid.matches (filter))
935 {
936 if (callback (lp) != 0)
937 return lp;
938 }
939 }
940
941 return NULL;
942 }
943
/* Update our internal state when changing from one checkpoint to
   another indicated by NEW_PTID.  We can only switch single-threaded
   applications, so we only create one new LWP, and the previous list
   is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  /* Drop every LWP of the current inferior; the checkpoint is
     single-threaded.  */
  purge_lwp_list (inferior_ptid.pid ());

  lp = add_lwp (new_ptid);
  lp->stopped = 1;

  /* This changes the thread's ptid while preserving the gdb thread
     num.  Also changes the inferior pid, while preserving the
     inferior num.  */
  thread_change_ptid (linux_target, inferior_ptid, new_ptid);

  /* We've just told GDB core that the thread changed target id, but,
     in fact, it really is a different thread, with different register
     contents.  */
  registers_changed ();
}
969
/* Handle the exit of a single thread LP.  If DEL_THREAD is true,
   delete the thread_info associated to LP, if it exists.  The
   lwp_info itself is always deleted.  */

static void
exit_lwp (struct lwp_info *lp, bool del_thread = true)
{
  struct thread_info *th = linux_target->find_thread (lp->ptid);

  if (th != nullptr && del_thread)
    delete_thread (th);

  delete_lwp (lp->ptid);
}
983
/* Wait for the LWP specified by PTID, which we have just attached to.
   Returns a wait status for that LWP, to cache.  Sets *SIGNALLED to 1
   if the initial stop signal was something other than SIGSTOP.  */

static int
linux_nat_post_attach_wait (ptid_t ptid, int *signalled)
{
  pid_t new_pid, pid = ptid.lwp ();
  int status;

  if (linux_proc_pid_is_stopped (pid))
    {
      linux_nat_debug_printf ("Attaching to a stopped process");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
	 is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the SIGSTOP
	 (or a higher priority signal, just like normal PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, __WALL);
  gdb_assert (pid == new_pid);

  if (!WIFSTOPPED (status))
    {
      /* The pid we tried to attach has apparently just exited.  */
      linux_nat_debug_printf ("Failed to stop %d: %s", pid,
			      status_to_str (status).c_str ());
      /* Return the exit status; the caller caches it.  */
      return status;
    }

  if (WSTOPSIG (status) != SIGSTOP)
    {
      /* Stopped by a signal other than the expected SIGSTOP; tell the
	 caller so it can re-deliver the signal later.  */
      *signalled = 1;
      linux_nat_debug_printf ("Received %s after attaching",
			      status_to_str (status).c_str ());
    }

  return status;
}
1040
/* Implement the "create_inferior" target_ops method.  Start a new
   inferior running EXEC_FILE with arguments ALLARGS and environment
   ENV, then open the /proc/PID/mem file used for reading/writing
   inferior memory.  */

void
linux_nat_target::create_inferior (const char *exec_file,
				   const std::string &allargs,
				   char **env, int from_tty)
{
  /* Optionally disable ASLR for the child; restored on scope exit.  */
  maybe_disable_address_space_randomization restore_personality
    (disable_randomization);

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

  /* Make sure we report all signals during startup.  */
  pass_signals ({});

  inf_ptrace_target::create_inferior (exec_file, allargs, env, from_tty);

  open_proc_mem_file (inferior_ptid);
}
1059
1060 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1061 already attached. Returns true if a new LWP is found, false
1062 otherwise. */
1063
1064 static int
1065 attach_proc_task_lwp_callback (ptid_t ptid)
1066 {
1067 struct lwp_info *lp;
1068
1069 /* Ignore LWPs we're already attached to. */
1070 lp = find_lwp_pid (ptid);
1071 if (lp == NULL)
1072 {
1073 int lwpid = ptid.lwp ();
1074
1075 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
1076 {
1077 int err = errno;
1078
1079 /* Be quiet if we simply raced with the thread exiting.
1080 EPERM is returned if the thread's task still exists, and
1081 is marked as exited or zombie, as well as other
1082 conditions, so in that case, confirm the status in
1083 /proc/PID/status. */
1084 if (err == ESRCH
1085 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1086 {
1087 linux_nat_debug_printf
1088 ("Cannot attach to lwp %d: thread is gone (%d: %s)",
1089 lwpid, err, safe_strerror (err));
1090
1091 }
1092 else
1093 {
1094 std::string reason
1095 = linux_ptrace_attach_fail_reason_string (ptid, err);
1096
1097 error (_("Cannot attach to lwp %d: %s"),
1098 lwpid, reason.c_str ());
1099 }
1100 }
1101 else
1102 {
1103 linux_nat_debug_printf ("PTRACE_ATTACH %s, 0, 0 (OK)",
1104 ptid.to_string ().c_str ());
1105
1106 lp = add_lwp (ptid);
1107
1108 /* The next time we wait for this LWP we'll see a SIGSTOP as
1109 PTRACE_ATTACH brings it to a halt. */
1110 lp->signalled = 1;
1111
1112 /* We need to wait for a stop before being able to make the
1113 next ptrace call on this LWP. */
1114 lp->must_set_ptrace_flags = 1;
1115
1116 /* So that wait collects the SIGSTOP. */
1117 lp->resumed = 1;
1118 }
1119
1120 return 1;
1121 }
1122 return 0;
1123 }
1124
/* Implement the "attach" target_ops method.  Attach to the process
   named by ARGS, wait for it to stop, and then attach to every other
   LWP in the same thread group found via /proc.  */

void
linux_nat_target::attach (const char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;
  ptid_t ptid;

  /* Make sure we report all signals during attach.  */
  pass_signals ({});

  try
    {
      inf_ptrace_target::attach (args, from_tty);
    }
  catch (const gdb_exception_error &ex)
    {
      /* Augment the failure with a Linux-specific explanation
	 (e.g. ptrace scope restrictions), if one is available.  */
      pid_t pid = parse_pid_to_attach (args);
      std::string reason = linux_ptrace_attach_fail_reason (pid);

      if (!reason.empty ())
	throw_error (ex.error, "warning: %s\n%s", reason.c_str (),
		     ex.what ());
      else
	throw_error (ex.error, "%s", ex.what ());
    }

  /* The ptrace base target adds the main thread with (pid,0,0)
     format.  Decorate it with lwp info.  */
  ptid = ptid_t (inferior_ptid.pid (),
		 inferior_ptid.pid ());
  thread_change_ptid (linux_target, inferior_ptid, ptid);

  /* Add the initial process as the first LWP to the list.  */
  lp = add_initial_lwp (ptid);

  status = linux_nat_post_attach_wait (lp->ptid, &lp->signalled);
  if (!WIFSTOPPED (status))
    {
      /* The process died before we could stop it; report why.  */
      if (WIFEXITED (status))
	{
	  int exit_code = WEXITSTATUS (status);

	  target_terminal::ours ();
	  target_mourn_inferior (inferior_ptid);
	  if (exit_code == 0)
	    error (_("Unable to attach: program exited normally."));
	  else
	    error (_("Unable to attach: program exited with code %d."),
		   exit_code);
	}
      else if (WIFSIGNALED (status))
	{
	  enum gdb_signal signo;

	  target_terminal::ours ();
	  target_mourn_inferior (inferior_ptid);

	  signo = gdb_signal_from_host (WTERMSIG (status));
	  error (_("Unable to attach: program terminated with signal "
		   "%s, %s."),
		 gdb_signal_to_name (signo),
		 gdb_signal_to_string (signo));
	}

      internal_error (_("unexpected status %d for PID %ld"),
		      status, (long) ptid.lwp ());
    }

  lp->stopped = 1;

  open_proc_mem_file (lp->ptid);

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  linux_nat_debug_printf ("waitpid %ld, saving status %s",
			  (long) lp->ptid.pid (),
			  status_to_str (status).c_str ());

  lp->status = status;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  The inferior may be using raw clone instead of
     using pthreads.  But even if it is using pthreads, thread_db
     walks structures in the inferior's address space to find the list
     of threads/LWPs, and those structures may well be corrupted.
     Note that once thread_db is loaded, we'll still use it to list
     threads and associate pthread info with each LWP.  */
  try
    {
      linux_proc_attach_tgid_threads (lp->ptid.pid (),
				      attach_proc_task_lwp_callback);
    }
  catch (const gdb_exception_error &)
    {
      /* Failed to attach to some LWP.  Detach any we've already
	 attached to.  */
      iterate_over_lwps (ptid_t (ptid.pid ()),
			 [] (struct lwp_info *lwp) -> int
			 {
			   /* Ignore errors when detaching.  */
			   ptrace (PTRACE_DETACH, lwp->ptid.lwp (), 0, 0);
			   delete_lwp (lwp->ptid);
			   return 0;
			 });

      target_terminal::ours ();
      target_mourn_inferior (inferior_ptid);

      throw;
    }

  /* Add all the LWPs to gdb's thread list.  The leader was already
     added by the base target; only non-leader LWPs are new here.  */
  iterate_over_lwps (ptid_t (ptid.pid ()),
		     [] (struct lwp_info *lwp) -> int
		     {
		       if (lwp->ptid.pid () != lwp->ptid.lwp ())
			 {
			   add_thread (linux_target, lwp->ptid);
			   set_running (linux_target, lwp->ptid, true);
			   set_executing (linux_target, lwp->ptid, true);
			 }
		       return 0;
		     });
}
1249
1250 /* Ptrace-detach the thread with pid PID. */
1251
1252 static void
1253 detach_one_pid (int pid, int signo)
1254 {
1255 if (ptrace (PTRACE_DETACH, pid, 0, signo) < 0)
1256 {
1257 int save_errno = errno;
1258
1259 /* We know the thread exists, so ESRCH must mean the lwp is
1260 zombie. This can happen if one of the already-detached
1261 threads exits the whole thread group. In that case we're
1262 still attached, and must reap the lwp. */
1263 if (save_errno == ESRCH)
1264 {
1265 int ret, status;
1266
1267 ret = my_waitpid (pid, &status, __WALL);
1268 if (ret == -1)
1269 {
1270 warning (_("Couldn't reap LWP %d while detaching: %s"),
1271 pid, safe_strerror (errno));
1272 }
1273 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1274 {
1275 warning (_("Reaping LWP %d while detaching "
1276 "returned unexpected status 0x%x"),
1277 pid, status);
1278 }
1279 }
1280 else
1281 error (_("Can't detach %d: %s"),
1282 pid, safe_strerror (save_errno));
1283 }
1284 else
1285 linux_nat_debug_printf ("PTRACE_DETACH (%d, %s, 0) (OK)",
1286 pid, strsignal (signo));
1287 }
1288
/* Get pending signal of THREAD as a host signal number, for detaching
   purposes.  This is the signal the thread last stopped for, which we
   need to deliver to the thread when detaching, otherwise, it'd be
   suppressed/lost.  Returns 0 if there is no signal to deliver.  */

static int
get_detach_signal (struct lwp_info *lp)
{
  enum gdb_signal signo = GDB_SIGNAL_0;

  /* If we paused threads momentarily, we may have stored pending
     events in lp->status or lp->waitstatus (see stop_wait_callback),
     and GDB core hasn't seen any signal for those threads.
     Otherwise, the last signal reported to the core is found in the
     thread object's stop_signal.

     There's a corner case that isn't handled here at present.  Only
     if the thread stopped with a TARGET_WAITKIND_STOPPED does
     stop_signal make sense as a real signal to pass to the inferior.
     Some catchpoint related events, like
     TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
     to GDB_SIGNAL_SIGTRAP when the catchpoint triggers.  But,
     those traps are debug API (ptrace in our case) related and
     induced; the inferior wouldn't see them if it wasn't being
     traced.  Hence, we should never pass them to the inferior, even
     when set to pass state.  Since this corner case isn't handled by
     infrun.c when proceeding with a signal, for consistency, neither
     do we handle it here (or elsewhere in the file we check for
     signal pass state).  Normally SIGTRAP isn't set to pass state, so
     this is really a corner case.  */

  if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
    signo = GDB_SIGNAL_0; /* a pending ptrace event, not a real signal.  */
  else if (lp->status)
    signo = gdb_signal_from_host (WSTOPSIG (lp->status));
  else
    {
      thread_info *tp = linux_target->find_thread (lp->ptid);

      /* NOTE(review): TP is dereferenced below without a null check;
	 presumably an LWP reaching this point always has a
	 corresponding thread_info -- confirm.  */
      if (target_is_non_stop_p () && !tp->executing ())
	{
	  if (tp->has_pending_waitstatus ())
	    {
	      /* If the thread has a pending event, and it was stopped with a
		 signal, use that signal to resume it.  If it has a pending
		 event of another kind, it was not stopped with a signal, so
		 resume it without a signal.  */
	      if (tp->pending_waitstatus ().kind () == TARGET_WAITKIND_STOPPED)
		signo = tp->pending_waitstatus ().sig ();
	      else
		signo = GDB_SIGNAL_0;
	    }
	  else
	    signo = tp->stop_signal ();
	}
      else if (!target_is_non_stop_p ())
	{
	  ptid_t last_ptid;
	  process_stratum_target *last_target;

	  get_last_target_status (&last_target, &last_ptid, nullptr);

	  /* Only trust the last reported stop signal if it was
	     reported for this very LWP by this target.  */
	  if (last_target == linux_target
	      && lp->ptid.lwp () == last_ptid.lwp ())
	    signo = tp->stop_signal ();
	}
    }

  if (signo == GDB_SIGNAL_0)
    {
      linux_nat_debug_printf ("lwp %s has no pending signal",
			      lp->ptid.to_string ().c_str ());
    }
  else if (!signal_pass_state (signo))
    {
      linux_nat_debug_printf
	("lwp %s had signal %s but it is in no pass state",
	 lp->ptid.to_string ().c_str (), gdb_signal_to_string (signo));
    }
  else
    {
      linux_nat_debug_printf ("lwp %s has pending signal %s",
			      lp->ptid.to_string ().c_str (),
			      gdb_signal_to_string (signo));

      return gdb_signal_to_host (signo);
    }

  return 0;
}
1379
/* If LP has a pending fork/vfork/clone status, return it.  The status
   may be found in one of four places, checked in order: the raw
   lwp_info::status, lwp_info::waitstatus, the thread's pending
   waitstatus, or the thread's pending_follow.  Returns an empty
   optional if there is no such pending event.  */

static std::optional<target_waitstatus>
get_pending_child_status (lwp_info *lp)
{
  LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;

  linux_nat_debug_printf ("lwp %s (stopped = %d)",
			  lp->ptid.to_string ().c_str (), lp->stopped);

  /* Check in lwp_info::status.  */
  if (WIFSTOPPED (lp->status) && linux_is_extended_waitstatus (lp->status))
    {
      int event = linux_ptrace_get_extended_event (lp->status);

      if (event == PTRACE_EVENT_FORK
	  || event == PTRACE_EVENT_VFORK
	  || event == PTRACE_EVENT_CLONE)
	{
	  /* The new child's pid is carried in the ptrace event
	     message; fetch it to build the waitstatus.  */
	  unsigned long child_pid;
	  int ret = ptrace (PTRACE_GETEVENTMSG, lp->ptid.lwp (), 0, &child_pid);
	  if (ret == 0)
	    {
	      target_waitstatus ws;

	      if (event == PTRACE_EVENT_FORK)
		ws.set_forked (ptid_t (child_pid, child_pid));
	      else if (event == PTRACE_EVENT_VFORK)
		ws.set_vforked (ptid_t (child_pid, child_pid));
	      else if (event == PTRACE_EVENT_CLONE)
		ws.set_thread_cloned (ptid_t (lp->ptid.pid (), child_pid));
	      else
		gdb_assert_not_reached ("unhandled");

	      return ws;
	    }
	  else
	    {
	      perror_warning_with_name (_("Failed to retrieve event msg"));
	      return {};
	    }
	}
    }

  /* Check in lwp_info::waitstatus.  */
  if (is_new_child_status (lp->waitstatus.kind ()))
    return lp->waitstatus;

  thread_info *tp = linux_target->find_thread (lp->ptid);

  /* NOTE(review): TP is dereferenced without a null check; presumably
     callers guarantee the thread still exists -- confirm.  */

  /* Check in thread_info::pending_waitstatus.  */
  if (tp->has_pending_waitstatus ()
      && is_new_child_status (tp->pending_waitstatus ().kind ()))
    return tp->pending_waitstatus ();

  /* Check in thread_info::pending_follow.  */
  if (is_new_child_status (tp->pending_follow.kind ()))
    return tp->pending_follow;

  return {};
}
1441
/* Detach from LP.  If SIGNO_P is non-NULL, then it points to the
   signal number that should be passed to the LWP when detaching.
   Otherwise pass any pending signal the LWP may have, if any.
   Deletes LP's bookkeeping in all cases.  */

static void
detach_one_lwp (struct lwp_info *lp, int *signo_p)
{
  int lwpid = lp->ptid.lwp ();
  int signo;

  /* If the lwp/thread we are about to detach has a pending fork/clone
     event, there is a process/thread GDB is attached to that the core
     of GDB doesn't know about.  Detach from it.  */

  std::optional<target_waitstatus> ws = get_pending_child_status (lp);
  if (ws.has_value ())
    detach_one_pid (ws->child_ptid ().lwp (), 0);

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lp->signalled)
    {
      linux_nat_debug_printf ("Sending SIGCONT to %s",
			      lp->ptid.to_string ().c_str ());

      kill_lwp (lwpid, SIGCONT);
      lp->signalled = 0;
    }

  /* If the lwp has exited or was terminated due to a signal, there's
     nothing left to do.  */
  if (is_lwp_marked_dead (lp))
    {
      linux_nat_debug_printf
	("Can't detach %s - it has exited or was terminated: %s.",
	 lp->ptid.to_string ().c_str (),
	 lp->waitstatus.to_string ().c_str ());
      delete_lwp (lp->ptid);
      return;
    }

  if (signo_p == NULL)
    {
      /* Pass on any pending signal for this LWP.  */
      signo = get_detach_signal (lp);
    }
  else
    signo = *signo_p;

  linux_nat_debug_printf ("preparing to resume lwp %s (stopped = %d)",
			  lp->ptid.to_string ().c_str (),
			  lp->stopped);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      linux_target->low_prepare_to_resume (lp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lp))
	throw;
    }

  detach_one_pid (lwpid, signo);

  delete_lwp (lp->ptid);
}
1511
1512 static int
1513 detach_callback (struct lwp_info *lp)
1514 {
1515 /* We don't actually detach from the thread group leader just yet.
1516 If the thread group exits, we must reap the zombie clone lwps
1517 before we're able to reap the leader. */
1518 if (lp->ptid.lwp () != lp->ptid.pid ())
1519 detach_one_lwp (lp, NULL);
1520 return 0;
1521 }
1522
/* Implement the "detach" target_ops method.  Stop all LWPs of INF,
   detach from each of them, and release the /proc/PID/mem file.  */

void
linux_nat_target::detach (inferior *inf, int from_tty)
{
  LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;

  struct lwp_info *main_lwp;
  int pid = inf->pid;

  /* Don't unregister from the event loop, as there may be other
     inferiors running.  */

  /* Stop all threads before detaching.  ptrace requires that the
     thread is stopped to successfully detach.  */
  iterate_over_lwps (ptid_t (pid), stop_callback);
  /* ... and wait until all of them have reported back that
     they're no longer running.  */
  iterate_over_lwps (ptid_t (pid), stop_wait_callback);

  /* We can now safely remove breakpoints.  We don't do this earlier
     in common code because this target doesn't currently support
     writing memory while the inferior is running.
     NOTE(review): assumes INF is the current inferior -- confirm with
     callers.  */
  remove_breakpoints_inf (current_inferior ());

  iterate_over_lwps (ptid_t (pid), detach_callback);

  /* We have detached from everything except the main thread now, so
     should only have one thread left.  However, in non-stop mode the
     main thread might have exited, in which case we'll have no threads
     left.  */
  gdb_assert (num_lwps (pid) == 1
	      || (target_is_non_stop_p () && num_lwps (pid) == 0));

  if (pid == inferior_ptid.pid () && forks_exist_p ())
    {
      /* Multi-fork case.  The current inferior_ptid is being detached
	 from, but there are other viable forks to debug.  Detach from
	 the current fork, and context-switch to the first
	 available.  */
      linux_fork_detach (from_tty, find_lwp_pid (ptid_t (pid)));
    }
  else
    {
      target_announce_detach (from_tty);

      /* In non-stop mode it is possible that the main thread has exited,
	 in which case we don't try to detach.  */
      main_lwp = find_lwp_pid (ptid_t (pid));
      if (main_lwp != nullptr)
	{
	  /* Pass on any pending signal for the last LWP.  */
	  int signo = get_detach_signal (main_lwp);

	  detach_one_lwp (main_lwp, &signo);
	}
      else
	gdb_assert (target_is_non_stop_p ());

      detach_success (inf);
    }

  close_proc_mem_file (pid);
}
1585
/* Resume execution of the inferior process.  If STEP is nonzero,
   single-step it.  If SIGNO is not GDB_SIGNAL_0, give it that signal.
   Throws on ptrace errors; see linux_resume_one_lwp for the
   non-throwing variant.  */

static void
linux_resume_one_lwp_throw (struct lwp_info *lp, int step,
			    enum gdb_signal signo)
{
  lp->step = step;

  /* stop_pc doubles as the PC the LWP had when it was last resumed.
     We only presently need that if the LWP is stepped though (to
     handle the case of stepping a breakpoint instruction).  */
  if (step)
    {
      struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);

      lp->stop_pc = regcache_read_pc (regcache);
    }
  else
    lp->stop_pc = 0;

  linux_target->low_prepare_to_resume (lp);
  linux_target->low_resume (lp->ptid, step, signo);

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lp->stopped = 0;
  lp->core = -1;
  lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
  registers_changed_ptid (linux_target, lp->ptid);
}
1621
1622 /* Called when we try to resume a stopped LWP and that errors out. If
1623 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
1624 or about to become), discard the error, clear any pending status
1625 the LWP may have, and return true (we'll collect the exit status
1626 soon enough). Otherwise, return false. */
1627
1628 static int
1629 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
1630 {
1631 /* If we get an error after resuming the LWP successfully, we'd
1632 confuse !T state for the LWP being gone. */
1633 gdb_assert (lp->stopped);
1634
1635 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
1636 because even if ptrace failed with ESRCH, the tracee may be "not
1637 yet fully dead", but already refusing ptrace requests. In that
1638 case the tracee has 'R (Running)' state for a little bit
1639 (observed in Linux 3.18). See also the note on ESRCH in the
1640 ptrace(2) man page. Instead, check whether the LWP has any state
1641 other than ptrace-stopped. */
1642
1643 /* Don't assume anything if /proc/PID/status can't be read. */
1644 if (linux_proc_pid_is_trace_stopped_nowarn (lp->ptid.lwp ()) == 0)
1645 {
1646 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
1647 lp->status = 0;
1648 lp->waitstatus.set_ignore ();
1649 return 1;
1650 }
1651 return 0;
1652 }
1653
1654 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
1655 disappears while we try to resume it. */
1656
1657 static void
1658 linux_resume_one_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1659 {
1660 try
1661 {
1662 linux_resume_one_lwp_throw (lp, step, signo);
1663 }
1664 catch (const gdb_exception_error &ex)
1665 {
1666 if (!check_ptrace_stopped_lwp_gone (lp))
1667 throw;
1668 }
1669 }
1670
1671 /* Resume LP. */
1672
1673 static void
1674 resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1675 {
1676 if (lp->stopped)
1677 {
1678 struct inferior *inf = find_inferior_ptid (linux_target, lp->ptid);
1679
1680 if (inf->vfork_child != NULL)
1681 {
1682 linux_nat_debug_printf ("Not resuming sibling %s (vfork parent)",
1683 lp->ptid.to_string ().c_str ());
1684 }
1685 else if (!lwp_status_pending_p (lp))
1686 {
1687 linux_nat_debug_printf ("Resuming sibling %s, %s, %s",
1688 lp->ptid.to_string ().c_str (),
1689 (signo != GDB_SIGNAL_0
1690 ? strsignal (gdb_signal_to_host (signo))
1691 : "0"),
1692 step ? "step" : "resume");
1693
1694 linux_resume_one_lwp (lp, step, signo);
1695 }
1696 else
1697 {
1698 linux_nat_debug_printf ("Not resuming sibling %s (has pending)",
1699 lp->ptid.to_string ().c_str ());
1700 }
1701 }
1702 else
1703 linux_nat_debug_printf ("Not resuming sibling %s (not stopped)",
1704 lp->ptid.to_string ().c_str ());
1705 }
1706
1707 /* Callback for iterate_over_lwps. If LWP is EXCEPT, do nothing.
1708 Resume LWP with the last stop signal, if it is in pass state. */
1709
1710 static int
1711 linux_nat_resume_callback (struct lwp_info *lp, struct lwp_info *except)
1712 {
1713 enum gdb_signal signo = GDB_SIGNAL_0;
1714
1715 if (lp == except)
1716 return 0;
1717
1718 if (lp->stopped)
1719 {
1720 struct thread_info *thread;
1721
1722 thread = linux_target->find_thread (lp->ptid);
1723 if (thread != NULL)
1724 {
1725 signo = thread->stop_signal ();
1726 thread->set_stop_signal (GDB_SIGNAL_0);
1727 }
1728 }
1729
1730 resume_lwp (lp, 0, signo);
1731 return 0;
1732 }
1733
1734 static int
1735 resume_clear_callback (struct lwp_info *lp)
1736 {
1737 lp->resumed = 0;
1738 lp->last_resume_kind = resume_stop;
1739 return 0;
1740 }
1741
1742 static int
1743 resume_set_callback (struct lwp_info *lp)
1744 {
1745 lp->resumed = 1;
1746 lp->last_resume_kind = resume_continue;
1747 return 0;
1748 }
1749
/* Implement the "resume" target_ops method.  Resume the LWPs matched
   by SCOPE_PTID; single-step the event thread (inferior_ptid's LWP)
   if STEP, delivering SIGNO to it.  If the event thread already has a
   pending status, short-circuit and let linux_nat_wait report it.  */

void
linux_nat_target::resume (ptid_t scope_ptid, int step, enum gdb_signal signo)
{
  struct lwp_info *lp;

  linux_nat_debug_printf ("Preparing to %s %s, %s, inferior_ptid %s",
			  step ? "step" : "resume",
			  scope_ptid.to_string ().c_str (),
			  (signo != GDB_SIGNAL_0
			   ? strsignal (gdb_signal_to_host (signo)) : "0"),
			  inferior_ptid.to_string ().c_str ());

  /* Mark the lwps we're resuming as resumed and update their
     last_resume_kind to resume_continue.  */
  iterate_over_lwps (scope_ptid, resume_set_callback);

  lp = find_lwp_pid (inferior_ptid);
  gdb_assert (lp != NULL);

  /* Remember if we're stepping.  */
  lp->last_resume_kind = step ? resume_step : resume_continue;

  /* If we have a pending wait status for this thread, there is no
     point in resuming the process.  But first make sure that
     linux_nat_wait won't preemptively handle the event - we
     should never take this short-circuit if we are going to
     leave LP running, since we have skipped resuming all the
     other threads.  This bit of code needs to be synchronized
     with linux_nat_wait.  */

  if (lp->status && WIFSTOPPED (lp->status))
    {
      if (!lp->step
	  && WSTOPSIG (lp->status)
	  && sigismember (&pass_mask, WSTOPSIG (lp->status)))
	{
	  linux_nat_debug_printf
	    ("Not short circuiting for ignored status 0x%x", lp->status);

	  /* FIXME: What should we do if we are supposed to continue
	     this thread with a signal?  */
	  gdb_assert (signo == GDB_SIGNAL_0);
	  signo = gdb_signal_from_host (WSTOPSIG (lp->status));
	  lp->status = 0;
	}
    }

  if (lwp_status_pending_p (lp))
    {
      /* FIXME: What should we do if we are supposed to continue
	 this thread with a signal?  */
      gdb_assert (signo == GDB_SIGNAL_0);

      linux_nat_debug_printf ("Short circuiting for status %s",
			      pending_status_str (lp).c_str ());

      if (target_can_async_p ())
	{
	  target_async (true);
	  /* Tell the event loop we have something to process.  */
	  async_file_mark ();
	}
      return;
    }

  /* No use iterating unless we're resuming other threads.  */
  if (scope_ptid != lp->ptid)
    iterate_over_lwps (scope_ptid, [=] (struct lwp_info *info)
      {
	return linux_nat_resume_callback (info, lp);
      });

  linux_nat_debug_printf ("%s %s, %s (resume event thread)",
			  step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			  lp->ptid.to_string ().c_str (),
			  (signo != GDB_SIGNAL_0
			   ? strsignal (gdb_signal_to_host (signo)) : "0"));

  linux_resume_one_lwp (lp, step, signo);
}
1830
1831 /* Send a signal to an LWP. */
1832
1833 static int
1834 kill_lwp (int lwpid, int signo)
1835 {
1836 int ret;
1837
1838 errno = 0;
1839 ret = syscall (__NR_tkill, lwpid, signo);
1840 if (errno == ENOSYS)
1841 {
1842 /* If tkill fails, then we are not using nptl threads, a
1843 configuration we no longer support. */
1844 perror_with_name (("tkill"));
1845 }
1846 return ret;
1847 }
1848
/* Handle a GNU/Linux syscall trap wait response.  If we see a syscall
   event, check if the core is interested in it: if not, ignore the
   event, and keep waiting; otherwise, we need to toggle the LWP's
   syscall entry/exit status, since the ptrace event itself doesn't
   indicate it, and report the trap to higher layers.  Returns nonzero
   if the event was consumed here (caller should keep waiting), zero
   if it should be reported to the core.  */

static int
linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
{
  struct target_waitstatus *ourstatus = &lp->waitstatus;
  struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
  thread_info *thread = linux_target->find_thread (lp->ptid);
  int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, thread);

  if (stopping)
    {
      /* If we're stopping threads, there's a SIGSTOP pending, which
	 makes it so that the LWP reports an immediate syscall return,
	 followed by the SIGSTOP.  Skip seeing that "return" using
	 PTRACE_CONT directly, and let stop_wait_callback collect the
	 SIGSTOP.  Later when the thread is resumed, we'll see a new
	 syscall entry event.  If we didn't do this (and returned 0),
	 we'd leave a syscall entry pending, and our caller, by using
	 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
	 itself.  Later, when the user re-resumes this LWP, we'd see
	 another syscall entry event and we'd mistake it for a return.

	 If stop_wait_callback didn't force the SIGSTOP out of the LWP
	 (leaving immediately with LWP->signalled set, without issuing
	 a PTRACE_CONT), it would still be problematic to leave this
	 syscall enter pending, as later when the thread is resumed,
	 it would then see the same syscall exit mentioned above,
	 followed by the delayed SIGSTOP, while the syscall didn't
	 actually get to execute.  It seems it would be even more
	 confusing to the user.  */

      linux_nat_debug_printf
	("ignoring syscall %d for LWP %ld (stopping threads), resuming with "
	 "PTRACE_CONT for SIGSTOP", syscall_number, lp->ptid.lwp ());

      lp->syscall_state = TARGET_WAITKIND_IGNORE;
      ptrace (PTRACE_CONT, lp->ptid.lwp (), 0, 0);
      lp->stopped = 0;
      return 1;
    }

  /* Always update the entry/return state, even if this particular
     syscall isn't interesting to the core now.  In async mode,
     the user could install a new catchpoint for this syscall
     between syscall enter/return, and we'll need to know to
     report a syscall return if that happens.  */
  lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
		       ? TARGET_WAITKIND_SYSCALL_RETURN
		       : TARGET_WAITKIND_SYSCALL_ENTRY);

  if (catch_syscall_enabled ())
    {
      if (catching_syscall_number (syscall_number))
	{
	  /* Alright, an event to report.  */
	  if (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
	    ourstatus->set_syscall_entry (syscall_number);
	  else if (lp->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
	    ourstatus->set_syscall_return (syscall_number);
	  else
	    gdb_assert_not_reached ("unexpected syscall state");

	  linux_nat_debug_printf
	    ("stopping for %s of syscall %d for LWP %ld",
	     (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	      ? "entry" : "return"), syscall_number, lp->ptid.lwp ());

	  return 0;
	}

      linux_nat_debug_printf
	("ignoring %s of syscall %d for LWP %ld",
	 (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	  ? "entry" : "return"), syscall_number, lp->ptid.lwp ());
    }
  else
    {
      /* If we had been syscall tracing, and hence used PT_SYSCALL
	 before on this LWP, it could happen that the user removes all
	 syscall catchpoints before we get to process this event.
	 There are two noteworthy issues here:

	 - When stopped at a syscall entry event, resuming with
	   PT_STEP still resumes executing the syscall and reports a
	   syscall return.

	 - Only PT_SYSCALL catches syscall enters.  If we last
	   single-stepped this thread, then this event can't be a
	   syscall enter.  If we last single-stepped this thread, this
	   has to be a syscall exit.

	 The points above mean that the next resume, be it PT_STEP or
	 PT_CONTINUE, can not trigger a syscall trace event.  */
      linux_nat_debug_printf
	("caught syscall event with no syscall catchpoints. %d for LWP %ld, "
	 "ignoring", syscall_number, lp->ptid.lwp ());
      lp->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* The core isn't interested in this event.  For efficiency, avoid
     stopping all threads only to have the core resume them all again.
     Since we're not stopping threads, if we're still syscall tracing
     and not stepping, we can't use PTRACE_CONT here, as we'd miss any
     subsequent syscall.  Simply resume using the inf-ptrace layer,
     which knows when to use PT_SYSCALL or PT_CONTINUE.  */

  linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
  return 1;
}
1963
/* See target.h.  Record a new clone child CHILD_PTID: create its
   lwp_info, add it to GDB's thread list (directly, or via thread_db),
   and pick up the initial stop status stashed in STOPPED_PIDS.  */

void
linux_nat_target::follow_clone (ptid_t child_ptid)
{
  lwp_info *new_lp = add_lwp (child_ptid);
  new_lp->stopped = 1;

  /* If the thread_db layer is active, let it record the user
     level thread id and status, and add the thread to GDB's
     list.  */
  if (!thread_db_notice_clone (inferior_ptid, new_lp->ptid))
    {
      /* The process is not using thread_db.  Add the LWP to
	 GDB's list.  */
      add_thread (linux_target, new_lp->ptid);
    }

  /* We just created NEW_LP so it cannot yet contain STATUS.  */
  gdb_assert (new_lp->status == 0);

  if (!pull_pid_from_list (&stopped_pids, child_ptid.lwp (), &new_lp->status))
    internal_error (_("no saved status for clone lwp"));

  if (WSTOPSIG (new_lp->status) != SIGSTOP)
    {
      /* This can happen if someone starts sending signals to
	 the new thread before it gets a chance to run, which
	 have a lower number than SIGSTOP (e.g. SIGUSR1).
	 This is an unlikely case, and harder to handle for
	 fork / vfork than for clone, so we do not try - but
	 we handle it for clone events here.  */

      new_lp->signalled = 1;

      /* Save the wait status to report later.  */
      linux_nat_debug_printf
	("waitpid of new LWP %ld, saving status %s",
	 (long) new_lp->ptid.lwp (), status_to_str (new_lp->status).c_str ());
    }
  else
    {
      new_lp->status = 0;

      if (report_thread_events)
	new_lp->waitstatus.set_thread_created ();
    }
}
2012
/* Handle a GNU/Linux extended wait response for LP, whose raw wait
   STATUS carried an extended ptrace event.  If we see a clone event,
   we need to add the new LWP to our list (and not report the trap to
   higher layers).  This function returns non-zero if the event should
   be ignored and we should wait again; it returns zero when it has
   recorded an event in LP->waitstatus that should be reported to the
   core.  */

static int
linux_handle_extended_wait (struct lwp_info *lp, int status)
{
  int pid = lp->ptid.lwp ();
  struct target_waitstatus *ourstatus = &lp->waitstatus;
  int event = linux_ptrace_get_extended_event (status);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  lp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
      || event == PTRACE_EVENT_CLONE)
    {
      unsigned long new_pid;
      int ret;

      /* Retrieve the new child's LWP id from the event message.  */
      ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */
	  ret = my_waitpid (new_pid, &status, __WALL);
	  if (ret == -1)
	    perror_with_name (_("waiting for new child"));
	  else if (ret != new_pid)
	    internal_error (_("wait returned unexpected PID %d"), ret);
	  else if (!WIFSTOPPED (status))
	    internal_error (_("wait returned unexpected status 0x%x"), status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  /* A fork/vfork child has its own address space; open a
	     /proc/PID/mem file for it.  */
	  open_proc_mem_file (ptid_t (new_pid, new_pid));

	  /* The arch-specific native code may need to know about new
	     forks even if those end up never mapped to an
	     inferior.  */
	  linux_target->low_new_fork (lp, new_pid);
	}
      else if (event == PTRACE_EVENT_CLONE)
	{
	  linux_target->low_new_clone (lp, new_pid);
	}

      if (event == PTRACE_EVENT_FORK
	  && linux_fork_checkpointing_p (lp->ptid.pid ()))
	{
	  /* Handle checkpointing by linux-fork.c here as a special
	     case.  We don't want the follow-fork-mode or 'catch fork'
	     to interfere with this.  */

	  /* This won't actually modify the breakpoint list, but will
	     physically remove the breakpoints from the child.  */
	  detach_breakpoints (ptid_t (new_pid, new_pid));

	  /* Retain child fork in ptrace (stopped) state.  */
	  if (!find_fork_pid (new_pid))
	    add_fork (new_pid);

	  /* Report as spurious, so that infrun doesn't want to follow
	     this fork.  We're actually doing an infcall in
	     linux-fork.c.  */
	  ourstatus->set_spurious ();

	  /* Report the stop to the core.  */
	  return 0;
	}

      if (event == PTRACE_EVENT_FORK)
	ourstatus->set_forked (ptid_t (new_pid, new_pid));
      else if (event == PTRACE_EVENT_VFORK)
	ourstatus->set_vforked (ptid_t (new_pid, new_pid));
      else if (event == PTRACE_EVENT_CLONE)
	{
	  linux_nat_debug_printf
	    ("Got clone event from LWP %d, new child is LWP %ld", pid, new_pid);

	  /* Save the status again, we'll use it in follow_clone.  */
	  add_to_pid_list (&stopped_pids, new_pid, status);

	  ourstatus->set_thread_cloned (ptid_t (lp->ptid.pid (), new_pid));
	}

      /* In all three cases an event was recorded in LP->waitstatus;
	 let the core see it.  */
      return 0;
    }

  if (event == PTRACE_EVENT_EXEC)
    {
      linux_nat_debug_printf ("Got exec event from LWP %ld", lp->ptid.lwp ());

      /* Close the previous /proc/PID/mem file for this inferior,
	 which was using the address space which is now gone.
	 Reading/writing from this file would return 0/EOF.  */
      close_proc_mem_file (lp->ptid.pid ());

      /* Open a new file for the new address space.  */
      open_proc_mem_file (lp->ptid);

      ourstatus->set_execd
	(make_unique_xstrdup (linux_proc_pid_to_exec_file (pid)));

      /* The thread that execed must have been resumed, but, when a
	 thread execs, it changes its tid to the tgid, and the old
	 tgid thread might have not been resumed.  */
      lp->resumed = 1;

      /* All other LWPs are gone now.  We'll have received a thread
	 exit notification for all threads other the execing one.
	 That one, if it wasn't the leader, just silently changes its
	 tid to the tgid, and the previous leader vanishes.  Since
	 Linux 3.0, the former thread ID can be retrieved with
	 PTRACE_GETEVENTMSG, but since we support older kernels, don't
	 bother with it, and just walk the LWP list.  Even with
	 PTRACE_GETEVENTMSG, we'd still need to lookup the
	 corresponding LWP object, and it would be an extra ptrace
	 syscall, so this way may even be more efficient.  */
      for (lwp_info *other_lp : all_lwps_safe ())
	if (other_lp != lp && other_lp->ptid.pid () == lp->ptid.pid ())
	  exit_lwp (other_lp);

      return 0;
    }

  if (event == PTRACE_EVENT_VFORK_DONE)
    {
      linux_nat_debug_printf
	("Got PTRACE_EVENT_VFORK_DONE from LWP %ld",
	 lp->ptid.lwp ());
      ourstatus->set_vfork_done ();
      return 0;
    }

  internal_error (_("unknown ptrace event %d"), event);
}
2157
2158 /* Suspend waiting for a signal. We're mostly interested in
2159 SIGCHLD/SIGINT. */
2160
2161 static void
2162 wait_for_signal ()
2163 {
2164 linux_nat_debug_printf ("about to sigsuspend");
2165 sigsuspend (&suspend_mask);
2166
2167 /* If the quit flag is set, it means that the user pressed Ctrl-C
2168 and we're debugging a process that is running on a separate
2169 terminal, so we must forward the Ctrl-C to the inferior. (If the
2170 inferior is sharing GDB's terminal, then the Ctrl-C reaches the
2171 inferior directly.) We must do this here because functions that
2172 need to block waiting for a signal loop forever until there's an
2173 event to report before returning back to the event loop. */
2174 if (!target_terminal::is_ours ())
2175 {
2176 if (check_quit_flag ())
2177 target_pass_ctrlc ();
2178 }
2179 }
2180
2181 /* Mark LWP dead, with STATUS as exit status pending to report
2182 later. */
2183
2184 static void
2185 mark_lwp_dead (lwp_info *lp, int status)
2186 {
2187 /* Store the exit status lp->waitstatus, because lp->status would be
2188 ambiguous (W_EXITCODE(0,0) == 0). */
2189 lp->waitstatus = host_status_to_waitstatus (status);
2190
2191 /* If we're processing LP's status, there should be no other event
2192 already recorded as pending. */
2193 gdb_assert (lp->status == 0);
2194
2195 /* Dead LWPs aren't expected to report a pending sigstop. */
2196 lp->signalled = 0;
2197
2198 /* Prevent trying to stop it. */
2199 lp->stopped = 1;
2200 }
2201
2202 /* Return true if LP is dead, with a pending exit/signalled event. */
2203
2204 static bool
2205 is_lwp_marked_dead (lwp_info *lp)
2206 {
2207 switch (lp->waitstatus.kind ())
2208 {
2209 case TARGET_WAITKIND_EXITED:
2210 case TARGET_WAITKIND_THREAD_EXITED:
2211 case TARGET_WAITKIND_SIGNALLED:
2212 return true;
2213 }
2214 return false;
2215 }
2216
/* Wait for LP to stop.  Returns the wait status, or 0 if the LWP has
   exited (in which case LP has either been deleted, or marked dead
   with the exit status pending).  May recurse via
   linux_handle_syscall_trap.  */

static int
wait_lwp (struct lwp_info *lp)
{
  pid_t pid;
  int status = 0;
  int thread_dead = 0;
  sigset_t prev_mask;

  /* The caller expects LP to be running with no event already
     collected for it.  */
  gdb_assert (!lp->stopped);
  gdb_assert (lp->status == 0);

  /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below.  */
  block_child_signals (&prev_mask);

  for (;;)
    {
      pid = my_waitpid (lp->ptid.lwp (), &status, __WALL | WNOHANG);
      if (pid == -1 && errno == ECHILD)
	{
	  /* The thread has previously exited.  We need to delete it
	     now because if this was a non-leader thread execing, we
	     won't get an exit event.  See comments on exec events at
	     the top of the file.  */
	  thread_dead = 1;
	  linux_nat_debug_printf ("%s vanished.",
				  lp->ptid.to_string ().c_str ());
	}
      if (pid != 0)
	break;

      /* pid == 0 means WNOHANG found nothing ready yet.  */

      /* Bugs 10970, 12702.
	 Thread group leader may have exited in which case we'll lock up in
	 waitpid if there are other threads, even if they are all zombies too.
	 Basically, we're not supposed to use waitpid this way.
	 tkill(pid,0) cannot be used here as it gets ESRCH for both
	 for zombie and running processes.

	 As a workaround, check if we're waiting for the thread group leader and
	 if it's a zombie, and avoid calling waitpid if it is.

	 This is racy, what if the tgl becomes a zombie right after we check?
	 Therefore always use WNOHANG with sigsuspend - it is equivalent to
	 waiting waitpid but linux_proc_pid_is_zombie is safe this way.  */

      if (lp->ptid.pid () == lp->ptid.lwp ()
	  && linux_proc_pid_is_zombie (lp->ptid.lwp ()))
	{
	  thread_dead = 1;
	  linux_nat_debug_printf ("Thread group leader %s vanished.",
				  lp->ptid.to_string ().c_str ());
	  break;
	}

      /* Wait for next SIGCHLD and try again.  This may let SIGCHLD handlers
	 get invoked despite our caller had them intentionally blocked by
	 block_child_signals.  This is sensitive only to the loop of
	 linux_nat_wait_1 and there if we get called my_waitpid gets called
	 again before it gets to sigsuspend so we can safely let the handlers
	 get executed here.  */
      wait_for_signal ();
    }

  restore_child_signals_mask (&prev_mask);

  if (!thread_dead)
    {
      gdb_assert (pid == lp->ptid.lwp ());

      linux_nat_debug_printf ("waitpid %s received %s",
			      lp->ptid.to_string ().c_str (),
			      status_to_str (status).c_str ());

      /* Check if the thread has exited.  */
      if (WIFEXITED (status) || WIFSIGNALED (status))
	{
	  if (report_exit_events_for (lp) || is_leader (lp))
	    {
	      linux_nat_debug_printf ("LWP %d exited.", lp->ptid.pid ());

	      /* If this is the leader exiting, it means the whole
		 process is gone.  Store the status to report to the
		 core.  */
	      mark_lwp_dead (lp, status);
	      return 0;
	    }

	  /* A non-leader thread exited and nobody asked for thread
	     exit events; just delete it below.  */
	  thread_dead = 1;
	  linux_nat_debug_printf ("%s exited.",
				  lp->ptid.to_string ().c_str ());
	}
    }

  if (thread_dead)
    {
      exit_lwp (lp);
      return 0;
    }

  gdb_assert (WIFSTOPPED (status));
  lp->stopped = 1;

  /* If we attached to this LWP before it reported its first stop, we
     could not have set the ptrace event options yet; do it now.  */
  if (lp->must_set_ptrace_flags)
    {
      inferior *inf = find_inferior_pid (linux_target, lp->ptid.pid ());
      int options = linux_nat_ptrace_options (inf->attach_flag);

      linux_enable_event_reporting (lp->ptid.lwp (), options);
      lp->must_set_ptrace_flags = 0;
    }

  /* Handle GNU/Linux's syscall SIGTRAPs.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
    {
      /* No longer need the sysgood bit.  The ptrace event ends up
	 recorded in lp->waitstatus if we care for it.  We can carry
	 on handling the event like a regular SIGTRAP from here
	 on.  */
      status = W_STOPCODE (SIGTRAP);
      if (linux_handle_syscall_trap (lp, 1))
	return wait_lwp (lp);
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in linux_handle_extended_wait.  */
      lp->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Handle GNU/Linux's extended waitstatus for trace events.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
      && linux_is_extended_waitstatus (status))
    {
      linux_nat_debug_printf ("Handling extended status 0x%06x", status);
      linux_handle_extended_wait (lp, status);
      return 0;
    }

  return status;
}
2359
2360 /* Send a SIGSTOP to LP. */
2361
2362 static int
2363 stop_callback (struct lwp_info *lp)
2364 {
2365 if (!lp->stopped && !lp->signalled)
2366 {
2367 int ret;
2368
2369 linux_nat_debug_printf ("kill %s **<SIGSTOP>**",
2370 lp->ptid.to_string ().c_str ());
2371
2372 errno = 0;
2373 ret = kill_lwp (lp->ptid.lwp (), SIGSTOP);
2374 linux_nat_debug_printf ("lwp kill %d %s", ret,
2375 errno ? safe_strerror (errno) : "ERRNO-OK");
2376
2377 lp->signalled = 1;
2378 gdb_assert (lp->status == 0);
2379 }
2380
2381 return 0;
2382 }
2383
/* Request a stop on LWP.  Exported entry point; simply delegates to
   stop_callback, which sends LWP a SIGSTOP unless it is already
   stopped or has already been signalled.  */

void
linux_stop_lwp (struct lwp_info *lwp)
{
  stop_callback (lwp);
}
2391
2392 /* See linux-nat.h */
2393
2394 void
2395 linux_stop_and_wait_all_lwps (void)
2396 {
2397 /* Stop all LWP's ... */
2398 iterate_over_lwps (minus_one_ptid, stop_callback);
2399
2400 /* ... and wait until all of them have reported back that
2401 they're no longer running. */
2402 iterate_over_lwps (minus_one_ptid, stop_wait_callback);
2403 }
2404
2405 /* See linux-nat.h */
2406
2407 void
2408 linux_unstop_all_lwps (void)
2409 {
2410 iterate_over_lwps (minus_one_ptid,
2411 [] (struct lwp_info *info)
2412 {
2413 return resume_stopped_resumed_lwps (info, minus_one_ptid);
2414 });
2415 }
2416
2417 /* Return non-zero if LWP PID has a pending SIGINT. */
2418
2419 static int
2420 linux_nat_has_pending_sigint (int pid)
2421 {
2422 sigset_t pending, blocked, ignored;
2423
2424 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2425
2426 if (sigismember (&pending, SIGINT)
2427 && !sigismember (&ignored, SIGINT))
2428 return 1;
2429
2430 return 0;
2431 }
2432
2433 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
2434
2435 static int
2436 set_ignore_sigint (struct lwp_info *lp)
2437 {
2438 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2439 flag to consume the next one. */
2440 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2441 && WSTOPSIG (lp->status) == SIGINT)
2442 lp->status = 0;
2443 else
2444 lp->ignore_sigint = 1;
2445
2446 return 0;
2447 }
2448
2449 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2450 This function is called after we know the LWP has stopped; if the LWP
2451 stopped before the expected SIGINT was delivered, then it will never have
2452 arrived. Also, if the signal was delivered to a shared queue and consumed
2453 by a different thread, it will never be delivered to this LWP. */
2454
2455 static void
2456 maybe_clear_ignore_sigint (struct lwp_info *lp)
2457 {
2458 if (!lp->ignore_sigint)
2459 return;
2460
2461 if (!linux_nat_has_pending_sigint (lp->ptid.lwp ()))
2462 {
2463 linux_nat_debug_printf ("Clearing bogus flag for %s",
2464 lp->ptid.to_string ().c_str ());
2465 lp->ignore_sigint = 0;
2466 }
2467 }
2468
2469 /* Fetch the possible triggered data watchpoint info and store it in
2470 LP.
2471
2472 On some archs, like x86, that use debug registers to set
2473 watchpoints, it's possible that the way to know which watched
2474 address trapped, is to check the register that is used to select
2475 which address to watch. Problem is, between setting the watchpoint
2476 and reading back which data address trapped, the user may change
2477 the set of watchpoints, and, as a consequence, GDB changes the
2478 debug registers in the inferior. To avoid reading back a stale
2479 stopped-data-address when that happens, we cache in LP the fact
2480 that a watchpoint trapped, and the corresponding data address, as
2481 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2482 registers meanwhile, we have the cached data we can rely on. */
2483
2484 static int
2485 check_stopped_by_watchpoint (struct lwp_info *lp)
2486 {
2487 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
2488 inferior_ptid = lp->ptid;
2489
2490 if (linux_target->low_stopped_by_watchpoint ())
2491 {
2492 lp->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2493 lp->stopped_data_address_p
2494 = linux_target->low_stopped_data_address (&lp->stopped_data_address);
2495 }
2496
2497 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2498 }
2499
2500 /* Returns true if the LWP had stopped for a watchpoint. */
2501
2502 bool
2503 linux_nat_target::stopped_by_watchpoint ()
2504 {
2505 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2506
2507 gdb_assert (lp != NULL);
2508
2509 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2510 }
2511
2512 bool
2513 linux_nat_target::stopped_data_address (CORE_ADDR *addr_p)
2514 {
2515 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2516
2517 gdb_assert (lp != NULL);
2518
2519 *addr_p = lp->stopped_data_address;
2520
2521 return lp->stopped_data_address_p;
2522 }
2523
2524 /* Commonly any breakpoint / watchpoint generate only SIGTRAP. */
2525
2526 bool
2527 linux_nat_target::low_status_is_event (int status)
2528 {
2529 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2530 }
2531
/* Wait until LP is stopped.  Recurses if the stop turns out to be a
   SIGINT we were told to ignore.  Always returns 0 so it can be used
   with iterate_over_lwps without short-circuiting.  */

static int
stop_wait_callback (struct lwp_info *lp)
{
  inferior *inf = find_inferior_ptid (linux_target, lp->ptid);

  /* If this is a vfork parent, bail out, it is not going to report
     any SIGSTOP until the vfork is done with.  */
  if (inf->vfork_child != NULL)
    return 0;

  if (!lp->stopped)
    {
      int status;

      status = wait_lwp (lp);
      /* A zero status means the LWP exited; nothing more to wait
	 for.  */
      if (status == 0)
	return 0;

      if (lp->ignore_sigint && WIFSTOPPED (status)
	  && WSTOPSIG (status) == SIGINT)
	{
	  /* This is the SIGINT we were told to swallow (see
	     set_ignore_sigint).  Discard it, re-resume the LWP, and
	     wait for the next stop instead.  */
	  lp->ignore_sigint = 0;

	  errno = 0;
	  ptrace (PTRACE_CONT, lp->ptid.lwp (), 0, 0);
	  lp->stopped = 0;
	  linux_nat_debug_printf
	    ("PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)",
	     lp->ptid.to_string ().c_str (),
	     errno ? safe_strerror (errno) : "OK");

	  return stop_wait_callback (lp);
	}

      maybe_clear_ignore_sigint (lp);

      if (WSTOPSIG (status) != SIGSTOP)
	{
	  /* The thread was stopped with a signal other than SIGSTOP.  */

	  linux_nat_debug_printf ("Pending event %s in %s",
				  status_to_str ((int) status).c_str (),
				  lp->ptid.to_string ().c_str ());

	  /* Save the sigtrap event.  */
	  lp->status = status;
	  gdb_assert (lp->signalled);
	  save_stop_reason (lp);
	}
      else
	{
	  /* We caught the SIGSTOP that we intended to catch.  */

	  linux_nat_debug_printf ("Expected SIGSTOP caught for %s.",
				  lp->ptid.to_string ().c_str ());

	  lp->signalled = 0;

	  /* If we are waiting for this stop so we can report the thread
	     stopped then we need to record this status.  Otherwise, we can
	     now discard this stop event.  */
	  if (lp->last_resume_kind == resume_stop)
	    {
	      lp->status = status;
	      save_stop_reason (lp);
	    }
	}
    }

  return 0;
}
2605
2606 /* Get the inferior associated to LWP. Must be called with an LWP that has
2607 an associated inferior. Always return non-nullptr. */
2608
2609 static inferior *
2610 lwp_inferior (const lwp_info *lwp)
2611 {
2612 inferior *inf = find_inferior_ptid (linux_target, lwp->ptid);
2613 gdb_assert (inf != nullptr);
2614 return inf;
2615 }
2616
2617 /* Return non-zero if LP has a wait status pending. Discard the
2618 pending event and resume the LWP if the event that originally
2619 caused the stop became uninteresting. */
2620
2621 static int
2622 status_callback (struct lwp_info *lp)
2623 {
2624 /* Only report a pending wait status if we pretend that this has
2625 indeed been resumed. */
2626 if (!lp->resumed)
2627 return 0;
2628
2629 if (!lwp_status_pending_p (lp))
2630 return 0;
2631
2632 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
2633 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
2634 {
2635 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
2636 CORE_ADDR pc;
2637 int discard = 0;
2638
2639 pc = regcache_read_pc (regcache);
2640
2641 if (pc != lp->stop_pc)
2642 {
2643 linux_nat_debug_printf ("PC of %s changed. was=%s, now=%s",
2644 lp->ptid.to_string ().c_str (),
2645 paddress (current_inferior ()->arch (),
2646 lp->stop_pc),
2647 paddress (current_inferior ()->arch (), pc));
2648 discard = 1;
2649 }
2650
2651 if (discard)
2652 {
2653 linux_nat_debug_printf ("pending event of %s cancelled.",
2654 lp->ptid.to_string ().c_str ());
2655
2656 lp->status = 0;
2657 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
2658 return 0;
2659 }
2660 }
2661
2662 return 1;
2663 }
2664
2665 /* Count the LWP's that have had events. */
2666
2667 static int
2668 count_events_callback (struct lwp_info *lp, int *count)
2669 {
2670 gdb_assert (count != NULL);
2671
2672 /* Select only resumed LWPs that have an event pending. */
2673 if (lp->resumed && lwp_status_pending_p (lp))
2674 (*count)++;
2675
2676 return 0;
2677 }
2678
2679 /* Select the LWP (if any) that is currently being single-stepped. */
2680
2681 static int
2682 select_singlestep_lwp_callback (struct lwp_info *lp)
2683 {
2684 if (lp->last_resume_kind == resume_step
2685 && lp->status != 0)
2686 return 1;
2687 else
2688 return 0;
2689 }
2690
2691 /* Returns true if LP has a status pending. */
2692
2693 static int
2694 lwp_status_pending_p (struct lwp_info *lp)
2695 {
2696 /* We check for lp->waitstatus in addition to lp->status, because we
2697 can have pending process exits recorded in lp->status and
2698 W_EXITCODE(0,0) happens to be 0. */
2699 return lp->status != 0 || lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE;
2700 }
2701
2702 /* Select the Nth LWP that has had an event. */
2703
2704 static int
2705 select_event_lwp_callback (struct lwp_info *lp, int *selector)
2706 {
2707 gdb_assert (selector != NULL);
2708
2709 /* Select only resumed LWPs that have an event pending. */
2710 if (lp->resumed && lwp_status_pending_p (lp))
2711 if ((*selector)-- == 0)
2712 return 1;
2713
2714 return 0;
2715 }
2716
/* Called when the LWP stopped for a signal/trap.  If it stopped for a
   trap check what caused it (breakpoint, watchpoint, trace, etc.),
   and save the result in the LWP's stop_reason field.  If it stopped
   for a breakpoint, decrement the PC if necessary on the lwp's
   architecture.  */

static void
save_stop_reason (struct lwp_info *lp)
{
  struct regcache *regcache;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  CORE_ADDR sw_bp_pc;
  siginfo_t siginfo;

  /* A stop reason must not have been computed yet, and there must be
     an event to examine.  */
  gdb_assert (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON);
  gdb_assert (lp->status != 0);

  if (!linux_target->low_status_is_event (lp->status))
    return;

  /* While the inferior is starting up, its registers may not be
     readable yet; don't try to classify the stop.  */
  inferior *inf = lwp_inferior (lp);
  if (inf->starting_up)
    return;

  regcache = get_thread_regcache (linux_target, lp->ptid);
  gdbarch = regcache->arch ();

  /* SW_BP_PC is where a software breakpoint instruction would have
     been placed, given the arch's decr_pc_after_break adjustment.  */
  pc = regcache_read_pc (regcache);
  sw_bp_pc = pc - gdbarch_decr_pc_after_break (gdbarch);

  if (linux_nat_get_siginfo (lp->ptid, &siginfo))
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lp))
		lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because, at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lp))
		lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      linux_nat_debug_printf ("%s stopped by trace",
				      lp->ptid.to_string ().c_str ());

	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      check_stopped_by_watchpoint (lp);
	    }
	}
    }

  if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      linux_nat_debug_printf ("%s stopped by software breakpoint",
			      lp->ptid.to_string ().c_str ());

      /* Back up the PC if necessary.  */
      if (pc != sw_bp_pc)
	regcache_write_pc (regcache, sw_bp_pc);

      /* Update this so we record the correct stop PC below.  */
      pc = sw_bp_pc;
    }
  else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      linux_nat_debug_printf ("%s stopped by hardware breakpoint",
			      lp->ptid.to_string ().c_str ());
    }
  else if (lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      linux_nat_debug_printf ("%s stopped by hardware watchpoint",
			      lp->ptid.to_string ().c_str ());
    }

  /* Record where the stop happened, so a later status_callback can
     detect whether the thread has since moved.  */
  lp->stop_pc = pc;
}
2816
2817
2818 /* Returns true if the LWP had stopped for a software breakpoint. */
2819
2820 bool
2821 linux_nat_target::stopped_by_sw_breakpoint ()
2822 {
2823 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2824
2825 gdb_assert (lp != NULL);
2826
2827 return lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2828 }
2829
/* Implement the supports_stopped_by_sw_breakpoint method.  The Linux
   native target always classifies SW breakpoint stops (see
   save_stop_reason), so report unconditional support.  */

bool
linux_nat_target::supports_stopped_by_sw_breakpoint ()
{
  return true;
}
2837
2838 /* Returns true if the LWP had stopped for a hardware
2839 breakpoint/watchpoint. */
2840
2841 bool
2842 linux_nat_target::stopped_by_hw_breakpoint ()
2843 {
2844 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2845
2846 gdb_assert (lp != NULL);
2847
2848 return lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2849 }
2850
/* Implement the supports_stopped_by_hw_breakpoint method.  The Linux
   native target always classifies HW breakpoint stops (see
   save_stop_reason), so report unconditional support.  */

bool
linux_nat_target::supports_stopped_by_hw_breakpoint ()
{
  return true;
}
2858
/* Select one LWP out of those that have events pending, within the
   set matching FILTER.  On entry, *ORIG_LP/*STATUS describe the event
   GDB happened to collect first; on exit they describe the event of
   the selected LWP, whose recorded status has been flushed (it is now
   carried in *STATUS instead).  */

static void
select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
{
  int num_events = 0;
  int random_selector;
  struct lwp_info *event_lp = NULL;

  /* Record the wait status for the original LWP.  */
  (*orig_lp)->status = *status;

  /* In all-stop, give preference to the LWP that is being
     single-stepped.  There will be at most one, and it will be the
     LWP that the core is most interested in.  If we didn't do this,
     then we'd have to handle pending step SIGTRAPs somehow in case
     the core later continues the previously-stepped thread, as
     otherwise we'd report the pending SIGTRAP then, and the core, not
     having stepped the thread, wouldn't understand what the trap was
     for, and therefore would report it to the user as a random
     signal.  */
  if (!target_is_non_stop_p ())
    {
      event_lp = iterate_over_lwps (filter, select_singlestep_lwp_callback);
      if (event_lp != NULL)
	{
	  linux_nat_debug_printf ("Select single-step %s",
				  event_lp->ptid.to_string ().c_str ());
	}
    }

  if (event_lp == NULL)
    {
      /* Pick one at random, out of those which have had events.
	 Randomizing avoids starving any particular thread of event
	 reporting.  */

      /* First see how many events we have.  */
      iterate_over_lwps (filter,
			 [&] (struct lwp_info *info)
			 {
			   return count_events_callback (info, &num_events);
			 });
      gdb_assert (num_events > 0);

      /* Now randomly pick a LWP out of those that have had
	 events.  */
      random_selector = (int)
	((num_events * (double) rand ()) / (RAND_MAX + 1.0));

      if (num_events > 1)
	linux_nat_debug_printf ("Found %d events, selecting #%d",
				num_events, random_selector);

      event_lp
	= (iterate_over_lwps
	   (filter,
	    [&] (struct lwp_info *info)
	    {
	      return select_event_lwp_callback (info,
						&random_selector);
	    }));
    }

  if (event_lp != NULL)
    {
      /* Switch the event LWP.  */
      *orig_lp = event_lp;
      *status = event_lp->status;
    }

  /* Flush the wait status for the event LWP; the caller now owns it
     via *STATUS.  */
  (*orig_lp)->status = 0;
}
2931
/* Return non-zero if LP has been resumed.  Usable as an
   iterate_over_lwps callback to find any resumed LWP.  */

static int
resumed_callback (struct lwp_info *lp)
{
  return lp->resumed;
}
2939
2940 /* Check if we should go on and pass this event to common code.
2941
2942 If so, save the status to the lwp_info structure associated to LWPID. */
2943
2944 static void
2945 linux_nat_filter_event (int lwpid, int status)
2946 {
2947 struct lwp_info *lp;
2948 int event = linux_ptrace_get_extended_event (status);
2949
2950 lp = find_lwp_pid (ptid_t (lwpid));
2951
2952 /* Check for events reported by anything not in our LWP list. */
2953 if (lp == nullptr)
2954 {
2955 if (WIFSTOPPED (status))
2956 {
2957 if (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC)
2958 {
2959 /* A non-leader thread exec'ed after we've seen the
2960 leader zombie, and removed it from our lists (in
2961 check_zombie_leaders). The non-leader thread changes
2962 its tid to the tgid. */
2963 linux_nat_debug_printf
2964 ("Re-adding thread group leader LWP %d after exec.",
2965 lwpid);
2966
2967 lp = add_lwp (ptid_t (lwpid, lwpid));
2968 lp->stopped = 1;
2969 lp->resumed = 1;
2970 add_thread (linux_target, lp->ptid);
2971 }
2972 else
2973 {
2974 /* A process we are controlling has forked and the new
2975 child's stop was reported to us by the kernel. Save
2976 its PID and go back to waiting for the fork event to
2977 be reported - the stopped process might be returned
2978 from waitpid before or after the fork event is. */
2979 linux_nat_debug_printf
2980 ("Saving LWP %d status %s in stopped_pids list",
2981 lwpid, status_to_str (status).c_str ());
2982 add_to_pid_list (&stopped_pids, lwpid, status);
2983 }
2984 }
2985 else
2986 {
2987 /* Don't report an event for the exit of an LWP not in our
2988 list, i.e. not part of any inferior we're debugging.
2989 This can happen if we detach from a program we originally
2990 forked and then it exits. However, note that we may have
2991 earlier deleted a leader of an inferior we're debugging,
2992 in check_zombie_leaders. Re-add it back here if so. */
2993 for (inferior *inf : all_inferiors (linux_target))
2994 {
2995 if (inf->pid == lwpid)
2996 {
2997 linux_nat_debug_printf
2998 ("Re-adding thread group leader LWP %d after exit.",
2999 lwpid);
3000
3001 lp = add_lwp (ptid_t (lwpid, lwpid));
3002 lp->resumed = 1;
3003 add_thread (linux_target, lp->ptid);
3004 break;
3005 }
3006 }
3007 }
3008
3009 if (lp == nullptr)
3010 return;
3011 }
3012
3013 /* This LWP is stopped now. (And if dead, this prevents it from
3014 ever being continued.) */
3015 lp->stopped = 1;
3016
3017 if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
3018 {
3019 inferior *inf = find_inferior_pid (linux_target, lp->ptid.pid ());
3020 int options = linux_nat_ptrace_options (inf->attach_flag);
3021
3022 linux_enable_event_reporting (lp->ptid.lwp (), options);
3023 lp->must_set_ptrace_flags = 0;
3024 }
3025
3026 /* Handle GNU/Linux's syscall SIGTRAPs. */
3027 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
3028 {
3029 /* No longer need the sysgood bit. The ptrace event ends up
3030 recorded in lp->waitstatus if we care for it. We can carry
3031 on handling the event like a regular SIGTRAP from here
3032 on. */
3033 status = W_STOPCODE (SIGTRAP);
3034 if (linux_handle_syscall_trap (lp, 0))
3035 return;
3036 }
3037 else
3038 {
3039 /* Almost all other ptrace-stops are known to be outside of system
3040 calls, with further exceptions in linux_handle_extended_wait. */
3041 lp->syscall_state = TARGET_WAITKIND_IGNORE;
3042 }
3043
3044 /* Handle GNU/Linux's extended waitstatus for trace events. */
3045 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
3046 && linux_is_extended_waitstatus (status))
3047 {
3048 linux_nat_debug_printf ("Handling extended status 0x%06x", status);
3049
3050 if (linux_handle_extended_wait (lp, status))
3051 return;
3052 }
3053
3054 /* Check if the thread has exited. */
3055 if (WIFEXITED (status) || WIFSIGNALED (status))
3056 {
3057 if (!report_exit_events_for (lp) && !is_leader (lp))
3058 {
3059 linux_nat_debug_printf ("%s exited.",
3060 lp->ptid.to_string ().c_str ());
3061
3062 /* If this was not the leader exiting, then the exit signal
3063 was not the end of the debugged application and should be
3064 ignored. */
3065 exit_lwp (lp);
3066 return;
3067 }
3068
3069 /* Note that even if the leader was ptrace-stopped, it can still
3070 exit, if e.g., some other thread brings down the whole
3071 process (calls `exit'). So don't assert that the lwp is
3072 resumed. */
3073 linux_nat_debug_printf ("LWP %ld exited (resumed=%d)",
3074 lp->ptid.lwp (), lp->resumed);
3075
3076 mark_lwp_dead (lp, status);
3077 return;
3078 }
3079
3080 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3081 an attempt to stop an LWP. */
3082 if (lp->signalled
3083 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3084 {
3085 lp->signalled = 0;
3086
3087 if (lp->last_resume_kind == resume_stop)
3088 {
3089 linux_nat_debug_printf ("resume_stop SIGSTOP caught for %s.",
3090 lp->ptid.to_string ().c_str ());
3091 }
3092 else
3093 {
3094 /* This is a delayed SIGSTOP. Filter out the event. */
3095
3096 linux_nat_debug_printf
3097 ("%s %s, 0, 0 (discard delayed SIGSTOP)",
3098 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3099 lp->ptid.to_string ().c_str ());
3100
3101 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
3102 gdb_assert (lp->resumed);
3103 return;
3104 }
3105 }
3106
3107 /* Make sure we don't report a SIGINT that we have already displayed
3108 for another thread. */
3109 if (lp->ignore_sigint
3110 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3111 {
3112 linux_nat_debug_printf ("Delayed SIGINT caught for %s.",
3113 lp->ptid.to_string ().c_str ());
3114
3115 /* This is a delayed SIGINT. */
3116 lp->ignore_sigint = 0;
3117
3118 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
3119 linux_nat_debug_printf ("%s %s, 0, 0 (discard SIGINT)",
3120 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3121 lp->ptid.to_string ().c_str ());
3122 gdb_assert (lp->resumed);
3123
3124 /* Discard the event. */
3125 return;
3126 }
3127
3128 /* Don't report signals that GDB isn't interested in, such as
3129 signals that are neither printed nor stopped upon. Stopping all
3130 threads can be a bit time-consuming, so if we want decent
3131 performance with heavily multi-threaded programs, especially when
3132 they're using a high frequency timer, we'd better avoid it if we
3133 can. */
3134 if (WIFSTOPPED (status))
3135 {
3136 enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
3137
3138 if (!target_is_non_stop_p ())
3139 {
3140 /* Only do the below in all-stop, as we currently use SIGSTOP
3141 to implement target_stop (see linux_nat_stop) in
3142 non-stop. */
3143 if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
3144 {
3145 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3146 forwarded to the entire process group, that is, all LWPs
3147 will receive it - unless they're using CLONE_THREAD to
3148 share signals. Since we only want to report it once, we
3149 mark it as ignored for all LWPs except this one. */
3150 iterate_over_lwps (ptid_t (lp->ptid.pid ()), set_ignore_sigint);
3151 lp->ignore_sigint = 0;
3152 }
3153 else
3154 maybe_clear_ignore_sigint (lp);
3155 }
3156
3157 /* When using hardware single-step, we need to report every signal.
3158 Otherwise, signals in pass_mask may be short-circuited
3159 except signals that might be caused by a breakpoint, or SIGSTOP
3160 if we sent the SIGSTOP and are waiting for it to arrive. */
3161 if (!lp->step
3162 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status))
3163 && (WSTOPSIG (status) != SIGSTOP
3164 || !linux_target->find_thread (lp->ptid)->stop_requested)
3165 && !linux_wstatus_maybe_breakpoint (status))
3166 {
3167 linux_resume_one_lwp (lp, lp->step, signo);
3168 linux_nat_debug_printf
3169 ("%s %s, %s (preempt 'handle')",
3170 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3171 lp->ptid.to_string ().c_str (),
3172 (signo != GDB_SIGNAL_0
3173 ? strsignal (gdb_signal_to_host (signo)) : "0"));
3174 return;
3175 }
3176 }
3177
3178 /* An interesting event. */
3179 gdb_assert (lp);
3180 lp->status = status;
3181 save_stop_reason (lp);
3182 }
3183
3184 /* Detect zombie thread group leaders, and "exit" them. We can't reap
3185 their exits until all other threads in the group have exited. */
3186
3187 static void
3188 check_zombie_leaders (void)
3189 {
3190 for (inferior *inf : all_inferiors ())
3191 {
3192 struct lwp_info *leader_lp;
3193
3194 if (inf->pid == 0)
3195 continue;
3196
3197 leader_lp = find_lwp_pid (ptid_t (inf->pid));
3198 if (leader_lp != NULL
3199 /* Check if there are other threads in the group, as we may
3200 have raced with the inferior simply exiting. Note this
3201 isn't a watertight check. If the inferior is
3202 multi-threaded and is exiting, it may be we see the
3203 leader as zombie before we reap all the non-leader
3204 threads. See comments below. */
3205 && num_lwps (inf->pid) > 1
3206 && linux_proc_pid_is_zombie (inf->pid))
3207 {
3208 /* A zombie leader in a multi-threaded program can mean one
3209 of three things:
3210
3211 #1 - Only the leader exited, not the whole program, e.g.,
3212 with pthread_exit. Since we can't reap the leader's exit
3213 status until all other threads are gone and reaped too,
3214 we want to delete the zombie leader right away, as it
3215 can't be debugged, we can't read its registers, etc.
3216 This is the main reason we check for zombie leaders
3217 disappearing.
3218
3219 #2 - The whole thread-group/process exited (a group exit,
3220 via e.g. exit(3), and there is (or will be shortly) an
3221 exit reported for each thread in the process, and then
3222 finally an exit for the leader once the non-leaders are
3223 reaped.
3224
3225 #3 - There are 3 or more threads in the group, and a
3226 thread other than the leader exec'd. See comments on
3227 exec events at the top of the file.
3228
3229 Ideally we would never delete the leader for case #2.
3230 Instead, we want to collect the exit status of each
3231 non-leader thread, and then finally collect the exit
3232 status of the leader as normal and use its exit code as
3233 whole-process exit code. Unfortunately, there's no
3234 race-free way to distinguish cases #1 and #2. We can't
3235 assume the exit events for the non-leaders threads are
3236 already pending in the kernel, nor can we assume the
3237 non-leader threads are in zombie state already. Between
3238 the leader becoming zombie and the non-leaders exiting
3239 and becoming zombie themselves, there's a small time
3240 window, so such a check would be racy. Temporarily
3241 pausing all threads and checking to see if all threads
3242 exit or not before re-resuming them would work in the
3243 case that all threads are running right now, but it
3244 wouldn't work if some thread is currently already
3245 ptrace-stopped, e.g., due to scheduler-locking.
3246
3247 So what we do is we delete the leader anyhow, and then
3248 later on when we see its exit status, we re-add it back.
3249 We also make sure that we only report a whole-process
3250 exit when we see the leader exiting, as opposed to when
3251 the last LWP in the LWP list exits, which can be a
3252 non-leader if we deleted the leader here. */
3253 linux_nat_debug_printf ("Thread group leader %d zombie "
3254 "(it exited, or another thread execd), "
3255 "deleting it.",
3256 inf->pid);
3257 exit_lwp (leader_lp);
3258 }
3259 }
3260 }
3261
3262 /* Convenience function that is called when we're about to return an
3263 event to the core. If the event is an exit or signalled event,
3264 then this decides whether to report it as process-wide event, as a
3265 thread exit event, or to suppress it. All other event kinds are
3266 passed through unmodified. */
3267
3268 static ptid_t
3269 filter_exit_event (struct lwp_info *event_child,
3270 struct target_waitstatus *ourstatus)
3271 {
3272 ptid_t ptid = event_child->ptid;
3273
3274 /* Note we must filter TARGET_WAITKIND_SIGNALLED as well, otherwise
3275 if a non-leader thread exits with a signal, we'd report it to the
3276 core which would interpret it as the whole-process exiting.
3277 There is no TARGET_WAITKIND_THREAD_SIGNALLED event kind. */
3278 if (ourstatus->kind () != TARGET_WAITKIND_EXITED
3279 && ourstatus->kind () != TARGET_WAITKIND_SIGNALLED)
3280 return ptid;
3281
3282 if (!is_leader (event_child))
3283 {
3284 if (report_exit_events_for (event_child))
3285 {
3286 ourstatus->set_thread_exited (0);
3287 /* Delete lwp, but not thread_info, infrun will need it to
3288 process the event. */
3289 exit_lwp (event_child, false);
3290 }
3291 else
3292 {
3293 ourstatus->set_ignore ();
3294 exit_lwp (event_child);
3295 }
3296 }
3297
3298 return ptid;
3299 }
3300
/* Helper for linux_nat_target::wait.  Wait for an event of interest
   to report to the core for PTID, storing it in *OURSTATUS and
   returning the ptid of the LWP the event is for.  SIGCHLD is kept
   blocked except while sleeping in wait_for_signal, so that events
   are only consumed via the waitpid loop below.  */

static ptid_t
linux_nat_wait_1 (ptid_t ptid, struct target_waitstatus *ourstatus,
		  target_wait_flags target_options)
{
  LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;

  sigset_t prev_mask;
  enum resume_kind last_resume_kind;
  struct lwp_info *lp;
  int status;

  /* The first time we get here after starting a new inferior, we may
     not have added it to the LWP list yet - this is the earliest
     moment at which we know its PID.  */
  if (ptid.is_pid () && find_lwp_pid (ptid) == nullptr)
    {
      ptid_t lwp_ptid (ptid.pid (), ptid.pid ());

      /* Upgrade the main thread's ptid.  */
      thread_change_ptid (linux_target, ptid, lwp_ptid);
      lp = add_initial_lwp (lwp_ptid);
      lp->resumed = 1;
    }

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  */
  block_child_signals (&prev_mask);

  /* First check if there is a LWP with a wait status pending.  */
  lp = iterate_over_lwps (ptid, status_callback);
  if (lp != NULL)
    {
      linux_nat_debug_printf ("Using pending wait status %s for %s.",
			      pending_status_str (lp).c_str (),
			      lp->ptid.to_string ().c_str ());
    }

  /* But if we don't find a pending event, we'll have to wait.  Always
     pull all events out of the kernel.  We'll randomly select an
     event LWP out of all that have events, to prevent starvation.  */

  while (lp == NULL)
    {
      pid_t lwpid;

      /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid.  */

      errno = 0;
      lwpid = my_waitpid (-1, &status, __WALL | WNOHANG);

      linux_nat_debug_printf ("waitpid(-1, ...) returned %d, %s",
			      lwpid,
			      errno ? safe_strerror (errno) : "ERRNO-OK");

      if (lwpid > 0)
	{
	  linux_nat_debug_printf ("waitpid %ld received %s",
				  (long) lwpid,
				  status_to_str (status).c_str ());

	  /* Record/process the event; it may be left pending on the
	     LWP for the status_callback scan below to find.  */
	  linux_nat_filter_event (lwpid, status);
	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Now that we've pulled all events out of the kernel, resume
	 LWPs that don't have an interesting event to report.  */
      iterate_over_lwps (minus_one_ptid,
			 [] (struct lwp_info *info)
			 {
			   return resume_stopped_resumed_lwps (info, minus_one_ptid);
			 });

      /* ... and find an LWP with a status to report to the core, if
	 any.  */
      lp = iterate_over_lwps (ptid, status_callback);
      if (lp != NULL)
	break;

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      check_zombie_leaders ();

      /* If there are no resumed children left, bail.  We'd be stuck
	 forever in the sigsuspend call below otherwise.  */
      if (iterate_over_lwps (ptid, resumed_callback) == NULL)
	{
	  linux_nat_debug_printf ("exit (no resumed LWP)");

	  ourstatus->set_no_resumed ();

	  restore_child_signals_mask (&prev_mask);
	  return minus_one_ptid;
	}

      /* No interesting event to report to the core.  */

      if (target_options & TARGET_WNOHANG)
	{
	  /* Caller asked us not to block; report "nothing yet".  */
	  linux_nat_debug_printf ("no interesting events found");

	  ourstatus->set_ignore ();
	  restore_child_signals_mask (&prev_mask);
	  return minus_one_ptid;
	}

      /* We shouldn't end up here unless we want to try again.  */
      gdb_assert (lp == NULL);

      /* Block until we get an event reported with SIGCHLD.  */
      wait_for_signal ();
    }

  gdb_assert (lp);
  gdb_assert (lp->stopped);

  /* Consume the pending status; STATUS now holds the raw waitpid
     status for the event LWP.  */
  status = lp->status;
  lp->status = 0;

  if (!target_is_non_stop_p ())
    {
      /* Now stop all other LWP's ...  */
      iterate_over_lwps (minus_one_ptid, stop_callback);

      /* ... and wait until all of them have reported back that
	 they're no longer running.  */
      iterate_over_lwps (minus_one_ptid, stop_wait_callback);
    }

  /* If we're not waiting for a specific LWP, choose an event LWP from
     among those that have had events.  Giving equal priority to all
     LWPs that have had events helps prevent starvation.  */
  if (ptid == minus_one_ptid || ptid.is_pid ())
    select_event_lwp (ptid, &lp, &status);

  gdb_assert (lp != NULL);

  /* We'll need this to determine whether to report a SIGSTOP as
     GDB_SIGNAL_0.  Need to take a copy because resume_clear_callback
     clears it.  */
  last_resume_kind = lp->last_resume_kind;

  if (!target_is_non_stop_p ())
    {
      /* In all-stop, from the core's perspective, all LWPs are now
	 stopped until a new resume action is sent over.  */
      iterate_over_lwps (minus_one_ptid, resume_clear_callback);
    }
  else
    {
      resume_clear_callback (lp);
    }

  if (linux_target->low_status_is_event (status))
    {
      linux_nat_debug_printf ("trap ptid is %s.",
			      lp->ptid.to_string ().c_str ());
    }

  /* An extended waitstatus recorded earlier (e.g. fork/exec events)
     takes precedence over the raw waitpid status.  */
  if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
    {
      *ourstatus = lp->waitstatus;
      lp->waitstatus.set_ignore ();
    }
  else
    *ourstatus = host_status_to_waitstatus (status);

  linux_nat_debug_printf ("event found");

  restore_child_signals_mask (&prev_mask);

  if (last_resume_kind == resume_stop
      && ourstatus->kind () == TARGET_WAITKIND_STOPPED
      && WSTOPSIG (status) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with
	 target_stop, and it stopped cleanly, so report as SIG0.  The
	 use of SIGSTOP is an implementation detail.  */
      ourstatus->set_stopped (GDB_SIGNAL_0);
    }

  /* Cache the core the LWP last ran on, except for exit events where
     the LWP is gone.  */
  if (ourstatus->kind () == TARGET_WAITKIND_EXITED
      || ourstatus->kind () == TARGET_WAITKIND_SIGNALLED)
    lp->core = -1;
  else
    lp->core = linux_common_core_of_thread (lp->ptid);

  return filter_exit_event (lp, ourstatus);
}
3501
3502 /* Resume LWPs that are currently stopped without any pending status
3503 to report, but are resumed from the core's perspective. */
3504
3505 static int
3506 resume_stopped_resumed_lwps (struct lwp_info *lp, const ptid_t wait_ptid)
3507 {
3508 inferior *inf = lwp_inferior (lp);
3509
3510 if (!lp->stopped)
3511 {
3512 linux_nat_debug_printf ("NOT resuming LWP %s, not stopped",
3513 lp->ptid.to_string ().c_str ());
3514 }
3515 else if (!lp->resumed)
3516 {
3517 linux_nat_debug_printf ("NOT resuming LWP %s, not resumed",
3518 lp->ptid.to_string ().c_str ());
3519 }
3520 else if (lwp_status_pending_p (lp))
3521 {
3522 linux_nat_debug_printf ("NOT resuming LWP %s, has pending status",
3523 lp->ptid.to_string ().c_str ());
3524 }
3525 else if (inf->vfork_child != nullptr)
3526 {
3527 linux_nat_debug_printf ("NOT resuming LWP %s (vfork parent)",
3528 lp->ptid.to_string ().c_str ());
3529 }
3530 else
3531 {
3532 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
3533 struct gdbarch *gdbarch = regcache->arch ();
3534
3535 try
3536 {
3537 CORE_ADDR pc = regcache_read_pc (regcache);
3538 int leave_stopped = 0;
3539
3540 /* Don't bother if there's a breakpoint at PC that we'd hit
3541 immediately, and we're not waiting for this LWP. */
3542 if (!lp->ptid.matches (wait_ptid))
3543 {
3544 if (breakpoint_inserted_here_p (inf->aspace.get (), pc))
3545 leave_stopped = 1;
3546 }
3547
3548 if (!leave_stopped)
3549 {
3550 linux_nat_debug_printf
3551 ("resuming stopped-resumed LWP %s at %s: step=%d",
3552 lp->ptid.to_string ().c_str (), paddress (gdbarch, pc),
3553 lp->step);
3554
3555 linux_resume_one_lwp_throw (lp, lp->step, GDB_SIGNAL_0);
3556 }
3557 }
3558 catch (const gdb_exception_error &ex)
3559 {
3560 if (!check_ptrace_stopped_lwp_gone (lp))
3561 throw;
3562 }
3563 }
3564
3565 return 0;
3566 }
3567
3568 ptid_t
3569 linux_nat_target::wait (ptid_t ptid, struct target_waitstatus *ourstatus,
3570 target_wait_flags target_options)
3571 {
3572 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
3573
3574 ptid_t event_ptid;
3575
3576 linux_nat_debug_printf ("[%s], [%s]", ptid.to_string ().c_str (),
3577 target_options_to_string (target_options).c_str ());
3578
3579 /* Flush the async file first. */
3580 if (target_is_async_p ())
3581 async_file_flush ();
3582
3583 /* Resume LWPs that are currently stopped without any pending status
3584 to report, but are resumed from the core's perspective. LWPs get
3585 in this state if we find them stopping at a time we're not
3586 interested in reporting the event (target_wait on a
3587 specific_process, for example, see linux_nat_wait_1), and
3588 meanwhile the event became uninteresting. Don't bother resuming
3589 LWPs we're not going to wait for if they'd stop immediately. */
3590 if (target_is_non_stop_p ())
3591 iterate_over_lwps (minus_one_ptid,
3592 [=] (struct lwp_info *info)
3593 {
3594 return resume_stopped_resumed_lwps (info, ptid);
3595 });
3596
3597 event_ptid = linux_nat_wait_1 (ptid, ourstatus, target_options);
3598
3599 /* If we requested any event, and something came out, assume there
3600 may be more. If we requested a specific lwp or process, also
3601 assume there may be more. */
3602 if (target_is_async_p ()
3603 && ((ourstatus->kind () != TARGET_WAITKIND_IGNORE
3604 && ourstatus->kind () != TARGET_WAITKIND_NO_RESUMED)
3605 || ptid != minus_one_ptid))
3606 async_file_mark ();
3607
3608 return event_ptid;
3609 }
3610
3611 /* Kill one LWP. */
3612
3613 static void
3614 kill_one_lwp (pid_t pid)
3615 {
3616 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3617
3618 errno = 0;
3619 kill_lwp (pid, SIGKILL);
3620
3621 if (debug_linux_nat)
3622 {
3623 int save_errno = errno;
3624
3625 linux_nat_debug_printf
3626 ("kill (SIGKILL) %ld, 0, 0 (%s)", (long) pid,
3627 save_errno != 0 ? safe_strerror (save_errno) : "OK");
3628 }
3629
3630 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3631
3632 errno = 0;
3633 ptrace (PTRACE_KILL, pid, 0, 0);
3634 if (debug_linux_nat)
3635 {
3636 int save_errno = errno;
3637
3638 linux_nat_debug_printf
3639 ("PTRACE_KILL %ld, 0, 0 (%s)", (long) pid,
3640 save_errno ? safe_strerror (save_errno) : "OK");
3641 }
3642 }
3643
3644 /* Wait for an LWP to die. */
3645
3646 static void
3647 kill_wait_one_lwp (pid_t pid)
3648 {
3649 pid_t res;
3650
3651 /* We must make sure that there are no pending events (delayed
3652 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
3653 program doesn't interfere with any following debugging session. */
3654
3655 do
3656 {
3657 res = my_waitpid (pid, NULL, __WALL);
3658 if (res != (pid_t) -1)
3659 {
3660 linux_nat_debug_printf ("wait %ld received unknown.", (long) pid);
3661
3662 /* The Linux kernel sometimes fails to kill a thread
3663 completely after PTRACE_KILL; that goes from the stop
3664 point in do_fork out to the one in get_signal_to_deliver
3665 and waits again. So kill it again. */
3666 kill_one_lwp (pid);
3667 }
3668 }
3669 while (res == pid);
3670
3671 gdb_assert (res == -1 && errno == ECHILD);
3672 }
3673
/* Callback for iterate_over_lwps.  Kill the given LWP (without
   waiting for it to die; kill_wait_callback does that in a separate
   pass).  Always returns 0 so iteration continues over all LWPs.  */

static int
kill_callback (struct lwp_info *lp)
{
  kill_one_lwp (lp->ptid.lwp ());
  return 0;
}
3682
/* Callback for iterate_over_lwps.  Wait until the given LWP
   (previously killed by kill_callback) is fully reaped.  Always
   returns 0 so iteration continues over all LWPs.  */

static int
kill_wait_callback (struct lwp_info *lp)
{
  kill_wait_one_lwp (lp->ptid.lwp ());
  return 0;
}
3691
3692 /* Kill the fork/clone child of LP if it has an unfollowed child. */
3693
3694 static int
3695 kill_unfollowed_child_callback (lwp_info *lp)
3696 {
3697 std::optional<target_waitstatus> ws = get_pending_child_status (lp);
3698 if (ws.has_value ())
3699 {
3700 ptid_t child_ptid = ws->child_ptid ();
3701 int child_pid = child_ptid.pid ();
3702 int child_lwp = child_ptid.lwp ();
3703
3704 kill_one_lwp (child_lwp);
3705 kill_wait_one_lwp (child_lwp);
3706
3707 /* Let the arch-specific native code know this process is
3708 gone. */
3709 if (ws->kind () != TARGET_WAITKIND_THREAD_CLONED)
3710 linux_target->low_forget_process (child_pid);
3711 }
3712
3713 return 0;
3714 }
3715
3716 void
3717 linux_nat_target::kill ()
3718 {
3719 ptid_t pid_ptid (inferior_ptid.pid ());
3720
3721 /* If we're stopped while forking/cloning and we haven't followed
3722 yet, kill the child task. We need to do this first because the
3723 parent will be sleeping if this is a vfork. */
3724 iterate_over_lwps (pid_ptid, kill_unfollowed_child_callback);
3725
3726 if (forks_exist_p ())
3727 linux_fork_killall ();
3728 else
3729 {
3730 /* Stop all threads before killing them, since ptrace requires
3731 that the thread is stopped to successfully PTRACE_KILL. */
3732 iterate_over_lwps (pid_ptid, stop_callback);
3733 /* ... and wait until all of them have reported back that
3734 they're no longer running. */
3735 iterate_over_lwps (pid_ptid, stop_wait_callback);
3736
3737 /* Kill all LWP's ... */
3738 iterate_over_lwps (pid_ptid, kill_callback);
3739
3740 /* ... and wait until we've flushed all events. */
3741 iterate_over_lwps (pid_ptid, kill_wait_callback);
3742 }
3743
3744 target_mourn_inferior (inferior_ptid);
3745 }
3746
3747 void
3748 linux_nat_target::mourn_inferior ()
3749 {
3750 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
3751
3752 int pid = inferior_ptid.pid ();
3753
3754 purge_lwp_list (pid);
3755
3756 close_proc_mem_file (pid);
3757
3758 if (! forks_exist_p ())
3759 /* Normal case, no other forks available. */
3760 inf_ptrace_target::mourn_inferior ();
3761 else
3762 /* Multi-fork case. The current inferior_ptid has exited, but
3763 there are other viable forks to debug. Delete the exiting
3764 one and context-switch to the first available. */
3765 linux_fork_mourn_inferior ();
3766
3767 /* Let the arch-specific native code know this process is gone. */
3768 linux_target->low_forget_process (pid);
3769 }
3770
3771 /* Convert a native/host siginfo object, into/from the siginfo in the
3772 layout of the inferiors' architecture. */
3773
3774 static void
3775 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
3776 {
3777 /* If the low target didn't do anything, then just do a straight
3778 memcpy. */
3779 if (!linux_target->low_siginfo_fixup (siginfo, inf_siginfo, direction))
3780 {
3781 if (direction == 1)
3782 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
3783 else
3784 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
3785 }
3786 }
3787
/* Read or write the siginfo (pending signal information) of thread
   PTID, at OFFSET within the siginfo object, for LEN bytes.  Exactly
   one of READBUF/WRITEBUF is non-NULL.  Stores the number of bytes
   transferred in *XFERED_LEN.  Helper for linux_nat_target's
   TARGET_OBJECT_SIGNAL_INFO handling in xfer_partial.  */

static enum target_xfer_status
linux_xfer_siginfo (ptid_t ptid, enum target_object object,
		    const char *annex, gdb_byte *readbuf,
		    const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
		    ULONGEST *xfered_len)
{
  siginfo_t siginfo;
  gdb_byte inf_siginfo[sizeof (siginfo_t)];

  gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
  gdb_assert (readbuf || writebuf);

  /* Note OFFSET == sizeof (siginfo) is allowed here; LEN is clamped
     to zero below, yielding an empty but successful transfer.  */
  if (offset > sizeof (siginfo))
    return TARGET_XFER_E_IO;

  if (!linux_nat_get_siginfo (ptid, &siginfo))
    return TARGET_XFER_E_IO;

  /* When GDB is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDB should look the same as debugging it
     with a 32-bit GDB, we need to convert it.  GDB core always sees
     the converted layout, so any read/write will have to be done
     post-conversion.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp LEN so the transfer stays within the siginfo object.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      int pid = get_ptrace_pid (ptid);
      errno = 0;
      ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
      if (errno != 0)
	return TARGET_XFER_E_IO;
    }

  *xfered_len = len;
  return TARGET_XFER_OK;
}
3836
3837 static enum target_xfer_status
3838 linux_nat_xfer_osdata (enum target_object object,
3839 const char *annex, gdb_byte *readbuf,
3840 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3841 ULONGEST *xfered_len);
3842
3843 static enum target_xfer_status
3844 linux_proc_xfer_memory_partial (int pid, gdb_byte *readbuf,
3845 const gdb_byte *writebuf, ULONGEST offset,
3846 LONGEST len, ULONGEST *xfered_len);
3847
3848 /* Look for an LWP of PID that we know is ptrace-stopped. Returns
3849 NULL if none is found. */
3850
3851 static lwp_info *
3852 find_stopped_lwp (int pid)
3853 {
3854 for (lwp_info *lp : all_lwps ())
3855 if (lp->ptid.pid () == pid
3856 && lp->stopped
3857 && !is_lwp_marked_dead (lp))
3858 return lp;
3859 return nullptr;
3860 }
3861
/* Implementation of target_ops::xfer_partial for the GNU/Linux
   native target.  Dispatches on OBJECT: siginfo, auxv and osdata are
   handled by dedicated helpers; memory goes through /proc/PID/mem
   when possible, falling back to ptrace otherwise.  Everything else
   is delegated to inf_ptrace_target.  */

enum target_xfer_status
linux_nat_target::xfer_partial (enum target_object object,
				const char *annex, gdb_byte *readbuf,
				const gdb_byte *writebuf,
				ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
{
  if (object == TARGET_OBJECT_SIGNAL_INFO)
    return linux_xfer_siginfo (inferior_ptid, object, annex, readbuf, writebuf,
			       offset, len, xfered_len);

  /* The target is connected but no live inferior is selected.  Pass
     this request down to a lower stratum (e.g., the executable
     file).  */
  if (object == TARGET_OBJECT_MEMORY && inferior_ptid == null_ptid)
    return TARGET_XFER_EOF;

  if (object == TARGET_OBJECT_AUXV)
    return memory_xfer_auxv (this, object, annex, readbuf, writebuf,
			     offset, len, xfered_len);

  if (object == TARGET_OBJECT_OSDATA)
    return linux_nat_xfer_osdata (object, annex, readbuf, writebuf,
				  offset, len, xfered_len);

  if (object == TARGET_OBJECT_MEMORY)
    {
      /* GDB calculates all addresses in the largest possible address
	 width.  The address width must be masked before its final use
	 by linux_proc_xfer_partial.

	 Compare ADDR_BIT first to avoid a compiler warning on shift overflow.  */
      int addr_bit = gdbarch_addr_bit (current_inferior ()->arch ());

      if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
	offset &= ((ULONGEST) 1 << addr_bit) - 1;

      /* If /proc/pid/mem is writable, don't fallback to ptrace.  If
	 the write via /proc/pid/mem fails because the inferior execed
	 (and we haven't seen the exec event yet), a subsequent ptrace
	 poke would incorrectly write memory to the post-exec address
	 space, while the core was trying to write to the pre-exec
	 address space.  */
      if (proc_mem_file_is_writable ())
	return linux_proc_xfer_memory_partial (inferior_ptid.pid (), readbuf,
					       writebuf, offset, len,
					       xfered_len);

      /* Fallback to ptrace.  This should only really trigger on old
	 systems.  See "Accessing inferior memory" at the top.

	 The target_xfer interface for memory access uses
	 inferior_ptid as sideband argument to indicate which process
	 to access.  Memory access is process-wide, it is not
	 thread-specific, so inferior_ptid sometimes points at a
	 process ptid_t.  If we fallback to inf_ptrace_target with
	 that inferior_ptid, then the ptrace code will do the ptrace
	 call targeting inferior_ptid.pid(), the leader LWP.  That
	 may fail with ESRCH if the leader is currently running, or
	 zombie.  So if we get a pid-ptid, we try to find a stopped
	 LWP to use with ptrace.

	 Note that inferior_ptid may not exist in the lwp / thread /
	 inferior lists.  This can happen when we're removing
	 breakpoints from a fork child that we're not going to stay
	 attached to.  So if we don't find a stopped LWP, still do the
	 ptrace call, targeting the inferior_ptid we had on entry.  */
      scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
      lwp_info *stopped = find_stopped_lwp (inferior_ptid.pid ());
      if (stopped != nullptr)
	inferior_ptid = stopped->ptid;
      return inf_ptrace_target::xfer_partial (object, annex, readbuf, writebuf,
					      offset, len, xfered_len);
    }

  return inf_ptrace_target::xfer_partial (object, annex, readbuf, writebuf,
					  offset, len, xfered_len);
}
3939
/* Implementation of target_ops::thread_alive.  No ptrace probe is
   done here; the LWP list is kept in sync as clone/exit events are
   processed (see update_thread_list), so list membership is the
   source of truth.  */

bool
linux_nat_target::thread_alive (ptid_t ptid)
{
  /* As long as a PTID is in lwp list, consider it alive.  */
  return find_lwp_pid (ptid) != NULL;
}
3946
3947 /* Implement the to_update_thread_list target method for this
3948 target. */
3949
3950 void
3951 linux_nat_target::update_thread_list ()
3952 {
3953 /* We add/delete threads from the list as clone/exit events are
3954 processed, so just try deleting exited threads still in the
3955 thread list. */
3956 delete_exited_threads ();
3957
3958 /* Update the processor core that each lwp/thread was last seen
3959 running on. */
3960 for (lwp_info *lwp : all_lwps ())
3961 {
3962 /* Avoid accessing /proc if the thread hasn't run since we last
3963 time we fetched the thread's core. Accessing /proc becomes
3964 noticeably expensive when we have thousands of LWPs. */
3965 if (lwp->core == -1)
3966 lwp->core = linux_common_core_of_thread (lwp->ptid);
3967 }
3968 }
3969
3970 std::string
3971 linux_nat_target::pid_to_str (ptid_t ptid)
3972 {
3973 if (ptid.lwp_p ()
3974 && (ptid.pid () != ptid.lwp ()
3975 || num_lwps (ptid.pid ()) > 1))
3976 return string_printf ("LWP %ld", ptid.lwp ());
3977
3978 return normal_pid_to_str (ptid);
3979 }
3980
/* Implementation of target_ops::thread_name.  Delegates to
   linux_proc_tid_get_name (presumably reading the name from /proc —
   see nat/linux-procfs).  May return NULL if no name is available.  */

const char *
linux_nat_target::thread_name (struct thread_info *thr)
{
  return linux_proc_tid_get_name (thr->ptid);
}
3986
/* Accepts an integer PID; Returns a string representing a file that
   can be opened to get the symbols for the child process.  Delegates
   to linux_proc_pid_to_exec_file (presumably based on
   /proc/PID/exe — see nat/linux-procfs).  */

const char *
linux_nat_target::pid_to_exec_file (int pid)
{
  return linux_proc_pid_to_exec_file (pid);
}
3995
/* Object representing an /proc/PID/mem open file.  We keep one such
   file open per inferior.

   It might be tempting to think about only ever opening one file at
   most for all inferiors, closing/reopening the file as we access
   memory of different inferiors, to minimize number of file
   descriptors open, which can otherwise run into resource limits.
   However, that does not work correctly -- if the inferior execs and
   we haven't processed the exec event yet, and, we opened a
   /proc/PID/mem file, we will get a mem file accessing the post-exec
   address space, thinking we're opening it for the pre-exec address
   space.  That is dangerous as we can poke memory (e.g. clearing
   breakpoints) in the post-exec memory by mistake, corrupting the
   inferior.  For that reason, we open the mem file as early as
   possible, right after spawning, forking or attaching to the
   inferior, when the inferior is stopped and thus before it has a
   chance of execing.

   Note that after opening the file, even if the thread we opened it
   for subsequently exits, the open file is still usable for accessing
   memory.  It's only when the whole process exits or execs that the
   file becomes invalid, at which point reads/writes return EOF.  */

class proc_mem_file
{
public:
  /* Take ownership of FD, an already-open descriptor for the mem
     file of the LWP identified by PTID.  FD must be valid.  */
  proc_mem_file (ptid_t ptid, int fd)
    : m_ptid (ptid), m_fd (fd)
  {
    gdb_assert (m_fd != -1);
  }

  /* Close the owned file descriptor.  */
  ~proc_mem_file ()
  {
    linux_nat_debug_printf ("closing fd %d for /proc/%d/task/%ld/mem",
			    m_fd, m_ptid.pid (), m_ptid.lwp ());
    close (m_fd);
  }

  /* Non-copyable: the destructor closes M_FD, so copies would
     double-close it.  */
  DISABLE_COPY_AND_ASSIGN (proc_mem_file);

  /* Return the owned file descriptor.  Ownership is retained.  */
  int fd ()
  {
    return m_fd;
  }

private:
  /* The LWP this file was opened for.  Just for debugging
     purposes.  */
  ptid_t m_ptid;

  /* The file descriptor.  */
  int m_fd = -1;
};
4050
/* The map between an inferior process id, and the open /proc/PID/mem
   file.  This is stored in a map instead of in a per-inferior
   structure because we need to be able to access memory of processes
   which don't have a corresponding struct inferior object.  E.g.,
   with "detach-on-fork on" (the default), and "follow-fork parent"
   (also default), we don't create an inferior for the fork child, but
   we still need to remove breakpoints from the fork child's
   memory.  Entries are added by open_proc_mem_file and removed by
   close_proc_mem_file.  */
static std::unordered_map<int, proc_mem_file> proc_mem_file_map;
4060
4061 /* Close the /proc/PID/mem file for PID. */
4062
4063 static void
4064 close_proc_mem_file (pid_t pid)
4065 {
4066 proc_mem_file_map.erase (pid);
4067 }
4068
/* Open the /proc/PID/mem file for the process (thread group) of PTID.
   We actually open /proc/PID/task/LWP/mem, as that's the LWP we know
   exists and is stopped right now.  We prefer the
   /proc/PID/task/LWP/mem form over /proc/LWP/mem to avoid tid-reuse
   races, just in case this is ever called on an already-waited
   LWP.  On failure, a warning is emitted and no map entry is created;
   subsequent memory accesses for this process will then return
   TARGET_XFER_EOF.  */

static void
open_proc_mem_file (ptid_t ptid)
{
  /* There must not already be an open file for this process.  */
  auto iter = proc_mem_file_map.find (ptid.pid ());
  gdb_assert (iter == proc_mem_file_map.end ());

  char filename[64];
  xsnprintf (filename, sizeof filename,
	     "/proc/%d/task/%ld/mem", ptid.pid (), ptid.lwp ());

  /* Open read-write so the same fd serves both memory reads and
     writes (e.g. inserting/removing breakpoints).  */
  int fd = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();

  if (fd == -1)
    {
      warning (_("opening /proc/PID/mem file for lwp %d.%ld failed: %s (%d)"),
	       ptid.pid (), ptid.lwp (),
	       safe_strerror (errno), errno);
      return;
    }

  /* Construct the proc_mem_file in place; it takes ownership of FD.  */
  proc_mem_file_map.emplace (std::piecewise_construct,
			     std::forward_as_tuple (ptid.pid ()),
			     std::forward_as_tuple (ptid, fd));

  linux_nat_debug_printf ("opened fd %d for lwp %d.%ld",
			  fd, ptid.pid (), ptid.lwp ());
}
4103
/* Helper for linux_proc_xfer_memory_partial and
   proc_mem_file_is_writable.  FD is the already opened /proc/pid/mem
   file, and PID is the pid of the corresponding process.  The rest of
   the arguments are like linux_proc_xfer_memory_partial's.  Exactly
   one of READBUF/WRITEBUF must be non-null; OFFSET is the inferior
   address to access.  Returns TARGET_XFER_OK with *XFERED_LEN set on
   success, TARGET_XFER_EOF if the address space is gone, or
   TARGET_XFER_E_IO on error.  */

static enum target_xfer_status
linux_proc_xfer_memory_partial_fd (int fd, int pid,
				   gdb_byte *readbuf, const gdb_byte *writebuf,
				   ULONGEST offset, LONGEST len,
				   ULONGEST *xfered_len)
{
  ssize_t ret;

  gdb_assert (fd != -1);

  /* Use pread64/pwrite64 if available, since they save a syscall and
     can handle 64-bit offsets even on 32-bit platforms (for instance,
     SPARC debugging a SPARC64 application).  But only use them if the
     offset isn't so high that when cast to off_t it'd be negative, as
     seen on SPARC64.  pread64/pwrite64 outright reject such offsets.
     lseek does not.  */
#ifdef HAVE_PREAD64
  if ((off_t) offset >= 0)
    ret = (readbuf != nullptr
	   ? pread64 (fd, readbuf, len, offset)
	   : pwrite64 (fd, writebuf, len, offset));
  else
#endif
    {
      /* Fallback: seek then read/write.  */
      ret = lseek (fd, offset, SEEK_SET);
      if (ret != -1)
	ret = (readbuf != nullptr
	       ? read (fd, readbuf, len)
	       : write (fd, writebuf, len));
    }

  if (ret == -1)
    {
      linux_nat_debug_printf ("accessing fd %d for pid %d failed: %s (%d)",
			      fd, pid, safe_strerror (errno), errno);
      return TARGET_XFER_E_IO;
    }
  else if (ret == 0)
    {
      /* EOF means the address space is gone, the whole process exited
	 or execed.  */
      linux_nat_debug_printf ("accessing fd %d for pid %d got EOF",
			      fd, pid);
      return TARGET_XFER_EOF;
    }
  else
    {
      /* Partial transfers are fine; the caller retries the rest.  */
      *xfered_len = ret;
      return TARGET_XFER_OK;
    }
}
4160
4161 /* Implement the to_xfer_partial target method using /proc/PID/mem.
4162 Because we can use a single read/write call, this can be much more
4163 efficient than banging away at PTRACE_PEEKTEXT. Also, unlike
4164 PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running
4165 threads. */
4166
4167 static enum target_xfer_status
4168 linux_proc_xfer_memory_partial (int pid, gdb_byte *readbuf,
4169 const gdb_byte *writebuf, ULONGEST offset,
4170 LONGEST len, ULONGEST *xfered_len)
4171 {
4172 auto iter = proc_mem_file_map.find (pid);
4173 if (iter == proc_mem_file_map.end ())
4174 return TARGET_XFER_EOF;
4175
4176 int fd = iter->second.fd ();
4177
4178 return linux_proc_xfer_memory_partial_fd (fd, pid, readbuf, writebuf, offset,
4179 len, xfered_len);
4180 }
4181
/* Check whether /proc/pid/mem is writable in the current kernel, and
   return true if so.  It wasn't writable before Linux 2.6.39, but
   there's no way to know whether the feature was backported to older
   kernels.  So we check to see if it works.  The result is cached,
   and this is guaranteed to be called once early during inferior
   startup, so that any warning is printed out consistently between
   GDB invocations.  Note we don't call it during GDB startup instead
   though, because then we might warn with e.g. just "gdb --version"
   on sandboxed systems.  See PR gdb/29907.  */

static bool
proc_mem_file_is_writable ()
{
  /* Cached probe result; empty until the first call.  */
  static std::optional<bool> writable;

  if (writable.has_value ())
    return *writable;

  /* Default to not-writable; flipped to true only if the probe below
     succeeds.  */
  writable.emplace (false);

  /* We check whether /proc/pid/mem is writable by trying to write to
     one of our variables via /proc/self/mem.  */

  int fd = gdb_open_cloexec ("/proc/self/mem", O_RDWR | O_LARGEFILE, 0).release ();

  if (fd == -1)
    {
      warning (_("opening /proc/self/mem file failed: %s (%d)"),
	       safe_strerror (errno), errno);
      return *writable;
    }

  SCOPE_EXIT { close (fd); };

  /* This is the variable we try to write to.  Note OFFSET below.
     Volatile so the compiler can't assume it still holds 0 after the
     write through /proc/self/mem.  */
  volatile gdb_byte test_var = 0;

  gdb_byte writebuf[] = {0x55};
  ULONGEST offset = (uintptr_t) &test_var;
  ULONGEST xfered_len;

  enum target_xfer_status res
    = linux_proc_xfer_memory_partial_fd (fd, getpid (), nullptr, writebuf,
					 offset, 1, &xfered_len);

  if (res == TARGET_XFER_OK)
    {
      gdb_assert (xfered_len == 1);
      gdb_assert (test_var == 0x55);
      /* Success.  */
      *writable = true;
    }

  return *writable;
}
4237
4238 /* Parse LINE as a signal set and add its set bits to SIGS. */
4239
4240 static void
4241 add_line_to_sigset (const char *line, sigset_t *sigs)
4242 {
4243 int len = strlen (line) - 1;
4244 const char *p;
4245 int signum;
4246
4247 if (line[len] != '\n')
4248 error (_("Could not parse signal set: %s"), line);
4249
4250 p = line;
4251 signum = len * 4;
4252 while (len-- > 0)
4253 {
4254 int digit;
4255
4256 if (*p >= '0' && *p <= '9')
4257 digit = *p - '0';
4258 else if (*p >= 'a' && *p <= 'f')
4259 digit = *p - 'a' + 10;
4260 else
4261 error (_("Could not parse signal set: %s"), line);
4262
4263 signum -= 4;
4264
4265 if (digit & 1)
4266 sigaddset (sigs, signum + 1);
4267 if (digit & 2)
4268 sigaddset (sigs, signum + 2);
4269 if (digit & 4)
4270 sigaddset (sigs, signum + 3);
4271 if (digit & 8)
4272 sigaddset (sigs, signum + 4);
4273
4274 p++;
4275 }
4276 }
4277
/* Find process PID's pending signals from /proc/pid/status and set
   SIGS to match.  PENDING is set to the union of the per-thread and
   shared pending queues; BLOCKED and IGNORED come from the SigBlk and
   SigIgn lines.  All three sets are cleared first.  Throws an error
   if the status file cannot be opened or a signal line cannot be
   parsed.  */

void
linux_proc_pending_signals (int pid, sigset_t *pending,
			    sigset_t *blocked, sigset_t *ignored)
{
  char buffer[PATH_MAX], fname[PATH_MAX];

  sigemptyset (pending);
  sigemptyset (blocked);
  sigemptyset (ignored);
  xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
  gdb_file_up procfile = gdb_fopen_cloexec (fname, "r");
  if (procfile == NULL)
    error (_("Could not open %s"), fname);

  while (fgets (buffer, PATH_MAX, procfile.get ()) != NULL)
    {
      /* Normal queued signals are on the SigPnd line in the status
	 file.  However, 2.6 kernels also have a "shared" pending
	 queue for delivering signals to a thread group, so check for
	 a ShdPnd line also.

	 Unfortunately some Red Hat kernels include the shared pending
	 queue but not the ShdPnd status field.  */

      if (startswith (buffer, "SigPnd:\t"))
	add_line_to_sigset (buffer + 8, pending);
      else if (startswith (buffer, "ShdPnd:\t"))
	add_line_to_sigset (buffer + 8, pending);
      else if (startswith (buffer, "SigBlk:\t"))
	add_line_to_sigset (buffer + 8, blocked);
      else if (startswith (buffer, "SigIgn:\t"))
	add_line_to_sigset (buffer + 8, ignored);
    }
}
4315
4316 static enum target_xfer_status
4317 linux_nat_xfer_osdata (enum target_object object,
4318 const char *annex, gdb_byte *readbuf,
4319 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4320 ULONGEST *xfered_len)
4321 {
4322 gdb_assert (object == TARGET_OBJECT_OSDATA);
4323
4324 *xfered_len = linux_common_xfer_osdata (annex, readbuf, offset, len);
4325 if (*xfered_len == 0)
4326 return TARGET_XFER_EOF;
4327 else
4328 return TARGET_XFER_OK;
4329 }
4330
/* Collect the static tracepoint markers whose string id matches
   STRID, by querying the in-process agent with the qTfSTM/qTsSTM
   packets.  If STRID is NULL, all markers are returned.  The whole
   process is paused around the query.  */

std::vector<static_tracepoint_marker>
linux_nat_target::static_tracepoint_markers_by_strid (const char *strid)
{
  char s[IPA_CMD_BUF_SIZE];
  int pid = inferior_ptid.pid ();
  std::vector<static_tracepoint_marker> markers;
  const char *p = s;
  ptid_t ptid = ptid_t (pid, 0);
  static_tracepoint_marker marker;

  /* Pause all */
  target_stop (ptid);

  /* Ask for the first chunk of markers; the agent writes its reply
     back into S.  */
  strcpy (s, "qTfSTM");
  agent_run_command (pid, s, strlen (s) + 1);

  /* Unpause all.  */
  SCOPE_EXIT { target_continue_no_signal (ptid); };

  /* Each reply starting with 'm' carries marker definitions; anything
     else (e.g. 'l') means the list is exhausted.  */
  while (*p++ == 'm')
    {
      do
	{
	  parse_static_tracepoint_marker_definition (p, &p, &marker);

	  if (strid == NULL || marker.str_id == strid)
	    markers.push_back (std::move (marker));
	}
      while (*p++ == ',');	/* comma-separated list */

      /* Ask for the next chunk.  */
      strcpy (s, "qTsSTM");
      agent_run_command (pid, s, strlen (s) + 1);
      p = s;
    }

  return markers;
}
4368
/* target_can_async_p implementation.  */

bool
linux_nat_target::can_async_p ()
{
  /* This flag should be checked in the common target.c code.  */
  gdb_assert (target_async_permitted);

  /* Otherwise, this target is always able to support async mode.  */
  return true;
}
4380
/* to_supports_non_stop implementation.  The Linux native target
   always supports non-stop mode.  */

bool
linux_nat_target::supports_non_stop ()
{
  return true;
}
4386
/* to_always_non_stop_p implementation.  This target always operates
   in non-stop mode internally, regardless of the user-visible
   setting.  */

bool
linux_nat_target::always_non_stop_p ()
{
  return true;
}
4394
/* to_supports_multi_process implementation.  Debugging multiple
   processes simultaneously is supported.  */

bool
linux_nat_target::supports_multi_process ()
{
  return true;
}
4400
/* to_supports_disable_randomization implementation.  Address space
   randomization can be disabled for the inferior.  */

bool
linux_nat_target::supports_disable_randomization ()
{
  return true;
}
4406
/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
   so we notice when any child changes state, and notify the
   event-loop; it allows us to use sigsuspend in linux_nat_wait_1
   above to wait for the arrival of a SIGCHLD.

   Note this runs in signal-handler context, so it only calls
   async-signal-safe operations (write_async_safe, a pipe write) and
   preserves errno across the handler.  */

static void
sigchld_handler (int signo)
{
  /* Preserve errno: the interrupted code may be inspecting it.  */
  int old_errno = errno;

  if (debug_linux_nat)
    gdb_stdlog->write_async_safe ("sigchld\n", sizeof ("sigchld\n") - 1);

  if (signo == SIGCHLD)
    {
      /* Let the event loop know that there are events to handle.  */
      linux_nat_target::async_file_mark_if_open ();
    }

  errno = old_errno;
}
4428
/* Callback registered with the target events file descriptor.
   Invoked by the event loop when the async event pipe becomes
   readable; dispatches to the common inferior event handler.  */

static void
handle_target_event (int error, gdb_client_data client_data)
{
  inferior_event_handler (INF_REG_EVENT);
}
4436
/* target_async implementation.  Enable or disable async mode,
   creating or tearing down the event pipe that sigchld_handler uses
   to wake the event loop.  */

void
linux_nat_target::async (bool enable)
{
  /* Nothing to do if we're already in the requested mode.  */
  if (enable == is_async_p ())
    return;

  /* Block child signals while we create/destroy the pipe, as their
     handler writes to it.  */
  gdb::block_signals blocker;

  if (enable)
    {
      if (!async_file_open ())
	internal_error ("creating event pipe failed.");

      add_file_handler (async_wait_fd (), handle_target_event, NULL,
			"linux-nat");

      /* There may be pending events to handle.  Tell the event loop
	 to poll them.  */
      async_file_mark ();
    }
  else
    {
      /* Unregister from the event loop before closing the pipe.  */
      delete_file_handler (async_wait_fd ());
      async_file_close ();
    }
}
4467
/* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
   event came out.  Iteration callback for iterate_over_lwps; always
   returns 0 so that iteration continues over all LWPs.  */

static int
linux_nat_stop_lwp (struct lwp_info *lwp)
{
  if (!lwp->stopped)
    {
      linux_nat_debug_printf ("running -> suspending %s",
			      lwp->ptid.to_string ().c_str ());


      /* If a stop was already requested for this LWP, don't send
	 another SIGSTOP.  */
      if (lwp->last_resume_kind == resume_stop)
	{
	  linux_nat_debug_printf ("already stopping LWP %ld at GDB's request",
				  lwp->ptid.lwp ());
	  return 0;
	}

      stop_callback (lwp);
      lwp->last_resume_kind = resume_stop;
    }
  else
    {
      /* Already known to be stopped; do nothing.  */

      if (debug_linux_nat)
	{
	  if (linux_target->find_thread (lwp->ptid)->stop_requested)
	    linux_nat_debug_printf ("already stopped/stop_requested %s",
				    lwp->ptid.to_string ().c_str ());
	  else
	    linux_nat_debug_printf ("already stopped/no stop_requested yet %s",
				    lwp->ptid.to_string ().c_str ());
	}
    }
  return 0;
}
4506
/* target_stop implementation.  Request a stop of every LWP matching
   PTID.  */

void
linux_nat_target::stop (ptid_t ptid)
{
  LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
  iterate_over_lwps (ptid, linux_nat_stop_lwp);
}
4513
4514 /* Return the cached value of the processor core for thread PTID. */
4515
4516 int
4517 linux_nat_target::core_of_thread (ptid_t ptid)
4518 {
4519 struct lwp_info *info = find_lwp_pid (ptid);
4520
4521 if (info)
4522 return info->core;
4523 return -1;
4524 }
4525
4526 /* Implementation of to_filesystem_is_local. */
4527
4528 bool
4529 linux_nat_target::filesystem_is_local ()
4530 {
4531 struct inferior *inf = current_inferior ();
4532
4533 if (inf->fake_pid_p || inf->pid == 0)
4534 return true;
4535
4536 return linux_ns_same (inf->pid, LINUX_NS_MNT);
4537 }
4538
4539 /* Convert the INF argument passed to a to_fileio_* method
4540 to a process ID suitable for passing to its corresponding
4541 linux_mntns_* function. If INF is non-NULL then the
4542 caller is requesting the filesystem seen by INF. If INF
4543 is NULL then the caller is requesting the filesystem seen
4544 by the GDB. We fall back to GDB's filesystem in the case
4545 that INF is non-NULL but its PID is unknown. */
4546
4547 static pid_t
4548 linux_nat_fileio_pid_of (struct inferior *inf)
4549 {
4550 if (inf == NULL || inf->fake_pid_p || inf->pid == 0)
4551 return getpid ();
4552 else
4553 return inf->pid;
4554 }
4555
4556 /* Implementation of to_fileio_open. */
4557
4558 int
4559 linux_nat_target::fileio_open (struct inferior *inf, const char *filename,
4560 int flags, int mode, int warn_if_slow,
4561 fileio_error *target_errno)
4562 {
4563 int nat_flags;
4564 mode_t nat_mode;
4565 int fd;
4566
4567 if (fileio_to_host_openflags (flags, &nat_flags) == -1
4568 || fileio_to_host_mode (mode, &nat_mode) == -1)
4569 {
4570 *target_errno = FILEIO_EINVAL;
4571 return -1;
4572 }
4573
4574 fd = linux_mntns_open_cloexec (linux_nat_fileio_pid_of (inf),
4575 filename, nat_flags, nat_mode);
4576 if (fd == -1)
4577 *target_errno = host_to_fileio_error (errno);
4578
4579 return fd;
4580 }
4581
4582 /* Implementation of to_fileio_readlink. */
4583
4584 std::optional<std::string>
4585 linux_nat_target::fileio_readlink (struct inferior *inf, const char *filename,
4586 fileio_error *target_errno)
4587 {
4588 char buf[PATH_MAX];
4589 int len;
4590
4591 len = linux_mntns_readlink (linux_nat_fileio_pid_of (inf),
4592 filename, buf, sizeof (buf));
4593 if (len < 0)
4594 {
4595 *target_errno = host_to_fileio_error (errno);
4596 return {};
4597 }
4598
4599 return std::string (buf, len);
4600 }
4601
4602 /* Implementation of to_fileio_unlink. */
4603
4604 int
4605 linux_nat_target::fileio_unlink (struct inferior *inf, const char *filename,
4606 fileio_error *target_errno)
4607 {
4608 int ret;
4609
4610 ret = linux_mntns_unlink (linux_nat_fileio_pid_of (inf),
4611 filename);
4612 if (ret == -1)
4613 *target_errno = host_to_fileio_error (errno);
4614
4615 return ret;
4616 }
4617
/* Implementation of the to_thread_events method.  Record whether
   thread creation/exit events should be reported.  */

void
linux_nat_target::thread_events (int enable)
{
  report_thread_events = enable;
}
4625
4626 bool
4627 linux_nat_target::supports_set_thread_options (gdb_thread_options options)
4628 {
4629 constexpr gdb_thread_options supported_options
4630 = GDB_THREAD_OPTION_CLONE | GDB_THREAD_OPTION_EXIT;
4631 return ((options & supported_options) == options);
4632 }
4633
/* Constructor.  Intentionally empty apart from the note below.  */

linux_nat_target::linux_nat_target ()
{
  /* We don't change the stratum; this target will sit at
     process_stratum and thread_db will set at thread_stratum.  This
     is a little strange, since this is a multi-threaded-capable
     target, but we want to be on the stack below thread_db, and we
     also want to be used for single-threaded processes.  */
}
4642
4643 /* See linux-nat.h. */
4644
4645 bool
4646 linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
4647 {
4648 int pid = get_ptrace_pid (ptid);
4649 return ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo) == 0;
4650 }
4651
/* See nat/linux-nat.h.  Return the ptid of the current lwp; asserts
   that inferior_ptid actually identifies an LWP.  */

ptid_t
current_lwp_ptid (void)
{
  gdb_assert (inferior_ptid.lwp_p ());
  return inferior_ptid;
}
4660
/* Implement 'maintenance info linux-lwps'.  Displays some basic
   information about all the current lwp_info objects.  ARG and
   FROM_TTY are the standard command arguments and are unused.  */

static void
maintenance_info_lwps (const char *arg, int from_tty)
{
  if (all_lwps ().size () == 0)
    {
      gdb_printf ("No Linux LWPs\n");
      return;
    }

  /* Start the width at 8 to match the column heading below, then
     figure out the widest ptid string.  We'll use this to build our
     output table below.  */
  size_t ptid_width = 8;
  for (lwp_info *lp : all_lwps ())
    ptid_width = std::max (ptid_width, lp->ptid.to_string ().size ());

  /* Setup the table headers.  */
  struct ui_out *uiout = current_uiout;
  ui_out_emit_table table_emitter (uiout, 2, -1, "linux-lwps");
  uiout->table_header (ptid_width, ui_left, "lwp-ptid", _("LWP Ptid"));
  uiout->table_header (9, ui_left, "thread-info", _("Thread ID"));
  uiout->table_body ();

  /* Display one table row for each lwp_info.  */
  for (lwp_info *lp : all_lwps ())
    {
      ui_out_emit_tuple tuple_emitter (uiout, "lwp-entry");

      /* An LWP may or may not have a corresponding thread_info.  */
      thread_info *th = linux_target->find_thread (lp->ptid);

      uiout->field_string ("lwp-ptid", lp->ptid.to_string ().c_str ());
      if (th == nullptr)
	uiout->field_string ("thread-info", "None");
      else
	uiout->field_string ("thread-info", print_full_thread_id (th));

      uiout->message ("\n");
    }
}
4703
/* Module initialization: register debug/maintenance commands and
   install the SIGCHLD handler and signal masks used by the Linux
   native target.  */

void _initialize_linux_nat ();
void
_initialize_linux_nat ()
{
  add_setshow_boolean_cmd ("linux-nat", class_maintenance,
			   &debug_linux_nat, _("\
Set debugging of GNU/Linux native target."), _("	\
Show debugging of GNU/Linux native target."), _("	\
When on, print debug messages relating to the GNU/Linux native target."),
			   nullptr,
			   show_debug_linux_nat,
			   &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("linux-namespaces", class_maintenance,
			   &debug_linux_namespaces, _("\
Set debugging of GNU/Linux namespaces module."), _("\
Show debugging of GNU/Linux namespaces module."), _("\
Enables printf debugging output."),
			   NULL,
			   NULL,
			   &setdebuglist, &showdebuglist);

  /* Install a SIGCHLD handler.  SA_RESTART avoids spurious EINTR in
     unrelated syscalls when children change state.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;

  /* Make it the default.  */
  sigaction (SIGCHLD, &sigchld_action, NULL);

  /* Make sure we don't block SIGCHLD during a sigsuspend.  */
  gdb_sigmask (SIG_SETMASK, NULL, &suspend_mask);
  sigdelset (&suspend_mask, SIGCHLD);

  sigemptyset (&blocked_mask);

  lwp_lwpid_htab_create ();

  add_cmd ("linux-lwps", class_maintenance, maintenance_info_lwps,
	   _("List the Linux LWPS."), &maintenanceinfolist);
}
4745 \f
4746
/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
   the GNU/Linux Threads library and therefore doesn't really belong
   here.  */

/* NPTL reserves the first two RT signals, but does not provide any
   way for the debugger to query the signal numbers - fortunately
   they don't change.  */
static int lin_thread_signals[] = { __SIGRTMIN, __SIGRTMIN + 1 };
4755
/* See linux-nat.h.  Return the number of signals used by the threads
   library (the element count of lin_thread_signals).  */

unsigned int
lin_thread_get_thread_signal_num (void)
{
  return sizeof (lin_thread_signals) / sizeof (lin_thread_signals[0]);
}
4763
/* See linux-nat.h.  Return the I'th signal reserved by the threads
   library.  I must be less than lin_thread_get_thread_signal_num.  */

int
lin_thread_get_thread_signal (unsigned int i)
{
  gdb_assert (i < lin_thread_get_thread_signal_num ());
  return lin_thread_signals[i];
}