1 /* GNU/Linux native-dependent code common to multiple platforms.
3 Copyright (C) 2001-2023 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "nat/linux-nat.h"
25 #include "nat/linux-waitpid.h"
26 #include "gdbsupport/gdb_wait.h"
28 #include <sys/syscall.h>
29 #include "nat/gdb_ptrace.h"
30 #include "linux-nat.h"
31 #include "nat/linux-ptrace.h"
32 #include "nat/linux-procfs.h"
33 #include "nat/linux-personality.h"
34 #include "linux-fork.h"
35 #include "gdbthread.h"
39 #include "inf-child.h"
40 #include "inf-ptrace.h"
42 #include <sys/procfs.h>
50 #include "gdbsupport/event-loop.h"
51 #include "event-top.h"
53 #include <sys/types.h>
55 #include "xml-support.h"
58 #include "nat/linux-osdata.h"
59 #include "linux-tdep.h"
61 #include "gdbsupport/agent.h"
62 #include "tracepoint.h"
63 #include "target-descriptions.h"
64 #include "gdbsupport/filestuff.h"
66 #include "nat/linux-namespaces.h"
67 #include "gdbsupport/block-signals.h"
68 #include "gdbsupport/fileio.h"
69 #include "gdbsupport/scope-exit.h"
70 #include "gdbsupport/gdb-sigmask.h"
71 #include "gdbsupport/common-debug.h"
72 #include <unordered_map>
74 /* This comment documents high-level logic of this file.
76 Waiting for events in sync mode
77 ===============================
79 When waiting for an event in a specific thread, we just use waitpid,
80 passing the specific pid, and not passing WNOHANG.
82 When waiting for an event in all threads, waitpid is not quite good:
84 - If the thread group leader exits while other threads in the thread
85 group still exist, waitpid(TGID, ...) hangs. That waitpid won't
86 return an exit status until the other threads in the group are
89 - When a non-leader thread execs, that thread just vanishes without
90 reporting an exit (so we'd hang if we waited for it explicitly in
91 that case). The exec event is instead reported to the TGID pid.
93 The solution is to always use -1 and WNOHANG, together with
96 First, we use non-blocking waitpid to check for events. If nothing is
97 found, we use sigsuspend to wait for SIGCHLD. When SIGCHLD arrives,
98 it means something happened to a child process. As soon as we know
99 there's an event, we get back to calling nonblocking waitpid.
101 Note that SIGCHLD should be blocked between waitpid and sigsuspend
102 calls, so that we don't miss a signal. If SIGCHLD arrives in between,
103 when it's blocked, the signal becomes pending and sigsuspend
104 immediately notices it and returns.
106 Waiting for events in async mode (TARGET_WNOHANG)
107 =================================================
109 In async mode, GDB should always be ready to handle both user input
110 and target events, so neither blocking waitpid nor sigsuspend are
111 viable options. Instead, we should asynchronously notify the GDB main
112 event loop whenever there's an unprocessed event from the target. We
113 detect asynchronous target events by handling SIGCHLD signals. To
114 notify the event loop about target events, an event pipe is used
115 --- the pipe is registered as waitable event source in the event loop,
116 the event loop select/poll's on the read end of this pipe (as well on
117 other event sources, e.g., stdin), and the SIGCHLD handler marks the
118 event pipe to raise an event. This is more portable than relying on
119 pselect/ppoll, since on kernels that lack those syscalls, libc
120 emulates them with select/poll+sigprocmask, and that is racy
121 (a.k.a. plain broken).
123 Obviously, if we fail to notify the event loop if there's a target
124 event, it's bad. OTOH, if we notify the event loop when there's no
125 event from the target, linux_nat_wait will detect that there's no real
126 event to report, and return event of type TARGET_WAITKIND_IGNORE.
127 This is mostly harmless, but it will waste time and is better avoided.
129 The main design point is that every time GDB is outside linux-nat.c,
130 we have a SIGCHLD handler installed that is called when something
131 happens to the target and notifies the GDB event loop. Whenever GDB
132 core decides to handle the event, and calls into linux-nat.c, we
133 process things as in sync mode, except that we never block in
136 While processing an event, we may end up momentarily blocked in
137 waitpid calls. Those waitpid calls, while blocking, are guaranteed to
138 return quickly. E.g., in all-stop mode, before reporting to the core
139 that an LWP hit a breakpoint, all LWPs are stopped by sending them
140 SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
141 Note that this is different from blocking indefinitely waiting for the
142 next event --- here, we're already handling an event.
147 We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
148 signal is not entirely significant; we just need for a signal to be delivered,
149 so that we can intercept it. SIGSTOP's advantage is that it can not be
150 blocked. A disadvantage is that it is not a real-time signal, so it can only
151 be queued once; we do not keep track of other sources of SIGSTOP.
153 Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
154 use them, because they have special behavior when the signal is generated -
155 not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
156 kills the entire thread group.
158 A delivered SIGSTOP would stop the entire thread group, not just the thread we
159 tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
160 cancel it (by PTRACE_CONT without passing SIGSTOP).
162 We could use a real-time signal instead. This would solve those problems; we
163 could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
164 But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
165 generates it, and there are races with trying to find a signal that is not
171 The case of a thread group (process) with 3 or more threads, and a
172 thread other than the leader execs is worth detailing:
174 On an exec, the Linux kernel destroys all threads except the execing
175 one in the thread group, and resets the execing thread's tid to the
176 tgid. No exit notification is sent for the execing thread -- from the
177 ptracer's perspective, it appears as though the execing thread just
178 vanishes. Until we reap all other threads except the leader and the
179 execing thread, the leader will be zombie, and the execing thread will
180 be in `D (disc sleep)' state. As soon as all other threads are
181 reaped, the execing thread changes its tid to the tgid, and the
182 previous (zombie) leader vanishes, giving place to the "new"
186 #define O_LARGEFILE 0
189 struct linux_nat_target
*linux_target
;
191 /* Does the current host support PTRACE_GETREGSET? */
192 enum tribool have_ptrace_getregset
= TRIBOOL_UNKNOWN
;
194 /* When true, print debug messages relating to the linux native target. */
196 static bool debug_linux_nat
;
198 /* Implement 'show debug linux-nat'. */
/* FILE is the stream to print to.  VALUE is the current setting already
   rendered as a string by the "show" command machinery -- presumably it
   supplies the %s below (the trailing gdb_printf argument is not visible
   in this extract; confirm against the full source).  FROM_TTY and C are
   part of the standard show-hook signature and are unused here.  */
201 show_debug_linux_nat (struct ui_file
*file
, int from_tty
,
202 struct cmd_list_element
*c
, const char *value
)
204 gdb_printf (file
, _("Debugging of GNU/Linux native targets is %s.\n"),
208 /* Print a linux-nat debug statement. */
210 #define linux_nat_debug_printf(fmt, ...) \
211 debug_prefixed_printf_cond (debug_linux_nat, "linux-nat", fmt, ##__VA_ARGS__)
213 /* Print "linux-nat" enter/exit debug statements. */
215 #define LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT \
216 scoped_debug_enter_exit (debug_linux_nat, "linux-nat")
218 struct simple_pid_list
222 struct simple_pid_list
*next
;
224 static struct simple_pid_list
*stopped_pids
;
226 /* Whether target_thread_events is in effect. */
227 static int report_thread_events
;
229 static int kill_lwp (int lwpid
, int signo
);
231 static int stop_callback (struct lwp_info
*lp
);
233 static void block_child_signals (sigset_t
*prev_mask
);
234 static void restore_child_signals_mask (sigset_t
*prev_mask
);
237 static struct lwp_info
*add_lwp (ptid_t ptid
);
238 static void purge_lwp_list (int pid
);
239 static void delete_lwp (ptid_t ptid
);
240 static struct lwp_info
*find_lwp_pid (ptid_t ptid
);
242 static int lwp_status_pending_p (struct lwp_info
*lp
);
244 static void save_stop_reason (struct lwp_info
*lp
);
246 static bool proc_mem_file_is_writable ();
247 static void close_proc_mem_file (pid_t pid
);
248 static void open_proc_mem_file (ptid_t ptid
);
250 /* Return TRUE if LWP is the leader thread of the process. */
253 is_leader (lwp_info
*lp
)
/* On Linux the thread-group leader's LWP id (tid) equals the process
   id (tgid), so the leader is exactly the thread whose two ids in the
   ptid coincide.  */
255 return lp
->ptid
.pid () == lp
->ptid
.lwp ();
258 /* Convert an LWP's pending status to a std::string. */
261 pending_status_str (lwp_info
*lp
)
/* Precondition: there must actually be a pending status to render.  */
263 gdb_assert (lwp_status_pending_p (lp
));
/* Prefer the extended waitstatus if one was recorded; otherwise fall
   back to formatting the raw waitpid status word in lp->status.  */
265 if (lp
->waitstatus
.kind () != TARGET_WAITKIND_IGNORE
)
266 return lp
->waitstatus
.to_string ();
268 return status_to_str (lp
->status
);
271 /* Return true if we should report exit events for LP. */
274 report_exit_events_for (lwp_info
*lp
)
/* Every LWP we track must have a corresponding GDB thread object.  */
276 thread_info
*thr
= linux_target
->find_thread (lp
->ptid
);
277 gdb_assert (thr
!= nullptr);
/* Report exits either when the target-wide target_thread_events mode
   is in effect, or when this particular thread requested them via the
   GDB_THREAD_OPTION_EXIT thread option.  */
279 return (report_thread_events
280 || (thr
->thread_options () & GDB_THREAD_OPTION_EXIT
) != 0);
286 /* See nat/linux-nat.h. */
289 ptid_of_lwp (struct lwp_info
*lwp
)
294 /* See nat/linux-nat.h. */
297 lwp_set_arch_private_info (struct lwp_info
*lwp
,
298 struct arch_lwp_info
*info
)
300 lwp
->arch_private
= info
;
303 /* See nat/linux-nat.h. */
305 struct arch_lwp_info
*
306 lwp_arch_private_info (struct lwp_info
*lwp
)
308 return lwp
->arch_private
;
311 /* See nat/linux-nat.h. */
314 lwp_is_stopped (struct lwp_info
*lwp
)
319 /* See nat/linux-nat.h. */
321 enum target_stop_reason
322 lwp_stop_reason (struct lwp_info
*lwp
)
324 return lwp
->stop_reason
;
327 /* See nat/linux-nat.h. */
330 lwp_is_stepping (struct lwp_info
*lwp
)
336 /* Trivial list manipulation functions to keep track of a list of
337 new stopped processes. */
/* Push a (PID, STATUS) entry onto the front of *LISTP.  NOTE(review):
   the assignment of PID into the new node and the final store of the
   node into *LISTP are not visible in this extract; presumably they
   accompany the 'next' link-up below -- confirm against the full
   source.  */
339 add_to_pid_list (struct simple_pid_list
**listp
, int pid
, int status
)
341 struct simple_pid_list
*new_pid
= XNEW (struct simple_pid_list
);
344 new_pid
->status
= status
;
345 new_pid
->next
= *listp
;
/* Search *LISTP for an entry matching PID; on a match, store its
   recorded wait status in *STATUSP and splice the entry out of the
   list.  NOTE(review): the unlink/free of the matched node and the
   found/not-found return values are not visible in this extract --
   confirm against the full source.  */
350 pull_pid_from_list (struct simple_pid_list
**listp
, int pid
, int *statusp
)
352 struct simple_pid_list
**p
;
/* Walk with a pointer-to-pointer so a matching node can be removed
   without special-casing the list head.  */
354 for (p
= listp
; *p
!= NULL
; p
= &(*p
)->next
)
355 if ((*p
)->pid
== pid
)
357 struct simple_pid_list
*next
= (*p
)->next
;
359 *statusp
= (*p
)->status
;
367 /* Return the ptrace options that we want to try to enable. */
370 linux_nat_ptrace_options (int attached
)
/* NOTE(review): the declaration/initialization of 'options' and the
   condition guarding PTRACE_O_EXITKILL are not visible in this
   extract; EXITKILL (kill the tracee if we exit) is typically only
   wanted for inferiors GDB spawned itself, i.e. when !ATTACHED --
   confirm against the full source.  */
375 options
|= PTRACE_O_EXITKILL
;
/* Request syscall-stop marking, vfork and vfork-done events, and exec
   events.  (Any fork/clone option lines are not visible here.)  */
377 options
|= (PTRACE_O_TRACESYSGOOD
378 | PTRACE_O_TRACEVFORKDONE
379 | PTRACE_O_TRACEVFORK
381 | PTRACE_O_TRACEEXEC
);
386 /* Initialize ptrace and procfs warnings and check for supported
387 ptrace features given PID.
389 ATTACHED should be nonzero iff we attached to the inferior. */
392 linux_init_ptrace_procfs (pid_t pid
, int attached
)
/* Pick the ptrace option flags appropriate for how we acquired the
   inferior, then enable event reporting on PID with them.  */
394 int options
= linux_nat_ptrace_options (attached
);
396 linux_enable_event_reporting (pid
, options
);
/* Emit any one-time ptrace/procfs support warnings, then probe
   whether /proc/PID/mem is writable.  The probe's return value is
   deliberately ignored here -- presumably the probe caches its
   result for later use; confirm against proc_mem_file_is_writable.  */
397 linux_ptrace_init_warnings ();
398 linux_proc_init_warnings ();
399 proc_mem_file_is_writable ();
402 linux_nat_target::~linux_nat_target ()
406 linux_nat_target::post_attach (int pid
)
408 linux_init_ptrace_procfs (pid
, 1);
411 /* Implement the virtual inf_ptrace_target::post_startup_inferior method. */
414 linux_nat_target::post_startup_inferior (ptid_t ptid
)
416 linux_init_ptrace_procfs (ptid
.pid (), 0);
419 /* Return the number of known LWPs in the tgid given by PID. */
426 for (const lwp_info
*lp ATTRIBUTE_UNUSED
: all_lwps ())
427 if (lp
->ptid
.pid () == pid
)
433 /* Deleter for lwp_info unique_ptr specialisation. */
437 void operator() (struct lwp_info
*lwp
) const
439 delete_lwp (lwp
->ptid
);
443 /* A unique_ptr specialisation for lwp_info. */
445 typedef std::unique_ptr
<struct lwp_info
, lwp_deleter
> lwp_info_up
;
447 /* Target hook for follow_fork. */
450 linux_nat_target::follow_fork (inferior
*child_inf
, ptid_t child_ptid
,
451 target_waitkind fork_kind
, bool follow_child
,
454 inf_ptrace_target::follow_fork (child_inf
, child_ptid
, fork_kind
,
455 follow_child
, detach_fork
);
459 bool has_vforked
= fork_kind
== TARGET_WAITKIND_VFORKED
;
460 ptid_t parent_ptid
= inferior_ptid
;
461 int parent_pid
= parent_ptid
.lwp ();
462 int child_pid
= child_ptid
.lwp ();
464 /* We're already attached to the parent, by default. */
465 lwp_info
*child_lp
= add_lwp (child_ptid
);
466 child_lp
->stopped
= 1;
467 child_lp
->last_resume_kind
= resume_stop
;
469 /* Detach new forked process? */
472 int child_stop_signal
= 0;
473 bool detach_child
= true;
475 /* Move CHILD_LP into a unique_ptr and clear the source pointer
476 to prevent us doing anything stupid with it. */
477 lwp_info_up
child_lp_ptr (child_lp
);
480 linux_target
->low_prepare_to_resume (child_lp_ptr
.get ());
482 /* When debugging an inferior in an architecture that supports
483 hardware single stepping on a kernel without commit
484 6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
485 process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
486 set if the parent process had them set.
487 To work around this, single step the child process
488 once before detaching to clear the flags. */
490 /* Note that we consult the parent's architecture instead of
491 the child's because there's no inferior for the child at
493 if (!gdbarch_software_single_step_p (target_thread_architecture
498 linux_disable_event_reporting (child_pid
);
499 if (ptrace (PTRACE_SINGLESTEP
, child_pid
, 0, 0) < 0)
500 perror_with_name (_("Couldn't do single step"));
501 if (my_waitpid (child_pid
, &status
, 0) < 0)
502 perror_with_name (_("Couldn't wait vfork process"));
505 detach_child
= WIFSTOPPED (status
);
506 child_stop_signal
= WSTOPSIG (status
);
512 int signo
= child_stop_signal
;
515 && !signal_pass_state (gdb_signal_from_host (signo
)))
517 ptrace (PTRACE_DETACH
, child_pid
, 0, signo
);
519 close_proc_mem_file (child_pid
);
525 lwp_info
*parent_lp
= find_lwp_pid (parent_ptid
);
526 linux_nat_debug_printf ("waiting for VFORK_DONE on %d", parent_pid
);
527 parent_lp
->stopped
= 1;
529 /* We'll handle the VFORK_DONE event like any other
530 event, in target_wait. */
535 struct lwp_info
*child_lp
;
537 child_lp
= add_lwp (child_ptid
);
538 child_lp
->stopped
= 1;
539 child_lp
->last_resume_kind
= resume_stop
;
545 linux_nat_target::insert_fork_catchpoint (int pid
)
551 linux_nat_target::remove_fork_catchpoint (int pid
)
557 linux_nat_target::insert_vfork_catchpoint (int pid
)
563 linux_nat_target::remove_vfork_catchpoint (int pid
)
569 linux_nat_target::insert_exec_catchpoint (int pid
)
575 linux_nat_target::remove_exec_catchpoint (int pid
)
581 linux_nat_target::set_syscall_catchpoint (int pid
, bool needed
, int any_count
,
582 gdb::array_view
<const int> syscall_counts
)
584 /* On GNU/Linux, we ignore the arguments. It means that we only
585 enable the syscall catchpoints, but do not disable them.
587 Also, we do not use the `syscall_counts' information because we do not
588 filter system calls here. We let GDB do the logic for us. */
592 /* List of known LWPs, keyed by LWP PID. This speeds up the common
593 case of mapping a PID returned from the kernel to our corresponding
594 lwp_info data structure. */
595 static htab_t lwp_lwpid_htab
;
597 /* Calculate a hash from a lwp_info's LWP PID. */
600 lwp_info_hash (const void *ap
)
602 const struct lwp_info
*lp
= (struct lwp_info
*) ap
;
/* Key on the LWP id alone, consistent with lwp_lwpid_htab_eq which
   compares only LWP ids.  */
603 pid_t pid
= lp
->ptid
.lwp ();
605 return iterative_hash_object (pid
, 0);
608 /* Equality function for the lwp_info hash table. Compares the LWP's
612 lwp_lwpid_htab_eq (const void *a
, const void *b
)
614 const struct lwp_info
*entry
= (const struct lwp_info
*) a
;
615 const struct lwp_info
*element
= (const struct lwp_info
*) b
;
/* Two table entries are considered equal iff their LWP ids match;
   this agrees with lwp_info_hash, which hashes the LWP id only.  */
617 return entry
->ptid
.lwp () == element
->ptid
.lwp ();
620 /* Create the lwp_lwpid_htab hash table. */
623 lwp_lwpid_htab_create (void)
625 lwp_lwpid_htab
= htab_create (100, lwp_info_hash
, lwp_lwpid_htab_eq
, NULL
);
628 /* Add LP to the hash table. */
631 lwp_lwpid_htab_add_lwp (struct lwp_info
*lp
)
635 slot
= htab_find_slot (lwp_lwpid_htab
, lp
, INSERT
);
636 gdb_assert (slot
!= NULL
&& *slot
== NULL
);
640 /* Head of doubly-linked list of known LWPs. Sorted by reverse
641 creation order. This order is assumed in some cases. E.g.,
642 reaping status after killing all lwps of a process: the leader LWP
643 must be reaped last. */
645 static intrusive_list
<lwp_info
> lwp_list
;
647 /* See linux-nat.h. */
652 return lwp_info_range (lwp_list
.begin ());
655 /* See linux-nat.h. */
660 return lwp_info_safe_range (lwp_list
.begin ());
663 /* Add LP to sorted-by-reverse-creation-order doubly-linked list. */
666 lwp_list_add (struct lwp_info
*lp
)
668 lwp_list
.push_front (*lp
);
671 /* Remove LP from sorted-by-reverse-creation-order doubly-linked
675 lwp_list_remove (struct lwp_info
*lp
)
677 /* Remove from sorted-by-creation-order list. */
678 lwp_list
.erase (lwp_list
.iterator_to (*lp
));
683 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
684 _initialize_linux_nat. */
685 static sigset_t suspend_mask
;
687 /* Signals to block so that sigsuspend works. */
688 static sigset_t blocked_mask
;
690 /* SIGCHLD action. */
691 static struct sigaction sigchld_action
;
693 /* Block child signals (SIGCHLD and linux threads signals), and store
694 the previous mask in PREV_MASK. */
697 block_child_signals (sigset_t
*prev_mask
)
/* Lazily add SIGCHLD to the global blocked_mask the first time
   through; on subsequent calls this check is a no-op.  */
699 /* Make sure SIGCHLD is blocked. */
700 if (!sigismember (&blocked_mask
, SIGCHLD
))
701 sigaddset (&blocked_mask
, SIGCHLD
);
/* Block the set, saving the caller's previous mask so it can later be
   reinstated by restore_child_signals_mask.  */
703 gdb_sigmask (SIG_BLOCK
, &blocked_mask
, prev_mask
);
706 /* Restore child signals mask, previously returned by
707 block_child_signals. */
710 restore_child_signals_mask (sigset_t
*prev_mask
)
/* SIG_SETMASK reinstates the exact saved mask (rather than merely
   unblocking our additions), fully undoing block_child_signals.  */
712 gdb_sigmask (SIG_SETMASK
, prev_mask
, NULL
);
715 /* Mask of signals to pass directly to the inferior. */
716 static sigset_t pass_mask
;
718 /* Update signals to pass to the inferior. */
720 linux_nat_target::pass_signals
721 (gdb::array_view
<const unsigned char> pass_signals
)
725 sigemptyset (&pass_mask
);
727 for (signo
= 1; signo
< NSIG
; signo
++)
729 int target_signo
= gdb_signal_from_host (signo
);
730 if (target_signo
< pass_signals
.size () && pass_signals
[target_signo
])
731 sigaddset (&pass_mask
, signo
);
737 /* Prototypes for local functions. */
738 static int stop_wait_callback (struct lwp_info
*lp
);
739 static int resume_stopped_resumed_lwps (struct lwp_info
*lp
, const ptid_t wait_ptid
);
740 static int check_ptrace_stopped_lwp_gone (struct lwp_info
*lp
);
744 /* Destroy and free LP. */
746 lwp_info::~lwp_info ()
748 /* Let the arch specific bits release arch_lwp_info. */
749 linux_target
->low_delete_thread (this->arch_private
);
752 /* Traversal function for purge_lwp_list. */
755 lwp_lwpid_htab_remove_pid (void **slot
, void *info
)
757 struct lwp_info
*lp
= (struct lwp_info
*) *slot
;
758 int pid
= *(int *) info
;
760 if (lp
->ptid
.pid () == pid
)
762 htab_clear_slot (lwp_lwpid_htab
, slot
);
763 lwp_list_remove (lp
);
770 /* Remove all LWPs belong to PID from the lwp list. */
773 purge_lwp_list (int pid
)
775 htab_traverse_noresize (lwp_lwpid_htab
, lwp_lwpid_htab_remove_pid
, &pid
);
778 /* Add the LWP specified by PTID to the list. PTID is the first LWP
779 in the process. Return a pointer to the structure describing the
782 This differs from add_lwp in that we don't let the arch specific
783 bits know about this new thread. Current clients of this callback
784 take the opportunity to install watchpoints in the new thread, and
785 we shouldn't do that for the first thread. If we're spawning a
786 child ("run"), the thread executes the shell wrapper first, and we
787 shouldn't touch it until it execs the program we want to debug.
788 For "attach", it'd be okay to call the callback, but it's not
789 necessary, because watchpoints can't yet have been inserted into
792 static struct lwp_info
*
793 add_initial_lwp (ptid_t ptid
)
795 gdb_assert (ptid
.lwp_p ());
797 lwp_info
*lp
= new lwp_info (ptid
);
800 /* Add to sorted-by-reverse-creation-order list. */
803 /* Add to keyed-by-pid htab. */
804 lwp_lwpid_htab_add_lwp (lp
);
809 /* Add the LWP specified by PID to the list. Return a pointer to the
810 structure describing the new LWP. The LWP should already be
813 static struct lwp_info
*
814 add_lwp (ptid_t ptid
)
818 lp
= add_initial_lwp (ptid
);
820 /* Let the arch specific bits know about this new thread. Current
821 clients of this callback take the opportunity to install
822 watchpoints in the new thread. We don't do this for the first
823 thread though. See add_initial_lwp. */
824 linux_target
->low_new_thread (lp
);
829 /* Remove the LWP specified by PID from the list. */
832 delete_lwp (ptid_t ptid
)
834 lwp_info
dummy (ptid
);
836 void **slot
= htab_find_slot (lwp_lwpid_htab
, &dummy
, NO_INSERT
);
840 lwp_info
*lp
= *(struct lwp_info
**) slot
;
841 gdb_assert (lp
!= NULL
);
843 htab_clear_slot (lwp_lwpid_htab
, slot
);
845 /* Remove from sorted-by-creation-order list. */
846 lwp_list_remove (lp
);
852 /* Return a pointer to the structure describing the LWP corresponding
853 to PID. If no corresponding LWP could be found, return NULL. */
855 static struct lwp_info
*
856 find_lwp_pid (ptid_t ptid
)
865 lwp_info
dummy (ptid_t (0, lwp
));
866 return (struct lwp_info
*) htab_find (lwp_lwpid_htab
, &dummy
);
869 /* See nat/linux-nat.h. */
872 iterate_over_lwps (ptid_t filter
,
873 gdb::function_view
<iterate_over_lwps_ftype
> callback
)
875 for (lwp_info
*lp
: all_lwps_safe ())
877 if (lp
->ptid
.matches (filter
))
879 if (callback (lp
) != 0)
887 /* Update our internal state when changing from one checkpoint to
888 another indicated by NEW_PTID. We can only switch single-threaded
889 applications, so we only create one new LWP, and the previous list
893 linux_nat_switch_fork (ptid_t new_ptid
)
897 purge_lwp_list (inferior_ptid
.pid ());
899 lp
= add_lwp (new_ptid
);
902 /* This changes the thread's ptid while preserving the gdb thread
903 num. Also changes the inferior pid, while preserving the
905 thread_change_ptid (linux_target
, inferior_ptid
, new_ptid
);
907 /* We've just told GDB core that the thread changed target id, but,
908 in fact, it really is a different thread, with different register
910 registers_changed ();
913 /* Handle the exit of a single thread LP. If DEL_THREAD is true,
914 delete the thread_info associated to LP, if it exists. */
917 exit_lwp (struct lwp_info
*lp
, bool del_thread
= true)
919 struct thread_info
*th
= linux_target
->find_thread (lp
->ptid
);
921 if (th
!= nullptr && del_thread
)
924 delete_lwp (lp
->ptid
);
927 /* Wait for the LWP specified by LP, which we have just attached to.
928 Returns a wait status for that LWP, to cache. */
931 linux_nat_post_attach_wait (ptid_t ptid
, int *signalled
)
933 pid_t new_pid
, pid
= ptid
.lwp ();
936 if (linux_proc_pid_is_stopped (pid
))
938 linux_nat_debug_printf ("Attaching to a stopped process");
940 /* The process is definitely stopped. It is in a job control
941 stop, unless the kernel predates the TASK_STOPPED /
942 TASK_TRACED distinction, in which case it might be in a
943 ptrace stop. Make sure it is in a ptrace stop; from there we
944 can kill it, signal it, et cetera.
946 First make sure there is a pending SIGSTOP. Since we are
947 already attached, the process can not transition from stopped
948 to running without a PTRACE_CONT; so we know this signal will
949 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
950 probably already in the queue (unless this kernel is old
951 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
952 is not an RT signal, it can only be queued once. */
953 kill_lwp (pid
, SIGSTOP
);
955 /* Finally, resume the stopped process. This will deliver the SIGSTOP
956 (or a higher priority signal, just like normal PTRACE_ATTACH). */
957 ptrace (PTRACE_CONT
, pid
, 0, 0);
960 /* Make sure the initial process is stopped. The user-level threads
961 layer might want to poke around in the inferior, and that won't
962 work if things haven't stabilized yet. */
963 new_pid
= my_waitpid (pid
, &status
, __WALL
);
964 gdb_assert (pid
== new_pid
);
966 if (!WIFSTOPPED (status
))
968 /* The pid we tried to attach has apparently just exited. */
969 linux_nat_debug_printf ("Failed to stop %d: %s", pid
,
970 status_to_str (status
).c_str ());
974 if (WSTOPSIG (status
) != SIGSTOP
)
977 linux_nat_debug_printf ("Received %s after attaching",
978 status_to_str (status
).c_str ());
985 linux_nat_target::create_inferior (const char *exec_file
,
986 const std::string
&allargs
,
987 char **env
, int from_tty
)
989 maybe_disable_address_space_randomization restore_personality
990 (disable_randomization
);
992 /* The fork_child mechanism is synchronous and calls target_wait, so
993 we have to mask the async mode. */
995 /* Make sure we report all signals during startup. */
998 inf_ptrace_target::create_inferior (exec_file
, allargs
, env
, from_tty
);
1000 open_proc_mem_file (inferior_ptid
);
1003 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1004 already attached. Returns true if a new LWP is found, false
1008 attach_proc_task_lwp_callback (ptid_t ptid
)
1010 struct lwp_info
*lp
;
1012 /* Ignore LWPs we're already attached to. */
1013 lp
= find_lwp_pid (ptid
);
1016 int lwpid
= ptid
.lwp ();
1018 if (ptrace (PTRACE_ATTACH
, lwpid
, 0, 0) < 0)
1022 /* Be quiet if we simply raced with the thread exiting.
1023 EPERM is returned if the thread's task still exists, and
1024 is marked as exited or zombie, as well as other
1025 conditions, so in that case, confirm the status in
1026 /proc/PID/status. */
1028 || (err
== EPERM
&& linux_proc_pid_is_gone (lwpid
)))
1030 linux_nat_debug_printf
1031 ("Cannot attach to lwp %d: thread is gone (%d: %s)",
1032 lwpid
, err
, safe_strerror (err
));
1038 = linux_ptrace_attach_fail_reason_string (ptid
, err
);
1040 warning (_("Cannot attach to lwp %d: %s"),
1041 lwpid
, reason
.c_str ());
1046 linux_nat_debug_printf ("PTRACE_ATTACH %s, 0, 0 (OK)",
1047 ptid
.to_string ().c_str ());
1049 lp
= add_lwp (ptid
);
1051 /* The next time we wait for this LWP we'll see a SIGSTOP as
1052 PTRACE_ATTACH brings it to a halt. */
1055 /* We need to wait for a stop before being able to make the
1056 next ptrace call on this LWP. */
1057 lp
->must_set_ptrace_flags
= 1;
1059 /* So that wait collects the SIGSTOP. */
1062 /* Also add the LWP to gdb's thread list, in case a
1063 matching libthread_db is not found (or the process uses
1065 add_thread (linux_target
, lp
->ptid
);
1066 set_running (linux_target
, lp
->ptid
, true);
1067 set_executing (linux_target
, lp
->ptid
, true);
1076 linux_nat_target::attach (const char *args
, int from_tty
)
1078 struct lwp_info
*lp
;
1082 /* Make sure we report all signals during attach. */
1087 inf_ptrace_target::attach (args
, from_tty
);
1089 catch (const gdb_exception_error
&ex
)
1091 pid_t pid
= parse_pid_to_attach (args
);
1092 std::string reason
= linux_ptrace_attach_fail_reason (pid
);
1094 if (!reason
.empty ())
1095 throw_error (ex
.error
, "warning: %s\n%s", reason
.c_str (),
1098 throw_error (ex
.error
, "%s", ex
.what ());
1101 /* The ptrace base target adds the main thread with (pid,0,0)
1102 format. Decorate it with lwp info. */
1103 ptid
= ptid_t (inferior_ptid
.pid (),
1104 inferior_ptid
.pid ());
1105 thread_change_ptid (linux_target
, inferior_ptid
, ptid
);
1107 /* Add the initial process as the first LWP to the list. */
1108 lp
= add_initial_lwp (ptid
);
1110 status
= linux_nat_post_attach_wait (lp
->ptid
, &lp
->signalled
);
1111 if (!WIFSTOPPED (status
))
1113 if (WIFEXITED (status
))
1115 int exit_code
= WEXITSTATUS (status
);
1117 target_terminal::ours ();
1118 target_mourn_inferior (inferior_ptid
);
1120 error (_("Unable to attach: program exited normally."));
1122 error (_("Unable to attach: program exited with code %d."),
1125 else if (WIFSIGNALED (status
))
1127 enum gdb_signal signo
;
1129 target_terminal::ours ();
1130 target_mourn_inferior (inferior_ptid
);
1132 signo
= gdb_signal_from_host (WTERMSIG (status
));
1133 error (_("Unable to attach: program terminated with signal "
1135 gdb_signal_to_name (signo
),
1136 gdb_signal_to_string (signo
));
1139 internal_error (_("unexpected status %d for PID %ld"),
1140 status
, (long) ptid
.lwp ());
1145 open_proc_mem_file (lp
->ptid
);
1147 /* Save the wait status to report later. */
1149 linux_nat_debug_printf ("waitpid %ld, saving status %s",
1150 (long) lp
->ptid
.pid (),
1151 status_to_str (status
).c_str ());
1153 lp
->status
= status
;
1155 /* We must attach to every LWP. If /proc is mounted, use that to
1156 find them now. The inferior may be using raw clone instead of
1157 using pthreads. But even if it is using pthreads, thread_db
1158 walks structures in the inferior's address space to find the list
1159 of threads/LWPs, and those structures may well be corrupted.
1160 Note that once thread_db is loaded, we'll still use it to list
1161 threads and associate pthread info with each LWP. */
1162 linux_proc_attach_tgid_threads (lp
->ptid
.pid (),
1163 attach_proc_task_lwp_callback
);
1166 /* Ptrace-detach the thread with pid PID. */
1169 detach_one_pid (int pid
, int signo
)
1171 if (ptrace (PTRACE_DETACH
, pid
, 0, signo
) < 0)
1173 int save_errno
= errno
;
1175 /* We know the thread exists, so ESRCH must mean the lwp is
1176 zombie. This can happen if one of the already-detached
1177 threads exits the whole thread group. In that case we're
1178 still attached, and must reap the lwp. */
1179 if (save_errno
== ESRCH
)
1183 ret
= my_waitpid (pid
, &status
, __WALL
);
1186 warning (_("Couldn't reap LWP %d while detaching: %s"),
1187 pid
, safe_strerror (errno
));
1189 else if (!WIFEXITED (status
) && !WIFSIGNALED (status
))
1191 warning (_("Reaping LWP %d while detaching "
1192 "returned unexpected status 0x%x"),
1197 error (_("Can't detach %d: %s"),
1198 pid
, safe_strerror (save_errno
));
1201 linux_nat_debug_printf ("PTRACE_DETACH (%d, %s, 0) (OK)",
1202 pid
, strsignal (signo
));
1205 /* Get pending signal of THREAD as a host signal number, for detaching
1206 purposes. This is the signal the thread last stopped for, which we
1207 need to deliver to the thread when detaching, otherwise, it'd be
1211 get_detach_signal (struct lwp_info
*lp
)
1213 enum gdb_signal signo
= GDB_SIGNAL_0
;
1215 /* If we paused threads momentarily, we may have stored pending
1216 events in lp->status or lp->waitstatus (see stop_wait_callback),
1217 and GDB core hasn't seen any signal for those threads.
1218 Otherwise, the last signal reported to the core is found in the
1219 thread object's stop_signal.
1221 There's a corner case that isn't handled here at present. Only
1222 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1223 stop_signal make sense as a real signal to pass to the inferior.
1224 Some catchpoint related events, like
1225 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
1226 to GDB_SIGNAL_SIGTRAP when the catchpoint triggers. But,
1227 those traps are debug API (ptrace in our case) related and
1228 induced; the inferior wouldn't see them if it wasn't being
1229 traced. Hence, we should never pass them to the inferior, even
1230 when set to pass state. Since this corner case isn't handled by
1231 infrun.c when proceeding with a signal, for consistency, neither
1232 do we handle it here (or elsewhere in the file we check for
1233 signal pass state). Normally SIGTRAP isn't set to pass state, so
1234 this is really a corner case. */
1236 if (lp
->waitstatus
.kind () != TARGET_WAITKIND_IGNORE
)
1237 signo
= GDB_SIGNAL_0
; /* a pending ptrace event, not a real signal. */
1238 else if (lp
->status
)
1239 signo
= gdb_signal_from_host (WSTOPSIG (lp
->status
));
1242 thread_info
*tp
= linux_target
->find_thread (lp
->ptid
);
1244 if (target_is_non_stop_p () && !tp
->executing ())
1246 if (tp
->has_pending_waitstatus ())
1248 /* If the thread has a pending event, and it was stopped with a
1249 signal, use that signal to resume it. If it has a pending
1250 event of another kind, it was not stopped with a signal, so
1251 resume it without a signal. */
1252 if (tp
->pending_waitstatus ().kind () == TARGET_WAITKIND_STOPPED
)
1253 signo
= tp
->pending_waitstatus ().sig ();
1255 signo
= GDB_SIGNAL_0
;
1258 signo
= tp
->stop_signal ();
1260 else if (!target_is_non_stop_p ())
1263 process_stratum_target
*last_target
;
1265 get_last_target_status (&last_target
, &last_ptid
, nullptr);
1267 if (last_target
== linux_target
1268 && lp
->ptid
.lwp () == last_ptid
.lwp ())
1269 signo
= tp
->stop_signal ();
1273 if (signo
== GDB_SIGNAL_0
)
1275 linux_nat_debug_printf ("lwp %s has no pending signal",
1276 lp
->ptid
.to_string ().c_str ());
1278 else if (!signal_pass_state (signo
))
1280 linux_nat_debug_printf
1281 ("lwp %s had signal %s but it is in no pass state",
1282 lp
->ptid
.to_string ().c_str (), gdb_signal_to_string (signo
));
1286 linux_nat_debug_printf ("lwp %s has pending signal %s",
1287 lp
->ptid
.to_string ().c_str (),
1288 gdb_signal_to_string (signo
));
1290 return gdb_signal_to_host (signo
);
1296 /* If LP has a pending fork/vfork/clone status, return it. */
1298 static std::optional
<target_waitstatus
>
1299 get_pending_child_status (lwp_info
*lp
)
1301 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT
;
1303 linux_nat_debug_printf ("lwp %s (stopped = %d)",
1304 lp
->ptid
.to_string ().c_str (), lp
->stopped
);
1306 /* Check in lwp_info::status. */
1307 if (WIFSTOPPED (lp
->status
) && linux_is_extended_waitstatus (lp
->status
))
1309 int event
= linux_ptrace_get_extended_event (lp
->status
);
1311 if (event
== PTRACE_EVENT_FORK
1312 || event
== PTRACE_EVENT_VFORK
1313 || event
== PTRACE_EVENT_CLONE
)
1315 unsigned long child_pid
;
1316 int ret
= ptrace (PTRACE_GETEVENTMSG
, lp
->ptid
.lwp (), 0, &child_pid
);
1319 target_waitstatus ws
;
1321 if (event
== PTRACE_EVENT_FORK
)
1322 ws
.set_forked (ptid_t (child_pid
, child_pid
));
1323 else if (event
== PTRACE_EVENT_VFORK
)
1324 ws
.set_vforked (ptid_t (child_pid
, child_pid
));
1325 else if (event
== PTRACE_EVENT_CLONE
)
1326 ws
.set_thread_cloned (ptid_t (lp
->ptid
.pid (), child_pid
));
1328 gdb_assert_not_reached ("unhandled");
1334 perror_warning_with_name (_("Failed to retrieve event msg"));
1340 /* Check in lwp_info::waitstatus. */
1341 if (is_new_child_status (lp
->waitstatus
.kind ()))
1342 return lp
->waitstatus
;
1344 thread_info
*tp
= linux_target
->find_thread (lp
->ptid
);
1346 /* Check in thread_info::pending_waitstatus. */
1347 if (tp
->has_pending_waitstatus ()
1348 && is_new_child_status (tp
->pending_waitstatus ().kind ()))
1349 return tp
->pending_waitstatus ();
1351 /* Check in thread_info::pending_follow. */
1352 if (is_new_child_status (tp
->pending_follow
.kind ()))
1353 return tp
->pending_follow
;
1358 /* Detach from LP. If SIGNO_P is non-NULL, then it points to the
1359 signal number that should be passed to the LWP when detaching.
1360 Otherwise pass any pending signal the LWP may have, if any. */
1363 detach_one_lwp (struct lwp_info
*lp
, int *signo_p
)
1365 int lwpid
= lp
->ptid
.lwp ();
1368 /* If the lwp/thread we are about to detach has a pending fork/clone
1369 event, there is a process/thread GDB is attached to that the core
1370 of GDB doesn't know about. Detach from it. */
1372 std::optional
<target_waitstatus
> ws
= get_pending_child_status (lp
);
1373 if (ws
.has_value ())
1374 detach_one_pid (ws
->child_ptid ().lwp (), 0);
1376 /* If there is a pending SIGSTOP, get rid of it. */
1379 linux_nat_debug_printf ("Sending SIGCONT to %s",
1380 lp
->ptid
.to_string ().c_str ());
1382 kill_lwp (lwpid
, SIGCONT
);
1386 if (signo_p
== NULL
)
1388 /* Pass on any pending signal for this LWP. */
1389 signo
= get_detach_signal (lp
);
1394 linux_nat_debug_printf ("preparing to resume lwp %s (stopped = %d)",
1395 lp
->ptid
.to_string ().c_str (),
1398 /* Preparing to resume may try to write registers, and fail if the
1399 lwp is zombie. If that happens, ignore the error. We'll handle
1400 it below, when detach fails with ESRCH. */
1403 linux_target
->low_prepare_to_resume (lp
);
1405 catch (const gdb_exception_error
&ex
)
1407 if (!check_ptrace_stopped_lwp_gone (lp
))
1411 detach_one_pid (lwpid
, signo
);
1413 delete_lwp (lp
->ptid
);
1417 detach_callback (struct lwp_info
*lp
)
1419 /* We don't actually detach from the thread group leader just yet.
1420 If the thread group exits, we must reap the zombie clone lwps
1421 before we're able to reap the leader. */
1422 if (lp
->ptid
.lwp () != lp
->ptid
.pid ())
1423 detach_one_lwp (lp
, NULL
);
1428 linux_nat_target::detach (inferior
*inf
, int from_tty
)
1430 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT
;
1432 struct lwp_info
*main_lwp
;
1435 /* Don't unregister from the event loop, as there may be other
1436 inferiors running. */
1438 /* Stop all threads before detaching. ptrace requires that the
1439 thread is stopped to successfully detach. */
1440 iterate_over_lwps (ptid_t (pid
), stop_callback
);
1441 /* ... and wait until all of them have reported back that
1442 they're no longer running. */
1443 iterate_over_lwps (ptid_t (pid
), stop_wait_callback
);
1445 /* We can now safely remove breakpoints. We don't this in earlier
1446 in common code because this target doesn't currently support
1447 writing memory while the inferior is running. */
1448 remove_breakpoints_inf (current_inferior ());
1450 iterate_over_lwps (ptid_t (pid
), detach_callback
);
1452 /* We have detached from everything except the main thread now, so
1453 should only have one thread left. However, in non-stop mode the
1454 main thread might have exited, in which case we'll have no threads
1456 gdb_assert (num_lwps (pid
) == 1
1457 || (target_is_non_stop_p () && num_lwps (pid
) == 0));
1459 if (forks_exist_p ())
1461 /* Multi-fork case. The current inferior_ptid is being detached
1462 from, but there are other viable forks to debug. Detach from
1463 the current fork, and context-switch to the first
1465 linux_fork_detach (from_tty
);
1469 target_announce_detach (from_tty
);
1471 /* In non-stop mode it is possible that the main thread has exited,
1472 in which case we don't try to detach. */
1473 main_lwp
= find_lwp_pid (ptid_t (pid
));
1474 if (main_lwp
!= nullptr)
1476 /* Pass on any pending signal for the last LWP. */
1477 int signo
= get_detach_signal (main_lwp
);
1479 detach_one_lwp (main_lwp
, &signo
);
1482 gdb_assert (target_is_non_stop_p ());
1484 detach_success (inf
);
1487 close_proc_mem_file (pid
);
1490 /* Resume execution of the inferior process. If STEP is nonzero,
1491 single-step it. If SIGNAL is nonzero, give it that signal. */
1494 linux_resume_one_lwp_throw (struct lwp_info
*lp
, int step
,
1495 enum gdb_signal signo
)
1499 /* stop_pc doubles as the PC the LWP had when it was last resumed.
1500 We only presently need that if the LWP is stepped though (to
1501 handle the case of stepping a breakpoint instruction). */
1504 struct regcache
*regcache
= get_thread_regcache (linux_target
, lp
->ptid
);
1506 lp
->stop_pc
= regcache_read_pc (regcache
);
1511 linux_target
->low_prepare_to_resume (lp
);
1512 linux_target
->low_resume (lp
->ptid
, step
, signo
);
1514 /* Successfully resumed. Clear state that no longer makes sense,
1515 and mark the LWP as running. Must not do this before resuming
1516 otherwise if that fails other code will be confused. E.g., we'd
1517 later try to stop the LWP and hang forever waiting for a stop
1518 status. Note that we must not throw after this is cleared,
1519 otherwise handle_zombie_lwp_error would get confused. */
1522 lp
->stop_reason
= TARGET_STOPPED_BY_NO_REASON
;
1523 registers_changed_ptid (linux_target
, lp
->ptid
);
1526 /* Called when we try to resume a stopped LWP and that errors out. If
1527 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
1528 or about to become), discard the error, clear any pending status
1529 the LWP may have, and return true (we'll collect the exit status
1530 soon enough). Otherwise, return false. */
1533 check_ptrace_stopped_lwp_gone (struct lwp_info
*lp
)
1535 /* If we get an error after resuming the LWP successfully, we'd
1536 confuse !T state for the LWP being gone. */
1537 gdb_assert (lp
->stopped
);
1539 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
1540 because even if ptrace failed with ESRCH, the tracee may be "not
1541 yet fully dead", but already refusing ptrace requests. In that
1542 case the tracee has 'R (Running)' state for a little bit
1543 (observed in Linux 3.18). See also the note on ESRCH in the
1544 ptrace(2) man page. Instead, check whether the LWP has any state
1545 other than ptrace-stopped. */
1547 /* Don't assume anything if /proc/PID/status can't be read. */
1548 if (linux_proc_pid_is_trace_stopped_nowarn (lp
->ptid
.lwp ()) == 0)
1550 lp
->stop_reason
= TARGET_STOPPED_BY_NO_REASON
;
1552 lp
->waitstatus
.set_ignore ();
1558 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
1559 disappears while we try to resume it. */
1562 linux_resume_one_lwp (struct lwp_info
*lp
, int step
, enum gdb_signal signo
)
1566 linux_resume_one_lwp_throw (lp
, step
, signo
);
1568 catch (const gdb_exception_error
&ex
)
1570 if (!check_ptrace_stopped_lwp_gone (lp
))
1578 resume_lwp (struct lwp_info
*lp
, int step
, enum gdb_signal signo
)
1582 struct inferior
*inf
= find_inferior_ptid (linux_target
, lp
->ptid
);
1584 if (inf
->vfork_child
!= NULL
)
1586 linux_nat_debug_printf ("Not resuming sibling %s (vfork parent)",
1587 lp
->ptid
.to_string ().c_str ());
1589 else if (!lwp_status_pending_p (lp
))
1591 linux_nat_debug_printf ("Resuming sibling %s, %s, %s",
1592 lp
->ptid
.to_string ().c_str (),
1593 (signo
!= GDB_SIGNAL_0
1594 ? strsignal (gdb_signal_to_host (signo
))
1596 step
? "step" : "resume");
1598 linux_resume_one_lwp (lp
, step
, signo
);
1602 linux_nat_debug_printf ("Not resuming sibling %s (has pending)",
1603 lp
->ptid
.to_string ().c_str ());
1607 linux_nat_debug_printf ("Not resuming sibling %s (not stopped)",
1608 lp
->ptid
.to_string ().c_str ());
1611 /* Callback for iterate_over_lwps. If LWP is EXCEPT, do nothing.
1612 Resume LWP with the last stop signal, if it is in pass state. */
1615 linux_nat_resume_callback (struct lwp_info
*lp
, struct lwp_info
*except
)
1617 enum gdb_signal signo
= GDB_SIGNAL_0
;
1624 struct thread_info
*thread
;
1626 thread
= linux_target
->find_thread (lp
->ptid
);
1629 signo
= thread
->stop_signal ();
1630 thread
->set_stop_signal (GDB_SIGNAL_0
);
1634 resume_lwp (lp
, 0, signo
);
1639 resume_clear_callback (struct lwp_info
*lp
)
1642 lp
->last_resume_kind
= resume_stop
;
1647 resume_set_callback (struct lwp_info
*lp
)
1650 lp
->last_resume_kind
= resume_continue
;
1655 linux_nat_target::resume (ptid_t scope_ptid
, int step
, enum gdb_signal signo
)
1657 struct lwp_info
*lp
;
1659 linux_nat_debug_printf ("Preparing to %s %s, %s, inferior_ptid %s",
1660 step
? "step" : "resume",
1661 scope_ptid
.to_string ().c_str (),
1662 (signo
!= GDB_SIGNAL_0
1663 ? strsignal (gdb_signal_to_host (signo
)) : "0"),
1664 inferior_ptid
.to_string ().c_str ());
1666 /* Mark the lwps we're resuming as resumed and update their
1667 last_resume_kind to resume_continue. */
1668 iterate_over_lwps (scope_ptid
, resume_set_callback
);
1670 lp
= find_lwp_pid (inferior_ptid
);
1671 gdb_assert (lp
!= NULL
);
1673 /* Remember if we're stepping. */
1674 lp
->last_resume_kind
= step
? resume_step
: resume_continue
;
1676 /* If we have a pending wait status for this thread, there is no
1677 point in resuming the process. But first make sure that
1678 linux_nat_wait won't preemptively handle the event - we
1679 should never take this short-circuit if we are going to
1680 leave LP running, since we have skipped resuming all the
1681 other threads. This bit of code needs to be synchronized
1682 with linux_nat_wait. */
1684 if (lp
->status
&& WIFSTOPPED (lp
->status
))
1687 && WSTOPSIG (lp
->status
)
1688 && sigismember (&pass_mask
, WSTOPSIG (lp
->status
)))
1690 linux_nat_debug_printf
1691 ("Not short circuiting for ignored status 0x%x", lp
->status
);
1693 /* FIXME: What should we do if we are supposed to continue
1694 this thread with a signal? */
1695 gdb_assert (signo
== GDB_SIGNAL_0
);
1696 signo
= gdb_signal_from_host (WSTOPSIG (lp
->status
));
1701 if (lwp_status_pending_p (lp
))
1703 /* FIXME: What should we do if we are supposed to continue
1704 this thread with a signal? */
1705 gdb_assert (signo
== GDB_SIGNAL_0
);
1707 linux_nat_debug_printf ("Short circuiting for status %s",
1708 pending_status_str (lp
).c_str ());
1710 if (target_can_async_p ())
1712 target_async (true);
1713 /* Tell the event loop we have something to process. */
1719 /* No use iterating unless we're resuming other threads. */
1720 if (scope_ptid
!= lp
->ptid
)
1721 iterate_over_lwps (scope_ptid
, [=] (struct lwp_info
*info
)
1723 return linux_nat_resume_callback (info
, lp
);
1726 linux_nat_debug_printf ("%s %s, %s (resume event thread)",
1727 step
? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1728 lp
->ptid
.to_string ().c_str (),
1729 (signo
!= GDB_SIGNAL_0
1730 ? strsignal (gdb_signal_to_host (signo
)) : "0"));
1732 linux_resume_one_lwp (lp
, step
, signo
);
1735 /* Send a signal to an LWP. */
1738 kill_lwp (int lwpid
, int signo
)
1743 ret
= syscall (__NR_tkill
, lwpid
, signo
);
1744 if (errno
== ENOSYS
)
1746 /* If tkill fails, then we are not using nptl threads, a
1747 configuration we no longer support. */
1748 perror_with_name (("tkill"));
1753 /* Handle a GNU/Linux syscall trap wait response. If we see a syscall
1754 event, check if the core is interested in it: if not, ignore the
1755 event, and keep waiting; otherwise, we need to toggle the LWP's
1756 syscall entry/exit status, since the ptrace event itself doesn't
1757 indicate it, and report the trap to higher layers. */
1760 linux_handle_syscall_trap (struct lwp_info
*lp
, int stopping
)
1762 struct target_waitstatus
*ourstatus
= &lp
->waitstatus
;
1763 struct gdbarch
*gdbarch
= target_thread_architecture (lp
->ptid
);
1764 thread_info
*thread
= linux_target
->find_thread (lp
->ptid
);
1765 int syscall_number
= (int) gdbarch_get_syscall_number (gdbarch
, thread
);
1769 /* If we're stopping threads, there's a SIGSTOP pending, which
1770 makes it so that the LWP reports an immediate syscall return,
1771 followed by the SIGSTOP. Skip seeing that "return" using
1772 PTRACE_CONT directly, and let stop_wait_callback collect the
1773 SIGSTOP. Later when the thread is resumed, a new syscall
1774 entry event. If we didn't do this (and returned 0), we'd
1775 leave a syscall entry pending, and our caller, by using
1776 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
1777 itself. Later, when the user re-resumes this LWP, we'd see
1778 another syscall entry event and we'd mistake it for a return.
1780 If stop_wait_callback didn't force the SIGSTOP out of the LWP
1781 (leaving immediately with LWP->signalled set, without issuing
1782 a PTRACE_CONT), it would still be problematic to leave this
1783 syscall enter pending, as later when the thread is resumed,
1784 it would then see the same syscall exit mentioned above,
1785 followed by the delayed SIGSTOP, while the syscall didn't
1786 actually get to execute. It seems it would be even more
1787 confusing to the user. */
1789 linux_nat_debug_printf
1790 ("ignoring syscall %d for LWP %ld (stopping threads), resuming with "
1791 "PTRACE_CONT for SIGSTOP", syscall_number
, lp
->ptid
.lwp ());
1793 lp
->syscall_state
= TARGET_WAITKIND_IGNORE
;
1794 ptrace (PTRACE_CONT
, lp
->ptid
.lwp (), 0, 0);
1799 /* Always update the entry/return state, even if this particular
1800 syscall isn't interesting to the core now. In async mode,
1801 the user could install a new catchpoint for this syscall
1802 between syscall enter/return, and we'll need to know to
1803 report a syscall return if that happens. */
1804 lp
->syscall_state
= (lp
->syscall_state
== TARGET_WAITKIND_SYSCALL_ENTRY
1805 ? TARGET_WAITKIND_SYSCALL_RETURN
1806 : TARGET_WAITKIND_SYSCALL_ENTRY
);
1808 if (catch_syscall_enabled ())
1810 if (catching_syscall_number (syscall_number
))
1812 /* Alright, an event to report. */
1813 if (lp
->syscall_state
== TARGET_WAITKIND_SYSCALL_ENTRY
)
1814 ourstatus
->set_syscall_entry (syscall_number
);
1815 else if (lp
->syscall_state
== TARGET_WAITKIND_SYSCALL_RETURN
)
1816 ourstatus
->set_syscall_return (syscall_number
);
1818 gdb_assert_not_reached ("unexpected syscall state");
1820 linux_nat_debug_printf
1821 ("stopping for %s of syscall %d for LWP %ld",
1822 (lp
->syscall_state
== TARGET_WAITKIND_SYSCALL_ENTRY
1823 ? "entry" : "return"), syscall_number
, lp
->ptid
.lwp ());
1828 linux_nat_debug_printf
1829 ("ignoring %s of syscall %d for LWP %ld",
1830 (lp
->syscall_state
== TARGET_WAITKIND_SYSCALL_ENTRY
1831 ? "entry" : "return"), syscall_number
, lp
->ptid
.lwp ());
1835 /* If we had been syscall tracing, and hence used PT_SYSCALL
1836 before on this LWP, it could happen that the user removes all
1837 syscall catchpoints before we get to process this event.
1838 There are two noteworthy issues here:
1840 - When stopped at a syscall entry event, resuming with
1841 PT_STEP still resumes executing the syscall and reports a
1844 - Only PT_SYSCALL catches syscall enters. If we last
1845 single-stepped this thread, then this event can't be a
1846 syscall enter. If we last single-stepped this thread, this
1847 has to be a syscall exit.
1849 The points above mean that the next resume, be it PT_STEP or
1850 PT_CONTINUE, can not trigger a syscall trace event. */
1851 linux_nat_debug_printf
1852 ("caught syscall event with no syscall catchpoints. %d for LWP %ld, "
1853 "ignoring", syscall_number
, lp
->ptid
.lwp ());
1854 lp
->syscall_state
= TARGET_WAITKIND_IGNORE
;
1857 /* The core isn't interested in this event. For efficiency, avoid
1858 stopping all threads only to have the core resume them all again.
1859 Since we're not stopping threads, if we're still syscall tracing
1860 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
1861 subsequent syscall. Simply resume using the inf-ptrace layer,
1862 which knows when to use PT_SYSCALL or PT_CONTINUE. */
1864 linux_resume_one_lwp (lp
, lp
->step
, GDB_SIGNAL_0
);
1871 linux_nat_target::follow_clone (ptid_t child_ptid
)
1873 lwp_info
*new_lp
= add_lwp (child_ptid
);
1874 new_lp
->stopped
= 1;
1876 /* If the thread_db layer is active, let it record the user
1877 level thread id and status, and add the thread to GDB's
1879 if (!thread_db_notice_clone (inferior_ptid
, new_lp
->ptid
))
1881 /* The process is not using thread_db. Add the LWP to
1883 add_thread (linux_target
, new_lp
->ptid
);
1886 /* We just created NEW_LP so it cannot yet contain STATUS. */
1887 gdb_assert (new_lp
->status
== 0);
1889 if (!pull_pid_from_list (&stopped_pids
, child_ptid
.lwp (), &new_lp
->status
))
1890 internal_error (_("no saved status for clone lwp"));
1892 if (WSTOPSIG (new_lp
->status
) != SIGSTOP
)
1894 /* This can happen if someone starts sending signals to
1895 the new thread before it gets a chance to run, which
1896 have a lower number than SIGSTOP (e.g. SIGUSR1).
1897 This is an unlikely case, and harder to handle for
1898 fork / vfork than for clone, so we do not try - but
1899 we handle it for clone events here. */
1901 new_lp
->signalled
= 1;
1903 /* Save the wait status to report later. */
1904 linux_nat_debug_printf
1905 ("waitpid of new LWP %ld, saving status %s",
1906 (long) new_lp
->ptid
.lwp (), status_to_str (new_lp
->status
).c_str ());
1912 if (report_thread_events
)
1913 new_lp
->waitstatus
.set_thread_created ();
1917 /* Handle a GNU/Linux extended wait response. If we see a clone
1918 event, we need to add the new LWP to our list (and not report the
1919 trap to higher layers). This function returns non-zero if the
1920 event should be ignored and we should wait again. If STOPPING is
1921 true, the new LWP remains stopped, otherwise it is continued. */
1924 linux_handle_extended_wait (struct lwp_info
*lp
, int status
)
1926 int pid
= lp
->ptid
.lwp ();
1927 struct target_waitstatus
*ourstatus
= &lp
->waitstatus
;
1928 int event
= linux_ptrace_get_extended_event (status
);
1930 /* All extended events we currently use are mid-syscall. Only
1931 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
1932 you have to be using PTRACE_SEIZE to get that. */
1933 lp
->syscall_state
= TARGET_WAITKIND_SYSCALL_ENTRY
;
1935 if (event
== PTRACE_EVENT_FORK
|| event
== PTRACE_EVENT_VFORK
1936 || event
== PTRACE_EVENT_CLONE
)
1938 unsigned long new_pid
;
1941 ptrace (PTRACE_GETEVENTMSG
, pid
, 0, &new_pid
);
1943 /* If we haven't already seen the new PID stop, wait for it now. */
1944 if (! pull_pid_from_list (&stopped_pids
, new_pid
, &status
))
1946 /* The new child has a pending SIGSTOP. We can't affect it until it
1947 hits the SIGSTOP, but we're already attached. */
1948 ret
= my_waitpid (new_pid
, &status
, __WALL
);
1950 perror_with_name (_("waiting for new child"));
1951 else if (ret
!= new_pid
)
1952 internal_error (_("wait returned unexpected PID %d"), ret
);
1953 else if (!WIFSTOPPED (status
))
1954 internal_error (_("wait returned unexpected status 0x%x"), status
);
1957 if (event
== PTRACE_EVENT_FORK
|| event
== PTRACE_EVENT_VFORK
)
1959 open_proc_mem_file (ptid_t (new_pid
, new_pid
));
1961 /* The arch-specific native code may need to know about new
1962 forks even if those end up never mapped to an
1964 linux_target
->low_new_fork (lp
, new_pid
);
1966 else if (event
== PTRACE_EVENT_CLONE
)
1968 linux_target
->low_new_clone (lp
, new_pid
);
1971 if (event
== PTRACE_EVENT_FORK
1972 && linux_fork_checkpointing_p (lp
->ptid
.pid ()))
1974 /* Handle checkpointing by linux-fork.c here as a special
1975 case. We don't want the follow-fork-mode or 'catch fork'
1976 to interfere with this. */
1978 /* This won't actually modify the breakpoint list, but will
1979 physically remove the breakpoints from the child. */
1980 detach_breakpoints (ptid_t (new_pid
, new_pid
));
1982 /* Retain child fork in ptrace (stopped) state. */
1983 if (!find_fork_pid (new_pid
))
1986 /* Report as spurious, so that infrun doesn't want to follow
1987 this fork. We're actually doing an infcall in
1989 ourstatus
->set_spurious ();
1991 /* Report the stop to the core. */
1995 if (event
== PTRACE_EVENT_FORK
)
1996 ourstatus
->set_forked (ptid_t (new_pid
, new_pid
));
1997 else if (event
== PTRACE_EVENT_VFORK
)
1998 ourstatus
->set_vforked (ptid_t (new_pid
, new_pid
));
1999 else if (event
== PTRACE_EVENT_CLONE
)
2001 linux_nat_debug_printf
2002 ("Got clone event from LWP %d, new child is LWP %ld", pid
, new_pid
);
2004 /* Save the status again, we'll use it in follow_clone. */
2005 add_to_pid_list (&stopped_pids
, new_pid
, status
);
2007 ourstatus
->set_thread_cloned (ptid_t (lp
->ptid
.pid (), new_pid
));
2013 if (event
== PTRACE_EVENT_EXEC
)
2015 linux_nat_debug_printf ("Got exec event from LWP %ld", lp
->ptid
.lwp ());
2017 /* Close the previous /proc/PID/mem file for this inferior,
2018 which was using the address space which is now gone.
2019 Reading/writing from this file would return 0/EOF. */
2020 close_proc_mem_file (lp
->ptid
.pid ());
2022 /* Open a new file for the new address space. */
2023 open_proc_mem_file (lp
->ptid
);
2025 ourstatus
->set_execd
2026 (make_unique_xstrdup (linux_proc_pid_to_exec_file (pid
)));
2028 /* The thread that execed must have been resumed, but, when a
2029 thread execs, it changes its tid to the tgid, and the old
2030 tgid thread might have not been resumed. */
2033 /* All other LWPs are gone now. We'll have received a thread
2034 exit notification for all threads other the execing one.
2035 That one, if it wasn't the leader, just silently changes its
2036 tid to the tgid, and the previous leader vanishes. Since
2037 Linux 3.0, the former thread ID can be retrieved with
2038 PTRACE_GETEVENTMSG, but since we support older kernels, don't
2039 bother with it, and just walk the LWP list. Even with
2040 PTRACE_GETEVENTMSG, we'd still need to lookup the
2041 corresponding LWP object, and it would be an extra ptrace
2042 syscall, so this way may even be more efficient. */
2043 for (lwp_info
*other_lp
: all_lwps_safe ())
2044 if (other_lp
!= lp
&& other_lp
->ptid
.pid () == lp
->ptid
.pid ())
2045 exit_lwp (other_lp
);
2050 if (event
== PTRACE_EVENT_VFORK_DONE
)
2052 linux_nat_debug_printf
2053 ("Got PTRACE_EVENT_VFORK_DONE from LWP %ld",
2055 ourstatus
->set_vfork_done ();
2059 internal_error (_("unknown ptrace event %d"), event
);
2062 /* Suspend waiting for a signal. We're mostly interested in
2068 linux_nat_debug_printf ("about to sigsuspend");
2069 sigsuspend (&suspend_mask
);
2071 /* If the quit flag is set, it means that the user pressed Ctrl-C
2072 and we're debugging a process that is running on a separate
2073 terminal, so we must forward the Ctrl-C to the inferior. (If the
2074 inferior is sharing GDB's terminal, then the Ctrl-C reaches the
2075 inferior directly.) We must do this here because functions that
2076 need to block waiting for a signal loop forever until there's an
2077 event to report before returning back to the event loop. */
2078 if (!target_terminal::is_ours ())
2080 if (check_quit_flag ())
2081 target_pass_ctrlc ();
2085 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2089 wait_lwp (struct lwp_info
*lp
)
2093 int thread_dead
= 0;
2096 gdb_assert (!lp
->stopped
);
2097 gdb_assert (lp
->status
== 0);
2099 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2100 block_child_signals (&prev_mask
);
2104 pid
= my_waitpid (lp
->ptid
.lwp (), &status
, __WALL
| WNOHANG
);
2105 if (pid
== -1 && errno
== ECHILD
)
2107 /* The thread has previously exited. We need to delete it
2108 now because if this was a non-leader thread execing, we
2109 won't get an exit event. See comments on exec events at
2110 the top of the file. */
2112 linux_nat_debug_printf ("%s vanished.",
2113 lp
->ptid
.to_string ().c_str ());
2118 /* Bugs 10970, 12702.
2119 Thread group leader may have exited in which case we'll lock up in
2120 waitpid if there are other threads, even if they are all zombies too.
2121 Basically, we're not supposed to use waitpid this way.
2122 tkill(pid,0) cannot be used here as it gets ESRCH for both
2123 for zombie and running processes.
2125 As a workaround, check if we're waiting for the thread group leader and
2126 if it's a zombie, and avoid calling waitpid if it is.
2128 This is racy, what if the tgl becomes a zombie right after we check?
2129 Therefore always use WNOHANG with sigsuspend - it is equivalent to
2130 waiting waitpid but linux_proc_pid_is_zombie is safe this way. */
2132 if (lp
->ptid
.pid () == lp
->ptid
.lwp ()
2133 && linux_proc_pid_is_zombie (lp
->ptid
.lwp ()))
2136 linux_nat_debug_printf ("Thread group leader %s vanished.",
2137 lp
->ptid
.to_string ().c_str ());
2141 /* Wait for next SIGCHLD and try again. This may let SIGCHLD handlers
2142 get invoked despite our caller had them intentionally blocked by
2143 block_child_signals. This is sensitive only to the loop of
2144 linux_nat_wait_1 and there if we get called my_waitpid gets called
2145 again before it gets to sigsuspend so we can safely let the handlers
2146 get executed here. */
2150 restore_child_signals_mask (&prev_mask
);
2154 gdb_assert (pid
== lp
->ptid
.lwp ());
2156 linux_nat_debug_printf ("waitpid %s received %s",
2157 lp
->ptid
.to_string ().c_str (),
2158 status_to_str (status
).c_str ());
2160 /* Check if the thread has exited. */
2161 if (WIFEXITED (status
) || WIFSIGNALED (status
))
2163 if (report_exit_events_for (lp
) || is_leader (lp
))
2165 linux_nat_debug_printf ("LWP %d exited.", lp
->ptid
.pid ());
2167 /* If this is the leader exiting, it means the whole
2168 process is gone. Store the status to report to the
2169 core. Store it in lp->waitstatus, because lp->status
2170 would be ambiguous (W_EXITCODE(0,0) == 0). */
2171 lp
->waitstatus
= host_status_to_waitstatus (status
);
2176 linux_nat_debug_printf ("%s exited.",
2177 lp
->ptid
.to_string ().c_str ());
2187 gdb_assert (WIFSTOPPED (status
));
2190 if (lp
->must_set_ptrace_flags
)
2192 inferior
*inf
= find_inferior_pid (linux_target
, lp
->ptid
.pid ());
2193 int options
= linux_nat_ptrace_options (inf
->attach_flag
);
2195 linux_enable_event_reporting (lp
->ptid
.lwp (), options
);
2196 lp
->must_set_ptrace_flags
= 0;
2199 /* Handle GNU/Linux's syscall SIGTRAPs. */
2200 if (WIFSTOPPED (status
) && WSTOPSIG (status
) == SYSCALL_SIGTRAP
)
2202 /* No longer need the sysgood bit. The ptrace event ends up
2203 recorded in lp->waitstatus if we care for it. We can carry
2204 on handling the event like a regular SIGTRAP from here
2206 status
= W_STOPCODE (SIGTRAP
);
2207 if (linux_handle_syscall_trap (lp
, 1))
2208 return wait_lwp (lp
);
2212 /* Almost all other ptrace-stops are known to be outside of system
2213 calls, with further exceptions in linux_handle_extended_wait. */
2214 lp
->syscall_state
= TARGET_WAITKIND_IGNORE
;
2217 /* Handle GNU/Linux's extended waitstatus for trace events. */
2218 if (WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGTRAP
2219 && linux_is_extended_waitstatus (status
))
2221 linux_nat_debug_printf ("Handling extended status 0x%06x", status
);
2222 linux_handle_extended_wait (lp
, status
);
2229 /* Send a SIGSTOP to LP. */
2232 stop_callback (struct lwp_info
*lp
)
2234 if (!lp
->stopped
&& !lp
->signalled
)
2238 linux_nat_debug_printf ("kill %s **<SIGSTOP>**",
2239 lp
->ptid
.to_string ().c_str ());
2242 ret
= kill_lwp (lp
->ptid
.lwp (), SIGSTOP
);
2243 linux_nat_debug_printf ("lwp kill %d %s", ret
,
2244 errno
? safe_strerror (errno
) : "ERRNO-OK");
2247 gdb_assert (lp
->status
== 0);
2253 /* Request a stop on LWP. */
2256 linux_stop_lwp (struct lwp_info
*lwp
)
2258 stop_callback (lwp
);
2261 /* See linux-nat.h */
2264 linux_stop_and_wait_all_lwps (void)
2266 /* Stop all LWP's ... */
2267 iterate_over_lwps (minus_one_ptid
, stop_callback
);
2269 /* ... and wait until all of them have reported back that
2270 they're no longer running. */
2271 iterate_over_lwps (minus_one_ptid
, stop_wait_callback
);
2274 /* See linux-nat.h */
2277 linux_unstop_all_lwps (void)
2279 iterate_over_lwps (minus_one_ptid
,
2280 [] (struct lwp_info
*info
)
2282 return resume_stopped_resumed_lwps (info
, minus_one_ptid
);
2286 /* Return non-zero if LWP PID has a pending SIGINT. */
2289 linux_nat_has_pending_sigint (int pid
)
2291 sigset_t pending
, blocked
, ignored
;
2293 linux_proc_pending_signals (pid
, &pending
, &blocked
, &ignored
);
2295 if (sigismember (&pending
, SIGINT
)
2296 && !sigismember (&ignored
, SIGINT
))
2302 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
2305 set_ignore_sigint (struct lwp_info
*lp
)
2307 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2308 flag to consume the next one. */
2309 if (lp
->stopped
&& lp
->status
!= 0 && WIFSTOPPED (lp
->status
)
2310 && WSTOPSIG (lp
->status
) == SIGINT
)
2313 lp
->ignore_sigint
= 1;
2318 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2319 This function is called after we know the LWP has stopped; if the LWP
2320 stopped before the expected SIGINT was delivered, then it will never have
2321 arrived. Also, if the signal was delivered to a shared queue and consumed
2322 by a different thread, it will never be delivered to this LWP. */
2325 maybe_clear_ignore_sigint (struct lwp_info
*lp
)
2327 if (!lp
->ignore_sigint
)
2330 if (!linux_nat_has_pending_sigint (lp
->ptid
.lwp ()))
2332 linux_nat_debug_printf ("Clearing bogus flag for %s",
2333 lp
->ptid
.to_string ().c_str ());
2334 lp
->ignore_sigint
= 0;
2338 /* Fetch the possible triggered data watchpoint info and store it in
2341 On some archs, like x86, that use debug registers to set
2342 watchpoints, it's possible that the way to know which watched
2343 address trapped, is to check the register that is used to select
2344 which address to watch. Problem is, between setting the watchpoint
2345 and reading back which data address trapped, the user may change
2346 the set of watchpoints, and, as a consequence, GDB changes the
2347 debug registers in the inferior. To avoid reading back a stale
2348 stopped-data-address when that happens, we cache in LP the fact
2349 that a watchpoint trapped, and the corresponding data address, as
2350 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2351 registers meanwhile, we have the cached data we can rely on. */
2354 check_stopped_by_watchpoint (struct lwp_info
*lp
)
2356 scoped_restore save_inferior_ptid
= make_scoped_restore (&inferior_ptid
);
2357 inferior_ptid
= lp
->ptid
;
2359 if (linux_target
->low_stopped_by_watchpoint ())
2361 lp
->stop_reason
= TARGET_STOPPED_BY_WATCHPOINT
;
2362 lp
->stopped_data_address_p
2363 = linux_target
->low_stopped_data_address (&lp
->stopped_data_address
);
2366 return lp
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
;
2369 /* Returns true if the LWP had stopped for a watchpoint. */
2372 linux_nat_target::stopped_by_watchpoint ()
2374 struct lwp_info
*lp
= find_lwp_pid (inferior_ptid
);
2376 gdb_assert (lp
!= NULL
);
2378 return lp
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
;
2382 linux_nat_target::stopped_data_address (CORE_ADDR
*addr_p
)
2384 struct lwp_info
*lp
= find_lwp_pid (inferior_ptid
);
2386 gdb_assert (lp
!= NULL
);
2388 *addr_p
= lp
->stopped_data_address
;
2390 return lp
->stopped_data_address_p
;
2393 /* Commonly any breakpoint / watchpoint generate only SIGTRAP. */
2396 linux_nat_target::low_status_is_event (int status
)
2398 return WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGTRAP
;
2401 /* Wait until LP is stopped. */
2404 stop_wait_callback (struct lwp_info
*lp
)
2406 inferior
*inf
= find_inferior_ptid (linux_target
, lp
->ptid
);
2408 /* If this is a vfork parent, bail out, it is not going to report
2409 any SIGSTOP until the vfork is done with. */
2410 if (inf
->vfork_child
!= NULL
)
2417 status
= wait_lwp (lp
);
2421 if (lp
->ignore_sigint
&& WIFSTOPPED (status
)
2422 && WSTOPSIG (status
) == SIGINT
)
2424 lp
->ignore_sigint
= 0;
2427 ptrace (PTRACE_CONT
, lp
->ptid
.lwp (), 0, 0);
2429 linux_nat_debug_printf
2430 ("PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)",
2431 lp
->ptid
.to_string ().c_str (),
2432 errno
? safe_strerror (errno
) : "OK");
2434 return stop_wait_callback (lp
);
2437 maybe_clear_ignore_sigint (lp
);
2439 if (WSTOPSIG (status
) != SIGSTOP
)
2441 /* The thread was stopped with a signal other than SIGSTOP. */
2443 linux_nat_debug_printf ("Pending event %s in %s",
2444 status_to_str ((int) status
).c_str (),
2445 lp
->ptid
.to_string ().c_str ());
2447 /* Save the sigtrap event. */
2448 lp
->status
= status
;
2449 gdb_assert (lp
->signalled
);
2450 save_stop_reason (lp
);
2454 /* We caught the SIGSTOP that we intended to catch. */
2456 linux_nat_debug_printf ("Expected SIGSTOP caught for %s.",
2457 lp
->ptid
.to_string ().c_str ());
2461 /* If we are waiting for this stop so we can report the thread
2462 stopped then we need to record this status. Otherwise, we can
2463 now discard this stop event. */
2464 if (lp
->last_resume_kind
== resume_stop
)
2466 lp
->status
= status
;
2467 save_stop_reason (lp
);
2475 /* Get the inferior associated to LWP. Must be called with an LWP that has
2476 an associated inferior. Always return non-nullptr. */
2479 lwp_inferior (const lwp_info
*lwp
)
2481 inferior
*inf
= find_inferior_ptid (linux_target
, lwp
->ptid
);
2482 gdb_assert (inf
!= nullptr);
2486 /* Return non-zero if LP has a wait status pending. Discard the
2487 pending event and resume the LWP if the event that originally
2488 caused the stop became uninteresting. */
2491 status_callback (struct lwp_info
*lp
)
2493 /* Only report a pending wait status if we pretend that this has
2494 indeed been resumed. */
2498 if (!lwp_status_pending_p (lp
))
2501 if (lp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
2502 || lp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
)
2504 struct regcache
*regcache
= get_thread_regcache (linux_target
, lp
->ptid
);
2508 pc
= regcache_read_pc (regcache
);
2510 if (pc
!= lp
->stop_pc
)
2512 linux_nat_debug_printf ("PC of %s changed. was=%s, now=%s",
2513 lp
->ptid
.to_string ().c_str (),
2514 paddress (current_inferior ()->arch (),
2516 paddress (current_inferior ()->arch (), pc
));
2520 #if !USE_SIGTRAP_SIGINFO
2521 else if (!breakpoint_inserted_here_p (lwp_inferior (lp
)->aspace
, pc
))
2523 linux_nat_debug_printf ("previous breakpoint of %s, at %s gone",
2524 lp
->ptid
.to_string ().c_str (),
2525 paddress (current_inferior ()->arch (),
2534 linux_nat_debug_printf ("pending event of %s cancelled.",
2535 lp
->ptid
.to_string ().c_str ());
2538 linux_resume_one_lwp (lp
, lp
->step
, GDB_SIGNAL_0
);
2546 /* Count the LWP's that have had events. */
2549 count_events_callback (struct lwp_info
*lp
, int *count
)
2551 gdb_assert (count
!= NULL
);
2553 /* Select only resumed LWPs that have an event pending. */
2554 if (lp
->resumed
&& lwp_status_pending_p (lp
))
2560 /* Select the LWP (if any) that is currently being single-stepped. */
2563 select_singlestep_lwp_callback (struct lwp_info
*lp
)
2565 if (lp
->last_resume_kind
== resume_step
2572 /* Returns true if LP has a status pending. */
2575 lwp_status_pending_p (struct lwp_info
*lp
)
2577 /* We check for lp->waitstatus in addition to lp->status, because we
2578 can have pending process exits recorded in lp->status and
2579 W_EXITCODE(0,0) happens to be 0. */
2580 return lp
->status
!= 0 || lp
->waitstatus
.kind () != TARGET_WAITKIND_IGNORE
;
2583 /* Select the Nth LWP that has had an event. */
2586 select_event_lwp_callback (struct lwp_info
*lp
, int *selector
)
2588 gdb_assert (selector
!= NULL
);
2590 /* Select only resumed LWPs that have an event pending. */
2591 if (lp
->resumed
&& lwp_status_pending_p (lp
))
2592 if ((*selector
)-- == 0)
2598 /* Called when the LWP stopped for a signal/trap. If it stopped for a
2599 trap check what caused it (breakpoint, watchpoint, trace, etc.),
2600 and save the result in the LWP's stop_reason field. If it stopped
2601 for a breakpoint, decrement the PC if necessary on the lwp's
2605 save_stop_reason (struct lwp_info
*lp
)
2607 struct regcache
*regcache
;
2608 struct gdbarch
*gdbarch
;
2611 #if USE_SIGTRAP_SIGINFO
2615 gdb_assert (lp
->stop_reason
== TARGET_STOPPED_BY_NO_REASON
);
2616 gdb_assert (lp
->status
!= 0);
2618 if (!linux_target
->low_status_is_event (lp
->status
))
2621 inferior
*inf
= lwp_inferior (lp
);
2622 if (inf
->starting_up
)
2625 regcache
= get_thread_regcache (linux_target
, lp
->ptid
);
2626 gdbarch
= regcache
->arch ();
2628 pc
= regcache_read_pc (regcache
);
2629 sw_bp_pc
= pc
- gdbarch_decr_pc_after_break (gdbarch
);
2631 #if USE_SIGTRAP_SIGINFO
2632 if (linux_nat_get_siginfo (lp
->ptid
, &siginfo
))
2634 if (siginfo
.si_signo
== SIGTRAP
)
2636 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo
.si_code
)
2637 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo
.si_code
))
2639 /* The si_code is ambiguous on this arch -- check debug
2641 if (!check_stopped_by_watchpoint (lp
))
2642 lp
->stop_reason
= TARGET_STOPPED_BY_SW_BREAKPOINT
;
2644 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo
.si_code
))
2646 /* If we determine the LWP stopped for a SW breakpoint,
2647 trust it. Particularly don't check watchpoint
2648 registers, because, at least on s390, we'd find
2649 stopped-by-watchpoint as long as there's a watchpoint
2651 lp
->stop_reason
= TARGET_STOPPED_BY_SW_BREAKPOINT
;
2653 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo
.si_code
))
2655 /* This can indicate either a hardware breakpoint or
2656 hardware watchpoint. Check debug registers. */
2657 if (!check_stopped_by_watchpoint (lp
))
2658 lp
->stop_reason
= TARGET_STOPPED_BY_HW_BREAKPOINT
;
2660 else if (siginfo
.si_code
== TRAP_TRACE
)
2662 linux_nat_debug_printf ("%s stopped by trace",
2663 lp
->ptid
.to_string ().c_str ());
2665 /* We may have single stepped an instruction that
2666 triggered a watchpoint. In that case, on some
2667 architectures (such as x86), instead of TRAP_HWBKPT,
2668 si_code indicates TRAP_TRACE, and we need to check
2669 the debug registers separately. */
2670 check_stopped_by_watchpoint (lp
);
2675 if ((!lp
->step
|| lp
->stop_pc
== sw_bp_pc
)
2676 && software_breakpoint_inserted_here_p (inf
->aspace
, sw_bp_pc
))
2678 /* The LWP was either continued, or stepped a software
2679 breakpoint instruction. */
2680 lp
->stop_reason
= TARGET_STOPPED_BY_SW_BREAKPOINT
;
2683 if (hardware_breakpoint_inserted_here_p (inf
->aspace
, pc
))
2684 lp
->stop_reason
= TARGET_STOPPED_BY_HW_BREAKPOINT
;
2686 if (lp
->stop_reason
== TARGET_STOPPED_BY_NO_REASON
)
2687 check_stopped_by_watchpoint (lp
);
2690 if (lp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
)
2692 linux_nat_debug_printf ("%s stopped by software breakpoint",
2693 lp
->ptid
.to_string ().c_str ());
2695 /* Back up the PC if necessary. */
2697 regcache_write_pc (regcache
, sw_bp_pc
);
2699 /* Update this so we record the correct stop PC below. */
2702 else if (lp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
)
2704 linux_nat_debug_printf ("%s stopped by hardware breakpoint",
2705 lp
->ptid
.to_string ().c_str ());
2707 else if (lp
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
)
2709 linux_nat_debug_printf ("%s stopped by hardware watchpoint",
2710 lp
->ptid
.to_string ().c_str ());
2717 /* Returns true if the LWP had stopped for a software breakpoint. */
2720 linux_nat_target::stopped_by_sw_breakpoint ()
2722 struct lwp_info
*lp
= find_lwp_pid (inferior_ptid
);
2724 gdb_assert (lp
!= NULL
);
2726 return lp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
;
2729 /* Implement the supports_stopped_by_sw_breakpoint method. */
2732 linux_nat_target::supports_stopped_by_sw_breakpoint ()
2734 return USE_SIGTRAP_SIGINFO
;
2737 /* Returns true if the LWP had stopped for a hardware
2738 breakpoint/watchpoint. */
2741 linux_nat_target::stopped_by_hw_breakpoint ()
2743 struct lwp_info
*lp
= find_lwp_pid (inferior_ptid
);
2745 gdb_assert (lp
!= NULL
);
2747 return lp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
;
2750 /* Implement the supports_stopped_by_hw_breakpoint method. */
2753 linux_nat_target::supports_stopped_by_hw_breakpoint ()
2755 return USE_SIGTRAP_SIGINFO
;
2758 /* Select one LWP out of those that have events pending. */
2761 select_event_lwp (ptid_t filter
, struct lwp_info
**orig_lp
, int *status
)
2764 int random_selector
;
2765 struct lwp_info
*event_lp
= NULL
;
2767 /* Record the wait status for the original LWP. */
2768 (*orig_lp
)->status
= *status
;
2770 /* In all-stop, give preference to the LWP that is being
2771 single-stepped. There will be at most one, and it will be the
2772 LWP that the core is most interested in. If we didn't do this,
2773 then we'd have to handle pending step SIGTRAPs somehow in case
2774 the core later continues the previously-stepped thread, as
2775 otherwise we'd report the pending SIGTRAP then, and the core, not
2776 having stepped the thread, wouldn't understand what the trap was
2777 for, and therefore would report it to the user as a random
2779 if (!target_is_non_stop_p ())
2781 event_lp
= iterate_over_lwps (filter
, select_singlestep_lwp_callback
);
2782 if (event_lp
!= NULL
)
2784 linux_nat_debug_printf ("Select single-step %s",
2785 event_lp
->ptid
.to_string ().c_str ());
2789 if (event_lp
== NULL
)
2791 /* Pick one at random, out of those which have had events. */
2793 /* First see how many events we have. */
2794 iterate_over_lwps (filter
,
2795 [&] (struct lwp_info
*info
)
2797 return count_events_callback (info
, &num_events
);
2799 gdb_assert (num_events
> 0);
2801 /* Now randomly pick a LWP out of those that have had
2803 random_selector
= (int)
2804 ((num_events
* (double) rand ()) / (RAND_MAX
+ 1.0));
2807 linux_nat_debug_printf ("Found %d events, selecting #%d",
2808 num_events
, random_selector
);
2811 = (iterate_over_lwps
2813 [&] (struct lwp_info
*info
)
2815 return select_event_lwp_callback (info
,
2820 if (event_lp
!= NULL
)
2822 /* Switch the event LWP. */
2823 *orig_lp
= event_lp
;
2824 *status
= event_lp
->status
;
2827 /* Flush the wait status for the event LWP. */
2828 (*orig_lp
)->status
= 0;
2831 /* Return non-zero if LP has been resumed. */
2834 resumed_callback (struct lwp_info
*lp
)
2839 /* Check if we should go on and pass this event to common code.
2841 If so, save the status to the lwp_info structure associated to LWPID. */
2844 linux_nat_filter_event (int lwpid
, int status
)
2846 struct lwp_info
*lp
;
2847 int event
= linux_ptrace_get_extended_event (status
);
2849 lp
= find_lwp_pid (ptid_t (lwpid
));
2851 /* Check for events reported by anything not in our LWP list. */
2854 if (WIFSTOPPED (status
))
2856 if (WSTOPSIG (status
) == SIGTRAP
&& event
== PTRACE_EVENT_EXEC
)
2858 /* A non-leader thread exec'ed after we've seen the
2859 leader zombie, and removed it from our lists (in
2860 check_zombie_leaders). The non-leader thread changes
2861 its tid to the tgid. */
2862 linux_nat_debug_printf
2863 ("Re-adding thread group leader LWP %d after exec.",
2866 lp
= add_lwp (ptid_t (lwpid
, lwpid
));
2869 add_thread (linux_target
, lp
->ptid
);
2873 /* A process we are controlling has forked and the new
2874 child's stop was reported to us by the kernel. Save
2875 its PID and go back to waiting for the fork event to
2876 be reported - the stopped process might be returned
2877 from waitpid before or after the fork event is. */
2878 linux_nat_debug_printf
2879 ("Saving LWP %d status %s in stopped_pids list",
2880 lwpid
, status_to_str (status
).c_str ());
2881 add_to_pid_list (&stopped_pids
, lwpid
, status
);
2886 /* Don't report an event for the exit of an LWP not in our
2887 list, i.e. not part of any inferior we're debugging.
2888 This can happen if we detach from a program we originally
2889 forked and then it exits. However, note that we may have
2890 earlier deleted a leader of an inferior we're debugging,
2891 in check_zombie_leaders. Re-add it back here if so. */
2892 for (inferior
*inf
: all_inferiors (linux_target
))
2894 if (inf
->pid
== lwpid
)
2896 linux_nat_debug_printf
2897 ("Re-adding thread group leader LWP %d after exit.",
2900 lp
= add_lwp (ptid_t (lwpid
, lwpid
));
2902 add_thread (linux_target
, lp
->ptid
);
2912 /* This LWP is stopped now. (And if dead, this prevents it from
2913 ever being continued.) */
2916 if (WIFSTOPPED (status
) && lp
->must_set_ptrace_flags
)
2918 inferior
*inf
= find_inferior_pid (linux_target
, lp
->ptid
.pid ());
2919 int options
= linux_nat_ptrace_options (inf
->attach_flag
);
2921 linux_enable_event_reporting (lp
->ptid
.lwp (), options
);
2922 lp
->must_set_ptrace_flags
= 0;
2925 /* Handle GNU/Linux's syscall SIGTRAPs. */
2926 if (WIFSTOPPED (status
) && WSTOPSIG (status
) == SYSCALL_SIGTRAP
)
2928 /* No longer need the sysgood bit. The ptrace event ends up
2929 recorded in lp->waitstatus if we care for it. We can carry
2930 on handling the event like a regular SIGTRAP from here
2932 status
= W_STOPCODE (SIGTRAP
);
2933 if (linux_handle_syscall_trap (lp
, 0))
2938 /* Almost all other ptrace-stops are known to be outside of system
2939 calls, with further exceptions in linux_handle_extended_wait. */
2940 lp
->syscall_state
= TARGET_WAITKIND_IGNORE
;
2943 /* Handle GNU/Linux's extended waitstatus for trace events. */
2944 if (WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGTRAP
2945 && linux_is_extended_waitstatus (status
))
2947 linux_nat_debug_printf ("Handling extended status 0x%06x", status
);
2949 if (linux_handle_extended_wait (lp
, status
))
2953 /* Check if the thread has exited. */
2954 if (WIFEXITED (status
) || WIFSIGNALED (status
))
2956 if (!report_exit_events_for (lp
) && !is_leader (lp
))
2958 linux_nat_debug_printf ("%s exited.",
2959 lp
->ptid
.to_string ().c_str ());
2961 /* If this was not the leader exiting, then the exit signal
2962 was not the end of the debugged application and should be
2968 /* Note that even if the leader was ptrace-stopped, it can still
2969 exit, if e.g., some other thread brings down the whole
2970 process (calls `exit'). So don't assert that the lwp is
2972 linux_nat_debug_printf ("LWP %ld exited (resumed=%d)",
2973 lp
->ptid
.lwp (), lp
->resumed
);
2975 /* Dead LWP's aren't expected to reported a pending sigstop. */
2978 /* Store the pending event in the waitstatus, because
2979 W_EXITCODE(0,0) == 0. */
2980 lp
->waitstatus
= host_status_to_waitstatus (status
);
2984 /* Make sure we don't report a SIGSTOP that we sent ourselves in
2985 an attempt to stop an LWP. */
2987 && WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGSTOP
)
2991 if (lp
->last_resume_kind
== resume_stop
)
2993 linux_nat_debug_printf ("resume_stop SIGSTOP caught for %s.",
2994 lp
->ptid
.to_string ().c_str ());
2998 /* This is a delayed SIGSTOP. Filter out the event. */
3000 linux_nat_debug_printf
3001 ("%s %s, 0, 0 (discard delayed SIGSTOP)",
3002 lp
->step
? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3003 lp
->ptid
.to_string ().c_str ());
3005 linux_resume_one_lwp (lp
, lp
->step
, GDB_SIGNAL_0
);
3006 gdb_assert (lp
->resumed
);
3011 /* Make sure we don't report a SIGINT that we have already displayed
3012 for another thread. */
3013 if (lp
->ignore_sigint
3014 && WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGINT
)
3016 linux_nat_debug_printf ("Delayed SIGINT caught for %s.",
3017 lp
->ptid
.to_string ().c_str ());
3019 /* This is a delayed SIGINT. */
3020 lp
->ignore_sigint
= 0;
3022 linux_resume_one_lwp (lp
, lp
->step
, GDB_SIGNAL_0
);
3023 linux_nat_debug_printf ("%s %s, 0, 0 (discard SIGINT)",
3024 lp
->step
? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3025 lp
->ptid
.to_string ().c_str ());
3026 gdb_assert (lp
->resumed
);
3028 /* Discard the event. */
3032 /* Don't report signals that GDB isn't interested in, such as
3033 signals that are neither printed nor stopped upon. Stopping all
3034 threads can be a bit time-consuming, so if we want decent
3035 performance with heavily multi-threaded programs, especially when
3036 they're using a high frequency timer, we'd better avoid it if we
3038 if (WIFSTOPPED (status
))
3040 enum gdb_signal signo
= gdb_signal_from_host (WSTOPSIG (status
));
3042 if (!target_is_non_stop_p ())
3044 /* Only do the below in all-stop, as we currently use SIGSTOP
3045 to implement target_stop (see linux_nat_stop) in
3047 if (signo
== GDB_SIGNAL_INT
&& signal_pass_state (signo
) == 0)
3049 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3050 forwarded to the entire process group, that is, all LWPs
3051 will receive it - unless they're using CLONE_THREAD to
3052 share signals. Since we only want to report it once, we
3053 mark it as ignored for all LWPs except this one. */
3054 iterate_over_lwps (ptid_t (lp
->ptid
.pid ()), set_ignore_sigint
);
3055 lp
->ignore_sigint
= 0;
3058 maybe_clear_ignore_sigint (lp
);
3061 /* When using hardware single-step, we need to report every signal.
3062 Otherwise, signals in pass_mask may be short-circuited
3063 except signals that might be caused by a breakpoint, or SIGSTOP
3064 if we sent the SIGSTOP and are waiting for it to arrive. */
3066 && WSTOPSIG (status
) && sigismember (&pass_mask
, WSTOPSIG (status
))
3067 && (WSTOPSIG (status
) != SIGSTOP
3068 || !linux_target
->find_thread (lp
->ptid
)->stop_requested
)
3069 && !linux_wstatus_maybe_breakpoint (status
))
3071 linux_resume_one_lwp (lp
, lp
->step
, signo
);
3072 linux_nat_debug_printf
3073 ("%s %s, %s (preempt 'handle')",
3074 lp
->step
? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3075 lp
->ptid
.to_string ().c_str (),
3076 (signo
!= GDB_SIGNAL_0
3077 ? strsignal (gdb_signal_to_host (signo
)) : "0"));
3082 /* An interesting event. */
3084 lp
->status
= status
;
3085 save_stop_reason (lp
);
3088 /* Detect zombie thread group leaders, and "exit" them. We can't reap
3089 their exits until all other threads in the group have exited. */
3092 check_zombie_leaders (void)
3094 for (inferior
*inf
: all_inferiors ())
3096 struct lwp_info
*leader_lp
;
3101 leader_lp
= find_lwp_pid (ptid_t (inf
->pid
));
3102 if (leader_lp
!= NULL
3103 /* Check if there are other threads in the group, as we may
3104 have raced with the inferior simply exiting. Note this
3105 isn't a watertight check. If the inferior is
3106 multi-threaded and is exiting, it may be we see the
3107 leader as zombie before we reap all the non-leader
3108 threads. See comments below. */
3109 && num_lwps (inf
->pid
) > 1
3110 && linux_proc_pid_is_zombie (inf
->pid
))
3112 /* A zombie leader in a multi-threaded program can mean one
3115 #1 - Only the leader exited, not the whole program, e.g.,
3116 with pthread_exit. Since we can't reap the leader's exit
3117 status until all other threads are gone and reaped too,
3118 we want to delete the zombie leader right away, as it
3119 can't be debugged, we can't read its registers, etc.
3120 This is the main reason we check for zombie leaders
3123 #2 - The whole thread-group/process exited (a group exit,
3124 via e.g. exit(3), and there is (or will be shortly) an
3125 exit reported for each thread in the process, and then
3126 finally an exit for the leader once the non-leaders are
3129 #3 - There are 3 or more threads in the group, and a
3130 thread other than the leader exec'd. See comments on
3131 exec events at the top of the file.
3133 Ideally we would never delete the leader for case #2.
3134 Instead, we want to collect the exit status of each
3135 non-leader thread, and then finally collect the exit
3136 status of the leader as normal and use its exit code as
3137 whole-process exit code. Unfortunately, there's no
3138 race-free way to distinguish cases #1 and #2. We can't
3139 assume the exit events for the non-leaders threads are
3140 already pending in the kernel, nor can we assume the
3141 non-leader threads are in zombie state already. Between
3142 the leader becoming zombie and the non-leaders exiting
3143 and becoming zombie themselves, there's a small time
3144 window, so such a check would be racy. Temporarily
3145 pausing all threads and checking to see if all threads
3146 exit or not before re-resuming them would work in the
3147 case that all threads are running right now, but it
3148 wouldn't work if some thread is currently already
3149 ptrace-stopped, e.g., due to scheduler-locking.
3151 So what we do is we delete the leader anyhow, and then
3152 later on when we see its exit status, we re-add it back.
3153 We also make sure that we only report a whole-process
3154 exit when we see the leader exiting, as opposed to when
3155 the last LWP in the LWP list exits, which can be a
3156 non-leader if we deleted the leader here. */
3157 linux_nat_debug_printf ("Thread group leader %d zombie "
3158 "(it exited, or another thread execd), "
3161 exit_lwp (leader_lp
);
3166 /* Convenience function that is called when we're about to return an
3167 event to the core. If the event is an exit or signalled event,
3168 then this decides whether to report it as process-wide event, as a
3169 thread exit event, or to suppress it. All other event kinds are
3170 passed through unmodified. */
3173 filter_exit_event (struct lwp_info
*event_child
,
3174 struct target_waitstatus
*ourstatus
)
3176 ptid_t ptid
= event_child
->ptid
;
3178 /* Note we must filter TARGET_WAITKIND_SIGNALLED as well, otherwise
3179 if a non-leader thread exits with a signal, we'd report it to the
3180 core which would interpret it as the whole-process exiting.
3181 There is no TARGET_WAITKIND_THREAD_SIGNALLED event kind. */
3182 if (ourstatus
->kind () != TARGET_WAITKIND_EXITED
3183 && ourstatus
->kind () != TARGET_WAITKIND_SIGNALLED
)
3186 if (!is_leader (event_child
))
3188 if (report_exit_events_for (event_child
))
3190 ourstatus
->set_thread_exited (0);
3191 /* Delete lwp, but not thread_info, infrun will need it to
3192 process the event. */
3193 exit_lwp (event_child
, false);
3197 ourstatus
->set_ignore ();
3198 exit_lwp (event_child
);
3206 linux_nat_wait_1 (ptid_t ptid
, struct target_waitstatus
*ourstatus
,
3207 target_wait_flags target_options
)
3209 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT
;
3212 enum resume_kind last_resume_kind
;
3213 struct lwp_info
*lp
;
3216 /* The first time we get here after starting a new inferior, we may
3217 not have added it to the LWP list yet - this is the earliest
3218 moment at which we know its PID. */
3219 if (ptid
.is_pid () && find_lwp_pid (ptid
) == nullptr)
3221 ptid_t
lwp_ptid (ptid
.pid (), ptid
.pid ());
3223 /* Upgrade the main thread's ptid. */
3224 thread_change_ptid (linux_target
, ptid
, lwp_ptid
);
3225 lp
= add_initial_lwp (lwp_ptid
);
3229 /* Make sure SIGCHLD is blocked until the sigsuspend below. */
3230 block_child_signals (&prev_mask
);
3232 /* First check if there is a LWP with a wait status pending. */
3233 lp
= iterate_over_lwps (ptid
, status_callback
);
3236 linux_nat_debug_printf ("Using pending wait status %s for %s.",
3237 pending_status_str (lp
).c_str (),
3238 lp
->ptid
.to_string ().c_str ());
3241 /* But if we don't find a pending event, we'll have to wait. Always
3242 pull all events out of the kernel. We'll randomly select an
3243 event LWP out of all that have events, to prevent starvation. */
3249 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
3252 - If the thread group leader exits while other threads in the
3253 thread group still exist, waitpid(TGID, ...) hangs. That
3254 waitpid won't return an exit status until the other threads
3255 in the group are reaped.
3257 - When a non-leader thread execs, that thread just vanishes
3258 without reporting an exit (so we'd hang if we waited for it
3259 explicitly in that case). The exec event is reported to
3263 lwpid
= my_waitpid (-1, &status
, __WALL
| WNOHANG
);
3265 linux_nat_debug_printf ("waitpid(-1, ...) returned %d, %s",
3267 errno
? safe_strerror (errno
) : "ERRNO-OK");
3271 linux_nat_debug_printf ("waitpid %ld received %s",
3273 status_to_str (status
).c_str ());
3275 linux_nat_filter_event (lwpid
, status
);
3276 /* Retry until nothing comes out of waitpid. A single
3277 SIGCHLD can indicate more than one child stopped. */
3281 /* Now that we've pulled all events out of the kernel, resume
3282 LWPs that don't have an interesting event to report. */
3283 iterate_over_lwps (minus_one_ptid
,
3284 [] (struct lwp_info
*info
)
3286 return resume_stopped_resumed_lwps (info
, minus_one_ptid
);
3289 /* ... and find an LWP with a status to report to the core, if
3291 lp
= iterate_over_lwps (ptid
, status_callback
);
3295 /* Check for zombie thread group leaders. Those can't be reaped
3296 until all other threads in the thread group are. */
3297 check_zombie_leaders ();
3299 /* If there are no resumed children left, bail. We'd be stuck
3300 forever in the sigsuspend call below otherwise. */
3301 if (iterate_over_lwps (ptid
, resumed_callback
) == NULL
)
3303 linux_nat_debug_printf ("exit (no resumed LWP)");
3305 ourstatus
->set_no_resumed ();
3307 restore_child_signals_mask (&prev_mask
);
3308 return minus_one_ptid
;
3311 /* No interesting event to report to the core. */
3313 if (target_options
& TARGET_WNOHANG
)
3315 linux_nat_debug_printf ("no interesting events found");
3317 ourstatus
->set_ignore ();
3318 restore_child_signals_mask (&prev_mask
);
3319 return minus_one_ptid
;
3322 /* We shouldn't end up here unless we want to try again. */
3323 gdb_assert (lp
== NULL
);
3325 /* Block until we get an event reported with SIGCHLD. */
3331 status
= lp
->status
;
3334 if (!target_is_non_stop_p ())
3336 /* Now stop all other LWP's ... */
3337 iterate_over_lwps (minus_one_ptid
, stop_callback
);
3339 /* ... and wait until all of them have reported back that
3340 they're no longer running. */
3341 iterate_over_lwps (minus_one_ptid
, stop_wait_callback
);
3344 /* If we're not waiting for a specific LWP, choose an event LWP from
3345 among those that have had events. Giving equal priority to all
3346 LWPs that have had events helps prevent starvation. */
3347 if (ptid
== minus_one_ptid
|| ptid
.is_pid ())
3348 select_event_lwp (ptid
, &lp
, &status
);
3350 gdb_assert (lp
!= NULL
);
3352 /* Now that we've selected our final event LWP, un-adjust its PC if
3353 it was a software breakpoint, and we can't reliably support the
3354 "stopped by software breakpoint" stop reason. */
3355 if (lp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
3356 && !USE_SIGTRAP_SIGINFO
)
3358 struct regcache
*regcache
= get_thread_regcache (linux_target
, lp
->ptid
);
3359 struct gdbarch
*gdbarch
= regcache
->arch ();
3360 int decr_pc
= gdbarch_decr_pc_after_break (gdbarch
);
3366 pc
= regcache_read_pc (regcache
);
3367 regcache_write_pc (regcache
, pc
+ decr_pc
);
3371 /* We'll need this to determine whether to report a SIGSTOP as
3372 GDB_SIGNAL_0. Need to take a copy because resume_clear_callback
3374 last_resume_kind
= lp
->last_resume_kind
;
3376 if (!target_is_non_stop_p ())
3378 /* In all-stop, from the core's perspective, all LWPs are now
3379 stopped until a new resume action is sent over. */
3380 iterate_over_lwps (minus_one_ptid
, resume_clear_callback
);
3384 resume_clear_callback (lp
);
3387 if (linux_target
->low_status_is_event (status
))
3389 linux_nat_debug_printf ("trap ptid is %s.",
3390 lp
->ptid
.to_string ().c_str ());
3393 if (lp
->waitstatus
.kind () != TARGET_WAITKIND_IGNORE
)
3395 *ourstatus
= lp
->waitstatus
;
3396 lp
->waitstatus
.set_ignore ();
3399 *ourstatus
= host_status_to_waitstatus (status
);
3401 linux_nat_debug_printf ("event found");
3403 restore_child_signals_mask (&prev_mask
);
3405 if (last_resume_kind
== resume_stop
3406 && ourstatus
->kind () == TARGET_WAITKIND_STOPPED
3407 && WSTOPSIG (status
) == SIGSTOP
)
3409 /* A thread that has been requested to stop by GDB with
3410 target_stop, and it stopped cleanly, so report as SIG0. The
3411 use of SIGSTOP is an implementation detail. */
3412 ourstatus
->set_stopped (GDB_SIGNAL_0
);
3415 if (ourstatus
->kind () == TARGET_WAITKIND_EXITED
3416 || ourstatus
->kind () == TARGET_WAITKIND_SIGNALLED
)
3419 lp
->core
= linux_common_core_of_thread (lp
->ptid
);
3421 return filter_exit_event (lp
, ourstatus
);
3424 /* Resume LWPs that are currently stopped without any pending status
3425 to report, but are resumed from the core's perspective. */
3428 resume_stopped_resumed_lwps (struct lwp_info
*lp
, const ptid_t wait_ptid
)
3430 inferior
*inf
= lwp_inferior (lp
);
3434 linux_nat_debug_printf ("NOT resuming LWP %s, not stopped",
3435 lp
->ptid
.to_string ().c_str ());
3437 else if (!lp
->resumed
)
3439 linux_nat_debug_printf ("NOT resuming LWP %s, not resumed",
3440 lp
->ptid
.to_string ().c_str ());
3442 else if (lwp_status_pending_p (lp
))
3444 linux_nat_debug_printf ("NOT resuming LWP %s, has pending status",
3445 lp
->ptid
.to_string ().c_str ());
3447 else if (inf
->vfork_child
!= nullptr)
3449 linux_nat_debug_printf ("NOT resuming LWP %s (vfork parent)",
3450 lp
->ptid
.to_string ().c_str ());
3454 struct regcache
*regcache
= get_thread_regcache (linux_target
, lp
->ptid
);
3455 struct gdbarch
*gdbarch
= regcache
->arch ();
3459 CORE_ADDR pc
= regcache_read_pc (regcache
);
3460 int leave_stopped
= 0;
3462 /* Don't bother if there's a breakpoint at PC that we'd hit
3463 immediately, and we're not waiting for this LWP. */
3464 if (!lp
->ptid
.matches (wait_ptid
))
3466 if (breakpoint_inserted_here_p (inf
->aspace
, pc
))
3472 linux_nat_debug_printf
3473 ("resuming stopped-resumed LWP %s at %s: step=%d",
3474 lp
->ptid
.to_string ().c_str (), paddress (gdbarch
, pc
),
3477 linux_resume_one_lwp_throw (lp
, lp
->step
, GDB_SIGNAL_0
);
3480 catch (const gdb_exception_error
&ex
)
3482 if (!check_ptrace_stopped_lwp_gone (lp
))
3491 linux_nat_target::wait (ptid_t ptid
, struct target_waitstatus
*ourstatus
,
3492 target_wait_flags target_options
)
3494 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT
;
3498 linux_nat_debug_printf ("[%s], [%s]", ptid
.to_string ().c_str (),
3499 target_options_to_string (target_options
).c_str ());
3501 /* Flush the async file first. */
3502 if (target_is_async_p ())
3503 async_file_flush ();
3505 /* Resume LWPs that are currently stopped without any pending status
3506 to report, but are resumed from the core's perspective. LWPs get
3507 in this state if we find them stopping at a time we're not
3508 interested in reporting the event (target_wait on a
3509 specific_process, for example, see linux_nat_wait_1), and
3510 meanwhile the event became uninteresting. Don't bother resuming
3511 LWPs we're not going to wait for if they'd stop immediately. */
3512 if (target_is_non_stop_p ())
3513 iterate_over_lwps (minus_one_ptid
,
3514 [=] (struct lwp_info
*info
)
3516 return resume_stopped_resumed_lwps (info
, ptid
);
3519 event_ptid
= linux_nat_wait_1 (ptid
, ourstatus
, target_options
);
3521 /* If we requested any event, and something came out, assume there
3522 may be more. If we requested a specific lwp or process, also
3523 assume there may be more. */
3524 if (target_is_async_p ()
3525 && ((ourstatus
->kind () != TARGET_WAITKIND_IGNORE
3526 && ourstatus
->kind () != TARGET_WAITKIND_NO_RESUMED
)
3527 || ptid
!= minus_one_ptid
))
3536 kill_one_lwp (pid_t pid
)
3538 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3541 kill_lwp (pid
, SIGKILL
);
3543 if (debug_linux_nat
)
3545 int save_errno
= errno
;
3547 linux_nat_debug_printf
3548 ("kill (SIGKILL) %ld, 0, 0 (%s)", (long) pid
,
3549 save_errno
!= 0 ? safe_strerror (save_errno
) : "OK");
3552 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3555 ptrace (PTRACE_KILL
, pid
, 0, 0);
3556 if (debug_linux_nat
)
3558 int save_errno
= errno
;
3560 linux_nat_debug_printf
3561 ("PTRACE_KILL %ld, 0, 0 (%s)", (long) pid
,
3562 save_errno
? safe_strerror (save_errno
) : "OK");
3566 /* Wait for an LWP to die. */
3569 kill_wait_one_lwp (pid_t pid
)
3573 /* We must make sure that there are no pending events (delayed
3574 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
3575 program doesn't interfere with any following debugging session. */
3579 res
= my_waitpid (pid
, NULL
, __WALL
);
3580 if (res
!= (pid_t
) -1)
3582 linux_nat_debug_printf ("wait %ld received unknown.", (long) pid
);
3584 /* The Linux kernel sometimes fails to kill a thread
3585 completely after PTRACE_KILL; that goes from the stop
3586 point in do_fork out to the one in get_signal_to_deliver
3587 and waits again. So kill it again. */
3593 gdb_assert (res
== -1 && errno
== ECHILD
);
3596 /* Callback for iterate_over_lwps. */
3599 kill_callback (struct lwp_info
*lp
)
3601 kill_one_lwp (lp
->ptid
.lwp ());
3605 /* Callback for iterate_over_lwps. */
3608 kill_wait_callback (struct lwp_info
*lp
)
3610 kill_wait_one_lwp (lp
->ptid
.lwp ());
3614 /* Kill the fork/clone child of LP if it has an unfollowed child. */
3617 kill_unfollowed_child_callback (lwp_info
*lp
)
3619 std::optional
<target_waitstatus
> ws
= get_pending_child_status (lp
);
3620 if (ws
.has_value ())
3622 ptid_t child_ptid
= ws
->child_ptid ();
3623 int child_pid
= child_ptid
.pid ();
3624 int child_lwp
= child_ptid
.lwp ();
3626 kill_one_lwp (child_lwp
);
3627 kill_wait_one_lwp (child_lwp
);
3629 /* Let the arch-specific native code know this process is
3631 if (ws
->kind () != TARGET_WAITKIND_THREAD_CLONED
)
3632 linux_target
->low_forget_process (child_pid
);
3639 linux_nat_target::kill ()
3641 ptid_t
pid_ptid (inferior_ptid
.pid ());
3643 /* If we're stopped while forking/cloning and we haven't followed
3644 yet, kill the child task. We need to do this first because the
3645 parent will be sleeping if this is a vfork. */
3646 iterate_over_lwps (pid_ptid
, kill_unfollowed_child_callback
);
3648 if (forks_exist_p ())
3649 linux_fork_killall ();
3652 /* Stop all threads before killing them, since ptrace requires
3653 that the thread is stopped to successfully PTRACE_KILL. */
3654 iterate_over_lwps (pid_ptid
, stop_callback
);
3655 /* ... and wait until all of them have reported back that
3656 they're no longer running. */
3657 iterate_over_lwps (pid_ptid
, stop_wait_callback
);
3659 /* Kill all LWP's ... */
3660 iterate_over_lwps (pid_ptid
, kill_callback
);
3662 /* ... and wait until we've flushed all events. */
3663 iterate_over_lwps (pid_ptid
, kill_wait_callback
);
3666 target_mourn_inferior (inferior_ptid
);
3670 linux_nat_target::mourn_inferior ()
3672 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT
;
3674 int pid
= inferior_ptid
.pid ();
3676 purge_lwp_list (pid
);
3678 close_proc_mem_file (pid
);
3680 if (! forks_exist_p ())
3681 /* Normal case, no other forks available. */
3682 inf_ptrace_target::mourn_inferior ();
3684 /* Multi-fork case. The current inferior_ptid has exited, but
3685 there are other viable forks to debug. Delete the exiting
3686 one and context-switch to the first available. */
3687 linux_fork_mourn_inferior ();
3689 /* Let the arch-specific native code know this process is gone. */
3690 linux_target
->low_forget_process (pid
);
3693 /* Convert a native/host siginfo object, into/from the siginfo in the
3694 layout of the inferiors' architecture. */
3697 siginfo_fixup (siginfo_t
*siginfo
, gdb_byte
*inf_siginfo
, int direction
)
3699 /* If the low target didn't do anything, then just do a straight
3701 if (!linux_target
->low_siginfo_fixup (siginfo
, inf_siginfo
, direction
))
3704 memcpy (siginfo
, inf_siginfo
, sizeof (siginfo_t
));
3706 memcpy (inf_siginfo
, siginfo
, sizeof (siginfo_t
));
3710 static enum target_xfer_status
3711 linux_xfer_siginfo (ptid_t ptid
, enum target_object object
,
3712 const char *annex
, gdb_byte
*readbuf
,
3713 const gdb_byte
*writebuf
, ULONGEST offset
, ULONGEST len
,
3714 ULONGEST
*xfered_len
)
3717 gdb_byte inf_siginfo
[sizeof (siginfo_t
)];
3719 gdb_assert (object
== TARGET_OBJECT_SIGNAL_INFO
);
3720 gdb_assert (readbuf
|| writebuf
);
3722 if (offset
> sizeof (siginfo
))
3723 return TARGET_XFER_E_IO
;
3725 if (!linux_nat_get_siginfo (ptid
, &siginfo
))
3726 return TARGET_XFER_E_IO
;
3728 /* When GDB is built as a 64-bit application, ptrace writes into
3729 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3730 inferior with a 64-bit GDB should look the same as debugging it
3731 with a 32-bit GDB, we need to convert it. GDB core always sees
3732 the converted layout, so any read/write will have to be done
3734 siginfo_fixup (&siginfo
, inf_siginfo
, 0);
3736 if (offset
+ len
> sizeof (siginfo
))
3737 len
= sizeof (siginfo
) - offset
;
3739 if (readbuf
!= NULL
)
3740 memcpy (readbuf
, inf_siginfo
+ offset
, len
);
3743 memcpy (inf_siginfo
+ offset
, writebuf
, len
);
3745 /* Convert back to ptrace layout before flushing it out. */
3746 siginfo_fixup (&siginfo
, inf_siginfo
, 1);
3748 int pid
= get_ptrace_pid (ptid
);
3750 ptrace (PTRACE_SETSIGINFO
, pid
, (PTRACE_TYPE_ARG3
) 0, &siginfo
);
3752 return TARGET_XFER_E_IO
;
3756 return TARGET_XFER_OK
;
3759 static enum target_xfer_status
3760 linux_nat_xfer_osdata (enum target_object object
,
3761 const char *annex
, gdb_byte
*readbuf
,
3762 const gdb_byte
*writebuf
, ULONGEST offset
, ULONGEST len
,
3763 ULONGEST
*xfered_len
);
3765 static enum target_xfer_status
3766 linux_proc_xfer_memory_partial (int pid
, gdb_byte
*readbuf
,
3767 const gdb_byte
*writebuf
, ULONGEST offset
,
3768 LONGEST len
, ULONGEST
*xfered_len
);
3770 enum target_xfer_status
3771 linux_nat_target::xfer_partial (enum target_object object
,
3772 const char *annex
, gdb_byte
*readbuf
,
3773 const gdb_byte
*writebuf
,
3774 ULONGEST offset
, ULONGEST len
, ULONGEST
*xfered_len
)
3776 if (object
== TARGET_OBJECT_SIGNAL_INFO
)
3777 return linux_xfer_siginfo (inferior_ptid
, object
, annex
, readbuf
, writebuf
,
3778 offset
, len
, xfered_len
);
3780 /* The target is connected but no live inferior is selected. Pass
3781 this request down to a lower stratum (e.g., the executable
3783 if (object
== TARGET_OBJECT_MEMORY
&& inferior_ptid
== null_ptid
)
3784 return TARGET_XFER_EOF
;
3786 if (object
== TARGET_OBJECT_AUXV
)
3787 return memory_xfer_auxv (this, object
, annex
, readbuf
, writebuf
,
3788 offset
, len
, xfered_len
);
3790 if (object
== TARGET_OBJECT_OSDATA
)
3791 return linux_nat_xfer_osdata (object
, annex
, readbuf
, writebuf
,
3792 offset
, len
, xfered_len
);
3794 if (object
== TARGET_OBJECT_MEMORY
)
3796 /* GDB calculates all addresses in the largest possible address
3797 width. The address width must be masked before its final use
3798 by linux_proc_xfer_partial.
3800 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
3801 int addr_bit
= gdbarch_addr_bit (current_inferior ()->arch ());
3803 if (addr_bit
< (sizeof (ULONGEST
) * HOST_CHAR_BIT
))
3804 offset
&= ((ULONGEST
) 1 << addr_bit
) - 1;
3806 /* If /proc/pid/mem is writable, don't fallback to ptrace. If
3807 the write via /proc/pid/mem fails because the inferior execed
3808 (and we haven't seen the exec event yet), a subsequent ptrace
3809 poke would incorrectly write memory to the post-exec address
3810 space, while the core was trying to write to the pre-exec
3812 if (proc_mem_file_is_writable ())
3813 return linux_proc_xfer_memory_partial (inferior_ptid
.pid (), readbuf
,
3814 writebuf
, offset
, len
,
3818 return inf_ptrace_target::xfer_partial (object
, annex
, readbuf
, writebuf
,
3819 offset
, len
, xfered_len
);
3823 linux_nat_target::thread_alive (ptid_t ptid
)
3825 /* As long as a PTID is in lwp list, consider it alive. */
3826 return find_lwp_pid (ptid
) != NULL
;
3829 /* Implement the to_update_thread_list target method for this
3833 linux_nat_target::update_thread_list ()
3835 /* We add/delete threads from the list as clone/exit events are
3836 processed, so just try deleting exited threads still in the
3838 delete_exited_threads ();
3840 /* Update the processor core that each lwp/thread was last seen
3842 for (lwp_info
*lwp
: all_lwps ())
3844 /* Avoid accessing /proc if the thread hasn't run since we last
3845 time we fetched the thread's core. Accessing /proc becomes
3846 noticeably expensive when we have thousands of LWPs. */
3847 if (lwp
->core
== -1)
3848 lwp
->core
= linux_common_core_of_thread (lwp
->ptid
);
3853 linux_nat_target::pid_to_str (ptid_t ptid
)
3856 && (ptid
.pid () != ptid
.lwp ()
3857 || num_lwps (ptid
.pid ()) > 1))
3858 return string_printf ("LWP %ld", ptid
.lwp ());
3860 return normal_pid_to_str (ptid
);
3864 linux_nat_target::thread_name (struct thread_info
*thr
)
3866 return linux_proc_tid_get_name (thr
->ptid
);
3869 /* Accepts an integer PID; Returns a string representing a file that
3870 can be opened to get the symbols for the child process. */
3873 linux_nat_target::pid_to_exec_file (int pid
)
3875 return linux_proc_pid_to_exec_file (pid
);
3878 /* Object representing an /proc/PID/mem open file. We keep one such
3879 file open per inferior.
3881 It might be tempting to think about only ever opening one file at
3882 most for all inferiors, closing/reopening the file as we access
3883 memory of different inferiors, to minimize number of file
3884 descriptors open, which can otherwise run into resource limits.
3885 However, that does not work correctly -- if the inferior execs and
3886 we haven't processed the exec event yet, and, we opened a
3887 /proc/PID/mem file, we will get a mem file accessing the post-exec
3888 address space, thinking we're opening it for the pre-exec address
3889 space. That is dangerous as we can poke memory (e.g. clearing
3890 breakpoints) in the post-exec memory by mistake, corrupting the
3891 inferior. For that reason, we open the mem file as early as
3892 possible, right after spawning, forking or attaching to the
3893 inferior, when the inferior is stopped and thus before it has a
3896 Note that after opening the file, even if the thread we opened it
3897 for subsequently exits, the open file is still usable for accessing
3898 memory. It's only when the whole process exits or execs that the
3899 file becomes invalid, at which point reads/writes return EOF. */
3904 proc_mem_file (ptid_t ptid
, int fd
)
3905 : m_ptid (ptid
), m_fd (fd
)
3907 gdb_assert (m_fd
!= -1);
3912 linux_nat_debug_printf ("closing fd %d for /proc/%d/task/%ld/mem",
3913 m_fd
, m_ptid
.pid (), m_ptid
.lwp ());
3917 DISABLE_COPY_AND_ASSIGN (proc_mem_file
);
3925 /* The LWP this file was opened for. Just for debugging
3929 /* The file descriptor. */
3933 /* The map between an inferior process id, and the open /proc/PID/mem
3934 file. This is stored in a map instead of in a per-inferior
3935 structure because we need to be able to access memory of processes
3936 which don't have a corresponding struct inferior object. E.g.,
3937 with "detach-on-fork on" (the default), and "follow-fork parent"
3938 (also default), we don't create an inferior for the fork child, but
3939 we still need to remove breakpoints from the fork child's
3941 static std::unordered_map
<int, proc_mem_file
> proc_mem_file_map
;
3943 /* Close the /proc/PID/mem file for PID. */
3946 close_proc_mem_file (pid_t pid
)
3948 proc_mem_file_map
.erase (pid
);
3951 /* Open the /proc/PID/mem file for the process (thread group) of PTID.
3952 We actually open /proc/PID/task/LWP/mem, as that's the LWP we know
3953 exists and is stopped right now. We prefer the
3954 /proc/PID/task/LWP/mem form over /proc/LWP/mem to avoid tid-reuse
3955 races, just in case this is ever called on an already-waited
3959 open_proc_mem_file (ptid_t ptid
)
3961 auto iter
= proc_mem_file_map
.find (ptid
.pid ());
3962 gdb_assert (iter
== proc_mem_file_map
.end ());
3965 xsnprintf (filename
, sizeof filename
,
3966 "/proc/%d/task/%ld/mem", ptid
.pid (), ptid
.lwp ());
3968 int fd
= gdb_open_cloexec (filename
, O_RDWR
| O_LARGEFILE
, 0).release ();
3972 warning (_("opening /proc/PID/mem file for lwp %d.%ld failed: %s (%d)"),
3973 ptid
.pid (), ptid
.lwp (),
3974 safe_strerror (errno
), errno
);
3978 proc_mem_file_map
.emplace (std::piecewise_construct
,
3979 std::forward_as_tuple (ptid
.pid ()),
3980 std::forward_as_tuple (ptid
, fd
));
3982 linux_nat_debug_printf ("opened fd %d for lwp %d.%ld",
3983 fd
, ptid
.pid (), ptid
.lwp ());
3986 /* Helper for linux_proc_xfer_memory_partial and
3987 proc_mem_file_is_writable. FD is the already opened /proc/pid/mem
3988 file, and PID is the pid of the corresponding process. The rest of
3989 the arguments are like linux_proc_xfer_memory_partial's. */
3991 static enum target_xfer_status
3992 linux_proc_xfer_memory_partial_fd (int fd
, int pid
,
3993 gdb_byte
*readbuf
, const gdb_byte
*writebuf
,
3994 ULONGEST offset
, LONGEST len
,
3995 ULONGEST
*xfered_len
)
3999 gdb_assert (fd
!= -1);
4001 /* Use pread64/pwrite64 if available, since they save a syscall and
4002 can handle 64-bit offsets even on 32-bit platforms (for instance,
4003 SPARC debugging a SPARC64 application). But only use them if the
4004 offset isn't so high that when cast to off_t it'd be negative, as
4005 seen on SPARC64. pread64/pwrite64 outright reject such offsets.
4008 if ((off_t
) offset
>= 0)
4009 ret
= (readbuf
!= nullptr
4010 ? pread64 (fd
, readbuf
, len
, offset
)
4011 : pwrite64 (fd
, writebuf
, len
, offset
));
4015 ret
= lseek (fd
, offset
, SEEK_SET
);
4017 ret
= (readbuf
!= nullptr
4018 ? read (fd
, readbuf
, len
)
4019 : write (fd
, writebuf
, len
));
4024 linux_nat_debug_printf ("accessing fd %d for pid %d failed: %s (%d)",
4025 fd
, pid
, safe_strerror (errno
), errno
);
4026 return TARGET_XFER_E_IO
;
4030 /* EOF means the address space is gone, the whole process exited
4032 linux_nat_debug_printf ("accessing fd %d for pid %d got EOF",
4034 return TARGET_XFER_EOF
;
4039 return TARGET_XFER_OK
;
4043 /* Implement the to_xfer_partial target method using /proc/PID/mem.
4044 Because we can use a single read/write call, this can be much more
4045 efficient than banging away at PTRACE_PEEKTEXT. Also, unlike
4046 PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running
4049 static enum target_xfer_status
4050 linux_proc_xfer_memory_partial (int pid
, gdb_byte
*readbuf
,
4051 const gdb_byte
*writebuf
, ULONGEST offset
,
4052 LONGEST len
, ULONGEST
*xfered_len
)
4054 auto iter
= proc_mem_file_map
.find (pid
);
4055 if (iter
== proc_mem_file_map
.end ())
4056 return TARGET_XFER_EOF
;
4058 int fd
= iter
->second
.fd ();
4060 return linux_proc_xfer_memory_partial_fd (fd
, pid
, readbuf
, writebuf
, offset
,
4064 /* Check whether /proc/pid/mem is writable in the current kernel, and
4065 return true if so. It wasn't writable before Linux 2.6.39, but
4066 there's no way to know whether the feature was backported to older
4067 kernels. So we check to see if it works. The result is cached,
4068 and this is guaranteed to be called once early during inferior
4069 startup, so that any warning is printed out consistently between
4070 GDB invocations. Note we don't call it during GDB startup instead
4071 though, because then we might warn with e.g. just "gdb --version"
4072 on sandboxed systems. See PR gdb/29907. */
4075 proc_mem_file_is_writable ()
4077 static std::optional
<bool> writable
;
4079 if (writable
.has_value ())
4082 writable
.emplace (false);
4084 /* We check whether /proc/pid/mem is writable by trying to write to
4085 one of our variables via /proc/self/mem. */
4087 int fd
= gdb_open_cloexec ("/proc/self/mem", O_RDWR
| O_LARGEFILE
, 0).release ();
4091 warning (_("opening /proc/self/mem file failed: %s (%d)"),
4092 safe_strerror (errno
), errno
);
4096 SCOPE_EXIT
{ close (fd
); };
4098 /* This is the variable we try to write to. Note OFFSET below. */
4099 volatile gdb_byte test_var
= 0;
4101 gdb_byte writebuf
[] = {0x55};
4102 ULONGEST offset
= (uintptr_t) &test_var
;
4103 ULONGEST xfered_len
;
4105 enum target_xfer_status res
4106 = linux_proc_xfer_memory_partial_fd (fd
, getpid (), nullptr, writebuf
,
4107 offset
, 1, &xfered_len
);
4109 if (res
== TARGET_XFER_OK
)
4111 gdb_assert (xfered_len
== 1);
4112 gdb_assert (test_var
== 0x55);
4120 /* Parse LINE as a signal set and add its set bits to SIGS. */
4123 add_line_to_sigset (const char *line
, sigset_t
*sigs
)
4125 int len
= strlen (line
) - 1;
4129 if (line
[len
] != '\n')
4130 error (_("Could not parse signal set: %s"), line
);
4138 if (*p
>= '0' && *p
<= '9')
4140 else if (*p
>= 'a' && *p
<= 'f')
4141 digit
= *p
- 'a' + 10;
4143 error (_("Could not parse signal set: %s"), line
);
4148 sigaddset (sigs
, signum
+ 1);
4150 sigaddset (sigs
, signum
+ 2);
4152 sigaddset (sigs
, signum
+ 3);
4154 sigaddset (sigs
, signum
+ 4);
4160 /* Find process PID's pending signals from /proc/pid/status and set
4164 linux_proc_pending_signals (int pid
, sigset_t
*pending
,
4165 sigset_t
*blocked
, sigset_t
*ignored
)
4167 char buffer
[PATH_MAX
], fname
[PATH_MAX
];
4169 sigemptyset (pending
);
4170 sigemptyset (blocked
);
4171 sigemptyset (ignored
);
4172 xsnprintf (fname
, sizeof fname
, "/proc/%d/status", pid
);
4173 gdb_file_up procfile
= gdb_fopen_cloexec (fname
, "r");
4174 if (procfile
== NULL
)
4175 error (_("Could not open %s"), fname
);
4177 while (fgets (buffer
, PATH_MAX
, procfile
.get ()) != NULL
)
4179 /* Normal queued signals are on the SigPnd line in the status
4180 file. However, 2.6 kernels also have a "shared" pending
4181 queue for delivering signals to a thread group, so check for
4184 Unfortunately some Red Hat kernels include the shared pending
4185 queue but not the ShdPnd status field. */
4187 if (startswith (buffer
, "SigPnd:\t"))
4188 add_line_to_sigset (buffer
+ 8, pending
);
4189 else if (startswith (buffer
, "ShdPnd:\t"))
4190 add_line_to_sigset (buffer
+ 8, pending
);
4191 else if (startswith (buffer
, "SigBlk:\t"))
4192 add_line_to_sigset (buffer
+ 8, blocked
);
4193 else if (startswith (buffer
, "SigIgn:\t"))
4194 add_line_to_sigset (buffer
+ 8, ignored
);
4198 static enum target_xfer_status
4199 linux_nat_xfer_osdata (enum target_object object
,
4200 const char *annex
, gdb_byte
*readbuf
,
4201 const gdb_byte
*writebuf
, ULONGEST offset
, ULONGEST len
,
4202 ULONGEST
*xfered_len
)
4204 gdb_assert (object
== TARGET_OBJECT_OSDATA
);
4206 *xfered_len
= linux_common_xfer_osdata (annex
, readbuf
, offset
, len
);
4207 if (*xfered_len
== 0)
4208 return TARGET_XFER_EOF
;
4210 return TARGET_XFER_OK
;
4213 std::vector
<static_tracepoint_marker
>
4214 linux_nat_target::static_tracepoint_markers_by_strid (const char *strid
)
4216 char s
[IPA_CMD_BUF_SIZE
];
4217 int pid
= inferior_ptid
.pid ();
4218 std::vector
<static_tracepoint_marker
> markers
;
4220 ptid_t ptid
= ptid_t (pid
, 0);
4221 static_tracepoint_marker marker
;
4226 strcpy (s
, "qTfSTM");
4227 agent_run_command (pid
, s
, strlen (s
) + 1);
4230 SCOPE_EXIT
{ target_continue_no_signal (ptid
); };
4236 parse_static_tracepoint_marker_definition (p
, &p
, &marker
);
4238 if (strid
== NULL
|| marker
.str_id
== strid
)
4239 markers
.push_back (std::move (marker
));
4241 while (*p
++ == ','); /* comma-separated list */
4243 strcpy (s
, "qTsSTM");
4244 agent_run_command (pid
, s
, strlen (s
) + 1);
4251 /* target_can_async_p implementation. */
4254 linux_nat_target::can_async_p ()
4256 /* This flag should be checked in the common target.c code. */
4257 gdb_assert (target_async_permitted
);
4259 /* Otherwise, this targets is always able to support async mode. */
4264 linux_nat_target::supports_non_stop ()
4269 /* to_always_non_stop_p implementation. */
4272 linux_nat_target::always_non_stop_p ()
4278 linux_nat_target::supports_multi_process ()
4284 linux_nat_target::supports_disable_randomization ()
4289 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4290 so we notice when any child changes state, and notify the
4291 event-loop; it allows us to use sigsuspend in linux_nat_wait_1
4292 above to wait for the arrival of a SIGCHLD. */
4295 sigchld_handler (int signo
)
4297 int old_errno
= errno
;
4299 if (debug_linux_nat
)
4300 gdb_stdlog
->write_async_safe ("sigchld\n", sizeof ("sigchld\n") - 1);
4302 if (signo
== SIGCHLD
)
4304 /* Let the event loop know that there are events to handle. */
4305 linux_nat_target::async_file_mark_if_open ();
4311 /* Callback registered with the target events file descriptor. */
4314 handle_target_event (int error
, gdb_client_data client_data
)
4316 inferior_event_handler (INF_REG_EVENT
);
4319 /* target_async implementation. */
4322 linux_nat_target::async (bool enable
)
4324 if (enable
== is_async_p ())
4327 /* Block child signals while we create/destroy the pipe, as their
4328 handler writes to it. */
4329 gdb::block_signals blocker
;
4333 if (!async_file_open ())
4334 internal_error ("creating event pipe failed.");
4336 add_file_handler (async_wait_fd (), handle_target_event
, NULL
,
4339 /* There may be pending events to handle. Tell the event loop
4345 delete_file_handler (async_wait_fd ());
4346 async_file_close ();
4350 /* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
4354 linux_nat_stop_lwp (struct lwp_info
*lwp
)
4358 linux_nat_debug_printf ("running -> suspending %s",
4359 lwp
->ptid
.to_string ().c_str ());
4362 if (lwp
->last_resume_kind
== resume_stop
)
4364 linux_nat_debug_printf ("already stopping LWP %ld at GDB's request",
4369 stop_callback (lwp
);
4370 lwp
->last_resume_kind
= resume_stop
;
4374 /* Already known to be stopped; do nothing. */
4376 if (debug_linux_nat
)
4378 if (linux_target
->find_thread (lwp
->ptid
)->stop_requested
)
4379 linux_nat_debug_printf ("already stopped/stop_requested %s",
4380 lwp
->ptid
.to_string ().c_str ());
4382 linux_nat_debug_printf ("already stopped/no stop_requested yet %s",
4383 lwp
->ptid
.to_string ().c_str ());
4390 linux_nat_target::stop (ptid_t ptid
)
4392 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT
;
4393 iterate_over_lwps (ptid
, linux_nat_stop_lwp
);
4396 /* Return the cached value of the processor core for thread PTID. */
4399 linux_nat_target::core_of_thread (ptid_t ptid
)
4401 struct lwp_info
*info
= find_lwp_pid (ptid
);
4408 /* Implementation of to_filesystem_is_local. */
4411 linux_nat_target::filesystem_is_local ()
4413 struct inferior
*inf
= current_inferior ();
4415 if (inf
->fake_pid_p
|| inf
->pid
== 0)
4418 return linux_ns_same (inf
->pid
, LINUX_NS_MNT
);
4421 /* Convert the INF argument passed to a to_fileio_* method
4422 to a process ID suitable for passing to its corresponding
4423 linux_mntns_* function. If INF is non-NULL then the
4424 caller is requesting the filesystem seen by INF. If INF
4425 is NULL then the caller is requesting the filesystem seen
4426 by the GDB. We fall back to GDB's filesystem in the case
4427 that INF is non-NULL but its PID is unknown. */
4430 linux_nat_fileio_pid_of (struct inferior
*inf
)
4432 if (inf
== NULL
|| inf
->fake_pid_p
|| inf
->pid
== 0)
4438 /* Implementation of to_fileio_open. */
4441 linux_nat_target::fileio_open (struct inferior
*inf
, const char *filename
,
4442 int flags
, int mode
, int warn_if_slow
,
4443 fileio_error
*target_errno
)
4449 if (fileio_to_host_openflags (flags
, &nat_flags
) == -1
4450 || fileio_to_host_mode (mode
, &nat_mode
) == -1)
4452 *target_errno
= FILEIO_EINVAL
;
4456 fd
= linux_mntns_open_cloexec (linux_nat_fileio_pid_of (inf
),
4457 filename
, nat_flags
, nat_mode
);
4459 *target_errno
= host_to_fileio_error (errno
);
4464 /* Implementation of to_fileio_readlink. */
4466 std::optional
<std::string
>
4467 linux_nat_target::fileio_readlink (struct inferior
*inf
, const char *filename
,
4468 fileio_error
*target_errno
)
4473 len
= linux_mntns_readlink (linux_nat_fileio_pid_of (inf
),
4474 filename
, buf
, sizeof (buf
));
4477 *target_errno
= host_to_fileio_error (errno
);
4481 return std::string (buf
, len
);
4484 /* Implementation of to_fileio_unlink. */
4487 linux_nat_target::fileio_unlink (struct inferior
*inf
, const char *filename
,
4488 fileio_error
*target_errno
)
4492 ret
= linux_mntns_unlink (linux_nat_fileio_pid_of (inf
),
4495 *target_errno
= host_to_fileio_error (errno
);
4500 /* Implementation of the to_thread_events method. */
4503 linux_nat_target::thread_events (int enable
)
4505 report_thread_events
= enable
;
4509 linux_nat_target::supports_set_thread_options (gdb_thread_options options
)
4511 constexpr gdb_thread_options supported_options
4512 = GDB_THREAD_OPTION_CLONE
| GDB_THREAD_OPTION_EXIT
;
4513 return ((options
& supported_options
) == options
);
4516 linux_nat_target::linux_nat_target ()
4518 /* We don't change the stratum; this target will sit at
4519 process_stratum and thread_db will set at thread_stratum. This
4520 is a little strange, since this is a multi-threaded-capable
4521 target, but we want to be on the stack below thread_db, and we
4522 also want to be used for single-threaded processes. */
4525 /* See linux-nat.h. */
4528 linux_nat_get_siginfo (ptid_t ptid
, siginfo_t
*siginfo
)
4530 int pid
= get_ptrace_pid (ptid
);
4531 return ptrace (PTRACE_GETSIGINFO
, pid
, (PTRACE_TYPE_ARG3
) 0, siginfo
) == 0;
4534 /* See nat/linux-nat.h. */
4537 current_lwp_ptid (void)
4539 gdb_assert (inferior_ptid
.lwp_p ());
4540 return inferior_ptid
;
4543 /* Implement 'maintenance info linux-lwps'. Displays some basic
4544 information about all the current lwp_info objects. */
4547 maintenance_info_lwps (const char *arg
, int from_tty
)
4549 if (all_lwps ().size () == 0)
4551 gdb_printf ("No Linux LWPs\n");
4555 /* Start the width at 8 to match the column heading below, then
4556 figure out the widest ptid string. We'll use this to build our
4557 output table below. */
4558 size_t ptid_width
= 8;
4559 for (lwp_info
*lp
: all_lwps ())
4560 ptid_width
= std::max (ptid_width
, lp
->ptid
.to_string ().size ());
4562 /* Setup the table headers. */
4563 struct ui_out
*uiout
= current_uiout
;
4564 ui_out_emit_table
table_emitter (uiout
, 2, -1, "linux-lwps");
4565 uiout
->table_header (ptid_width
, ui_left
, "lwp-ptid", _("LWP Ptid"));
4566 uiout
->table_header (9, ui_left
, "thread-info", _("Thread ID"));
4567 uiout
->table_body ();
4569 /* Display one table row for each lwp_info. */
4570 for (lwp_info
*lp
: all_lwps ())
4572 ui_out_emit_tuple
tuple_emitter (uiout
, "lwp-entry");
4574 thread_info
*th
= linux_target
->find_thread (lp
->ptid
);
4576 uiout
->field_string ("lwp-ptid", lp
->ptid
.to_string ().c_str ());
4578 uiout
->field_string ("thread-info", "None");
4580 uiout
->field_string ("thread-info", print_full_thread_id (th
));
4582 uiout
->message ("\n");
4586 void _initialize_linux_nat ();
4588 _initialize_linux_nat ()
4590 add_setshow_boolean_cmd ("linux-nat", class_maintenance
,
4591 &debug_linux_nat
, _("\
4592 Set debugging of GNU/Linux native target."), _(" \
4593 Show debugging of GNU/Linux native target."), _(" \
4594 When on, print debug messages relating to the GNU/Linux native target."),
4596 show_debug_linux_nat
,
4597 &setdebuglist
, &showdebuglist
);
4599 add_setshow_boolean_cmd ("linux-namespaces", class_maintenance
,
4600 &debug_linux_namespaces
, _("\
4601 Set debugging of GNU/Linux namespaces module."), _("\
4602 Show debugging of GNU/Linux namespaces module."), _("\
4603 Enables printf debugging output."),
4606 &setdebuglist
, &showdebuglist
);
4608 /* Install a SIGCHLD handler. */
4609 sigchld_action
.sa_handler
= sigchld_handler
;
4610 sigemptyset (&sigchld_action
.sa_mask
);
4611 sigchld_action
.sa_flags
= SA_RESTART
;
4613 /* Make it the default. */
4614 sigaction (SIGCHLD
, &sigchld_action
, NULL
);
4616 /* Make sure we don't block SIGCHLD during a sigsuspend. */
4617 gdb_sigmask (SIG_SETMASK
, NULL
, &suspend_mask
);
4618 sigdelset (&suspend_mask
, SIGCHLD
);
4620 sigemptyset (&blocked_mask
);
4622 lwp_lwpid_htab_create ();
4624 add_cmd ("linux-lwps", class_maintenance
, maintenance_info_lwps
,
4625 _("List the Linux LWPS."), &maintenanceinfolist
);
/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
   the GNU/Linux Threads library and therefore doesn't really belong
   here.  */

/* NPTL reserves the first two RT signals, but does not provide any
   way for the debugger to query the signal numbers - fortunately
   they don't change.  */
static int lin_thread_signals[] = { __SIGRTMIN, __SIGRTMIN + 1 };
4638 /* See linux-nat.h. */
4641 lin_thread_get_thread_signal_num (void)
4643 return sizeof (lin_thread_signals
) / sizeof (lin_thread_signals
[0]);
4646 /* See linux-nat.h. */
4649 lin_thread_get_thread_signal (unsigned int i
)
4651 gdb_assert (i
< lin_thread_get_thread_signal_num ());
4652 return lin_thread_signals
[i
];