1 /* Target-struct-independent code to start (run) and stop an inferior
4 Copyright (C) 1986-2025 Free Software Foundation, Inc.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21 #include "cli/cli-cmds.h"
22 #include "cli/cli-style.h"
23 #include "displaced-stepping.h"
26 #include "exceptions.h"
30 #include "breakpoint.h"
33 #include "target-connection.h"
34 #include "gdbthread.h"
42 #include "observable.h"
47 #include "mi/mi-common.h"
48 #include "event-top.h"
50 #include "record-full.h"
51 #include "inline-frame.h"
53 #include "tracepoint.h"
57 #include "completer.h"
58 #include "target-descriptions.h"
59 #include "target-dcache.h"
61 #include "gdbsupport/event-loop.h"
62 #include "thread-fsm.h"
63 #include "gdbsupport/enum-flags.h"
64 #include "progspace-and-thread.h"
66 #include "arch-utils.h"
67 #include "gdbsupport/scope-exit.h"
68 #include "gdbsupport/forward-scope-exit.h"
69 #include "gdbsupport/gdb_select.h"
70 #include <unordered_map>
71 #include "async-event.h"
72 #include "gdbsupport/selftest.h"
73 #include "scoped-mock-context.h"
74 #include "test-target.h"
75 #include "gdbsupport/common-debug.h"
76 #include "gdbsupport/buildargv.h"
77 #include "extension.h"
81 /* Prototypes for local functions */
83 static void sig_print_info (enum gdb_signal
);
85 static void sig_print_header (void);
87 static void follow_inferior_reset_breakpoints (void);
89 static bool currently_stepping (struct thread_info
*tp
);
91 static void insert_hp_step_resume_breakpoint_at_frame (const frame_info_ptr
&);
93 static void insert_step_resume_breakpoint_at_caller (const frame_info_ptr
&);
95 static void insert_longjmp_resume_breakpoint (struct gdbarch
*, CORE_ADDR
);
97 static bool maybe_software_singlestep (struct gdbarch
*gdbarch
);
99 static void resume (gdb_signal sig
);
101 static void wait_for_inferior (inferior
*inf
);
103 static void restart_threads (struct thread_info
*event_thread
,
104 inferior
*inf
= nullptr);
106 static bool start_step_over (void);
108 static bool step_over_info_valid_p (void);
110 static bool schedlock_applies (struct thread_info
*tp
);
112 /* Asynchronous signal handler registered as event loop source for
113 when we have pending events ready to be passed to the core. */
114 static struct async_event_handler
*infrun_async_inferior_event_token
;
116 /* Stores whether infrun_async was previously enabled or disabled.
117 Starts off as -1, indicating "never enabled/disabled". */
118 static int infrun_is_async
= -1;
119 static CORE_ADDR
update_line_range_start (CORE_ADDR pc
,
120 struct execution_control_state
*ecs
);
125 infrun_async (int enable
)
127 if (infrun_is_async
!= enable
)
129 infrun_is_async
= enable
;
131 infrun_debug_printf ("enable=%d", enable
);
134 mark_async_event_handler (infrun_async_inferior_event_token
);
136 clear_async_event_handler (infrun_async_inferior_event_token
);
143 mark_infrun_async_event_handler (void)
145 mark_async_event_handler (infrun_async_inferior_event_token
);
/* When set, stop the 'step' command if we enter a function which has
   no line number information.  The normal behavior is that we step
   over such function.  */
bool step_stop_if_no_debug = false;
153 show_step_stop_if_no_debug (struct ui_file
*file
, int from_tty
,
154 struct cmd_list_element
*c
, const char *value
)
156 gdb_printf (file
, _("Mode of the step operation is %s.\n"), value
);
159 /* proceed and normal_stop use this to notify the user when the
160 inferior stopped in a different thread than it had been running in.
161 It can also be used to find for which thread normal_stop last
163 static thread_info_ref previous_thread
;
168 update_previous_thread ()
170 if (inferior_ptid
== null_ptid
)
171 previous_thread
= nullptr;
173 previous_thread
= thread_info_ref::new_reference (inferior_thread ());
179 get_previous_thread ()
181 return previous_thread
.get ();
/* If set (default for legacy reasons), when following a fork, GDB
   will detach from one of the fork branches, child or parent.
   Exactly which branch is detached depends on 'set follow-fork-mode'
   setting.  */

static bool detach_fork = true;

bool debug_infrun = false;
193 show_debug_infrun (struct ui_file
*file
, int from_tty
,
194 struct cmd_list_element
*c
, const char *value
)
196 gdb_printf (file
, _("Inferior debugging is %s.\n"), value
);
/* Support for disabling address space randomization.  */

bool disable_randomization = true;
204 show_disable_randomization (struct ui_file
*file
, int from_tty
,
205 struct cmd_list_element
*c
, const char *value
)
207 if (target_supports_disable_randomization ())
209 _("Disabling randomization of debuggee's "
210 "virtual address space is %s.\n"),
213 gdb_puts (_("Disabling randomization of debuggee's "
214 "virtual address space is unsupported on\n"
215 "this platform.\n"), file
);
219 set_disable_randomization (const char *args
, int from_tty
,
220 struct cmd_list_element
*c
)
222 if (!target_supports_disable_randomization ())
223 error (_("Disabling randomization of debuggee's "
224 "virtual address space is unsupported on\n"
/* User interface for non-stop mode.  */

bool non_stop = false;
static bool non_stop_1 = false;
234 set_non_stop (const char *args
, int from_tty
,
235 struct cmd_list_element
*c
)
237 if (target_has_execution ())
239 non_stop_1
= non_stop
;
240 error (_("Cannot change this setting while the inferior is running."));
243 non_stop
= non_stop_1
;
247 show_non_stop (struct ui_file
*file
, int from_tty
,
248 struct cmd_list_element
*c
, const char *value
)
251 _("Controlling the inferior in non-stop mode is %s.\n"),
255 /* "Observer mode" is somewhat like a more extreme version of
256 non-stop, in which all GDB operations that might affect the
257 target's execution have been disabled. */
259 static bool observer_mode
= false;
260 static bool observer_mode_1
= false;
263 set_observer_mode (const char *args
, int from_tty
,
264 struct cmd_list_element
*c
)
266 if (target_has_execution ())
268 observer_mode_1
= observer_mode
;
269 error (_("Cannot change this setting while the inferior is running."));
272 observer_mode
= observer_mode_1
;
274 may_write_registers
= !observer_mode
;
275 may_write_memory
= !observer_mode
;
276 may_insert_breakpoints
= !observer_mode
;
277 may_insert_tracepoints
= !observer_mode
;
278 /* We can insert fast tracepoints in or out of observer mode,
279 but enable them if we're going into this mode. */
281 may_insert_fast_tracepoints
= true;
282 may_stop
= !observer_mode
;
283 update_target_permissions ();
285 /* Going *into* observer mode we must force non-stop, then
286 going out we leave it that way. */
289 pagination_enabled
= false;
290 non_stop
= non_stop_1
= true;
294 gdb_printf (_("Observer mode is now %s.\n"),
295 (observer_mode
? "on" : "off"));
299 show_observer_mode (struct ui_file
*file
, int from_tty
,
300 struct cmd_list_element
*c
, const char *value
)
302 gdb_printf (file
, _("Observer mode is %s.\n"), value
);
305 /* This updates the value of observer mode based on changes in
306 permissions. Note that we are deliberately ignoring the values of
307 may-write-registers and may-write-memory, since the user may have
308 reason to enable these during a session, for instance to turn on a
309 debugging-related global. */
312 update_observer_mode (void)
314 bool newval
= (!may_insert_breakpoints
315 && !may_insert_tracepoints
316 && may_insert_fast_tracepoints
320 /* Let the user know if things change. */
321 if (newval
!= observer_mode
)
322 gdb_printf (_("Observer mode is now %s.\n"),
323 (newval
? "on" : "off"));
325 observer_mode
= observer_mode_1
= newval
;
328 /* Tables of how to react to signals; the user sets them. */
330 static unsigned char signal_stop
[GDB_SIGNAL_LAST
];
331 static unsigned char signal_print
[GDB_SIGNAL_LAST
];
332 static unsigned char signal_program
[GDB_SIGNAL_LAST
];
334 /* Table of signals that are registered with "catch signal". A
335 non-zero entry indicates that the signal is caught by some "catch
337 static unsigned char signal_catch
[GDB_SIGNAL_LAST
];
339 /* Table of signals that the target may silently handle.
340 This is automatically determined from the flags above,
341 and simply cached here. */
342 static unsigned char signal_pass
[GDB_SIGNAL_LAST
];
344 #define SET_SIGS(nsigs,sigs,flags) \
346 int signum = (nsigs); \
347 while (signum-- > 0) \
348 if ((sigs)[signum]) \
349 (flags)[signum] = 1; \
352 #define UNSET_SIGS(nsigs,sigs,flags) \
354 int signum = (nsigs); \
355 while (signum-- > 0) \
356 if ((sigs)[signum]) \
357 (flags)[signum] = 0; \
360 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
361 this function is to avoid exporting `signal_program'. */
364 update_signals_program_target (void)
366 target_program_signals (signal_program
);
/* Value to pass to target_resume() to cause all threads to resume.  */

#define RESUME_ALL minus_one_ptid

/* Command list pointer for the "stop" placeholder.  */

static struct cmd_list_element *stop_command;

/* Nonzero if we want to give control to the user when we're notified
   of shared library events by the dynamic linker.  */
int stop_on_solib_events;
381 /* Enable or disable optional shared library event breakpoints
382 as appropriate when the above flag is changed. */
385 set_stop_on_solib_events (const char *args
,
386 int from_tty
, struct cmd_list_element
*c
)
388 update_solib_breakpoints ();
392 show_stop_on_solib_events (struct ui_file
*file
, int from_tty
,
393 struct cmd_list_element
*c
, const char *value
)
395 gdb_printf (file
, _("Stopping for shared library events is %s.\n"),
399 /* True after stop if current stack frame should be printed. */
401 static bool stop_print_frame
;
403 /* This is a cached copy of the target/ptid/waitstatus of the last
404 event returned by target_wait().
405 This information is returned by get_last_target_status(). */
406 static process_stratum_target
*target_last_proc_target
;
407 static ptid_t target_last_wait_ptid
;
408 static struct target_waitstatus target_last_waitstatus
;
410 void init_thread_stepping_state (struct thread_info
*tss
);
412 static const char follow_fork_mode_child
[] = "child";
413 static const char follow_fork_mode_parent
[] = "parent";
415 static const char *const follow_fork_mode_kind_names
[] = {
416 follow_fork_mode_child
,
417 follow_fork_mode_parent
,
421 static const char *follow_fork_mode_string
= follow_fork_mode_parent
;
423 show_follow_fork_mode_string (struct ui_file
*file
, int from_tty
,
424 struct cmd_list_element
*c
, const char *value
)
427 _("Debugger response to a program "
428 "call of fork or vfork is \"%s\".\n"),
433 /* Handle changes to the inferior list based on the type of fork,
434 which process is being followed, and whether the other process
435 should be detached. On entry inferior_ptid must be the ptid of
436 the fork parent. At return inferior_ptid is the ptid of the
437 followed inferior. */
440 follow_fork_inferior (bool follow_child
, bool detach_fork
)
442 INFRUN_SCOPED_DEBUG_ENTER_EXIT
;
444 infrun_debug_printf ("follow_child = %d, detach_fork = %d",
445 follow_child
, detach_fork
);
447 target_waitkind fork_kind
= inferior_thread ()->pending_follow
.kind ();
448 gdb_assert (fork_kind
== TARGET_WAITKIND_FORKED
449 || fork_kind
== TARGET_WAITKIND_VFORKED
);
450 bool has_vforked
= fork_kind
== TARGET_WAITKIND_VFORKED
;
451 ptid_t parent_ptid
= inferior_ptid
;
452 ptid_t child_ptid
= inferior_thread ()->pending_follow
.child_ptid ();
455 && !non_stop
/* Non-stop always resumes both branches. */
456 && current_ui
->prompt_state
== PROMPT_BLOCKED
457 && !(follow_child
|| detach_fork
|| sched_multi
))
459 /* The parent stays blocked inside the vfork syscall until the
460 child execs or exits. If we don't let the child run, then
461 the parent stays blocked. If we're telling the parent to run
462 in the foreground, the user will not be able to ctrl-c to get
463 back the terminal, effectively hanging the debug session. */
464 gdb_printf (gdb_stderr
, _("\
465 Can not resume the parent process over vfork in the foreground while\n\
466 holding the child stopped. Try \"set %ps\" or \"%ps\".\n"),
467 styled_string (command_style
.style (), "set detach-on-fork"),
468 styled_string (command_style
.style (),
469 "set schedule-multiple"));
473 inferior
*parent_inf
= current_inferior ();
474 inferior
*child_inf
= nullptr;
475 bool child_has_new_pspace
= false;
477 gdb_assert (parent_inf
->thread_waiting_for_vfork_done
== nullptr);
481 /* Detach new forked process? */
484 /* Before detaching from the child, remove all breakpoints
485 from it. If we forked, then this has already been taken
486 care of by infrun.c. If we vforked however, any
487 breakpoint inserted in the parent is visible in the
488 child, even those added while stopped in a vfork
489 catchpoint. This will remove the breakpoints from the
490 parent also, but they'll be reinserted below. */
493 /* Keep breakpoints list in sync. */
494 remove_breakpoints_inf (current_inferior ());
497 if (print_inferior_events
)
499 /* Ensure that we have a process ptid. */
500 ptid_t process_ptid
= ptid_t (child_ptid
.pid ());
502 target_terminal::ours_for_output ();
503 gdb_printf (_("[Detaching after %s from child %s]\n"),
504 has_vforked
? "vfork" : "fork",
505 target_pid_to_str (process_ptid
).c_str ());
510 /* Add process to GDB's tables. */
511 child_inf
= add_inferior (child_ptid
.pid ());
513 child_inf
->attach_flag
= parent_inf
->attach_flag
;
514 copy_terminal_info (child_inf
, parent_inf
);
515 child_inf
->set_arch (parent_inf
->arch ());
516 child_inf
->tdesc_info
= parent_inf
->tdesc_info
;
518 child_inf
->symfile_flags
= SYMFILE_NO_READ
;
520 /* If this is a vfork child, then the address-space is
521 shared with the parent. */
524 child_inf
->pspace
= parent_inf
->pspace
;
525 child_inf
->aspace
= parent_inf
->aspace
;
527 exec_on_vfork (child_inf
);
529 /* The parent will be frozen until the child is done
530 with the shared region. Keep track of the
532 child_inf
->vfork_parent
= parent_inf
;
533 child_inf
->pending_detach
= false;
534 parent_inf
->vfork_child
= child_inf
;
535 parent_inf
->pending_detach
= false;
539 child_inf
->pspace
= new program_space (new_address_space ());
540 child_has_new_pspace
= true;
541 child_inf
->aspace
= child_inf
->pspace
->aspace
;
542 child_inf
->removable
= true;
543 clone_program_space (child_inf
->pspace
, parent_inf
->pspace
);
549 /* If we detached from the child, then we have to be careful
550 to not insert breakpoints in the parent until the child
551 is done with the shared memory region. However, if we're
552 staying attached to the child, then we can and should
553 insert breakpoints, so that we can debug it. A
554 subsequent child exec or exit is enough to know when does
555 the child stops using the parent's address space. */
556 parent_inf
->thread_waiting_for_vfork_done
557 = detach_fork
? inferior_thread () : nullptr;
558 parent_inf
->pspace
->breakpoints_not_allowed
= detach_fork
;
561 ("parent_inf->thread_waiting_for_vfork_done == %s",
562 (parent_inf
->thread_waiting_for_vfork_done
== nullptr
564 : (parent_inf
->thread_waiting_for_vfork_done
565 ->ptid
.to_string ().c_str ())));
570 /* Follow the child. */
572 if (print_inferior_events
)
574 std::string parent_pid
= target_pid_to_str (parent_ptid
);
575 std::string child_pid
= target_pid_to_str (child_ptid
);
577 target_terminal::ours_for_output ();
578 gdb_printf (_("[Attaching after %s %s to child %s]\n"),
580 has_vforked
? "vfork" : "fork",
584 /* Add the new inferior first, so that the target_detach below
585 doesn't unpush the target. */
587 child_inf
= add_inferior (child_ptid
.pid ());
589 child_inf
->attach_flag
= parent_inf
->attach_flag
;
590 copy_terminal_info (child_inf
, parent_inf
);
591 child_inf
->set_arch (parent_inf
->arch ());
592 child_inf
->tdesc_info
= parent_inf
->tdesc_info
;
596 /* If this is a vfork child, then the address-space is shared
598 child_inf
->aspace
= parent_inf
->aspace
;
599 child_inf
->pspace
= parent_inf
->pspace
;
601 exec_on_vfork (child_inf
);
603 else if (detach_fork
)
605 /* We follow the child and detach from the parent: move the parent's
606 program space to the child. This simplifies some things, like
607 doing "next" over fork() and landing on the expected line in the
608 child (note, that is broken with "set detach-on-fork off").
610 Before assigning brand new spaces for the parent, remove
611 breakpoints from it: because the new pspace won't match
612 currently inserted locations, the normal detach procedure
613 wouldn't remove them, and we would leave them inserted when
615 remove_breakpoints_inf (parent_inf
);
617 child_inf
->aspace
= parent_inf
->aspace
;
618 child_inf
->pspace
= parent_inf
->pspace
;
619 parent_inf
->pspace
= new program_space (new_address_space ());
620 parent_inf
->aspace
= parent_inf
->pspace
->aspace
;
621 clone_program_space (parent_inf
->pspace
, child_inf
->pspace
);
623 /* The parent inferior is still the current one, so keep things
625 set_current_program_space (parent_inf
->pspace
);
629 child_inf
->pspace
= new program_space (new_address_space ());
630 child_has_new_pspace
= true;
631 child_inf
->aspace
= child_inf
->pspace
->aspace
;
632 child_inf
->removable
= true;
633 child_inf
->symfile_flags
= SYMFILE_NO_READ
;
634 clone_program_space (child_inf
->pspace
, parent_inf
->pspace
);
638 gdb_assert (current_inferior () == parent_inf
);
640 /* If we are setting up an inferior for the child, target_follow_fork is
641 responsible for pushing the appropriate targets on the new inferior's
642 target stack and adding the initial thread (with ptid CHILD_PTID).
644 If we are not setting up an inferior for the child (because following
645 the parent and detach_fork is true), it is responsible for detaching
647 target_follow_fork (child_inf
, child_ptid
, fork_kind
, follow_child
,
650 gdb::observers::inferior_forked
.notify (parent_inf
, child_inf
, fork_kind
);
652 /* target_follow_fork must leave the parent as the current inferior. If we
653 want to follow the child, we make it the current one below. */
654 gdb_assert (current_inferior () == parent_inf
);
656 /* If there is a child inferior, target_follow_fork must have created a thread
658 if (child_inf
!= nullptr)
659 gdb_assert (!child_inf
->thread_list
.empty ());
661 /* Clear the parent thread's pending follow field. Do this before calling
662 target_detach, so that the target can differentiate the two following
665 - We continue past a fork with "follow-fork-mode == child" &&
666 "detach-on-fork on", and therefore detach the parent. In that
667 case the target should not detach the fork child.
668 - We run to a fork catchpoint and the user types "detach". In that
669 case, the target should detach the fork child in addition to the
672 The former case will have pending_follow cleared, the later will have
673 pending_follow set. */
674 thread_info
*parent_thread
= parent_inf
->find_thread (parent_ptid
);
675 gdb_assert (parent_thread
!= nullptr);
676 parent_thread
->pending_follow
.set_spurious ();
678 /* Detach the parent if needed. */
681 /* If we're vforking, we want to hold on to the parent until
682 the child exits or execs. At child exec or exit time we
683 can remove the old breakpoints from the parent and detach
684 or resume debugging it. Otherwise, detach the parent now;
685 we'll want to reuse it's program/address spaces, but we
686 can't set them to the child before removing breakpoints
687 from the parent, otherwise, the breakpoints module could
688 decide to remove breakpoints from the wrong process (since
689 they'd be assigned to the same address space). */
693 gdb_assert (child_inf
->vfork_parent
== nullptr);
694 gdb_assert (parent_inf
->vfork_child
== nullptr);
695 child_inf
->vfork_parent
= parent_inf
;
696 child_inf
->pending_detach
= false;
697 parent_inf
->vfork_child
= child_inf
;
698 parent_inf
->pending_detach
= detach_fork
;
700 else if (detach_fork
)
702 if (print_inferior_events
)
704 /* Ensure that we have a process ptid. */
705 ptid_t process_ptid
= ptid_t (parent_ptid
.pid ());
707 target_terminal::ours_for_output ();
708 gdb_printf (_("[Detaching after fork from "
710 target_pid_to_str (process_ptid
).c_str ());
713 target_detach (parent_inf
, 0);
717 /* If we ended up creating a new inferior, call post_create_inferior to inform
718 the various subcomponents. */
719 if (child_inf
!= nullptr)
721 /* If FOLLOW_CHILD, we leave CHILD_INF as the current inferior
722 (do not restore the parent as the current inferior). */
723 std::optional
<scoped_restore_current_thread
> maybe_restore
;
725 if (!follow_child
&& !sched_multi
)
726 maybe_restore
.emplace ();
728 switch_to_thread (*child_inf
->threads ().begin ());
730 post_create_inferior (0, child_has_new_pspace
);
736 /* Set the last target status as TP having stopped. */
739 set_last_target_status_stopped (thread_info
*tp
)
741 set_last_target_status (tp
->inf
->process_target (), tp
->ptid
,
742 target_waitstatus
{}.set_stopped (GDB_SIGNAL_0
));
745 /* Tell the target to follow the fork we're stopped at. Returns true
746 if the inferior should be resumed; false, if the target for some
747 reason decided it's best not to resume. */
752 INFRUN_SCOPED_DEBUG_ENTER_EXIT
;
754 bool follow_child
= (follow_fork_mode_string
== follow_fork_mode_child
);
755 bool should_resume
= true;
757 /* Copy user stepping state to the new inferior thread. FIXME: the
758 followed fork child thread should have a copy of most of the
759 parent thread structure's run control related fields, not just these.
760 Initialized to avoid "may be used uninitialized" warnings from gcc. */
761 struct breakpoint
*step_resume_breakpoint
= nullptr;
762 struct breakpoint
*exception_resume_breakpoint
= nullptr;
763 CORE_ADDR step_range_start
= 0;
764 CORE_ADDR step_range_end
= 0;
765 int current_line
= 0;
766 symtab
*current_symtab
= nullptr;
767 struct frame_id step_frame_id
= { 0 };
771 thread_info
*cur_thr
= inferior_thread ();
774 = user_visible_resume_ptid (cur_thr
->control
.stepping_command
);
775 process_stratum_target
*resume_target
776 = user_visible_resume_target (resume_ptid
);
778 /* Check if there's a thread that we're about to resume, other
779 than the current, with an unfollowed fork/vfork. If so,
780 switch back to it, to tell the target to follow it (in either
781 direction). We'll afterwards refuse to resume, and inform
782 the user what happened. */
783 for (thread_info
*tp
: all_non_exited_threads (resume_target
,
789 /* follow_fork_inferior clears tp->pending_follow, and below
790 we'll need the value after the follow_fork_inferior
792 target_waitkind kind
= tp
->pending_follow
.kind ();
794 if (kind
!= TARGET_WAITKIND_SPURIOUS
)
796 infrun_debug_printf ("need to follow-fork [%s] first",
797 tp
->ptid
.to_string ().c_str ());
799 switch_to_thread (tp
);
801 /* Set up inferior(s) as specified by the caller, and
802 tell the target to do whatever is necessary to follow
803 either parent or child. */
806 /* The thread that started the execution command
807 won't exist in the child. Abort the command and
808 immediately stop in this thread, in the child,
810 should_resume
= false;
814 /* Following the parent, so let the thread fork its
815 child freely, it won't influence the current
816 execution command. */
817 if (follow_fork_inferior (follow_child
, detach_fork
))
819 /* Target refused to follow, or there's some
820 other reason we shouldn't resume. */
821 switch_to_thread (cur_thr
);
822 set_last_target_status_stopped (cur_thr
);
826 /* If we're following a vfork, when we need to leave
827 the just-forked thread as selected, as we need to
828 solo-resume it to collect the VFORK_DONE event.
829 If we're following a fork, however, switch back
830 to the original thread that we continue stepping
832 if (kind
!= TARGET_WAITKIND_VFORKED
)
834 gdb_assert (kind
== TARGET_WAITKIND_FORKED
);
835 switch_to_thread (cur_thr
);
844 thread_info
*tp
= inferior_thread ();
846 /* If there were any forks/vforks that were caught and are now to be
847 followed, then do so now. */
848 switch (tp
->pending_follow
.kind ())
850 case TARGET_WAITKIND_FORKED
:
851 case TARGET_WAITKIND_VFORKED
:
853 ptid_t parent
, child
;
854 std::unique_ptr
<struct thread_fsm
> thread_fsm
;
856 /* If the user did a next/step, etc, over a fork call,
857 preserve the stepping state in the fork child. */
858 if (follow_child
&& should_resume
)
860 step_resume_breakpoint
= clone_momentary_breakpoint
861 (tp
->control
.step_resume_breakpoint
);
862 step_range_start
= tp
->control
.step_range_start
;
863 step_range_end
= tp
->control
.step_range_end
;
864 current_line
= tp
->current_line
;
865 current_symtab
= tp
->current_symtab
;
866 step_frame_id
= tp
->control
.step_frame_id
;
867 exception_resume_breakpoint
868 = clone_momentary_breakpoint (tp
->control
.exception_resume_breakpoint
);
869 thread_fsm
= tp
->release_thread_fsm ();
871 /* For now, delete the parent's sr breakpoint, otherwise,
872 parent/child sr breakpoints are considered duplicates,
873 and the child version will not be installed. Remove
874 this when the breakpoints module becomes aware of
875 inferiors and address spaces. */
876 delete_step_resume_breakpoint (tp
);
877 tp
->control
.step_range_start
= 0;
878 tp
->control
.step_range_end
= 0;
879 tp
->control
.step_frame_id
= null_frame_id
;
880 delete_exception_resume_breakpoint (tp
);
883 parent
= inferior_ptid
;
884 child
= tp
->pending_follow
.child_ptid ();
886 /* If handling a vfork, stop all the inferior's threads, they will be
887 restarted when the vfork shared region is complete. */
888 if (tp
->pending_follow
.kind () == TARGET_WAITKIND_VFORKED
889 && target_is_non_stop_p ())
890 stop_all_threads ("handling vfork", tp
->inf
);
892 process_stratum_target
*parent_targ
= tp
->inf
->process_target ();
893 /* Set up inferior(s) as specified by the caller, and tell the
894 target to do whatever is necessary to follow either parent
896 if (follow_fork_inferior (follow_child
, detach_fork
))
898 /* Target refused to follow, or there's some other reason
899 we shouldn't resume. */
904 /* If we followed the child, switch to it... */
907 tp
= parent_targ
->find_thread (child
);
908 switch_to_thread (tp
);
910 /* ... and preserve the stepping state, in case the
911 user was stepping over the fork call. */
914 tp
->control
.step_resume_breakpoint
915 = step_resume_breakpoint
;
916 tp
->control
.step_range_start
= step_range_start
;
917 tp
->control
.step_range_end
= step_range_end
;
918 tp
->current_line
= current_line
;
919 tp
->current_symtab
= current_symtab
;
920 tp
->control
.step_frame_id
= step_frame_id
;
921 tp
->control
.exception_resume_breakpoint
922 = exception_resume_breakpoint
;
923 tp
->set_thread_fsm (std::move (thread_fsm
));
927 /* If we get here, it was because we're trying to
928 resume from a fork catchpoint, but, the user
929 has switched threads away from the thread that
930 forked. In that case, the resume command
931 issued is most likely not applicable to the
932 child, so just warn, and refuse to resume. */
933 warning (_("Not resuming: switched threads "
934 "before following fork child."));
937 /* Reset breakpoints in the child as appropriate. */
938 follow_inferior_reset_breakpoints ();
943 case TARGET_WAITKIND_SPURIOUS
:
944 /* Nothing to follow. */
947 internal_error ("Unexpected pending_follow.kind %d\n",
948 tp
->pending_follow
.kind ());
953 set_last_target_status_stopped (tp
);
954 return should_resume
;
958 follow_inferior_reset_breakpoints (void)
960 struct thread_info
*tp
= inferior_thread ();
962 /* Was there a step_resume breakpoint? (There was if the user
963 did a "next" at the fork() call.) If so, explicitly reset its
964 thread number. Cloned step_resume breakpoints are disabled on
965 creation, so enable it here now that it is associated with the
968 step_resumes are a form of bp that are made to be per-thread.
969 Since we created the step_resume bp when the parent process
970 was being debugged, and now are switching to the child process,
971 from the breakpoint package's viewpoint, that's a switch of
972 "threads". We must update the bp's notion of which thread
973 it is for, or it'll be ignored when it triggers. */
975 if (tp
->control
.step_resume_breakpoint
)
977 breakpoint_re_set_thread (tp
->control
.step_resume_breakpoint
);
978 tp
->control
.step_resume_breakpoint
->first_loc ().enabled
= 1;
981 /* Treat exception_resume breakpoints like step_resume breakpoints. */
982 if (tp
->control
.exception_resume_breakpoint
)
984 breakpoint_re_set_thread (tp
->control
.exception_resume_breakpoint
);
985 tp
->control
.exception_resume_breakpoint
->first_loc ().enabled
= 1;
988 /* Reinsert all breakpoints in the child. The user may have set
989 breakpoints after catching the fork, in which case those
990 were never set in the child, but only in the parent. This makes
991 sure the inserted breakpoints match the breakpoint list. */
993 breakpoint_re_set ();
994 insert_breakpoints ();
997 /* The child has exited or execed: resume THREAD, a thread of the parent,
998 if it was meant to be executing. */
1001 proceed_after_vfork_done (thread_info
*thread
)
1003 if (thread
->state
== THREAD_RUNNING
1004 && !thread
->executing ()
1005 && !thread
->stop_requested
1006 && thread
->stop_signal () == GDB_SIGNAL_0
)
1008 infrun_debug_printf ("resuming vfork parent thread %s",
1009 thread
->ptid
.to_string ().c_str ());
1011 switch_to_thread (thread
);
1012 clear_proceed_status (0);
1013 proceed ((CORE_ADDR
) -1, GDB_SIGNAL_DEFAULT
);
/* Called whenever we notice an exec or exit event, to handle
   detaching or resuming a vfork parent.  EXEC is non-zero for an exec
   event, zero for an exit event.  No-op unless the current inferior
   still has a vfork parent bond recorded.  */

static void
handle_vfork_child_exec_or_exit (int exec)
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  struct inferior *inf = current_inferior ();

  if (inf->vfork_parent)
    {
      /* Set to the parent iff it should be set running again below.  */
      inferior *resume_parent = nullptr;

      /* This exec or exit marks the end of the shared memory region
	 between the parent and the child.  Break the bonds.  */
      inferior *vfork_parent = inf->vfork_parent;
      inf->vfork_parent->vfork_child = nullptr;
      inf->vfork_parent = nullptr;

      /* If the user wanted to detach from the parent, now is the
	 time.  */
      if (vfork_parent->pending_detach)
	{
	  struct program_space *pspace;

	  /* follow-fork child, detach-on-fork on.  */

	  vfork_parent->pending_detach = false;

	  scoped_restore_current_pspace_and_thread restore_thread;

	  /* We're letting loose of the parent.  */
	  thread_info *tp = any_live_thread_of_inferior (vfork_parent);
	  switch_to_thread (tp);

	  /* We're about to detach from the parent, which implicitly
	     removes breakpoints from its address space.  There's a
	     catch here: we want to reuse the spaces for the child,
	     but, parent/child are still sharing the pspace at this
	     point, although the exec in reality makes the kernel give
	     the child a fresh set of new pages.  The problem here is
	     that the breakpoints module being unaware of this, would
	     likely chose the child process to write to the parent
	     address space.  Swapping the child temporarily away from
	     the spaces has the desired effect.  Yes, this is "sort
	     of" a hack.  */

	  pspace = inf->pspace;
	  inf->pspace = nullptr;
	  address_space_ref_ptr aspace = std::move (inf->aspace);

	  if (print_inferior_events)
	    {
	      std::string pidstr
		= target_pid_to_str (ptid_t (vfork_parent->pid));

	      target_terminal::ours_for_output ();

	      if (exec)
		{
		  gdb_printf (_("[Detaching vfork parent %s "
				"after child exec]\n"), pidstr.c_str ());
		}
	      else
		{
		  gdb_printf (_("[Detaching vfork parent %s "
				"after child exit]\n"), pidstr.c_str ());
		}
	    }

	  target_detach (vfork_parent, 0);

	  /* Detach done; put the child's spaces back.  */
	  inf->pspace = pspace;
	  inf->aspace = aspace;
	}
      else if (exec)
	{
	  /* We're staying attached to the parent, so, really give the
	     child a new address space.  */
	  inf->pspace = new program_space (maybe_new_address_space ());
	  inf->aspace = inf->pspace->aspace;
	  inf->removable = true;
	  set_current_program_space (inf->pspace);

	  resume_parent = vfork_parent;
	}
      else
	{
	  /* If this is a vfork child exiting, then the pspace and
	     aspaces were shared with the parent.  Since we're
	     reporting the process exit, we'll be mourning all that is
	     found in the address space, and switching to null_ptid,
	     preparing to start a new inferior.  But, since we don't
	     want to clobber the parent's address/program spaces, we
	     go ahead and create a new one for this exiting
	     inferior.  */

	  scoped_restore_current_thread restore_thread;

	  /* Temporarily switch to the vfork parent, to facilitate ptrace
	     calls done during maybe_new_address_space.  */
	  switch_to_thread (any_live_thread_of_inferior (vfork_parent));
	  address_space_ref_ptr aspace = maybe_new_address_space ();

	  /* Switch back to the vfork child inferior.  Switch to no-thread
	     while running clone_program_space, so that clone_program_space
	     doesn't want to read the selected frame of a dead process.  */
	  switch_to_inferior_no_thread (inf);

	  inf->pspace = new program_space (std::move (aspace));
	  inf->aspace = inf->pspace->aspace;
	  set_current_program_space (inf->pspace);
	  inf->removable = true;
	  inf->symfile_flags = SYMFILE_NO_READ;
	  clone_program_space (inf->pspace, vfork_parent->pspace);

	  resume_parent = vfork_parent;
	}

      gdb_assert (current_program_space == inf->pspace);

      if (non_stop && resume_parent != nullptr)
	{
	  /* If the user wanted the parent to be running, let it go
	     free now.  */
	  scoped_restore_current_thread restore_thread;

	  infrun_debug_printf ("resuming vfork parent process %d",
			       resume_parent->pid);

	  for (thread_info *thread : resume_parent->threads ())
	    proceed_after_vfork_done (thread);
	}
    }
}
/* Handle TARGET_WAITKIND_VFORK_DONE reported for EVENT_THREAD.  */

static void
handle_vfork_done (thread_info *event_thread)
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  /* We only care about this event if inferior::thread_waiting_for_vfork_done is
     set, that is if we are waiting for a vfork child not under our control
     (because we detached it) to exec or exit.

     If an inferior has vforked and we are debugging the child, we don't use
     the vfork-done event to get notified about the end of the shared address
     space window.  We rely instead on the child's exec or exit event, and the
     inferior::vfork_{parent,child} fields are used instead.  See
     handle_vfork_child_exec_or_exit for that.  */
  if (event_thread->inf->thread_waiting_for_vfork_done == nullptr)
    {
      infrun_debug_printf ("not waiting for a vfork-done event");
      return;
    }

  /* We stopped all threads (other than the vforking thread) of the inferior in
     follow_fork and kept them stopped until now.  It should therefore not be
     possible for another thread to have reported a vfork during that window.
     If THREAD_WAITING_FOR_VFORK_DONE is set, it has to be the same thread whose
     vfork-done we are handling right now.  */
  gdb_assert (event_thread->inf->thread_waiting_for_vfork_done == event_thread);

  event_thread->inf->thread_waiting_for_vfork_done = nullptr;
  /* The shared-memory window is over; breakpoints may be inserted again.  */
  event_thread->inf->pspace->breakpoints_not_allowed = 0;

  /* On non-stop targets, we stopped all the inferior's threads in follow_fork,
     resume them now.  On all-stop targets, everything that needs to be resumed
     will be when we resume the event thread.  */
  if (target_is_non_stop_p ())
    {
      /* restart_threads and start_step_over may change the current thread, make
	 sure we leave the event thread as the current thread.  */
      scoped_restore_current_thread restore_thread;

      insert_breakpoints ();

      start_step_over ();

      if (!step_over_info_valid_p ())
	restart_threads (event_thread, event_thread->inf);
    }
}
/* Enum strings for "set|show follow-exec-mode".  */

static const char follow_exec_mode_new[] = "new";
static const char follow_exec_mode_same[] = "same";
static const char *const follow_exec_mode_names[] =
{
  follow_exec_mode_new,
  follow_exec_mode_same,
  nullptr,
};

/* The currently selected mode; compared by pointer identity against the
   entries above.  Defaults to following the exec in the same inferior.  */
static const char *follow_exec_mode_string = follow_exec_mode_same;
/* Implementation of "show follow-exec-mode".  */

static void
show_follow_exec_mode_string (struct ui_file *file, int from_tty,
			      struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Follow exec mode is \"%s\".\n"), value);
}
/* Follow an exec event reported for PTID.  EXEC_FILE_TARGET is the
   target-reported path of the new executable; it is assumed to be
   non-NULL.  */

static void
follow_exec (ptid_t ptid, const char *exec_file_target)
{
  int pid = ptid.pid ();
  ptid_t process_ptid;

  /* Switch terminal for any messages produced e.g. by
     breakpoint_re_set.  */
  target_terminal::ours_for_output ();

  /* This is an exec event that we actually wish to pay attention to.
     Refresh our symbol table to the newly exec'd program, remove any
     momentary bp's, etc.

     If there are breakpoints, they aren't really inserted now,
     since the exec() transformed our inferior into a fresh set
     of instructions.

     We want to preserve symbolic breakpoints on the list, since
     we have hopes that they can be reset after the new a.out's
     symbol table is read.

     However, any "raw" breakpoints must be removed from the list
     (e.g., the solib bp's), since their address is probably invalid
     now.

     And, we DON'T want to call delete_breakpoints() here, since
     that may write the bp's "shadow contents" (the instruction
     value that was overwritten with a TRAP instruction).  Since
     we now have a new a.out, those shadow contents aren't valid.  */

  mark_breakpoints_out (current_program_space);

  /* The target reports the exec event to the main thread, even if
     some other thread does the exec, and even if the main thread was
     stopped or already gone.  We may still have non-leader threads of
     the process on our list.  E.g., on targets that don't have thread
     exit events (like remote) and nothing forces an update of the
     thread list up to here.  When debugging remotely, it's best to
     avoid extra traffic, when possible, so avoid syncing the thread
     list with the target, and instead go ahead and delete all threads
     of the process but the one that reported the event.  Note this must
     be done before calling update_breakpoints_after_exec, as
     otherwise clearing the threads' resources would reference stale
     thread breakpoints -- it may have been one of these threads that
     stepped across the exec.  We could just clear their stepping
     states, but as long as we're iterating, might as well delete
     them.  Deleting them now rather than at the next user-visible
     stop provides a nicer sequence of events for user and MI
     notifications.  */
  for (thread_info *th : all_threads_safe ())
    if (th->ptid.pid () == pid && th->ptid != ptid)
      delete_thread (th);

  /* We also need to clear any left over stale state for the
     leader/event thread.  E.g., if there was any step-resume
     breakpoint or similar, it's gone now.  We cannot truly
     step-to-next statement through an exec().  */
  thread_info *th = inferior_thread ();
  th->control.step_resume_breakpoint = nullptr;
  th->control.exception_resume_breakpoint = nullptr;
  th->control.single_step_breakpoints = nullptr;
  th->control.step_range_start = 0;
  th->control.step_range_end = 0;

  /* The user may have had the main thread held stopped in the
     previous image (e.g., schedlock on, or non-stop).  Release
     it now.  */
  th->stop_requested = false;

  update_breakpoints_after_exec ();

  /* What is this a.out's name?  */
  process_ptid = ptid_t (pid);
  gdb_printf (_("%s is executing new program: %s\n"),
	      target_pid_to_str (process_ptid).c_str (),
	      exec_file_target);

  /* We've followed the inferior through an exec.  Therefore, the
     inferior has essentially been killed & reborn.  */

  breakpoint_init_inferior (current_inferior (), inf_execd);

  gdb::unique_xmalloc_ptr<char> exec_file_host
    = exec_file_find (exec_file_target, nullptr);

  /* If we were unable to map the executable target pathname onto a host
     pathname, tell the user that.  Otherwise GDB's subsequent behavior
     is confusing.  Maybe it would even be better to stop at this point
     so that the user can specify a file manually before continuing.  */
  if (exec_file_host == nullptr)
    warning (_("Could not load symbols for executable %s.\n"
	       "Do you need \"%ps\"?"),
	     exec_file_target,
	     styled_string (command_style.style (), "set sysroot"));

  /* Reset the shared library package.  This ensures that we get a
     shlib event when the child reaches "_start", at which point the
     dld will have had a chance to initialize the child.  */
  /* Also, loading a symbol file below may trigger symbol lookups, and
     we don't want those to be satisfied by the libraries of the
     previous incarnation of this process.  */
  no_shared_libraries (current_program_space);
  current_program_space->unset_solib_ops ();

  inferior *execing_inferior = current_inferior ();
  inferior *following_inferior;

  if (follow_exec_mode_string == follow_exec_mode_new)
    {
      /* The user wants to keep the old inferior and program spaces
	 around.  Create a new fresh one, and switch to it.  */

      /* Do exit processing for the original inferior before setting the new
	 inferior's pid.  Having two inferiors with the same pid would confuse
	 find_inferior_p(t)id.  Transfer the terminal state and info from the
	 old to the new inferior.  */
      following_inferior = add_inferior_with_spaces ();

      swap_terminal_info (following_inferior, execing_inferior);
      exit_inferior (execing_inferior);

      following_inferior->pid = pid;
    }
  else
    {
      /* follow-exec-mode is "same", we continue execution in the execing
	 inferior.  */
      following_inferior = execing_inferior;

      /* The old description may no longer be fit for the new image.
	 E.g, a 64-bit process exec'ed a 32-bit process.  Clear the
	 old description; we'll read a new one below.  No need to do
	 this on "follow-exec-mode new", as the old inferior stays
	 around (its description is later cleared/refetched on
	 restart).  */
      target_clear_description ();
    }

  target_follow_exec (following_inferior, ptid, exec_file_target);

  gdb_assert (current_inferior () == following_inferior);
  gdb_assert (current_program_space == following_inferior->pspace);

  /* Attempt to open the exec file.  SYMFILE_DEFER_BP_RESET is used
     because the proper displacement for a PIE (Position Independent
     Executable) main symbol file will only be computed by
     solib_create_inferior_hook below.  breakpoint_re_set would fail
     to insert the breakpoints with the zero displacement.  */
  try_open_exec_file (exec_file_host.get (), following_inferior,
		      SYMFILE_DEFER_BP_RESET);

  /* If the target can specify a description, read it.  Must do this
     after flipping to the new executable (because the target supplied
     description must be compatible with the executable's
     architecture, and the old executable may e.g., be 32-bit, while
     the new one 64-bit), and before anything involving memory or
     registers.  */
  target_find_description ();

  current_program_space->set_solib_ops
    (gdbarch_make_solib_ops (following_inferior->arch ()));

  gdb::observers::inferior_execd.notify (execing_inferior, following_inferior);

  breakpoint_re_set ();

  /* Reinsert all breakpoints.  (Those which were symbolic have
     been reset to the proper address in the new a.out, thanks
     to symbol_file_command...).  */
  insert_breakpoints ();

  /* The next resume of this inferior should bring it to the shlib
     startup breakpoints.  (If the user had also set bp's on
     "main" from the old (parent) process, then they'll auto-
     matically get reset there in the new process.).  */
}
/* The chain of threads that need to do a step-over operation to get
   past e.g., a breakpoint.  What technique is used to step over the
   breakpoint/watchpoint does not matter -- all threads end up in the
   same queue, to maintain rough temporal order of execution, in order
   to avoid starvation, otherwise, we could e.g., find ourselves
   constantly stepping the same couple threads past their breakpoints
   over and over, if the single-step finish fast enough.  */
thread_step_over_list global_thread_step_over_list;
/* Bit flags indicating what the thread needs to step over.  */

enum step_over_what_flag
  {
    /* Step over a breakpoint.  */
    STEP_OVER_BREAKPOINT = 1,

    /* Step past a non-continuable watchpoint, in order to let the
       instruction execute so we can evaluate the watchpoint
       expression.  */
    STEP_OVER_WATCHPOINT = 2
  };
DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag, step_over_what);
/* Info about an instruction that is being stepped over.  */

struct step_over_info
{
  /* If we're stepping past a breakpoint, this is the address space
     and address of the instruction the breakpoint is set at.  We'll
     skip inserting all breakpoints here.  Valid iff ASPACE is
     non-NULL.  */
  const address_space *aspace = nullptr;
  CORE_ADDR address = 0;

  /* The instruction being stepped over triggers a nonsteppable
     watchpoint.  If true, we'll skip inserting watchpoints.  */
  int nonsteppable_watchpoint_p = 0;

  /* The thread's global number.  -1 means no thread is stepping over
     anything.  */
  int thread = -1;
};

/* The step-over info of the location that is being stepped over.

   Note that with async/breakpoint always-inserted mode, a user might
   set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
   being stepped over.  As setting a new breakpoint inserts all
   breakpoints, we need to make sure the breakpoint being stepped over
   isn't inserted then.  We do that by only clearing the step-over
   info when the step-over is actually finished (or aborted).

   Presently GDB can only step over one breakpoint at any given time.
   Given threads that can't run code in the same address space as the
   breakpoint's can't really miss the breakpoint, GDB could be taught
   to step-over at most one breakpoint per address space (so this info
   could move to the address space object if/when GDB is extended).
   The set of breakpoints being stepped over will normally be much
   smaller than the set of all breakpoints, so a flag in the
   breakpoint location structure would be wasteful.  A separate list
   also saves complexity and run-time, as otherwise we'd have to go
   through all breakpoint locations clearing their flag whenever we
   start a new sequence.  Similar considerations weigh against storing
   this info in the thread object.  Plus, not all step overs actually
   have breakpoint locations -- e.g., stepping past a single-step
   breakpoint, or stepping to complete a non-continuable
   watchpoint.  */
static struct step_over_info step_over_info;
/* Record the address of the breakpoint/instruction we're currently
   stepping over.  THREAD is the global number of the stepping thread.

   N.B.  We record the aspace and address now, instead of say just the thread,
   because when we need the info later the thread may be running.  */

static void
set_step_over_info (const address_space *aspace, CORE_ADDR address,
		    int nonsteppable_watchpoint_p,
		    int thread)
{
  step_over_info.aspace = aspace;
  step_over_info.address = address;
  step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
  step_over_info.thread = thread;
}
1486 /* Called when we're not longer stepping over a breakpoint / an
1487 instruction, so all breakpoints are free to be (re)inserted. */
1490 clear_step_over_info (void)
1492 infrun_debug_printf ("clearing step over info");
1493 step_over_info
.aspace
= nullptr;
1494 step_over_info
.address
= 0;
1495 step_over_info
.nonsteppable_watchpoint_p
= 0;
1496 step_over_info
.thread
= -1;
1502 stepping_past_instruction_at (struct address_space
*aspace
,
1505 return (step_over_info
.aspace
!= nullptr
1506 && breakpoint_address_match (aspace
, address
,
1507 step_over_info
.aspace
,
1508 step_over_info
.address
));
1514 thread_is_stepping_over_breakpoint (int thread
)
1516 return (step_over_info
.thread
!= -1
1517 && thread
== step_over_info
.thread
);
/* See infrun.h.  Non-zero while stepping past an instruction that
   triggered a non-continuable watchpoint.  */

int
stepping_past_nonsteppable_watchpoint (void)
{
  return step_over_info.nonsteppable_watchpoint_p;
}
1528 /* Returns true if step-over info is valid. */
1531 step_over_info_valid_p (void)
1533 return (step_over_info
.aspace
!= nullptr
1534 || stepping_past_nonsteppable_watchpoint ());
1538 /* Displaced stepping. */
1540 /* In non-stop debugging mode, we must take special care to manage
1541 breakpoints properly; in particular, the traditional strategy for
1542 stepping a thread past a breakpoint it has hit is unsuitable.
1543 'Displaced stepping' is a tactic for stepping one thread past a
1544 breakpoint it has hit while ensuring that other threads running
1545 concurrently will hit the breakpoint as they should.
1547 The traditional way to step a thread T off a breakpoint in a
1548 multi-threaded program in all-stop mode is as follows:
1550 a0) Initially, all threads are stopped, and breakpoints are not
1552 a1) We single-step T, leaving breakpoints uninserted.
1553 a2) We insert breakpoints, and resume all threads.
1555 In non-stop debugging, however, this strategy is unsuitable: we
1556 don't want to have to stop all threads in the system in order to
1557 continue or step T past a breakpoint. Instead, we use displaced
1560 n0) Initially, T is stopped, other threads are running, and
1561 breakpoints are inserted.
1562 n1) We copy the instruction "under" the breakpoint to a separate
1563 location, outside the main code stream, making any adjustments
1564 to the instruction, register, and memory state as directed by
1566 n2) We single-step T over the instruction at its new location.
1567 n3) We adjust the resulting register and memory state as directed
1568 by T's architecture. This includes resetting T's PC to point
1569 back into the main instruction stream.
1572 This approach depends on the following gdbarch methods:
1574 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1575 indicate where to copy the instruction, and how much space must
1576 be reserved there. We use these in step n1.
1578 - gdbarch_displaced_step_copy_insn copies a instruction to a new
1579 address, and makes any necessary adjustments to the instruction,
1580 register contents, and memory. We use this in step n1.
1582 - gdbarch_displaced_step_fixup adjusts registers and memory after
1583 we have successfully single-stepped the instruction, to yield the
1584 same effect the instruction would have had if we had executed it
1585 at its original address. We use this in step n3.
1587 The gdbarch_displaced_step_copy_insn and
1588 gdbarch_displaced_step_fixup functions must be written so that
1589 copying an instruction with gdbarch_displaced_step_copy_insn,
1590 single-stepping across the copied instruction, and then applying
1591 gdbarch_displaced_step_fixup should have the same effects
1592 thread's memory and registers as stepping the instruction in place
1593 would have. Exactly which responsibilities fall to the copy and
1594 which fall to the fixup is up to the author of those functions.
1596 See the comments in gdbarch.sh for details.
1598 Note that displaced stepping and software single-step cannot
1599 currently be used in combination, although with some care I think
1600 they could be made to. Software single-step works by placing
1601 breakpoints on all possible subsequent instructions; if the
1602 displaced instruction is a PC-relative jump, those breakpoints
1603 could fall in very strange places --- on pages that aren't
1604 executable, or at addresses that are not proper instruction
1605 boundaries. (We do generally let other threads run while we wait
1606 to hit the software single-step breakpoint, and they might
1607 encounter such a corrupted instruction.) One way to work around
1608 this would be to have gdbarch_displaced_step_copy_insn fully
1609 simulate the effect of PC-relative instructions (and return NULL)
1610 on architectures that use software single-stepping.
1612 In non-stop mode, we can have independent and simultaneous step
1613 requests, so more than one thread may need to simultaneously step
1614 over a breakpoint. The current implementation assumes there is
1615 only one scratch space per process. In this case, we have to
1616 serialize access to the scratch space. If thread A wants to step
1617 over a breakpoint, but we are currently waiting for some other
1618 thread to complete a displaced step, we leave thread A stopped and
1619 place it in the displaced_step_request_queue. Whenever a displaced
1620 step finishes, we pick the next thread in the queue and start a new
1621 displaced step operation on it. See displaced_step_prepare and
1622 displaced_step_finish for details. */
1624 /* Return true if THREAD is doing a displaced step. */
1627 displaced_step_in_progress_thread (thread_info
*thread
)
1629 gdb_assert (thread
!= nullptr);
1631 return thread
->displaced_step_state
.in_progress ();
/* Return true if INF has a thread doing a displaced step.  */

static bool
displaced_step_in_progress (inferior *inf)
{
  return inf->displaced_step_state.in_progress_count > 0;
}
/* Return true if any thread is doing a displaced step.  */

static bool
displaced_step_in_progress_any_thread ()
{
  for (inferior *inf : all_non_exited_inferiors ())
    {
      if (displaced_step_in_progress (inf))
	return true;
    }

  return false;
}
/* Observer for the inferior-exit event: discard this module's
   per-inferior state -- any pending displaced-step bookkeeping and the
   record of a thread waiting for vfork-done.  */

static void
infrun_inferior_exit (struct inferior *inf)
{
  inf->displaced_step_state.reset ();
  inf->thread_waiting_for_vfork_done = nullptr;
}
/* Observer for the inferior-execd event.  EXEC_INF is the inferior
   that did the exec; FOLLOW_INF is the inferior GDB follows after the
   exec (they may be the same, depending on follow-exec-mode).  */

static void
infrun_inferior_execd (inferior *exec_inf, inferior *follow_inf)
{
  /* If some threads were doing a displaced step in this inferior at the
     moment of the exec, they no longer exist.  Even if the exec'ing thread
     was doing a displaced step, we don't want to do any fixup nor restore
     displaced stepping buffer bytes.  */
  follow_inf->displaced_step_state.reset ();

  for (thread_info *thread : follow_inf->threads ())
    thread->displaced_step_state.reset ();

  /* Since an in-line step is done with everything else stopped, if there was
     one in progress at the time of the exec, it must have been the exec'ing
     thread.  */
  clear_step_over_info ();

  follow_inf->thread_waiting_for_vfork_done = nullptr;
}
/* If ON, and the architecture supports it, GDB will use displaced
   stepping to step over breakpoints.  If OFF, or if the architecture
   doesn't support it, GDB will instead use the traditional
   hold-and-step approach.  If AUTO (which is the default), GDB will
   decide which technique to use to step over breakpoints depending on
   whether the target works in a non-stop way (see use_displaced_stepping).  */

static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
/* Implementation of "show displaced-stepping".  When the setting is
   "auto", also report which behavior is currently in effect.  */

static void
show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
				 struct cmd_list_element *c,
				 const char *value)
{
  if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
    gdb_printf (file,
		_("Debugger's willingness to use displaced stepping "
		  "to step over breakpoints is %s (currently %s).\n"),
		value, target_is_non_stop_p () ? "on" : "off");
  else
    gdb_printf (file,
		_("Debugger's willingness to use displaced stepping "
		  "to step over breakpoints is %s.\n"), value);
}
/* Return true if the target behind THREAD supports displaced stepping.  */

static bool
target_supports_displaced_stepping (thread_info *thread)
{
  return thread->inf->top_target ()->supports_displaced_step (thread);
}
/* Return non-zero if displaced stepping can/should be used to step
   over breakpoints of thread TP.  */

static bool
use_displaced_stepping (thread_info *tp)
{
  /* If the user disabled it explicitly, don't use displaced stepping.  */
  if (can_use_displaced_stepping == AUTO_BOOLEAN_FALSE)
    return false;

  /* If "auto", only use displaced stepping if the target operates in a non-stop
     way.  */
  if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
      && !target_is_non_stop_p ())
    return false;

  /* If the target doesn't support displaced stepping, don't use it.  */
  if (!target_supports_displaced_stepping (tp))
    return false;

  /* If recording, don't use displaced stepping.  */
  if (find_record_target () != nullptr)
    return false;

  /* If displaced stepping failed before for this inferior, don't bother
     trying again.  */
  if (tp->inf->displaced_step_state.failed_before)
    return false;

  return true;
}
/* Simple function wrapper around displaced_step_thread_state::reset.  */

static void
displaced_step_reset (displaced_step_thread_state *displaced)
{
  displaced->reset ();
}

/* A cleanup that wraps displaced_step_reset.  We use this instead of, say,
   SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()".  */

using displaced_step_reset_cleanup = FORWARD_SCOPE_EXIT (displaced_step_reset);
/* Prepare to single-step, using displaced stepping.

   Note that we cannot use displaced stepping when we have a signal to
   deliver.  If we have a signal to deliver and an instruction to step
   over, then after the step, there will be no indication from the
   target whether the thread entered a signal handler or ignored the
   signal and stepped over the instruction successfully --- both cases
   result in a simple SIGTRAP.  In the first case we mustn't do a
   fixup, and in the second case we must --- but we can't tell which.
   Comments in the code for 'random signals' in handle_inferior_event
   explain how we handle this case instead.

   Returns DISPLACED_STEP_PREPARE_STATUS_OK if preparing was successful -- this
   thread is going to be stepped now; DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
   if displaced stepping this thread got queued; or
   DISPLACED_STEP_PREPARE_STATUS_CANT if this instruction can't be displaced
   stepped.  */

static displaced_step_prepare_status
displaced_step_prepare_throw (thread_info *tp)
{
  regcache *regcache = get_thread_regcache (tp);
  struct gdbarch *gdbarch = regcache->arch ();
  displaced_step_thread_state &disp_step_thread_state
    = tp->displaced_step_state;

  /* We should never reach this function if the target does not
     support displaced stepping.  */
  gdb_assert (target_supports_displaced_stepping (tp));

  /* Nor if the thread isn't meant to step over a breakpoint.  */
  gdb_assert (tp->control.trap_expected);

  /* Disable range stepping while executing in the scratch pad.  We
     want a single-step even if executing the displaced instruction in
     the scratch buffer lands within the stepping range (e.g., a
     jump/branch).  */
  tp->control.may_range_step = 0;

  /* We are about to start a displaced step for this thread.  If one is already
     in progress, something's wrong.  */
  gdb_assert (!disp_step_thread_state.in_progress ());

  if (tp->inf->displaced_step_state.unavailable)
    {
      /* The gdbarch tells us it's not worth asking to try a prepare because
	 it is likely that it will return unavailable, so don't bother asking.  */

      displaced_debug_printf ("deferring step of %s",
			      tp->ptid.to_string ().c_str ());

      global_thread_step_over_chain_enqueue (tp);
      return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
    }

  displaced_debug_printf ("displaced-stepping %s now",
			  tp->ptid.to_string ().c_str ());

  scoped_restore_current_thread restore_thread;

  switch_to_thread (tp);

  CORE_ADDR original_pc = regcache_read_pc (regcache);
  CORE_ADDR displaced_pc;

  /* Display the instruction we are going to displaced step.  */
  if (debug_displaced)
    {
      string_file tmp_stream;
      int dislen = gdb_print_insn (gdbarch, original_pc, &tmp_stream,
				   nullptr);

      if (dislen > 0)
	{
	  gdb::byte_vector insn_buf (dislen);
	  read_memory (original_pc, insn_buf.data (), insn_buf.size ());

	  std::string insn_bytes = bytes_to_string (insn_buf);

	  displaced_debug_printf ("original insn %s: %s \t %s",
				  paddress (gdbarch, original_pc),
				  insn_bytes.c_str (),
				  tmp_stream.string ().c_str ());
	}
      else
	displaced_debug_printf ("original insn %s: invalid length: %d",
				paddress (gdbarch, original_pc), dislen);
    }

  /* Ask the target to copy the instruction out to its scratch buffer.  */
  displaced_step_prepare_status status
    = tp->inf->top_target ()->displaced_step_prepare (tp, displaced_pc);

  if (status == DISPLACED_STEP_PREPARE_STATUS_CANT)
    {
      displaced_debug_printf ("failed to prepare (%s)",
			      tp->ptid.to_string ().c_str ());

      return DISPLACED_STEP_PREPARE_STATUS_CANT;
    }
  else if (status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
    {
      /* Not enough displaced stepping resources available, defer this
	 request by placing it the queue.  */

      displaced_debug_printf ("not enough resources available, "
			      "deferring step of %s",
			      tp->ptid.to_string ().c_str ());

      global_thread_step_over_chain_enqueue (tp);

      return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
    }

  gdb_assert (status == DISPLACED_STEP_PREPARE_STATUS_OK);

  /* Save the information we need to fix things up if the step
     succeeds.  */
  disp_step_thread_state.set (gdbarch);

  tp->inf->displaced_step_state.in_progress_count++;

  displaced_debug_printf ("prepared successfully thread=%s, "
			  "original_pc=%s, displaced_pc=%s",
			  tp->ptid.to_string ().c_str (),
			  paddress (gdbarch, original_pc),
			  paddress (gdbarch, displaced_pc));

  /* Display the new displaced instruction(s).  */
  if (debug_displaced)
    {
      string_file tmp_stream;
      CORE_ADDR addr = displaced_pc;

      /* If displaced stepping is going to use h/w single step then we know
	 that the replacement instruction can only be a single instruction,
	 in that case set the end address at the next byte.

	 Otherwise the displaced stepping copy instruction routine could
	 have generated multiple instructions, and all we know is that they
	 must fit within the LEN bytes of the buffer.  */
      CORE_ADDR end
	= addr + (gdbarch_displaced_step_hw_singlestep (gdbarch)
		  ? 1 : gdbarch_displaced_step_buffer_length (gdbarch));

      while (addr < end)
	{
	  int dislen = gdb_print_insn (gdbarch, addr, &tmp_stream, nullptr);
	  if (dislen <= 0)
	    {
	      displaced_debug_printf
		("replacement insn %s: invalid length: %d",
		 paddress (gdbarch, addr), dislen);
	      break;
	    }

	  gdb::byte_vector insn_buf (dislen);
	  read_memory (addr, insn_buf.data (), insn_buf.size ());

	  std::string insn_bytes = bytes_to_string (insn_buf);
	  std::string insn_str = tmp_stream.release ();
	  displaced_debug_printf ("replacement insn %s: %s \t %s",
				  paddress (gdbarch, addr),
				  insn_bytes.c_str (),
				  insn_str.c_str ());
	  addr += dislen;
	}
    }

  return DISPLACED_STEP_PREPARE_STATUS_OK;
}
/* Wrapper for displaced_step_prepare_throw that disabled further
   attempts at displaced stepping if we get a memory error.  Other
   errors are propagated to the caller.  */

static displaced_step_prepare_status
displaced_step_prepare (thread_info *thread)
{
  displaced_step_prepare_status status
    = DISPLACED_STEP_PREPARE_STATUS_CANT;

  try
    {
      status = displaced_step_prepare_throw (thread);
    }
  catch (const gdb_exception_error &ex)
    {
      /* Only memory / not-supported errors disable displaced stepping;
	 anything else is re-thrown.  */
      if (ex.error != MEMORY_ERROR
	  && ex.error != NOT_SUPPORTED_ERROR)
	throw;

      infrun_debug_printf ("caught exception, disabling displaced stepping: %s",
			   ex.what ());

      /* Be verbose if "set displaced-stepping" is "on", silent if
	 "auto".  */
      if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
	{
	  warning (_("disabling displaced stepping: %s"),
		   ex.what ());
	}

      /* Disable further displaced stepping attempts.  */
      thread->inf->displaced_step_state.failed_before = 1;
    }

  return status;
}
/* True if any thread of TARGET that matches RESUME_PTID requires
   target_thread_events enabled.  This assumes TARGET does not support
   target thread options.  */

static bool
any_thread_needs_target_thread_events (process_stratum_target *target,
				       ptid_t resume_ptid)
{
  for (thread_info *tp : all_non_exited_threads (target, resume_ptid))
    if (displaced_step_in_progress_thread (tp)
	|| schedlock_applies (tp)
	|| tp->thread_fsm () != nullptr)
      return true;

  return false;
}
1985 /* Maybe disable thread-{cloned,created,exited} event reporting after
1986 a step-over (either in-line or displaced) finishes. */
1989 update_thread_events_after_step_over (thread_info
*event_thread
,
1990 const target_waitstatus
&event_status
)
1992 if (schedlock_applies (event_thread
))
1994 /* If scheduler-locking applies, continue reporting
1995 thread-created/thread-cloned events. */
1998 else if (target_supports_set_thread_options (0))
2000 /* We can control per-thread options. Disable events for the
2001 event thread, unless the thread is gone. */
2002 if (event_status
.kind () != TARGET_WAITKIND_THREAD_EXITED
)
2003 event_thread
->set_thread_options (0);
2007 /* We can only control the target-wide target_thread_events
2008 setting. Disable it, but only if other threads in the target
2009 don't need it enabled. */
2010 process_stratum_target
*target
= event_thread
->inf
->process_target ();
2011 if (!any_thread_needs_target_thread_events (target
, minus_one_ptid
))
2012 target_thread_events (false);
2016 /* If we displaced stepped an instruction successfully, adjust registers and
2017 memory to yield the same effect the instruction would have had if we had
2018 executed it at its original address, and return
2019 DISPLACED_STEP_FINISH_STATUS_OK. If the instruction didn't complete,
2020 relocate the PC and return DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED.
2022 If the thread wasn't displaced stepping, return
2023 DISPLACED_STEP_FINISH_STATUS_OK as well. */
2025 static displaced_step_finish_status
2026 displaced_step_finish (thread_info
*event_thread
,
2027 const target_waitstatus
&event_status
)
2029 /* Check whether the parent is displaced stepping. */
2030 inferior
*parent_inf
= event_thread
->inf
;
2031 target_ops
*top_target
= parent_inf
->top_target ();
2033 /* If this was a fork/vfork/clone, this event indicates that the
2034 displaced stepping of the syscall instruction has been done, so
2035 we perform cleanup for parent here. Also note that this
2036 operation also cleans up the child for vfork, because their pages
2039 /* If this is a fork (child gets its own address space copy) and
2040 some displaced step buffers were in use at the time of the fork,
2041 restore the displaced step buffer bytes in the child process.
2043 Architectures which support displaced stepping and fork events
2044 must supply an implementation of
2045 gdbarch_displaced_step_restore_all_in_ptid. This is not enforced
2046 during gdbarch validation to support architectures which support
2047 displaced stepping but not forks. */
2048 if (event_status
.kind () == TARGET_WAITKIND_FORKED
2049 && target_supports_displaced_stepping (event_thread
))
2050 top_target
->displaced_step_restore_all_in_ptid
2051 (parent_inf
, event_status
.child_ptid ());
2053 displaced_step_thread_state
*displaced
= &event_thread
->displaced_step_state
;
2055 /* Was this thread performing a displaced step? */
2056 if (!displaced
->in_progress ())
2057 return DISPLACED_STEP_FINISH_STATUS_OK
;
2059 update_thread_events_after_step_over (event_thread
, event_status
);
2061 gdb_assert (event_thread
->inf
->displaced_step_state
.in_progress_count
> 0);
2062 event_thread
->inf
->displaced_step_state
.in_progress_count
--;
2064 /* Fixup may need to read memory/registers. Switch to the thread
2065 that we're fixing up. Also, target_stopped_by_watchpoint checks
2066 the current thread, and displaced_step_restore performs ptid-dependent
2067 memory accesses using current_inferior(). */
2068 switch_to_thread (event_thread
);
2070 displaced_step_reset_cleanup
cleanup (displaced
);
2072 /* Do the fixup, and release the resources acquired to do the displaced
2074 auto status
= top_target
->displaced_step_finish (event_thread
, event_status
);
2076 if (event_status
.kind () == TARGET_WAITKIND_FORKED
2077 || event_status
.kind () == TARGET_WAITKIND_VFORKED
2078 || event_status
.kind () == TARGET_WAITKIND_THREAD_CLONED
)
2080 /* Since the vfork/fork/clone syscall instruction was executed
2081 in the scratchpad, the child's PC is also within the
2082 scratchpad. Set the child's PC to the parent's PC value,
2083 which has already been fixed up. Note: we use the parent's
2084 aspace here, although we're touching the child, because the
2085 child hasn't been added to the inferior list yet at this
2088 struct regcache
*parent_regcache
= get_thread_regcache (event_thread
);
2089 struct gdbarch
*gdbarch
= parent_regcache
->arch ();
2090 struct regcache
*child_regcache
2091 = get_thread_arch_regcache (parent_inf
, event_status
.child_ptid (),
2093 /* Read PC value of parent. */
2094 CORE_ADDR parent_pc
= regcache_read_pc (parent_regcache
);
2096 displaced_debug_printf ("write child pc from %s to %s",
2098 regcache_read_pc (child_regcache
)),
2099 paddress (gdbarch
, parent_pc
));
2101 regcache_write_pc (child_regcache
, parent_pc
);
2107 /* Data to be passed around while handling an event. This data is
2108 discarded between events. */
2109 struct execution_control_state
2111 explicit execution_control_state (thread_info
*thr
= nullptr)
2112 : ptid (thr
== nullptr ? null_ptid
: thr
->ptid
),
2117 process_stratum_target
*target
= nullptr;
2119 /* The thread that got the event, if this was a thread event; NULL
2121 struct thread_info
*event_thread
;
2123 struct target_waitstatus ws
;
2124 int stop_func_filled_in
= 0;
2125 CORE_ADDR stop_func_alt_start
= 0;
2126 CORE_ADDR stop_func_start
= 0;
2127 CORE_ADDR stop_func_end
= 0;
2128 const char *stop_func_name
= nullptr;
2129 int wait_some_more
= 0;
2131 /* True if the event thread hit the single-step breakpoint of
2132 another thread. Thus the event doesn't cause a stop, the thread
2133 needs to be single-stepped past the single-step breakpoint before
2134 we can switch back to the original stepping thread. */
2135 int hit_singlestep_breakpoint
= 0;
2138 static void keep_going_pass_signal (struct execution_control_state
*ecs
);
2139 static void prepare_to_wait (struct execution_control_state
*ecs
);
2140 static bool keep_going_stepped_thread (struct thread_info
*tp
);
2141 static step_over_what
thread_still_needs_step_over (struct thread_info
*tp
);
2143 /* Are there any pending step-over requests? If so, run all we can
2144 now and return true. Otherwise, return false. */
2147 start_step_over (void)
2149 INFRUN_SCOPED_DEBUG_ENTER_EXIT
;
2151 /* Don't start a new step-over if we already have an in-line
2152 step-over operation ongoing. */
2153 if (step_over_info_valid_p ())
2156 /* Steal the global thread step over chain. As we try to initiate displaced
2157 steps, threads will be enqueued in the global chain if no buffers are
2158 available. If we iterated on the global chain directly, we might iterate
2160 thread_step_over_list threads_to_step
2161 = std::move (global_thread_step_over_list
);
2163 infrun_debug_printf ("stealing global queue of threads to step, length = %d",
2164 thread_step_over_chain_length (threads_to_step
));
2166 bool started
= false;
2168 /* On scope exit (whatever the reason, return or exception), if there are
2169 threads left in the THREADS_TO_STEP chain, put back these threads in the
2173 if (threads_to_step
.empty ())
2174 infrun_debug_printf ("step-over queue now empty");
2177 infrun_debug_printf ("putting back %d threads to step in global queue",
2178 thread_step_over_chain_length (threads_to_step
));
2180 global_thread_step_over_chain_enqueue_chain
2181 (std::move (threads_to_step
));
2185 thread_step_over_list_safe_range range
2186 = make_thread_step_over_list_safe_range (threads_to_step
);
2188 for (thread_info
*tp
: range
)
2190 step_over_what step_what
;
2191 int must_be_in_line
;
2193 gdb_assert (!tp
->stop_requested
);
2195 if (tp
->inf
->displaced_step_state
.unavailable
)
2197 /* The arch told us to not even try preparing another displaced step
2198 for this inferior. Just leave the thread in THREADS_TO_STEP, it
2199 will get moved to the global chain on scope exit. */
2203 if (tp
->inf
->thread_waiting_for_vfork_done
!= nullptr)
2205 /* When we stop all threads, handling a vfork, any thread in the step
2206 over chain remains there. A user could also try to continue a
2207 thread stopped at a breakpoint while another thread is waiting for
2208 a vfork-done event. In any case, we don't want to start a step
2213 /* Remove thread from the THREADS_TO_STEP chain. If anything goes wrong
2214 while we try to prepare the displaced step, we don't add it back to
2215 the global step over chain. This is to avoid a thread staying in the
2216 step over chain indefinitely if something goes wrong when resuming it
2217 If the error is intermittent and it still needs a step over, it will
2218 get enqueued again when we try to resume it normally. */
2219 threads_to_step
.erase (threads_to_step
.iterator_to (*tp
));
2221 step_what
= thread_still_needs_step_over (tp
);
2222 must_be_in_line
= ((step_what
& STEP_OVER_WATCHPOINT
)
2223 || ((step_what
& STEP_OVER_BREAKPOINT
)
2224 && !use_displaced_stepping (tp
)));
2226 /* We currently stop all threads of all processes to step-over
2227 in-line. If we need to start a new in-line step-over, let
2228 any pending displaced steps finish first. */
2229 if (must_be_in_line
&& displaced_step_in_progress_any_thread ())
2231 global_thread_step_over_chain_enqueue (tp
);
2235 if (tp
->control
.trap_expected
2237 || tp
->executing ())
2239 internal_error ("[%s] has inconsistent state: "
2240 "trap_expected=%d, resumed=%d, executing=%d\n",
2241 tp
->ptid
.to_string ().c_str (),
2242 tp
->control
.trap_expected
,
2247 infrun_debug_printf ("resuming [%s] for step-over",
2248 tp
->ptid
.to_string ().c_str ());
2250 /* keep_going_pass_signal skips the step-over if the breakpoint
2251 is no longer inserted. In all-stop, we want to keep looking
2252 for a thread that needs a step-over instead of resuming TP,
2253 because we wouldn't be able to resume anything else until the
2254 target stops again. In non-stop, the resume always resumes
2255 only TP, so it's OK to let the thread resume freely. */
2256 if (!target_is_non_stop_p () && !step_what
)
2259 switch_to_thread (tp
);
2260 execution_control_state
ecs (tp
);
2261 keep_going_pass_signal (&ecs
);
2263 if (!ecs
.wait_some_more
)
2264 error (_("Command aborted."));
2266 /* If the thread's step over could not be initiated because no buffers
2267 were available, it was re-added to the global step over chain. */
2270 infrun_debug_printf ("[%s] was resumed.",
2271 tp
->ptid
.to_string ().c_str ());
2272 gdb_assert (!thread_is_in_step_over_chain (tp
));
2276 infrun_debug_printf ("[%s] was NOT resumed.",
2277 tp
->ptid
.to_string ().c_str ());
2278 gdb_assert (thread_is_in_step_over_chain (tp
));
2281 /* If we started a new in-line step-over, we're done. */
2282 if (step_over_info_valid_p ())
2284 gdb_assert (tp
->control
.trap_expected
);
2289 if (!target_is_non_stop_p ())
2291 /* On all-stop, shouldn't have resumed unless we needed a
2293 gdb_assert (tp
->control
.trap_expected
2294 || tp
->step_after_step_resume_breakpoint
);
2296 /* With remote targets (at least), in all-stop, we can't
2297 issue any further remote commands until the program stops
2303 /* Either the thread no longer needed a step-over, or a new
2304 displaced stepping sequence started. Even in the latter
2305 case, continue looking. Maybe we can also start another
2306 displaced step on a thread of other process. */
2312 /* Update global variables holding ptids to hold NEW_PTID if they were
2313 holding OLD_PTID. */
2315 infrun_thread_ptid_changed (process_stratum_target
*target
,
2316 ptid_t old_ptid
, ptid_t new_ptid
)
2318 if (inferior_ptid
== old_ptid
2319 && current_inferior ()->process_target () == target
)
2320 inferior_ptid
= new_ptid
;
/* Possible values for the "scheduler-locking" setting, and the
   current mode.  The enum array is NULL-terminated for use with
   add_setshow_enum_cmd.  */

static const char schedlock_off[] = "off";
static const char schedlock_on[] = "on";
static const char schedlock_step[] = "step";
static const char schedlock_replay[] = "replay";
static const char *const scheduler_enums[] = {
  schedlock_off,
  schedlock_on,
  schedlock_step,
  schedlock_replay,
  nullptr
};
static const char *scheduler_mode = schedlock_replay;
2338 show_scheduler_mode (struct ui_file
*file
, int from_tty
,
2339 struct cmd_list_element
*c
, const char *value
)
2342 _("Mode for locking scheduler "
2343 "during execution is \"%s\".\n"),
2348 set_schedlock_func (const char *args
, int from_tty
, struct cmd_list_element
*c
)
2350 if (!target_can_lock_scheduler ())
2352 scheduler_mode
= schedlock_off
;
2353 error (_("Target '%s' cannot support this command."),
2354 target_shortname ());
/* True if execution commands resume all threads of all processes by
   default; otherwise, resume only threads of the current inferior
   process.  */
bool sched_multi = false;
2363 /* Try to setup for software single stepping. Return true if target_resume()
2364 should use hardware single step.
2366 GDBARCH the current gdbarch. */
2369 maybe_software_singlestep (struct gdbarch
*gdbarch
)
2371 bool hw_step
= true;
2373 if (execution_direction
== EXEC_FORWARD
2374 && gdbarch_software_single_step_p (gdbarch
))
2375 hw_step
= !insert_single_step_breakpoints (gdbarch
);
2383 user_visible_resume_ptid (int step
)
2389 /* With non-stop mode on, threads are always handled
2391 resume_ptid
= inferior_ptid
;
2393 else if ((scheduler_mode
== schedlock_on
)
2394 || (scheduler_mode
== schedlock_step
&& step
))
2396 /* User-settable 'scheduler' mode requires solo thread
2398 resume_ptid
= inferior_ptid
;
2400 else if ((scheduler_mode
== schedlock_replay
)
2401 && target_record_will_replay (minus_one_ptid
, execution_direction
))
2403 /* User-settable 'scheduler' mode requires solo thread resume in replay
2405 resume_ptid
= inferior_ptid
;
2407 else if (inferior_ptid
!= null_ptid
2408 && inferior_thread ()->control
.in_cond_eval
)
2410 /* The inferior thread is evaluating a BP condition. Other threads
2411 might be stopped or running and we do not want to change their
2412 state, thus, resume only the current thread. */
2413 resume_ptid
= inferior_ptid
;
2415 else if (!sched_multi
&& target_supports_multi_process ())
2417 /* Resume all threads of the current process (and none of other
2419 resume_ptid
= ptid_t (inferior_ptid
.pid ());
2423 /* Resume all threads of all processes. */
2424 resume_ptid
= RESUME_ALL
;
2432 process_stratum_target
*
2433 user_visible_resume_target (ptid_t resume_ptid
)
2435 return (resume_ptid
== minus_one_ptid
&& sched_multi
2437 : current_inferior ()->process_target ());
2440 /* Find a thread from the inferiors that we'll resume that is waiting
2441 for a vfork-done event. */
2443 static thread_info
*
2444 find_thread_waiting_for_vfork_done ()
2446 gdb_assert (!target_is_non_stop_p ());
2450 for (inferior
*inf
: all_non_exited_inferiors ())
2451 if (inf
->thread_waiting_for_vfork_done
!= nullptr)
2452 return inf
->thread_waiting_for_vfork_done
;
2456 inferior
*cur_inf
= current_inferior ();
2457 if (cur_inf
->thread_waiting_for_vfork_done
!= nullptr)
2458 return cur_inf
->thread_waiting_for_vfork_done
;
2463 /* Return a ptid representing the set of threads that we will resume,
2464 in the perspective of the target, assuming run control handling
2465 does not require leaving some threads stopped (e.g., stepping past
2466 breakpoint). USER_STEP indicates whether we're about to start the
2467 target for a stepping command. */
2470 internal_resume_ptid (int user_step
)
2472 /* In non-stop, we always control threads individually. Note that
2473 the target may always work in non-stop mode even with "set
2474 non-stop off", in which case user_visible_resume_ptid could
2475 return a wildcard ptid. */
2476 if (target_is_non_stop_p ())
2477 return inferior_ptid
;
2479 /* The rest of the function assumes non-stop==off and
2480 target-non-stop==off.
2482 If a thread is waiting for a vfork-done event, it means breakpoints are out
2483 for this inferior (well, program space in fact). We don't want to resume
2484 any thread other than the one waiting for vfork done, otherwise these other
2485 threads could miss breakpoints. So if a thread in the resumption set is
2486 waiting for a vfork-done event, resume only that thread.
2488 The resumption set width depends on whether schedule-multiple is on or off.
2490 Note that if the target_resume interface was more flexible, we could be
2491 smarter here when schedule-multiple is on. For example, imagine 3
2492 inferiors with 2 threads each (1.1, 1.2, 2.1, 2.2, 3.1 and 3.2). Threads
2493 2.1 and 3.2 are both waiting for a vfork-done event. Then we could ask the
2494 target(s) to resume:
2496 - All threads of inferior 1
2500 Since we don't have that flexibility (we can only pass one ptid), just
2501 resume the first thread waiting for a vfork-done event we find (e.g. thread
2503 thread_info
*thr
= find_thread_waiting_for_vfork_done ();
2506 /* If we have a thread that is waiting for a vfork-done event,
2507 then we should have switched to it earlier. Calling
2508 target_resume with thread scope is only possible when the
2509 current thread matches the thread scope. */
2510 gdb_assert (thr
->ptid
== inferior_ptid
);
2511 gdb_assert (thr
->inf
->process_target ()
2512 == inferior_thread ()->inf
->process_target ());
2516 return user_visible_resume_ptid (user_step
);
2519 /* Wrapper for target_resume, that handles infrun-specific
2523 do_target_resume (ptid_t resume_ptid
, bool step
, enum gdb_signal sig
)
2525 struct thread_info
*tp
= inferior_thread ();
2527 gdb_assert (!tp
->stop_requested
);
2529 /* Install inferior's terminal modes. */
2530 target_terminal::inferior ();
2532 /* Avoid confusing the next resume, if the next stop/resume
2533 happens to apply to another thread. */
2534 tp
->set_stop_signal (GDB_SIGNAL_0
);
2536 /* Advise target which signals may be handled silently.
2538 If we have removed breakpoints because we are stepping over one
2539 in-line (in any thread), we need to receive all signals to avoid
2540 accidentally skipping a breakpoint during execution of a signal
2543 Likewise if we're displaced stepping, otherwise a trap for a
2544 breakpoint in a signal handler might be confused with the
2545 displaced step finishing. We don't make the displaced_step_finish
2546 step distinguish the cases instead, because:
2548 - a backtrace while stopped in the signal handler would show the
2549 scratch pad as frame older than the signal handler, instead of
2550 the real mainline code.
2552 - when the thread is later resumed, the signal handler would
2553 return to the scratch pad area, which would no longer be
2555 if (step_over_info_valid_p ()
2556 || displaced_step_in_progress (tp
->inf
))
2557 target_pass_signals ({});
2559 target_pass_signals (signal_pass
);
2561 /* Request that the target report thread-{created,cloned,exited}
2562 events in the following situations:
2564 - If we are performing an in-line step-over-breakpoint, then we
2565 will remove a breakpoint from the target and only run the
2566 current thread. We don't want any new thread (spawned by the
2567 step) to start running, as it might miss the breakpoint. We
2568 need to clear the step-over state if the stepped thread exits,
2569 so we also enable thread-exit events.
2571 - If we are stepping over a breakpoint out of line (displaced
2572 stepping) then we won't remove a breakpoint from the target,
2573 but, if the step spawns a new clone thread, then we will need
2574 to fixup the $pc address in the clone child too, so we need it
2575 to start stopped. We need to release the displaced stepping
2576 buffer if the stepped thread exits, so we also enable
2579 - If scheduler-locking applies, threads that the current thread
2580 spawns should remain halted. It's not strictly necessary to
2581 enable thread-exit events in this case, but it doesn't hurt.
2583 if (step_over_info_valid_p ()
2584 || displaced_step_in_progress_thread (tp
)
2585 || schedlock_applies (tp
))
2587 gdb_thread_options options
2588 = GDB_THREAD_OPTION_CLONE
| GDB_THREAD_OPTION_EXIT
;
2589 if (target_supports_set_thread_options (options
))
2590 tp
->set_thread_options (options
);
2592 target_thread_events (true);
2594 else if (tp
->thread_fsm () != nullptr)
2596 gdb_thread_options options
= GDB_THREAD_OPTION_EXIT
;
2597 if (target_supports_set_thread_options (options
))
2598 tp
->set_thread_options (options
);
2600 target_thread_events (true);
2604 if (target_supports_set_thread_options (0))
2605 tp
->set_thread_options (0);
2608 process_stratum_target
*resume_target
= tp
->inf
->process_target ();
2609 if (!any_thread_needs_target_thread_events (resume_target
,
2611 target_thread_events (false);
2615 /* If we're resuming more than one thread simultaneously, then any
2616 thread other than the leader is being set to run free. Clear any
2617 previous thread option for those threads. */
2618 if (resume_ptid
!= inferior_ptid
&& target_supports_set_thread_options (0))
2620 process_stratum_target
*resume_target
= tp
->inf
->process_target ();
2621 for (thread_info
*thr_iter
: all_non_exited_threads (resume_target
,
2624 thr_iter
->set_thread_options (0);
2627 infrun_debug_printf ("resume_ptid=%s, step=%d, sig=%s",
2628 resume_ptid
.to_string ().c_str (),
2629 step
, gdb_signal_to_symbol_string (sig
));
2631 target_resume (resume_ptid
, step
, sig
);
2634 /* Resume the inferior. SIG is the signal to give the inferior
2635 (GDB_SIGNAL_0 for none). Note: don't call this directly; instead
2636 call 'resume', which handles exceptions. */
2639 resume_1 (enum gdb_signal sig
)
2641 struct thread_info
*tp
= inferior_thread ();
2642 regcache
*regcache
= get_thread_regcache (tp
);
2643 struct gdbarch
*gdbarch
= regcache
->arch ();
2645 /* This represents the user's step vs continue request. When
2646 deciding whether "set scheduler-locking step" applies, it's the
2647 user's intention that counts. */
2648 const int user_step
= tp
->control
.stepping_command
;
2649 /* This represents what we'll actually request the target to do.
2650 This can decay from a step to a continue, if e.g., we need to
2651 implement single-stepping with breakpoints (software
2655 gdb_assert (!tp
->stop_requested
);
2656 gdb_assert (!thread_is_in_step_over_chain (tp
));
2658 if (tp
->has_pending_waitstatus ())
2661 ("thread %s has pending wait "
2662 "status %s (currently_stepping=%d).",
2663 tp
->ptid
.to_string ().c_str (),
2664 tp
->pending_waitstatus ().to_string ().c_str (),
2665 currently_stepping (tp
));
2667 tp
->inf
->process_target ()->threads_executing
= true;
2668 tp
->set_resumed (true);
2670 /* FIXME: What should we do if we are supposed to resume this
2671 thread with a signal? Maybe we should maintain a queue of
2672 pending signals to deliver. */
2673 if (sig
!= GDB_SIGNAL_0
)
2675 warning (_("Couldn't deliver signal %s to %s."),
2676 gdb_signal_to_name (sig
),
2677 tp
->ptid
.to_string ().c_str ());
2680 tp
->set_stop_signal (GDB_SIGNAL_0
);
2682 if (target_can_async_p ())
2684 target_async (true);
2685 /* Tell the event loop we have an event to process. */
2686 mark_async_event_handler (infrun_async_inferior_event_token
);
2691 tp
->stepped_breakpoint
= 0;
2693 /* Depends on stepped_breakpoint. */
2694 step
= currently_stepping (tp
);
2696 if (current_inferior ()->thread_waiting_for_vfork_done
!= nullptr)
2698 /* Don't try to single-step a vfork parent that is waiting for
2699 the child to get out of the shared memory region (by exec'ing
2700 or exiting). This is particularly important on software
2701 single-step archs, as the child process would trip on the
2702 software single step breakpoint inserted for the parent
2703 process. Since the parent will not actually execute any
2704 instruction until the child is out of the shared region (such
2705 are vfork's semantics), it is safe to simply continue it.
2706 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2707 the parent, and tell it to `keep_going', which automatically
2708 re-sets it stepping. */
2709 infrun_debug_printf ("resume : clear step");
2713 CORE_ADDR pc
= regcache_read_pc (regcache
);
2715 infrun_debug_printf ("step=%d, signal=%s, trap_expected=%d, "
2716 "current thread [%s] at %s",
2717 step
, gdb_signal_to_symbol_string (sig
),
2718 tp
->control
.trap_expected
,
2719 inferior_ptid
.to_string ().c_str (),
2720 paddress (gdbarch
, pc
));
2722 const address_space
*aspace
= tp
->inf
->aspace
.get ();
2724 /* Normally, by the time we reach `resume', the breakpoints are either
2725 removed or inserted, as appropriate. The exception is if we're sitting
2726 at a permanent breakpoint; we need to step over it, but permanent
2727 breakpoints can't be removed. So we have to test for it here. */
2728 if (breakpoint_here_p (aspace
, pc
) == permanent_breakpoint_here
)
2730 if (sig
!= GDB_SIGNAL_0
)
2732 /* We have a signal to pass to the inferior. The resume
2733 may, or may not take us to the signal handler. If this
2734 is a step, we'll need to stop in the signal handler, if
2735 there's one, (if the target supports stepping into
2736 handlers), or in the next mainline instruction, if
2737 there's no handler. If this is a continue, we need to be
2738 sure to run the handler with all breakpoints inserted.
2739 In all cases, set a breakpoint at the current address
2740 (where the handler returns to), and once that breakpoint
2741 is hit, resume skipping the permanent breakpoint. If
2742 that breakpoint isn't hit, then we've stepped into the
2743 signal handler (or hit some other event). We'll delete
2744 the step-resume breakpoint then. */
2746 infrun_debug_printf ("resume: skipping permanent breakpoint, "
2747 "deliver signal first");
2749 clear_step_over_info ();
2750 tp
->control
.trap_expected
= 0;
2752 if (tp
->control
.step_resume_breakpoint
== nullptr)
2754 /* Set a "high-priority" step-resume, as we don't want
2755 user breakpoints at PC to trigger (again) when this
2757 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2758 gdb_assert (tp
->control
.step_resume_breakpoint
->first_loc ()
2761 tp
->step_after_step_resume_breakpoint
= step
;
2764 insert_breakpoints ();
2768 /* There's no signal to pass, we can go ahead and skip the
2769 permanent breakpoint manually. */
2770 infrun_debug_printf ("skipping permanent breakpoint");
2771 gdbarch_skip_permanent_breakpoint (gdbarch
, regcache
);
2772 /* Update pc to reflect the new address from which we will
2773 execute instructions. */
2774 pc
= regcache_read_pc (regcache
);
2778 /* We've already advanced the PC, so the stepping part
2779 is done. Now we need to arrange for a trap to be
2780 reported to handle_inferior_event. Set a breakpoint
2781 at the current PC, and run to it. Don't update
2782 prev_pc, because if we end in
2783 switch_back_to_stepped_thread, we want the "expected
2784 thread advanced also" branch to be taken. IOW, we
2785 don't want this thread to step further from PC
2787 gdb_assert (!step_over_info_valid_p ());
2788 insert_single_step_breakpoint (gdbarch
, aspace
, pc
);
2789 insert_breakpoints ();
2791 resume_ptid
= internal_resume_ptid (user_step
);
2792 do_target_resume (resume_ptid
, false, GDB_SIGNAL_0
);
2793 tp
->set_resumed (true);
2799 /* If we have a breakpoint to step over, make sure to do a single
2800 step only. Same if we have software watchpoints. */
2801 if (tp
->control
.trap_expected
|| bpstat_should_step ())
2802 tp
->control
.may_range_step
= 0;
2804 /* If displaced stepping is enabled, step over breakpoints by executing a
2805 copy of the instruction at a different address.
2807 We can't use displaced stepping when we have a signal to deliver;
2808 the comments for displaced_step_prepare explain why. The
2809 comments in the handle_inferior event for dealing with 'random
2810 signals' explain what we do instead.
2812 We can't use displaced stepping when we are waiting for vfork_done
2813 event, displaced stepping breaks the vfork child similarly as single
2814 step software breakpoint. */
2815 if (tp
->control
.trap_expected
2816 && use_displaced_stepping (tp
)
2817 && !step_over_info_valid_p ()
2818 && sig
== GDB_SIGNAL_0
2819 && current_inferior ()->thread_waiting_for_vfork_done
== nullptr)
2821 displaced_step_prepare_status prepare_status
2822 = displaced_step_prepare (tp
);
2824 if (prepare_status
== DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
)
2826 infrun_debug_printf ("Got placed in step-over queue");
2828 tp
->control
.trap_expected
= 0;
2831 else if (prepare_status
== DISPLACED_STEP_PREPARE_STATUS_CANT
)
2833 /* Fallback to stepping over the breakpoint in-line. */
2835 if (target_is_non_stop_p ())
2836 stop_all_threads ("displaced stepping falling back on inline stepping");
2838 set_step_over_info (aspace
, regcache_read_pc (regcache
), 0,
2841 step
= maybe_software_singlestep (gdbarch
);
2843 insert_breakpoints ();
2845 else if (prepare_status
== DISPLACED_STEP_PREPARE_STATUS_OK
)
2847 /* Update pc to reflect the new address from which we will
2848 execute instructions due to displaced stepping. */
2849 pc
= regcache_read_pc (get_thread_regcache (tp
));
2851 step
= gdbarch_displaced_step_hw_singlestep (gdbarch
);
2854 gdb_assert_not_reached ("Invalid displaced_step_prepare_status "
2858 /* Do we need to do it the hard way, w/temp breakpoints? */
2860 step
= maybe_software_singlestep (gdbarch
);
2862 /* Currently, our software single-step implementation leads to different
2863 results than hardware single-stepping in one situation: when stepping
2864 into delivering a signal which has an associated signal handler,
2865 hardware single-step will stop at the first instruction of the handler,
2866 while software single-step will simply skip execution of the handler.
2868 For now, this difference in behavior is accepted since there is no
2869 easy way to actually implement single-stepping into a signal handler
2870 without kernel support.
2872 However, there is one scenario where this difference leads to follow-on
2873 problems: if we're stepping off a breakpoint by removing all breakpoints
2874 and then single-stepping. In this case, the software single-step
2875 behavior means that even if there is a *breakpoint* in the signal
2876 handler, GDB still would not stop.
2878 Fortunately, we can at least fix this particular issue. We detect
2879 here the case where we are about to deliver a signal while software
2880 single-stepping with breakpoints removed. In this situation, we
2881 revert the decisions to remove all breakpoints and insert single-
2882 step breakpoints, and instead we install a step-resume breakpoint
2883 at the current address, deliver the signal without stepping, and
2884 once we arrive back at the step-resume breakpoint, actually step
2885 over the breakpoint we originally wanted to step over. */
2886 if (thread_has_single_step_breakpoints_set (tp
)
2887 && sig
!= GDB_SIGNAL_0
2888 && step_over_info_valid_p ())
2890 /* If we have nested signals or a pending signal is delivered
2891 immediately after a handler returns, might already have
2892 a step-resume breakpoint set on the earlier handler. We cannot
2893 set another step-resume breakpoint; just continue on until the
2894 original breakpoint is hit. */
2895 if (tp
->control
.step_resume_breakpoint
== nullptr)
2897 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2898 tp
->step_after_step_resume_breakpoint
= 1;
2901 delete_single_step_breakpoints (tp
);
2903 clear_step_over_info ();
2904 tp
->control
.trap_expected
= 0;
2906 insert_breakpoints ();
2909 /* If STEP is set, it's a request to use hardware stepping
2910 facilities. But in that case, we should never
2911 use singlestep breakpoint. */
2912 gdb_assert (!(thread_has_single_step_breakpoints_set (tp
) && step
));
2914 /* Decide the set of threads to ask the target to resume. */
2915 if (tp
->control
.trap_expected
)
2917 /* We're allowing a thread to run past a breakpoint it has
2918 hit, either by single-stepping the thread with the breakpoint
2919 removed, or by displaced stepping, with the breakpoint inserted.
2920 In the former case, we need to single-step only this thread,
2921 and keep others stopped, as they can miss this breakpoint if
2922 allowed to run. That's not really a problem for displaced
2923 stepping, but, we still keep other threads stopped, in case
2924 another thread is also stopped for a breakpoint waiting for
2925 its turn in the displaced stepping queue. */
2926 resume_ptid
= inferior_ptid
;
2929 resume_ptid
= internal_resume_ptid (user_step
);
2931 if (execution_direction
!= EXEC_REVERSE
2932 && step
&& breakpoint_inserted_here_p (aspace
, pc
))
2934 /* There are two cases where we currently need to step a
2935 breakpoint instruction when we have a signal to deliver:
2937 - See handle_signal_stop where we handle random signals that
2938 could take out us out of the stepping range. Normally, in
2939 that case we end up continuing (instead of stepping) over the
2940 signal handler with a breakpoint at PC, but there are cases
2941 where we should _always_ single-step, even if we have a
2942 step-resume breakpoint, like when a software watchpoint is
2943 set. Assuming single-stepping and delivering a signal at the
2944 same time would takes us to the signal handler, then we could
2945 have removed the breakpoint at PC to step over it. However,
2946 some hardware step targets (like e.g., Mac OS) can't step
2947 into signal handlers, and for those, we need to leave the
2948 breakpoint at PC inserted, as otherwise if the handler
2949 recurses and executes PC again, it'll miss the breakpoint.
2950 So we leave the breakpoint inserted anyway, but we need to
2951 record that we tried to step a breakpoint instruction, so
2952 that adjust_pc_after_break doesn't end up confused.
2954 - In non-stop if we insert a breakpoint (e.g., a step-resume)
2955 in one thread after another thread that was stepping had been
2956 momentarily paused for a step-over. When we re-resume the
2957 stepping thread, it may be resumed from that address with a
2958 breakpoint that hasn't trapped yet. Seen with
2959 gdb.threads/non-stop-fair-events.exp, on targets that don't
2960 do displaced stepping. */
2962 infrun_debug_printf ("resume: [%s] stepped breakpoint",
2963 tp
->ptid
.to_string ().c_str ());
2965 tp
->stepped_breakpoint
= 1;
2967 /* Most targets can step a breakpoint instruction, thus
2968 executing it normally. But if this one cannot, just
2969 continue and we will hit it anyway. */
2970 if (gdbarch_cannot_step_breakpoint (gdbarch
))
2974 if (tp
->control
.may_range_step
)
2976 /* If we're resuming a thread with the PC out of the step
2977 range, then we're doing some nested/finer run control
2978 operation, like stepping the thread out of the dynamic
2979 linker or the displaced stepping scratch pad. We
2980 shouldn't have allowed a range step then. */
2981 gdb_assert (pc_in_thread_step_range (pc
, tp
));
2984 do_target_resume (resume_ptid
, step
, sig
);
2985 tp
->set_resumed (true);
2988 /* Resume the inferior. SIG is the signal to give the inferior
2989 (GDB_SIGNAL_0 for none). This is a wrapper around 'resume_1' that
2990 rolls back state on error. */
2993 resume (gdb_signal sig
)
2999 catch (const gdb_exception
&ex
)
3001 /* If resuming is being aborted for any reason, delete any
3002 single-step breakpoint resume_1 may have created, to avoid
3003 confusing the following resumption, and to avoid leaving
3004 single-step breakpoints perturbing other threads, in case
3005 we're running in non-stop mode. */
3006 if (inferior_ptid
!= null_ptid
)
3007 delete_single_step_breakpoints (inferior_thread ());
3017 /* Counter that tracks number of user visible stops. This can be used
3018 to tell whether a command has proceeded the inferior past the
3019 current location. This allows e.g., inferior function calls in
3020 breakpoint commands to not interrupt the command list. When the
3021 call finishes successfully, the inferior is standing at the same
3022 breakpoint as if nothing happened (and so we don't call
3024 static ULONGEST current_stop_id
;
3031 return current_stop_id
;
3034 /* Called when we report a user visible stop. */
3042 /* Clear out all variables saying what to do when inferior is continued.
3043 First do this, then set the ones you want, then call `proceed'. */
3046 clear_proceed_status_thread (struct thread_info
*tp
)
3048 infrun_debug_printf ("%s", tp
->ptid
.to_string ().c_str ());
3050 /* If we're starting a new sequence, then the previous finished
3051 single-step is no longer relevant. */
3052 if (tp
->has_pending_waitstatus ())
3054 if (tp
->stop_reason () == TARGET_STOPPED_BY_SINGLE_STEP
)
3056 infrun_debug_printf ("pending event of %s was a finished step. "
3058 tp
->ptid
.to_string ().c_str ());
3060 tp
->clear_pending_waitstatus ();
3061 tp
->set_stop_reason (TARGET_STOPPED_BY_NO_REASON
);
3066 ("thread %s has pending wait status %s (currently_stepping=%d).",
3067 tp
->ptid
.to_string ().c_str (),
3068 tp
->pending_waitstatus ().to_string ().c_str (),
3069 currently_stepping (tp
));
3073 /* If this signal should not be seen by program, give it zero.
3074 Used for debugging signals. */
3075 if (!signal_pass_state (tp
->stop_signal ()))
3076 tp
->set_stop_signal (GDB_SIGNAL_0
);
3078 tp
->release_thread_fsm ();
3080 tp
->control
.trap_expected
= 0;
3081 tp
->control
.step_range_start
= 0;
3082 tp
->control
.step_range_end
= 0;
3083 tp
->control
.may_range_step
= 0;
3084 tp
->control
.step_frame_id
= null_frame_id
;
3085 tp
->control
.step_stack_frame_id
= null_frame_id
;
3086 tp
->control
.step_over_calls
= STEP_OVER_UNDEBUGGABLE
;
3087 tp
->control
.step_start_function
= nullptr;
3088 tp
->stop_requested
= false;
3090 tp
->control
.stop_step
= 0;
3092 tp
->control
.proceed_to_finish
= 0;
3094 tp
->control
.stepping_command
= 0;
3096 /* Discard any remaining commands or status from previous stop. */
3097 bpstat_clear (&tp
->control
.stop_bpstat
);
3100 /* Notify the current interpreter and observers that the target is about to
3104 notify_about_to_proceed ()
3106 top_level_interpreter ()->on_about_to_proceed ();
3107 gdb::observers::about_to_proceed
.notify ();
3111 clear_proceed_status (int step
)
3113 /* With scheduler-locking replay, stop replaying other threads if we're
3114 not replaying the user-visible resume ptid.
3116 This is a convenience feature to not require the user to explicitly
3117 stop replaying the other threads. We're assuming that the user's
3118 intent is to resume tracing the recorded process. */
3119 if (!non_stop
&& scheduler_mode
== schedlock_replay
3120 && target_record_is_replaying (minus_one_ptid
)
3121 && !target_record_will_replay (user_visible_resume_ptid (step
),
3122 execution_direction
))
3123 target_record_stop_replaying ();
3125 if (!non_stop
&& inferior_ptid
!= null_ptid
)
3127 ptid_t resume_ptid
= user_visible_resume_ptid (step
);
3128 process_stratum_target
*resume_target
3129 = user_visible_resume_target (resume_ptid
);
3131 /* In all-stop mode, delete the per-thread status of all threads
3132 we're about to resume, implicitly and explicitly. */
3133 for (thread_info
*tp
: all_non_exited_threads (resume_target
, resume_ptid
))
3134 clear_proceed_status_thread (tp
);
3137 if (inferior_ptid
!= null_ptid
)
3139 struct inferior
*inferior
;
3143 /* If in non-stop mode, only delete the per-thread status of
3144 the current thread. */
3145 clear_proceed_status_thread (inferior_thread ());
3148 inferior
= current_inferior ();
3149 inferior
->control
.stop_soon
= NO_STOP_QUIETLY
;
3152 notify_about_to_proceed ();
3155 /* Returns true if TP is still stopped at a breakpoint that needs
3156 stepping-over in order to make progress. If the breakpoint is gone
3157 meanwhile, we can skip the whole step-over dance. */
3160 thread_still_needs_step_over_bp (struct thread_info
*tp
)
3162 if (tp
->stepping_over_breakpoint
)
3164 struct regcache
*regcache
= get_thread_regcache (tp
);
3166 if (breakpoint_here_p (tp
->inf
->aspace
.get (),
3167 regcache_read_pc (regcache
))
3168 == ordinary_breakpoint_here
)
3171 tp
->stepping_over_breakpoint
= 0;
3177 /* Check whether thread TP still needs to start a step-over in order
3178 to make progress when resumed. Returns an bitwise or of enum
3179 step_over_what bits, indicating what needs to be stepped over. */
3181 static step_over_what
3182 thread_still_needs_step_over (struct thread_info
*tp
)
3184 step_over_what what
= 0;
3186 if (thread_still_needs_step_over_bp (tp
))
3187 what
|= STEP_OVER_BREAKPOINT
;
3189 if (tp
->stepping_over_watchpoint
3190 && !target_have_steppable_watchpoint ())
3191 what
|= STEP_OVER_WATCHPOINT
;
3196 /* Returns true if scheduler locking applies. STEP indicates whether
3197 we're about to do a step/next-like command to a thread. */
3200 schedlock_applies (struct thread_info
*tp
)
3202 return (scheduler_mode
== schedlock_on
3203 || (scheduler_mode
== schedlock_step
3204 && tp
->control
.stepping_command
)
3205 || (scheduler_mode
== schedlock_replay
3206 && target_record_will_replay (minus_one_ptid
,
3207 execution_direction
)));
3210 /* When FORCE_P is false, set process_stratum_target::COMMIT_RESUMED_STATE
3211 in all target stacks that have threads executing and don't have threads
3212 with pending events.
3214 When FORCE_P is true, set process_stratum_target::COMMIT_RESUMED_STATE
3215 in all target stacks that have threads executing regardless of whether
3216 there are pending events or not.
3218 Passing FORCE_P as false makes sense when GDB is going to wait for
3219 events from all threads and will therefore spot the pending events.
3220 However, if GDB is only going to wait for events from select threads
3221 (i.e. when performing an inferior call) then a pending event on some
3222 other thread will not be spotted, and if we fail to commit the resume
3223 state for the thread performing the inferior call, then the inferior
3224 call will never complete (or even start). */
3227 maybe_set_commit_resumed_all_targets (bool force_p
)
3229 scoped_restore_current_thread restore_thread
;
3231 for (inferior
*inf
: all_non_exited_inferiors ())
3233 process_stratum_target
*proc_target
= inf
->process_target ();
3235 if (proc_target
->commit_resumed_state
)
3237 /* We already set this in a previous iteration, via another
3238 inferior sharing the process_stratum target. */
3242 /* If the target has no resumed threads, it would be useless to
3243 ask it to commit the resumed threads. */
3244 if (!proc_target
->threads_executing
)
3246 infrun_debug_printf ("not requesting commit-resumed for target "
3247 "%s, no resumed threads",
3248 proc_target
->shortname ());
3252 /* As an optimization, if a thread from this target has some
3253 status to report, handle it before requiring the target to
3254 commit its resumed threads: handling the status might lead to
3255 resuming more threads. */
3256 if (!force_p
&& proc_target
->has_resumed_with_pending_wait_status ())
3258 infrun_debug_printf ("not requesting commit-resumed for target %s, a"
3259 " thread has a pending waitstatus",
3260 proc_target
->shortname ());
3264 switch_to_inferior_no_thread (inf
);
3266 if (!force_p
&& target_has_pending_events ())
3268 infrun_debug_printf ("not requesting commit-resumed for target %s, "
3269 "target has pending events",
3270 proc_target
->shortname ());
3274 infrun_debug_printf ("enabling commit-resumed for target %s",
3275 proc_target
->shortname ());
3277 proc_target
->commit_resumed_state
= true;
3284 maybe_call_commit_resumed_all_targets ()
3286 scoped_restore_current_thread restore_thread
;
3288 for (inferior
*inf
: all_non_exited_inferiors ())
3290 process_stratum_target
*proc_target
= inf
->process_target ();
3292 if (!proc_target
->commit_resumed_state
)
3295 switch_to_inferior_no_thread (inf
);
3297 infrun_debug_printf ("calling commit_resumed for target %s",
3298 proc_target
->shortname());
3300 target_commit_resumed ();
/* To track nesting of scoped_disable_commit_resumed objects, ensuring
   that only the outermost one attempts to re-enable
   commit-resumed.  */
static bool enable_commit_resumed = true;
3311 scoped_disable_commit_resumed::scoped_disable_commit_resumed
3312 (const char *reason
)
3313 : m_reason (reason
),
3314 m_prev_enable_commit_resumed (enable_commit_resumed
)
3316 infrun_debug_printf ("reason=%s", m_reason
);
3318 enable_commit_resumed
= false;
3320 for (inferior
*inf
: all_non_exited_inferiors ())
3322 process_stratum_target
*proc_target
= inf
->process_target ();
3324 if (m_prev_enable_commit_resumed
)
3326 /* This is the outermost instance: force all
3327 COMMIT_RESUMED_STATE to false. */
3328 proc_target
->commit_resumed_state
= false;
3332 /* This is not the outermost instance, we expect
3333 COMMIT_RESUMED_STATE to have been cleared by the
3334 outermost instance. */
3335 gdb_assert (!proc_target
->commit_resumed_state
);
3343 scoped_disable_commit_resumed::reset ()
3349 infrun_debug_printf ("reason=%s", m_reason
);
3351 gdb_assert (!enable_commit_resumed
);
3353 enable_commit_resumed
= m_prev_enable_commit_resumed
;
3355 if (m_prev_enable_commit_resumed
)
3357 /* This is the outermost instance, re-enable
3358 COMMIT_RESUMED_STATE on the targets where it's possible. */
3359 maybe_set_commit_resumed_all_targets (false);
3363 /* This is not the outermost instance, we expect
3364 COMMIT_RESUMED_STATE to still be false. */
3365 for (inferior
*inf
: all_non_exited_inferiors ())
3367 process_stratum_target
*proc_target
= inf
->process_target ();
3368 gdb_assert (!proc_target
->commit_resumed_state
);
3375 scoped_disable_commit_resumed::~scoped_disable_commit_resumed ()
3383 scoped_disable_commit_resumed::reset_and_commit ()
3386 maybe_call_commit_resumed_all_targets ();
3391 scoped_enable_commit_resumed::scoped_enable_commit_resumed
3392 (const char *reason
, bool force_p
)
3393 : m_reason (reason
),
3394 m_prev_enable_commit_resumed (enable_commit_resumed
)
3396 infrun_debug_printf ("reason=%s", m_reason
);
3398 if (!enable_commit_resumed
)
3400 enable_commit_resumed
= true;
3402 /* Re-enable COMMIT_RESUMED_STATE on the targets where it's
3404 maybe_set_commit_resumed_all_targets (force_p
);
3406 maybe_call_commit_resumed_all_targets ();
3412 scoped_enable_commit_resumed::~scoped_enable_commit_resumed ()
3414 infrun_debug_printf ("reason=%s", m_reason
);
3416 gdb_assert (enable_commit_resumed
);
3418 enable_commit_resumed
= m_prev_enable_commit_resumed
;
3420 if (!enable_commit_resumed
)
3422 /* Force all COMMIT_RESUMED_STATE back to false. */
3423 for (inferior
*inf
: all_non_exited_inferiors ())
3425 process_stratum_target
*proc_target
= inf
->process_target ();
3426 proc_target
->commit_resumed_state
= false;
3431 /* Check that all the targets we're about to resume are in non-stop
3432 mode. Ideally, we'd only care whether all targets support
3433 target-async, but we're not there yet. E.g., stop_all_threads
3434 doesn't know how to handle all-stop targets. Also, the remote
3435 protocol in all-stop mode is synchronous, irrespective of
3436 target-async, which means that things like a breakpoint re-set
3437 triggered by one target would try to read memory from all targets
3441 check_multi_target_resumption (process_stratum_target
*resume_target
)
3443 if (!non_stop
&& resume_target
== nullptr)
3445 scoped_restore_current_thread restore_thread
;
3447 /* This is used to track whether we're resuming more than one
3449 process_stratum_target
*first_connection
= nullptr;
3451 /* The first inferior we see with a target that does not work in
3452 always-non-stop mode. */
3453 inferior
*first_not_non_stop
= nullptr;
3455 for (inferior
*inf
: all_non_exited_inferiors ())
3457 switch_to_inferior_no_thread (inf
);
3459 if (!target_has_execution ())
3462 process_stratum_target
*proc_target
3463 = current_inferior ()->process_target();
3465 if (!target_is_non_stop_p ())
3466 first_not_non_stop
= inf
;
3468 if (first_connection
== nullptr)
3469 first_connection
= proc_target
;
3470 else if (first_connection
!= proc_target
3471 && first_not_non_stop
!= nullptr)
3473 switch_to_inferior_no_thread (first_not_non_stop
);
3475 proc_target
= current_inferior ()->process_target();
3477 error (_("Connection %d (%s) does not support "
3478 "multi-target resumption."),
3479 proc_target
->connection_number
,
3480 make_target_connection_string (proc_target
).c_str ());
3486 /* Helper function for `proceed`. Check if thread TP is suitable for
3487 resuming, and, if it is, switch to the thread and call
3488 `keep_going_pass_signal`. If TP is not suitable for resuming then this
3489 function will just return without switching threads. */
3492 proceed_resume_thread_checked (thread_info
*tp
)
3494 if (!tp
->inf
->has_execution ())
3496 infrun_debug_printf ("[%s] target has no execution",
3497 tp
->ptid
.to_string ().c_str ());
3503 infrun_debug_printf ("[%s] resumed",
3504 tp
->ptid
.to_string ().c_str ());
3505 gdb_assert (tp
->executing () || tp
->has_pending_waitstatus ());
3509 if (thread_is_in_step_over_chain (tp
))
3511 infrun_debug_printf ("[%s] needs step-over",
3512 tp
->ptid
.to_string ().c_str ());
3516 /* When handling a vfork GDB removes all breakpoints from the program
3517 space in which the vfork is being handled. If we are following the
3518 parent then GDB will set the thread_waiting_for_vfork_done member of
3519 the parent inferior. In this case we should take care to only resume
3520 the vfork parent thread, the kernel will hold this thread suspended
3521 until the vfork child has exited or execd, at which point the parent
3522 will be resumed and a VFORK_DONE event sent to GDB. */
3523 if (tp
->inf
->thread_waiting_for_vfork_done
!= nullptr)
3525 if (target_is_non_stop_p ())
3527 /* For non-stop targets, regardless of whether GDB is using
3528 all-stop or non-stop mode, threads are controlled
3531 When a thread is handling a vfork, breakpoints are removed
3532 from the inferior (well, program space in fact), so it is
3533 critical that we don't try to resume any thread other than the
3535 if (tp
!= tp
->inf
->thread_waiting_for_vfork_done
)
3537 infrun_debug_printf ("[%s] thread %s of this inferior is "
3538 "waiting for vfork-done",
3539 tp
->ptid
.to_string ().c_str (),
3540 tp
->inf
->thread_waiting_for_vfork_done
3541 ->ptid
.to_string ().c_str ());
3547 /* For all-stop targets, when we attempt to resume the inferior,
3548 we will only resume the vfork parent thread, this is handled
3549 in internal_resume_ptid.
3551 Additionally, we will always be called with the vfork parent
3552 thread as the current thread (TP) thanks to follow_fork, as
3553 such the following assertion should hold.
3555 Beyond this there is nothing more that needs to be done
3557 gdb_assert (tp
== tp
->inf
->thread_waiting_for_vfork_done
);
3561 /* When handling a vfork GDB removes all breakpoints from the program
3562 space in which the vfork is being handled. If we are following the
3563 child then GDB will set vfork_child member of the vfork parent
3564 inferior. Once the child has either exited or execd then GDB will
3565 detach from the parent process. Until that point GDB should not
3566 resume any thread in the parent process. */
3567 if (tp
->inf
->vfork_child
!= nullptr)
3569 infrun_debug_printf ("[%s] thread is part of a vfork parent, child is %d",
3570 tp
->ptid
.to_string ().c_str (),
3571 tp
->inf
->vfork_child
->pid
);
3575 infrun_debug_printf ("resuming %s",
3576 tp
->ptid
.to_string ().c_str ());
3578 execution_control_state
ecs (tp
);
3579 switch_to_thread (tp
);
3580 keep_going_pass_signal (&ecs
);
3581 if (!ecs
.wait_some_more
)
3582 error (_("Command aborted."));
3585 /* Basic routine for continuing the program in various fashions.
3587 ADDR is the address to resume at, or -1 for resume where stopped.
3588 SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
3589 or GDB_SIGNAL_DEFAULT for act according to how it stopped.
3591 You should call clear_proceed_status before calling proceed. */
3594 proceed (CORE_ADDR addr
, enum gdb_signal siggnal
)
3596 INFRUN_SCOPED_DEBUG_ENTER_EXIT
;
3598 struct gdbarch
*gdbarch
;
3601 /* If we're stopped at a fork/vfork, switch to either the parent or child
3602 thread as defined by the "set follow-fork-mode" command, or, if both
3603 the parent and child are controlled by GDB, and schedule-multiple is
3604 on, follow the child. If none of the above apply then we just proceed
3605 resuming the current thread. */
3606 if (!follow_fork ())
3608 /* The target for some reason decided not to resume. */
3610 if (target_can_async_p ())
3611 inferior_event_handler (INF_EXEC_COMPLETE
);
3615 /* We'll update this if & when we switch to a new thread. */
3616 update_previous_thread ();
3618 thread_info
*cur_thr
= inferior_thread ();
3619 infrun_debug_printf ("cur_thr = %s", cur_thr
->ptid
.to_string ().c_str ());
3621 regcache
*regcache
= get_thread_regcache (cur_thr
);
3622 gdbarch
= regcache
->arch ();
3623 pc
= regcache_read_pc_protected (regcache
);
3625 /* Fill in with reasonable starting values. */
3626 init_thread_stepping_state (cur_thr
);
3628 gdb_assert (!thread_is_in_step_over_chain (cur_thr
));
3631 = user_visible_resume_ptid (cur_thr
->control
.stepping_command
);
3632 process_stratum_target
*resume_target
3633 = user_visible_resume_target (resume_ptid
);
3635 check_multi_target_resumption (resume_target
);
3637 if (addr
== (CORE_ADDR
) -1)
3639 const address_space
*aspace
= cur_thr
->inf
->aspace
.get ();
3641 if (cur_thr
->stop_pc_p ()
3642 && pc
== cur_thr
->stop_pc ()
3643 && breakpoint_here_p (aspace
, pc
) == ordinary_breakpoint_here
3644 && execution_direction
!= EXEC_REVERSE
)
3645 /* There is a breakpoint at the address we will resume at,
3646 step one instruction before inserting breakpoints so that
3647 we do not stop right away (and report a second hit at this
3650 Note, we don't do this in reverse, because we won't
3651 actually be executing the breakpoint insn anyway.
3652 We'll be (un-)executing the previous instruction. */
3653 cur_thr
->stepping_over_breakpoint
= 1;
3654 else if (gdbarch_single_step_through_delay_p (gdbarch
)
3655 && gdbarch_single_step_through_delay (gdbarch
,
3656 get_current_frame ()))
3657 /* We stepped onto an instruction that needs to be stepped
3658 again before re-inserting the breakpoint, do so. */
3659 cur_thr
->stepping_over_breakpoint
= 1;
3663 regcache_write_pc (regcache
, addr
);
3666 if (siggnal
!= GDB_SIGNAL_DEFAULT
)
3667 cur_thr
->set_stop_signal (siggnal
);
3669 /* If an exception is thrown from this point on, make sure to
3670 propagate GDB's knowledge of the executing state to the
3671 frontend/user running state. */
3672 scoped_finish_thread_state
finish_state (resume_target
, resume_ptid
);
3674 /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
3675 threads (e.g., we might need to set threads stepping over
3676 breakpoints first), from the user/frontend's point of view, all
3677 threads in RESUME_PTID are now running. Unless we're calling an
3678 inferior function, as in that case we pretend the inferior
3679 doesn't run at all. */
3680 if (!cur_thr
->control
.in_infcall
)
3681 set_running (resume_target
, resume_ptid
, true);
3683 infrun_debug_printf ("addr=%s, signal=%s, resume_ptid=%s",
3684 paddress (gdbarch
, addr
),
3685 gdb_signal_to_symbol_string (siggnal
),
3686 resume_ptid
.to_string ().c_str ());
3688 annotate_starting ();
3690 /* Make sure that output from GDB appears before output from the
3692 gdb_flush (gdb_stdout
);
3694 /* Since we've marked the inferior running, give it the terminal. A
3695 QUIT/Ctrl-C from here on is forwarded to the target (which can
3696 still detect attempts to unblock a stuck connection with repeated
3697 Ctrl-C from within target_pass_ctrlc). */
3698 target_terminal::inferior ();
3700 /* In a multi-threaded task we may select another thread and
3701 then continue or step.
3703 But if a thread that we're resuming had stopped at a breakpoint,
3704 it will immediately cause another breakpoint stop without any
3705 execution (i.e. it will report a breakpoint hit incorrectly). So
3706 we must step over it first.
3708 Look for threads other than the current (TP) that reported a
3709 breakpoint hit and haven't been resumed yet since. */
3711 /* If scheduler locking applies, we can avoid iterating over all
3713 if (!non_stop
&& !schedlock_applies (cur_thr
))
3715 for (thread_info
*tp
: all_non_exited_threads (resume_target
,
3718 switch_to_thread_no_regs (tp
);
3720 /* Ignore the current thread here. It's handled
3725 if (!thread_still_needs_step_over (tp
))
3728 gdb_assert (!thread_is_in_step_over_chain (tp
));
3730 infrun_debug_printf ("need to step-over [%s] first",
3731 tp
->ptid
.to_string ().c_str ());
3733 global_thread_step_over_chain_enqueue (tp
);
3736 switch_to_thread (cur_thr
);
3739 /* Enqueue the current thread last, so that we move all other
3740 threads over their breakpoints first. */
3741 if (cur_thr
->stepping_over_breakpoint
)
3742 global_thread_step_over_chain_enqueue (cur_thr
);
3744 /* If the thread isn't started, we'll still need to set its prev_pc,
3745 so that switch_back_to_stepped_thread knows the thread hasn't
3746 advanced. Must do this before resuming any thread, as in
3747 all-stop/remote, once we resume we can't send any other packet
3748 until the target stops again. */
3749 cur_thr
->prev_pc
= regcache_read_pc_protected (regcache
);
3752 scoped_disable_commit_resumed
disable_commit_resumed ("proceeding");
3753 bool step_over_started
= start_step_over ();
3755 if (step_over_info_valid_p ())
3757 /* Either this thread started a new in-line step over, or some
3758 other thread was already doing one. In either case, don't
3759 resume anything else until the step-over is finished. */
3761 else if (step_over_started
&& !target_is_non_stop_p ())
3763 /* A new displaced stepping sequence was started. In all-stop,
3764 we can't talk to the target anymore until it next stops. */
3766 else if (!non_stop
&& target_is_non_stop_p ())
3768 INFRUN_SCOPED_DEBUG_START_END
3769 ("resuming threads, all-stop-on-top-of-non-stop");
3771 /* In all-stop, but the target is always in non-stop mode.
3772 Start all other threads that are implicitly resumed too. */
3773 for (thread_info
*tp
: all_non_exited_threads (resume_target
,
3776 switch_to_thread_no_regs (tp
);
3777 proceed_resume_thread_checked (tp
);
3781 proceed_resume_thread_checked (cur_thr
);
3783 disable_commit_resumed
.reset_and_commit ();
3786 finish_state
.release ();
3788 /* If we've switched threads above, switch back to the previously
3789 current thread. We don't want the user to see a different
3791 switch_to_thread (cur_thr
);
3793 /* Tell the event loop to wait for it to stop. If the target
3794 supports asynchronous execution, it'll do this from within
3796 if (!target_can_async_p ())
3797 mark_async_event_handler (infrun_async_inferior_event_token
);
3801 /* Start remote-debugging of a machine over a serial link. */
3804 start_remote (int from_tty
)
3806 inferior
*inf
= current_inferior ();
3807 inf
->control
.stop_soon
= STOP_QUIETLY_REMOTE
;
3809 /* Always go on waiting for the target, regardless of the mode. */
3810 /* FIXME: cagney/1999-09-23: At present it isn't possible to
3811 indicate to wait_for_inferior that a target should timeout if
3812 nothing is returned (instead of just blocking). Because of this,
3813 targets expecting an immediate response need to, internally, set
3814 things up so that the target_wait() is forced to eventually
3816 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
3817 differentiate to its caller what the state of the target is after
3818 the initial open has been performed. Here we're assuming that
3819 the target has stopped. It should be possible to eventually have
3820 target_open() return to the caller an indication that the target
3821 is currently running and GDB state should be set to the same as
3822 for an async run. */
3823 wait_for_inferior (inf
);
3825 /* Now that the inferior has stopped, do any bookkeeping like
3826 loading shared libraries. We want to do this before normal_stop,
3827 so that the displayed frame is up to date. */
3828 post_create_inferior (from_tty
, true);
3833 /* Initialize static vars when a new inferior begins. */
3836 init_wait_for_inferior (void)
3838 /* These are meaningless until the first time through wait_for_inferior. */
3840 breakpoint_init_inferior (current_inferior (), inf_starting
);
3842 clear_proceed_status (0);
3844 nullify_last_target_wait_ptid ();
3846 update_previous_thread ();
3851 static void handle_inferior_event (struct execution_control_state
*ecs
);
3853 static void handle_step_into_function (struct gdbarch
*gdbarch
,
3854 struct execution_control_state
*ecs
);
3855 static void handle_step_into_function_backward (struct gdbarch
*gdbarch
,
3856 struct execution_control_state
*ecs
);
3857 static void handle_signal_stop (struct execution_control_state
*ecs
);
3858 static void check_exception_resume (struct execution_control_state
*,
3859 const frame_info_ptr
&);
3861 static void end_stepping_range (struct execution_control_state
*ecs
);
3862 static void stop_waiting (struct execution_control_state
*ecs
);
3863 static void keep_going (struct execution_control_state
*ecs
);
3864 static void process_event_stop_test (struct execution_control_state
*ecs
);
3865 static bool switch_back_to_stepped_thread (struct execution_control_state
*ecs
);
3867 /* This function is attached as a "thread_stop_requested" observer.
3868 Cleanup local state that assumed the PTID was to be resumed, and
3869 report the stop to the frontend. */
3872 infrun_thread_stop_requested (ptid_t ptid
)
3874 process_stratum_target
*curr_target
= current_inferior ()->process_target ();
3876 /* PTID was requested to stop. If the thread was already stopped,
3877 but the user/frontend doesn't know about that yet (e.g., the
3878 thread had been temporarily paused for some step-over), set up
3879 for reporting the stop now. */
3880 for (thread_info
*tp
: all_threads (curr_target
, ptid
))
3882 if (tp
->state
!= THREAD_RUNNING
)
3884 if (tp
->executing ())
3887 /* Remove matching threads from the step-over queue, so
3888 start_step_over doesn't try to resume them
3890 if (thread_is_in_step_over_chain (tp
))
3891 global_thread_step_over_chain_remove (tp
);
3893 /* If the thread is stopped, but the user/frontend doesn't
3894 know about that yet, queue a pending event, as if the
3895 thread had just stopped now. Unless the thread already had
3897 if (!tp
->has_pending_waitstatus ())
3899 target_waitstatus ws
;
3900 ws
.set_stopped (GDB_SIGNAL_0
);
3901 tp
->set_pending_waitstatus (ws
);
3904 /* Clear the inline-frame state, since we're re-processing the
3906 clear_inline_frame_state (tp
);
3908 /* If this thread was paused because some other thread was
3909 doing an inline-step over, let that finish first. Once
3910 that happens, we'll restart all threads and consume pending
3911 stop events then. */
3912 if (step_over_info_valid_p ())
3915 /* Otherwise we can process the (new) pending event now. Set
3916 it so this pending event is considered by
3918 tp
->set_resumed (true);
3922 /* Delete the step resume, single-step and longjmp/exception resume
3923 breakpoints of TP. */
3926 delete_thread_infrun_breakpoints (struct thread_info
*tp
)
3928 delete_step_resume_breakpoint (tp
);
3929 delete_exception_resume_breakpoint (tp
);
3930 delete_single_step_breakpoints (tp
);
3933 /* If the target still has execution, call FUNC for each thread that
3934 just stopped. In all-stop, that's all the non-exited threads; in
3935 non-stop, that's the current thread, only. */
3937 typedef void (*for_each_just_stopped_thread_callback_func
)
3938 (struct thread_info
*tp
);
3941 for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func
)
3943 if (!target_has_execution () || inferior_ptid
== null_ptid
)
3946 if (target_is_non_stop_p ())
3948 /* If in non-stop mode, only the current thread stopped. */
3949 func (inferior_thread ());
3953 /* In all-stop mode, all threads have stopped. */
3954 for (thread_info
*tp
: all_non_exited_threads ())
3959 /* Delete the step resume and longjmp/exception resume breakpoints of
3960 the threads that just stopped. */
3963 delete_just_stopped_threads_infrun_breakpoints (void)
3965 for_each_just_stopped_thread (delete_thread_infrun_breakpoints
);
3968 /* Delete the single-step breakpoints of the threads that just
3972 delete_just_stopped_threads_single_step_breakpoints (void)
3974 for_each_just_stopped_thread (delete_single_step_breakpoints
);
3980 print_target_wait_results (ptid_t waiton_ptid
, ptid_t result_ptid
,
3981 const struct target_waitstatus
&ws
,
3982 process_stratum_target
*proc_target
)
3984 infrun_debug_printf ("target_wait (%s [%s], status) =",
3985 waiton_ptid
.to_string ().c_str (),
3986 target_pid_to_str (waiton_ptid
).c_str ());
3987 infrun_debug_printf (" %s [%s],",
3988 result_ptid
.to_string ().c_str (),
3989 target_pid_to_str (result_ptid
).c_str ());
3990 infrun_debug_printf (" %s", ws
.to_string ().c_str ());
3992 if (proc_target
!= nullptr)
3993 infrun_debug_printf (" from target %d (%s)",
3994 proc_target
->connection_number
,
3995 proc_target
->shortname ());
3998 /* Wrapper for print_target_wait_results above for convenience. */
4001 print_target_wait_results (ptid_t waiton_ptid
,
4002 const execution_control_state
&ecs
)
4004 print_target_wait_results (waiton_ptid
, ecs
.ptid
, ecs
.ws
, ecs
.target
);
4007 /* Select a thread at random, out of those which are resumed and have
4010 static struct thread_info
*
4011 random_pending_event_thread (inferior
*inf
, ptid_t waiton_ptid
)
4013 process_stratum_target
*proc_target
= inf
->process_target ();
4015 = proc_target
->random_resumed_with_pending_wait_status (inf
, waiton_ptid
);
4017 if (thread
== nullptr)
4019 infrun_debug_printf ("None found.");
4023 infrun_debug_printf ("Found %s.", thread
->ptid
.to_string ().c_str ());
4024 gdb_assert (thread
->resumed ());
4025 gdb_assert (thread
->has_pending_waitstatus ());
4030 /* Wrapper for target_wait that first checks whether threads have
4031 pending statuses to report before actually asking the target for
4032 more events. INF is the inferior we're using to call target_wait
4036 do_target_wait_1 (inferior
*inf
, ptid_t ptid
,
4037 target_waitstatus
*status
, target_wait_flags options
)
4039 struct thread_info
*tp
;
4041 /* We know that we are looking for an event in the target of inferior
4042 INF, but we don't know which thread the event might come from. As
4043 such we want to make sure that INFERIOR_PTID is reset so that none of
4044 the wait code relies on it - doing so is always a mistake. */
4045 switch_to_inferior_no_thread (inf
);
4047 /* First check if there is a resumed thread with a wait status
4049 if (ptid
== minus_one_ptid
|| ptid
.is_pid ())
4051 tp
= random_pending_event_thread (inf
, ptid
);
4055 infrun_debug_printf ("Waiting for specific thread %s.",
4056 ptid
.to_string ().c_str ());
4058 /* We have a specific thread to check. */
4059 tp
= inf
->find_thread (ptid
);
4060 gdb_assert (tp
!= nullptr);
4061 if (!tp
->has_pending_waitstatus ())
4066 && (tp
->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
4067 || tp
->stop_reason () == TARGET_STOPPED_BY_HW_BREAKPOINT
))
4069 struct regcache
*regcache
= get_thread_regcache (tp
);
4070 struct gdbarch
*gdbarch
= regcache
->arch ();
4074 pc
= regcache_read_pc (regcache
);
4076 if (pc
!= tp
->stop_pc ())
4078 infrun_debug_printf ("PC of %s changed. was=%s, now=%s",
4079 tp
->ptid
.to_string ().c_str (),
4080 paddress (gdbarch
, tp
->stop_pc ()),
4081 paddress (gdbarch
, pc
));
4084 else if (!breakpoint_inserted_here_p (tp
->inf
->aspace
.get (), pc
))
4086 infrun_debug_printf ("previous breakpoint of %s, at %s gone",
4087 tp
->ptid
.to_string ().c_str (),
4088 paddress (gdbarch
, pc
));
4095 infrun_debug_printf ("pending event of %s cancelled.",
4096 tp
->ptid
.to_string ().c_str ());
4098 tp
->clear_pending_waitstatus ();
4099 target_waitstatus ws
;
4101 tp
->set_pending_waitstatus (ws
);
4102 tp
->set_stop_reason (TARGET_STOPPED_BY_NO_REASON
);
4108 infrun_debug_printf ("Using pending wait status %s for %s.",
4109 tp
->pending_waitstatus ().to_string ().c_str (),
4110 tp
->ptid
.to_string ().c_str ());
4112 /* Now that we've selected our final event LWP, un-adjust its PC
4113 if it was a software breakpoint (and the target doesn't
4114 always adjust the PC itself). */
4115 if (tp
->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
4116 && !target_supports_stopped_by_sw_breakpoint ())
4118 struct regcache
*regcache
;
4119 struct gdbarch
*gdbarch
;
4122 regcache
= get_thread_regcache (tp
);
4123 gdbarch
= regcache
->arch ();
4125 decr_pc
= gdbarch_decr_pc_after_break (gdbarch
);
4130 pc
= regcache_read_pc (regcache
);
4131 regcache_write_pc (regcache
, pc
+ decr_pc
);
4135 tp
->set_stop_reason (TARGET_STOPPED_BY_NO_REASON
);
4136 *status
= tp
->pending_waitstatus ();
4137 tp
->clear_pending_waitstatus ();
4139 /* Wake up the event loop again, until all pending events are
4141 if (target_is_async_p ())
4142 mark_async_event_handler (infrun_async_inferior_event_token
);
4146 /* But if we don't find one, we'll have to wait. */
4148 /* We can't ask a non-async target to do a non-blocking wait, so this will be
4150 if (!target_can_async_p ())
4151 options
&= ~TARGET_WNOHANG
;
4153 return target_wait (ptid
, status
, options
);
4156 /* Wrapper for target_wait that first checks whether threads have
4157 pending statuses to report before actually asking the target for
4158 more events. Polls for events from all inferiors/targets. */
4161 do_target_wait (ptid_t wait_ptid
, execution_control_state
*ecs
,
4162 target_wait_flags options
)
4164 int num_inferiors
= 0;
4165 int random_selector
;
4167 /* For fairness, we pick the first inferior/target to poll at random
4168 out of all inferiors that may report events, and then continue
4169 polling the rest of the inferior list starting from that one in a
4170 circular fashion until the whole list is polled once. */
4172 ptid_t wait_ptid_pid
{wait_ptid
.pid ()};
4173 auto inferior_matches
= [&wait_ptid_pid
] (inferior
*inf
)
4175 return (inf
->process_target () != nullptr
4176 && ptid_t (inf
->pid
).matches (wait_ptid_pid
));
4179 /* First see how many matching inferiors we have. */
4180 for (inferior
*inf
: all_inferiors ())
4181 if (inferior_matches (inf
))
4184 if (num_inferiors
== 0)
4186 ecs
->ws
.set_ignore ();
4190 /* Now randomly pick an inferior out of those that matched. */
4191 random_selector
= (int)
4192 ((num_inferiors
* (double) rand ()) / (RAND_MAX
+ 1.0));
4194 if (num_inferiors
> 1)
4195 infrun_debug_printf ("Found %d inferiors, starting at #%d",
4196 num_inferiors
, random_selector
);
4198 /* Select the Nth inferior that matched. */
4200 inferior
*selected
= nullptr;
4202 for (inferior
*inf
: all_inferiors ())
4203 if (inferior_matches (inf
))
4204 if (random_selector
-- == 0)
4210 /* Now poll for events out of each of the matching inferior's
4211 targets, starting from the selected one. */
4213 auto do_wait
= [&] (inferior
*inf
)
4215 ecs
->ptid
= do_target_wait_1 (inf
, wait_ptid
, &ecs
->ws
, options
);
4216 ecs
->target
= inf
->process_target ();
4217 return (ecs
->ws
.kind () != TARGET_WAITKIND_IGNORE
);
4220 /* Needed in 'all-stop + target-non-stop' mode, because we end up
4221 here spuriously after the target is all stopped and we've already
4222 reported the stop to the user, polling for events. */
4223 scoped_restore_current_thread restore_thread
;
4225 intrusive_list_iterator
<inferior
> start
4226 = inferior_list
.iterator_to (*selected
);
4228 for (intrusive_list_iterator
<inferior
> it
= start
;
4229 it
!= inferior_list
.end ();
4232 inferior
*inf
= &*it
;
4234 if (inferior_matches (inf
) && do_wait (inf
))
4238 for (intrusive_list_iterator
<inferior
> it
= inferior_list
.begin ();
4242 inferior
*inf
= &*it
;
4244 if (inferior_matches (inf
) && do_wait (inf
))
4248 ecs
->ws
.set_ignore ();
4252 /* An event reported by wait_one. */
4254 struct wait_one_event
4256 /* The target the event came out of. */
4257 process_stratum_target
*target
;
4259 /* The PTID the event was for. */
4262 /* The waitstatus. */
4263 target_waitstatus ws
;
4266 static bool handle_one (const wait_one_event
&event
);
4267 static int finish_step_over (struct execution_control_state
*ecs
);
4269 /* Prepare and stabilize the inferior for detaching it. E.g.,
4270 detaching while a thread is displaced stepping is a recipe for
4271 crashing it, as nothing would readjust the PC out of the scratch
4275 prepare_for_detach (void)
4277 struct inferior
*inf
= current_inferior ();
4278 ptid_t pid_ptid
= ptid_t (inf
->pid
);
4279 scoped_restore_current_thread restore_thread
;
4281 scoped_restore restore_detaching
= make_scoped_restore (&inf
->detaching
, true);
4283 /* Remove all threads of INF from the global step-over chain. We
4284 want to stop any ongoing step-over, not start any new one. */
4285 thread_step_over_list_safe_range range
4286 = make_thread_step_over_list_safe_range (global_thread_step_over_list
);
4288 for (thread_info
*tp
: range
)
4291 infrun_debug_printf ("removing thread %s from global step over chain",
4292 tp
->ptid
.to_string ().c_str ());
4293 global_thread_step_over_chain_remove (tp
);
4296 /* If we were already in the middle of an inline step-over, and the
4297 thread stepping belongs to the inferior we're detaching, we need
4298 to restart the threads of other inferiors. */
4299 if (step_over_info
.thread
!= -1)
4301 infrun_debug_printf ("inline step-over in-process while detaching");
4303 thread_info
*thr
= find_thread_global_id (step_over_info
.thread
);
4304 if (thr
->inf
== inf
)
4306 /* Since we removed threads of INF from the step-over chain,
4307 we know this won't start a step-over for INF. */
4308 clear_step_over_info ();
4310 if (target_is_non_stop_p ())
4312 /* Start a new step-over in another thread if there's
4313 one that needs it. */
4316 /* Restart all other threads (except the
4317 previously-stepping thread, since that one is still
4319 if (!step_over_info_valid_p ())
4320 restart_threads (thr
);
4325 if (displaced_step_in_progress (inf
))
4327 infrun_debug_printf ("displaced-stepping in-process while detaching");
4329 /* Stop threads currently displaced stepping, aborting it. */
4331 for (thread_info
*thr
: inf
->non_exited_threads ())
4333 if (thr
->displaced_step_state
.in_progress ())
4335 if (thr
->executing ())
4337 if (!thr
->stop_requested
)
4339 target_stop (thr
->ptid
);
4340 thr
->stop_requested
= true;
4344 thr
->set_resumed (false);
4348 while (displaced_step_in_progress (inf
))
4350 wait_one_event event
;
4352 event
.target
= inf
->process_target ();
4353 event
.ptid
= do_target_wait_1 (inf
, pid_ptid
, &event
.ws
, 0);
4356 print_target_wait_results (pid_ptid
, event
.ptid
, event
.ws
,
4362 /* It's OK to leave some of the threads of INF stopped, since
4363 they'll be detached shortly. */
4367 /* If all-stop, but there exists a non-stop target, stop all threads
4368 now that we're presenting the stop to the user. */
4371 stop_all_threads_if_all_stop_mode ()
4373 if (!non_stop
&& exists_non_stop_target ())
4374 stop_all_threads ("presenting stop to user in all-stop");
4377 /* Wait for control to return from inferior to debugger.
4379 If inferior gets a signal, we may decide to start it up again
4380 instead of returning. That is why there is a loop in this function.
4381 When this function actually returns it means the inferior
4382 should be left stopped and GDB should read more commands. */
4385 wait_for_inferior (inferior
*inf
)
4387 infrun_debug_printf ("wait_for_inferior ()");
4389 SCOPE_EXIT
{ delete_just_stopped_threads_infrun_breakpoints (); };
4391 /* If an error happens while handling the event, propagate GDB's
4392 knowledge of the executing state to the frontend/user running
4394 scoped_finish_thread_state finish_state
4395 (inf
->process_target (), minus_one_ptid
);
4399 execution_control_state ecs
;
4401 overlay_cache_invalid
= 1;
4403 /* Flush target cache before starting to handle each event.
4404 Target was running and cache could be stale. This is just a
4405 heuristic. Running threads may modify target memory, but we
4406 don't get any event. */
4407 target_dcache_invalidate (current_program_space
->aspace
);
4409 ecs
.ptid
= do_target_wait_1 (inf
, minus_one_ptid
, &ecs
.ws
, 0);
4410 ecs
.target
= inf
->process_target ();
4413 print_target_wait_results (minus_one_ptid
, ecs
);
4415 /* Now figure out what to do with the result of the result. */
4416 handle_inferior_event (&ecs
);
4418 if (!ecs
.wait_some_more
)
4422 stop_all_threads_if_all_stop_mode ();
4424 /* No error, don't finish the state yet. */
4425 finish_state
.release ();
4428 /* Cleanup that reinstalls the readline callback handler, if the
4429 target is running in the background. If while handling the target
4430 event something triggered a secondary prompt, like e.g., a
4431 pagination prompt, we'll have removed the callback handler (see
4432 gdb_readline_wrapper_line). Need to do this as we go back to the
4433 event loop, ready to process further input. Note this has no
4434 effect if the handler hasn't actually been removed, because calling
4435 rl_callback_handler_install resets the line buffer, thus losing
4439 reinstall_readline_callback_handler_cleanup ()
4441 struct ui
*ui
= current_ui
;
4445 /* We're not going back to the top level event loop yet. Don't
4446 install the readline callback, as it'd prep the terminal,
4447 readline-style (raw, noecho) (e.g., --batch). We'll install
4448 it the next time the prompt is displayed, when we're ready
4453 if (ui
->command_editing
&& ui
->prompt_state
!= PROMPT_BLOCKED
)
4454 gdb_rl_callback_handler_reinstall ();
4457 /* Clean up the FSMs of threads that are now stopped. In non-stop,
4458 that's just the event thread. In all-stop, that's all threads. In
4459 all-stop, threads that had a pending exit no longer have a reason
4460 to be around, as their FSMs/commands are canceled, so we delete
4461 them. This avoids "info threads" listing such threads as if they
4462 were alive (and failing to read their registers), the user being
4463 able to select and resume them (and that failing), etc. */
4466 clean_up_just_stopped_threads_fsms (struct execution_control_state
*ecs
)
4468 /* The first clean_up call below assumes the event thread is the current
4470 if (ecs
->event_thread
!= nullptr)
4471 gdb_assert (ecs
->event_thread
== inferior_thread ());
4473 if (ecs
->event_thread
!= nullptr
4474 && ecs
->event_thread
->thread_fsm () != nullptr)
4475 ecs
->event_thread
->thread_fsm ()->clean_up (ecs
->event_thread
);
4479 scoped_restore_current_thread restore_thread
;
4481 for (thread_info
*thr
: all_threads_safe ())
4483 if (thr
->state
== THREAD_EXITED
)
4486 if (thr
== ecs
->event_thread
)
4489 if (thr
->thread_fsm () != nullptr)
4491 switch_to_thread (thr
);
4492 thr
->thread_fsm ()->clean_up (thr
);
4495 /* As we are cancelling the command/FSM of this thread,
4496 whatever was the reason we needed to report a thread
4497 exited event to the user, that reason is gone. Delete
4498 the thread, so that the user doesn't see it in the thread
4499 list, the next proceed doesn't try to resume it, etc. */
4500 if (thr
->has_pending_waitstatus ()
4501 && (thr
->pending_waitstatus ().kind ()
4502 == TARGET_WAITKIND_THREAD_EXITED
))
4503 delete_thread (thr
);
4508 /* Helper for all_uis_check_sync_execution_done that works on the
4512 check_curr_ui_sync_execution_done (void)
4514 struct ui
*ui
= current_ui
;
4516 if (ui
->prompt_state
== PROMPT_NEEDED
4518 && !gdb_in_secondary_prompt_p (ui
))
4520 target_terminal::ours ();
4521 top_level_interpreter ()->on_sync_execution_done ();
4522 ui
->register_file_handler ();
4529 all_uis_check_sync_execution_done (void)
4531 SWITCH_THRU_ALL_UIS ()
4533 check_curr_ui_sync_execution_done ();
4540 all_uis_on_sync_execution_starting (void)
4542 SWITCH_THRU_ALL_UIS ()
4544 if (current_ui
->prompt_state
== PROMPT_NEEDED
)
4545 async_disable_stdin ();
4549 /* A quit_handler callback installed while we're handling inferior
4553 infrun_quit_handler ()
4555 if (target_terminal::is_ours ())
4559 default_quit_handler would throw a quit in this case, but if
4560 we're handling an event while we have the terminal, it means
4561 the target is running a background execution command, and
4562 thus when users press Ctrl-C, they're wanting to interrupt
4563 whatever command they were executing in the command line.
4567 (gdb) foo bar whatever<ctrl-c>
4569 That Ctrl-C should clear the input line, not interrupt event
4570 handling if it happens that the user types Ctrl-C at just the
4573 It's as-if background event handling was handled by a
4574 separate background thread.
4576 To be clear, the Ctrl-C is not lost -- it will be processed
4577 by the next QUIT call once we're out of fetch_inferior_event
4582 if (check_quit_flag ())
4583 target_pass_ctrlc ();
4587 /* Asynchronous version of wait_for_inferior. It is called by the
4588 event loop whenever a change of state is detected on the file
4589 descriptor corresponding to the target. It can be called more than
4590 once to complete a single execution command. In such cases we need
4591 to keep the state in a global variable ECSS. If it is the last time
4592 that this function is called for a single execution command, then
4593 report to the user that the inferior has stopped, and do the
4594 necessary cleanups. */
4597 fetch_inferior_event ()
4599 INFRUN_SCOPED_DEBUG_ENTER_EXIT
;
4601 execution_control_state ecs
;
4604 /* Events are always processed with the main UI as current UI. This
4605 way, warnings, debug output, etc. are always consistently sent to
4606 the main console. */
4607 scoped_restore save_ui
= make_scoped_restore (¤t_ui
, main_ui
);
4609 /* Temporarily disable pagination. Otherwise, the user would be
4610 given an option to press 'q' to quit, which would cause an early
4611 exit and could leave GDB in a half-baked state. */
4612 scoped_restore save_pagination
4613 = make_scoped_restore (&pagination_enabled
, false);
4615 /* Install a quit handler that does nothing if we have the terminal
4616 (meaning the target is running a background execution command),
4617 so that Ctrl-C never interrupts GDB before the event is fully
4619 scoped_restore restore_quit_handler
4620 = make_scoped_restore (&quit_handler
, infrun_quit_handler
);
4622 /* Make sure a SIGINT does not interrupt an extension language while
4623 we're handling an event. That could interrupt a Python unwinder
4624 or a Python observer or some such. A Ctrl-C should either be
4625 forwarded to the inferior if the inferior has the terminal, or,
4626 if GDB has the terminal, should interrupt the command the user is
4627 typing in the CLI. */
4628 scoped_disable_cooperative_sigint_handling restore_coop_sigint
;
4630 /* End up with readline processing input, if necessary. */
4632 SCOPE_EXIT
{ reinstall_readline_callback_handler_cleanup (); };
4634 /* We're handling a live event, so make sure we're doing live
4635 debugging. If we're looking at traceframes while the target is
4636 running, we're going to need to get back to that mode after
4637 handling the event. */
4638 std::optional
<scoped_restore_current_traceframe
> maybe_restore_traceframe
;
4641 maybe_restore_traceframe
.emplace ();
4642 set_current_traceframe (-1);
4645 /* The user/frontend should not notice a thread switch due to
4646 internal events. Make sure we revert to the user selected
4647 thread and frame after handling the event and running any
4648 breakpoint commands. */
4649 scoped_restore_current_thread restore_thread
;
4651 overlay_cache_invalid
= 1;
4652 /* Flush target cache before starting to handle each event. Target
4653 was running and cache could be stale. This is just a heuristic.
4654 Running threads may modify target memory, but we don't get any
4656 target_dcache_invalidate (current_program_space
->aspace
);
4658 scoped_restore save_exec_dir
4659 = make_scoped_restore (&execution_direction
,
4660 target_execution_direction ());
4662 /* Allow targets to pause their resumed threads while we handle
4664 scoped_disable_commit_resumed
disable_commit_resumed ("handling event");
4666 /* Is the current thread performing an inferior function call as part
4667 of a breakpoint condition evaluation? */
4668 bool in_cond_eval
= (inferior_ptid
!= null_ptid
4669 && inferior_thread ()->control
.in_cond_eval
);
4671 /* If the thread is in the middle of the condition evaluation, wait for
4672 an event from the current thread. Otherwise, wait for an event from
4674 ptid_t waiton_ptid
= in_cond_eval
? inferior_ptid
: minus_one_ptid
;
4676 if (!do_target_wait (waiton_ptid
, &ecs
, TARGET_WNOHANG
))
4678 infrun_debug_printf ("do_target_wait returned no event");
4679 disable_commit_resumed
.reset_and_commit ();
4683 gdb_assert (ecs
.ws
.kind () != TARGET_WAITKIND_IGNORE
);
4685 /* Switch to the inferior that generated the event, so we can do
4686 target calls. If the event was not associated to a ptid, */
4687 if (ecs
.ptid
!= null_ptid
4688 && ecs
.ptid
!= minus_one_ptid
)
4689 switch_to_inferior_no_thread (find_inferior_ptid (ecs
.target
, ecs
.ptid
));
4691 switch_to_target_no_thread (ecs
.target
);
4694 print_target_wait_results (minus_one_ptid
, ecs
);
4696 /* If an error happens while handling the event, propagate GDB's
4697 knowledge of the executing state to the frontend/user running
4699 ptid_t finish_ptid
= !target_is_non_stop_p () ? minus_one_ptid
: ecs
.ptid
;
4700 scoped_finish_thread_state
finish_state (ecs
.target
, finish_ptid
);
4702 /* Get executed before scoped_restore_current_thread above to apply
4703 still for the thread which has thrown the exception. */
4704 auto defer_bpstat_clear
4705 = make_scope_exit (bpstat_clear_actions
);
4706 auto defer_delete_threads
4707 = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints
);
4709 int stop_id
= get_stop_id ();
4711 /* Now figure out what to do with the result of the result. */
4712 handle_inferior_event (&ecs
);
4714 if (!ecs
.wait_some_more
)
4716 struct inferior
*inf
= find_inferior_ptid (ecs
.target
, ecs
.ptid
);
4717 bool should_stop
= true;
4718 struct thread_info
*thr
= ecs
.event_thread
;
4720 delete_just_stopped_threads_infrun_breakpoints ();
4722 if (thr
!= nullptr && thr
->thread_fsm () != nullptr)
4723 should_stop
= thr
->thread_fsm ()->should_stop (thr
);
4731 bool should_notify_stop
= true;
4732 bool proceeded
= false;
4734 /* If the thread that stopped just completed an inferior
4735 function call as part of a condition evaluation, then we
4736 don't want to stop all the other threads. */
4737 if (ecs
.event_thread
== nullptr
4738 || !ecs
.event_thread
->control
.in_cond_eval
)
4739 stop_all_threads_if_all_stop_mode ();
4741 clean_up_just_stopped_threads_fsms (&ecs
);
4743 if (stop_id
!= get_stop_id ())
4745 /* If the stop-id has changed then a stop has already been
4746 presented to the user in handle_inferior_event, this is
4747 likely a failed inferior call. As the stop has already
4748 been announced then we should not notify again.
4750 Also, if the prompt state is not PROMPT_NEEDED then GDB
4751 will not be ready for user input after this function. */
4752 should_notify_stop
= false;
4753 gdb_assert (current_ui
->prompt_state
== PROMPT_NEEDED
);
4755 else if (thr
!= nullptr && thr
->thread_fsm () != nullptr)
4757 = thr
->thread_fsm ()->should_notify_stop ();
4759 if (should_notify_stop
)
4761 /* We may not find an inferior if this was a process exit. */
4762 if (inf
== nullptr || inf
->control
.stop_soon
== NO_STOP_QUIETLY
)
4763 proceeded
= normal_stop ();
4766 if (!proceeded
&& !in_cond_eval
)
4768 inferior_event_handler (INF_EXEC_COMPLETE
);
4772 /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
4773 previously selected thread is gone. We have two
4774 choices - switch to no thread selected, or restore the
4775 previously selected thread (now exited). We chose the
4776 later, just because that's what GDB used to do. After
4777 this, "info threads" says "The current thread <Thread
4778 ID 2> has terminated." instead of "No thread
4782 && ecs
.ws
.kind () != TARGET_WAITKIND_NO_RESUMED
)
4783 restore_thread
.dont_restore ();
4787 defer_delete_threads
.release ();
4788 defer_bpstat_clear
.release ();
4790 /* No error, don't finish the thread states yet. */
4791 finish_state
.release ();
4793 disable_commit_resumed
.reset_and_commit ();
4795 /* This scope is used to ensure that readline callbacks are
4796 reinstalled here. */
4799 /* Handling this event might have caused some inferiors to become prunable.
4800 For example, the exit of an inferior that was automatically added. Try
4801 to get rid of them. Keeping those around slows down things linearly.
4803 Note that this never removes the current inferior. Therefore, call this
4804 after RESTORE_THREAD went out of scope, in case the event inferior (which was
4805 temporarily made the current inferior) is meant to be deleted.
4807 Call this before all_uis_check_sync_execution_done, so that notifications about
4808 removed inferiors appear before the prompt. */
4811 /* If a UI was in sync execution mode, and now isn't, restore its
4812 prompt (a synchronous execution command has finished, and we're
4813 ready for input). */
4814 all_uis_check_sync_execution_done ();
4817 && exec_done_display_p
4818 && (inferior_ptid
== null_ptid
4819 || inferior_thread ()->state
!= THREAD_RUNNING
))
4820 gdb_printf (_("completed.\n"));
4826 set_step_info (thread_info
*tp
, const frame_info_ptr
&frame
,
4827 struct symtab_and_line sal
)
4829 /* This can be removed once this function no longer implicitly relies on the
4830 inferior_ptid value. */
4831 gdb_assert (inferior_ptid
== tp
->ptid
);
4833 tp
->control
.step_frame_id
= get_frame_id (frame
);
4834 tp
->control
.step_stack_frame_id
= get_stack_frame_id (frame
);
4836 tp
->current_symtab
= sal
.symtab
;
4837 tp
->current_line
= sal
.line
;
4840 ("symtab = %s, line = %d, step_frame_id = %s, step_stack_frame_id = %s",
4841 tp
->current_symtab
!= nullptr ? tp
->current_symtab
->filename
: "<null>",
4843 tp
->control
.step_frame_id
.to_string ().c_str (),
4844 tp
->control
.step_stack_frame_id
.to_string ().c_str ());
4847 /* Clear context switchable stepping state. */
4850 init_thread_stepping_state (struct thread_info
*tss
)
4852 tss
->stepped_breakpoint
= 0;
4853 tss
->stepping_over_breakpoint
= 0;
4854 tss
->stepping_over_watchpoint
= 0;
4855 tss
->step_after_step_resume_breakpoint
= 0;
4861 set_last_target_status (process_stratum_target
*target
, ptid_t ptid
,
4862 const target_waitstatus
&status
)
4864 target_last_proc_target
= target
;
4865 target_last_wait_ptid
= ptid
;
4866 target_last_waitstatus
= status
;
4872 get_last_target_status (process_stratum_target
**target
, ptid_t
*ptid
,
4873 target_waitstatus
*status
)
4875 if (target
!= nullptr)
4876 *target
= target_last_proc_target
;
4877 if (ptid
!= nullptr)
4878 *ptid
= target_last_wait_ptid
;
4879 if (status
!= nullptr)
4880 *status
= target_last_waitstatus
;
4886 nullify_last_target_wait_ptid (void)
4888 target_last_proc_target
= nullptr;
4889 target_last_wait_ptid
= minus_one_ptid
;
4890 target_last_waitstatus
= {};
4893 /* Switch thread contexts. */
4896 context_switch (execution_control_state
*ecs
)
4898 if (ecs
->ptid
!= inferior_ptid
4899 && (inferior_ptid
== null_ptid
4900 || ecs
->event_thread
!= inferior_thread ()))
4902 infrun_debug_printf ("Switching context from %s to %s",
4903 inferior_ptid
.to_string ().c_str (),
4904 ecs
->ptid
.to_string ().c_str ());
4907 switch_to_thread (ecs
->event_thread
);
4910 /* If the target can't tell whether we've hit breakpoints
4911 (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
4912 check whether that could have been caused by a breakpoint. If so,
4913 adjust the PC, per gdbarch_decr_pc_after_break. */
4916 adjust_pc_after_break (struct thread_info
*thread
,
4917 const target_waitstatus
&ws
)
4919 struct regcache
*regcache
;
4920 struct gdbarch
*gdbarch
;
4921 CORE_ADDR breakpoint_pc
, decr_pc
;
4923 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
4924 we aren't, just return.
4926 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
4927 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
4928 implemented by software breakpoints should be handled through the normal
4931 NOTE drow/2004-01-31: On some targets, breakpoints may generate
4932 different signals (SIGILL or SIGEMT for instance), but it is less
4933 clear where the PC is pointing afterwards. It may not match
4934 gdbarch_decr_pc_after_break. I don't know any specific target that
4935 generates these signals at breakpoints (the code has been in GDB since at
4936 least 1992) so I can not guess how to handle them here.
4938 In earlier versions of GDB, a target with
4939 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
4940 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
4941 target with both of these set in GDB history, and it seems unlikely to be
4942 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
4944 if (ws
.kind () != TARGET_WAITKIND_STOPPED
)
4947 if (ws
.sig () != GDB_SIGNAL_TRAP
)
4950 /* In reverse execution, when a breakpoint is hit, the instruction
4951 under it has already been de-executed. The reported PC always
4952 points at the breakpoint address, so adjusting it further would
4953 be wrong. E.g., consider this case on a decr_pc_after_break == 1
4956 B1 0x08000000 : INSN1
4957 B2 0x08000001 : INSN2
4959 PC -> 0x08000003 : INSN4
4961 Say you're stopped at 0x08000003 as above. Reverse continuing
4962 from that point should hit B2 as below. Reading the PC when the
4963 SIGTRAP is reported should read 0x08000001 and INSN2 should have
4964 been de-executed already.
4966 B1 0x08000000 : INSN1
4967 B2 PC -> 0x08000001 : INSN2
4971 We can't apply the same logic as for forward execution, because
4972 we would wrongly adjust the PC to 0x08000000, since there's a
4973 breakpoint at PC - 1. We'd then report a hit on B1, although
4974 INSN1 hadn't been de-executed yet. Doing nothing is the correct
4976 if (execution_direction
== EXEC_REVERSE
)
4979 /* If the target can tell whether the thread hit a SW breakpoint,
4980 trust it. Targets that can tell also adjust the PC
4982 if (target_supports_stopped_by_sw_breakpoint ())
4985 /* Note that relying on whether a breakpoint is planted in memory to
4986 determine this can fail. E.g,. the breakpoint could have been
4987 removed since. Or the thread could have been told to step an
4988 instruction the size of a breakpoint instruction, and only
4989 _after_ was a breakpoint inserted at its address. */
4991 /* If this target does not decrement the PC after breakpoints, then
4992 we have nothing to do. */
4993 regcache
= get_thread_regcache (thread
);
4994 gdbarch
= regcache
->arch ();
4996 decr_pc
= gdbarch_decr_pc_after_break (gdbarch
);
5000 const address_space
*aspace
= thread
->inf
->aspace
.get ();
5002 /* Find the location where (if we've hit a breakpoint) the
5003 breakpoint would be. */
5004 breakpoint_pc
= regcache_read_pc (regcache
) - decr_pc
;
5006 /* If the target can't tell whether a software breakpoint triggered,
5007 fallback to figuring it out based on breakpoints we think were
5008 inserted in the target, and on whether the thread was stepped or
5011 /* Check whether there actually is a software breakpoint inserted at
5014 If in non-stop mode, a race condition is possible where we've
5015 removed a breakpoint, but stop events for that breakpoint were
5016 already queued and arrive later. To suppress those spurious
5017 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
5018 and retire them after a number of stop events are reported. Note
5019 this is an heuristic and can thus get confused. The real fix is
5020 to get the "stopped by SW BP and needs adjustment" info out of
5021 the target/kernel (and thus never reach here; see above). */
5022 if (software_breakpoint_inserted_here_p (aspace
, breakpoint_pc
)
5023 || (target_is_non_stop_p ()
5024 && moribund_breakpoint_here_p (aspace
, breakpoint_pc
)))
5026 std::optional
<scoped_restore_tmpl
<int>> restore_operation_disable
;
5028 if (record_full_is_used ())
5029 restore_operation_disable
.emplace
5030 (record_full_gdb_operation_disable_set ());
5032 /* When using hardware single-step, a SIGTRAP is reported for both
5033 a completed single-step and a software breakpoint. Need to
5034 differentiate between the two, as the latter needs adjusting
5035 but the former does not.
5037 The SIGTRAP can be due to a completed hardware single-step only if
5038 - we didn't insert software single-step breakpoints
5039 - this thread is currently being stepped
5041 If any of these events did not occur, we must have stopped due
5042 to hitting a software breakpoint, and have to back up to the
5045 As a special case, we could have hardware single-stepped a
5046 software breakpoint. In this case (prev_pc == breakpoint_pc),
5047 we also need to back up to the breakpoint address. */
5049 if (thread_has_single_step_breakpoints_set (thread
)
5050 || !currently_stepping (thread
)
5051 || (thread
->stepped_breakpoint
5052 && thread
->prev_pc
== breakpoint_pc
))
5053 regcache_write_pc (regcache
, breakpoint_pc
);
/* NOTE(review): this extract has lines elided (the embedded original
   line numbers skip), so the body below is incomplete; do not treat it
   as the full function.  */
/* Walk outward from INITIAL_FRAME via get_prev_frame, looking for the
   frame whose id equals STEP_FRAME_ID; frames of type INLINE_FRAME are
   treated specially (the non-inline check below ends the walk —
   presumably returning a result whose lines are elided here).  */
5058 stepped_in_from (const frame_info_ptr
&initial_frame
, frame_id step_frame_id
)
5060 frame_info_ptr frame
= initial_frame
;
/* Start the scan at the caller of INITIAL_FRAME.  The loop condition
   itself is elided in this extract.  */
5062 for (frame
= get_prev_frame (frame
);
5064 frame
= get_prev_frame (frame
))
5066 if (get_frame_id (frame
) == step_frame_id
)
5069 if (get_frame_type (frame
) != INLINE_FRAME
)
/* NOTE(review): lines are elided in this extract (original numbering
   skips); the loop body below is incomplete.  */
5076 /* Look for an inline frame that is marked for skip.
5077 If PREV_FRAME is TRUE start at the previous frame,
5078 otherwise start at the current frame. Stop at the
5079 first non-inline frame, or at the frame where the
5083 inline_frame_is_marked_for_skip (bool prev_frame
, struct thread_info
*tp
)
5085 frame_info_ptr frame
= get_current_frame ();
/* When PREV_FRAME is set, step back once before scanning (the guard
   for this is elided here).  */
5088 frame
= get_prev_frame (frame
);
/* Scan outward until we run out of frames, hit TP's step frame, or
   leave the chain of inline frames.  */
5090 for (; frame
!= nullptr; frame
= get_prev_frame (frame
))
5092 const char *fn
= nullptr;
5093 symtab_and_line sal
;
5096 if (get_frame_id (frame
) == tp
->control
.step_frame_id
)
5098 if (get_frame_type (frame
) != INLINE_FRAME
)
/* Resolve the frame's source location and function symbol so the
   skip machinery can match on function name.  */
5101 sal
= find_frame_sal (frame
);
5102 sym
= get_frame_function (frame
);
5105 fn
= sym
->print_name ();
/* Elided condition above; this tests whether the named function is
   on the user's skip list.  */
5108 && function_name_is_marked_for_skip (fn
, sal
))
/* NOTE(review): lines elided in this extract; braces and the return
   value handling are missing.  */
5115 /* If the event thread has the stop requested flag set, pretend it
5116 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
5120 handle_stop_requested (struct execution_control_state
*ecs
)
5122 if (ecs
->event_thread
->stop_requested
)
/* Rewrite the wait status to a plain stop with no signal, then let the
   normal signal-stop path present it.  */
5124 ecs
->ws
.set_stopped (GDB_SIGNAL_0
);
5125 handle_signal_stop (ecs
);
/* NOTE(review): lines elided in this extract (original numbering
   skips); return statements and some braces are missing.  */
5131 /* Auxiliary function that handles syscall entry/return events.
5132 It returns true if the inferior should keep going (and GDB
5133 should ignore the event), or false if the event deserves to be
5137 handle_syscall_event (struct execution_control_state
*ecs
)
5139 struct regcache
*regcache
;
/* Make the event thread the current thread before touching its
   regcache.  */
5142 context_switch (ecs
);
5144 regcache
= get_thread_regcache (ecs
->event_thread
);
5145 syscall_number
= ecs
->ws
.syscall_number ();
5146 ecs
->event_thread
->set_stop_pc (regcache_read_pc (regcache
));
/* Only consult breakpoint machinery when a syscall catchpoint for
   this particular syscall number is active.  */
5148 if (catch_syscall_enabled ()
5149 && catching_syscall_number (syscall_number
))
5151 infrun_debug_printf ("syscall number=%d", syscall_number
);
/* Compute which catchpoints (if any) explain this stop.  */
5153 ecs
->event_thread
->control
.stop_bpstat
5154 = bpstat_stop_status_nowatch (ecs
->event_thread
->inf
->aspace
.get (),
5155 ecs
->event_thread
->stop_pc (),
5156 ecs
->event_thread
, ecs
->ws
);
5158 if (handle_stop_requested (ecs
))
5161 if (bpstat_causes_stop (ecs
->event_thread
->control
.stop_bpstat
))
5163 /* Catchpoint hit. */
5168 if (handle_stop_requested (ecs
))
5171 /* If no catchpoint triggered for this, then keep going. */
/* NOTE(review): lines elided in this extract; some declarations
   (e.g. of `block' and `gsi' initialization arguments) are missing.  */
5177 /* Lazily fill in the execution_control_state's stop_func_* fields. */
5180 fill_in_stop_func (struct gdbarch
*gdbarch
,
5181 struct execution_control_state
*ecs
)
/* Only compute once per event; guarded by the stop_func_filled_in
   flag, set at the bottom.  */
5183 if (!ecs
->stop_func_filled_in
)
5186 const general_symbol_info
*gsi
;
5188 /* Don't care about return value; stop_func_start and stop_func_name
5189 will both be 0 if it doesn't work. */
5190 find_pc_partial_function_sym (ecs
->event_thread
->stop_pc (),
5192 &ecs
->stop_func_start
,
5193 &ecs
->stop_func_end
,
5195 ecs
->stop_func_name
= gsi
== nullptr ? nullptr : gsi
->print_name ();
5197 /* The call to find_pc_partial_function, above, will set
5198 stop_func_start and stop_func_end to the start and end
5199 of the range containing the stop pc. If this range
5200 contains the entry pc for the block (which is always the
5201 case for contiguous blocks), advance stop_func_start past
5202 the function's start offset and entrypoint. Note that
5203 stop_func_start is NOT advanced when in a range of a
5204 non-contiguous block that does not contain the entry pc. */
5205 if (block
!= nullptr
5206 && ecs
->stop_func_start
<= block
->entry_pc ()
5207 && block
->entry_pc () < ecs
->stop_func_end
)
5209 ecs
->stop_func_start
5210 += gdbarch_deprecated_function_start_offset (gdbarch
);
5212 /* PowerPC functions have a Local Entry Point (LEP) and a Global
5213 Entry Point (GEP). There is only one Entry Point (GEP = LEP) for
5214 other architectures. */
5215 ecs
->stop_func_alt_start
= ecs
->stop_func_start
;
5217 if (gdbarch_skip_entrypoint_p (gdbarch
))
5218 ecs
->stop_func_start
5219 = gdbarch_skip_entrypoint (gdbarch
, ecs
->stop_func_start
);
/* Mark the lazily-computed fields as valid.  */
5222 ecs
->stop_func_filled_in
= 1;
/* NOTE(review): braces elided in this extract.  */
5227 /* Return the STOP_SOON field of the inferior pointed at by ECS. */
5229 static enum stop_kind
5230 get_inferior_stop_soon (execution_control_state
*ecs
)
/* Look the inferior up from the event's target/ptid pair; it must
   exist for any event we are processing.  */
5232 struct inferior
*inf
= find_inferior_ptid (ecs
->target
, ecs
->ptid
);
5234 gdb_assert (inf
!= nullptr);
5235 return inf
->control
.stop_soon
;
/* NOTE(review): lines elided in this extract (declaration of
   event_ptid and the return statement are missing).  */
5238 /* Poll for one event out of the current target. Store the resulting
5239 waitstatus in WS, and return the event ptid. Does not block. */
5242 poll_one_curr_target (struct target_waitstatus
*ws
)
/* Force re-reading of overlay state on the next access.  */
5246 overlay_cache_invalid
= 1;
5248 /* Flush target cache before starting to handle each event.
5249 Target was running and cache could be stale. This is just a
5250 heuristic. Running threads may modify target memory, but we
5251 don't get any event. */
5252 target_dcache_invalidate (current_program_space
->aspace
);
/* TARGET_WNOHANG makes this a non-blocking poll across all ptids.  */
5254 event_ptid
= target_wait (minus_one_ptid
, ws
, TARGET_WNOHANG
);
5257 print_target_wait_results (minus_one_ptid
, event_ptid
, *ws
,
5258 current_inferior ()->process_target ());
/* NOTE(review): lines elided in this extract — the outer loop, the
   FD_ZERO/nfds setup, return statements and several braces are
   missing; the structure below is a partial view only.  */
5263 /* Wait for one event out of any target. */
5265 static wait_one_event
/* First pass: poll every async target that still has executing
   threads, without blocking.  */
5270 for (inferior
*inf
: all_inferiors ())
5272 process_stratum_target
*target
= inf
->process_target ();
5273 if (target
== nullptr
5274 || !target
->is_async_p ()
5275 || !target
->threads_executing
)
5278 switch_to_inferior_no_thread (inf
);
5280 wait_one_event event
;
5281 event
.target
= target
;
5282 event
.ptid
= poll_one_curr_target (&event
.ws
);
5284 if (event
.ws
.kind () == TARGET_WAITKIND_NO_RESUMED
)
5286 /* If nothing is resumed, remove the target from the
5288 target_async (false);
5290 else if (event
.ws
.kind () != TARGET_WAITKIND_IGNORE
)
5294 /* Block waiting for some event. */
/* Second pass: collect each waitable target's event fd into a select
   set so we can sleep until any of them has something.  */
5301 for (inferior
*inf
: all_inferiors ())
5303 process_stratum_target
*target
= inf
->process_target ();
5304 if (target
== nullptr
5305 || !target
->is_async_p ()
5306 || !target
->threads_executing
)
5309 int fd
= target
->async_wait_fd ();
5310 FD_SET (fd
, &readfds
);
5317 /* No waitable targets left. All must be stopped. */
5318 infrun_debug_printf ("no waitable targets left");
/* Synthesize a no-resumed event when there is nothing to wait on.  */
5320 target_waitstatus ws
;
5321 ws
.set_no_resumed ();
5322 return {nullptr, minus_one_ptid
, std::move (ws
)};
/* Interruptible so a Ctrl-C during the wait is honored.  */
5327 int numfds
= interruptible_select (nfds
, &readfds
, 0, nullptr, 0);
5333 perror_with_name ("interruptible_select");
/* NOTE(review): lines elided in this extract (original numbering
   skips); some braces are missing.  */
5338 /* Save the thread's event and stop reason to process it later. */
5341 save_waitstatus (struct thread_info
*tp
, const target_waitstatus
&ws
)
5343 infrun_debug_printf ("saving status %s for %s",
5344 ws
.to_string ().c_str (),
5345 tp
->ptid
.to_string ().c_str ());
5347 /* Record for later. */
5348 tp
->set_pending_waitstatus (ws
);
/* For a SIGTRAP stop, additionally classify the stop reason now,
   while the target can still be queried about it.  */
5350 if (ws
.kind () == TARGET_WAITKIND_STOPPED
5351 && ws
.sig () == GDB_SIGNAL_TRAP
)
5353 struct regcache
*regcache
= get_thread_regcache (tp
);
5354 const address_space
*aspace
= tp
->inf
->aspace
.get ();
5355 CORE_ADDR pc
= regcache_read_pc (regcache
);
/* Undo any decr_pc_after_break adjustment on the recorded status.  */
5357 adjust_pc_after_break (tp
, tp
->pending_waitstatus ());
5359 scoped_restore_current_thread restore_thread
;
5360 switch_to_thread (tp
);
/* Prefer the target's own report of the stop reason; fall back to
   checking which breakpoints GDB believes are inserted at PC, and
   finally to whether the thread was being single-stepped.  */
5362 if (target_stopped_by_watchpoint ())
5363 tp
->set_stop_reason (TARGET_STOPPED_BY_WATCHPOINT
);
5364 else if (target_supports_stopped_by_sw_breakpoint ()
5365 && target_stopped_by_sw_breakpoint ())
5366 tp
->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT
);
5367 else if (target_supports_stopped_by_hw_breakpoint ()
5368 && target_stopped_by_hw_breakpoint ())
5369 tp
->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT
);
5370 else if (!target_supports_stopped_by_hw_breakpoint ()
5371 && hardware_breakpoint_inserted_here_p (aspace
, pc
))
5372 tp
->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT
);
5373 else if (!target_supports_stopped_by_sw_breakpoint ()
5374 && software_breakpoint_inserted_here_p (aspace
, pc
))
5375 tp
->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT
);
5376 else if (!thread_has_single_step_breakpoints_set (tp
)
5377 && currently_stepping (tp
))
5378 tp
->set_stop_reason (TARGET_STOPPED_BY_SINGLE_STEP
);
/* NOTE(review): lines elided in this extract (the event_ptid
   parameter line and mark_ptid declaration are missing).  */
5382 /* Mark the non-executing threads accordingly. In all-stop, all
5383 threads of all processes are stopped when we get any event
5384 reported. In non-stop mode, only the event thread stops. */
5387 mark_non_executing_threads (process_stratum_target
*target
,
5389 const target_waitstatus
&ws
)
/* Choose which ptid(s) to mark: everything in all-stop; the whole
   process on a process exit in non-stop; otherwise just the event
   thread.  */
5393 if (!target_is_non_stop_p ())
5394 mark_ptid
= minus_one_ptid
;
5395 else if (ws
.kind () == TARGET_WAITKIND_SIGNALLED
5396 || ws
.kind () == TARGET_WAITKIND_EXITED
)
5398 /* If we're handling a process exit in non-stop mode, even
5399 though threads haven't been deleted yet, one would think
5400 that there is nothing to do, as threads of the dead process
5401 will be soon deleted, and threads of any other process were
5402 left running. However, on some targets, threads survive a
5403 process exit event. E.g., for the "checkpoint" command,
5404 when the current checkpoint/fork exits, linux-fork.c
5405 automatically switches to another fork from within
5406 target_mourn_inferior, by associating the same
5407 inferior/thread to another fork. We haven't mourned yet at
5408 this point, but we must mark any threads left in the
5409 process as not-executing so that finish_thread_state marks
5410 them stopped (in the user's perspective) if/when we present
5411 the stop to the user. */
5412 mark_ptid
= ptid_t (event_ptid
.pid ());
5415 mark_ptid
= event_ptid
;
5417 set_executing (target
, mark_ptid
, false);
5419 /* Likewise the resumed flag. */
5420 set_resumed (target
, mark_ptid
, false);
/* NOTE(review): this extract has many lines elided (the embedded
   original numbering skips repeatedly) — return statements, several
   conditions and braces are missing.  The comments below describe
   only what the visible calls establish.  */
5423 /* Handle one event after stopping threads. If the eventing thread
5424 reports back any interesting event, we leave it pending. If the
5425 eventing thread was in the middle of a displaced step, we
5426 cancel/finish it, and unless the thread's inferior is being
5427 detached, put the thread back in the step-over chain. Returns true
5428 if there are no resumed threads left in the target (thus there's no
5429 point in waiting further), false otherwise. */
5432 handle_one (const wait_one_event
&event
)
5435 ("%s %s", event
.ws
.to_string ().c_str (),
5436 event
.ptid
.to_string ().c_str ());
5438 if (event
.ws
.kind () == TARGET_WAITKIND_NO_RESUMED
)
5440 /* All resumed threads exited. */
5443 else if (event
.ws
.kind () == TARGET_WAITKIND_THREAD_EXITED
5444 || event
.ws
.kind () == TARGET_WAITKIND_EXITED
5445 || event
.ws
.kind () == TARGET_WAITKIND_SIGNALLED
)
5447 /* One thread/process exited/signalled. */
5449 thread_info
*t
= nullptr;
5451 /* The target may have reported just a pid. If so, try
5452 the first non-exited thread. */
5453 if (event
.ptid
.is_pid ())
5455 int pid
= event
.ptid
.pid ();
5456 inferior
*inf
= find_inferior_pid (event
.target
, pid
);
5457 for (thread_info
*tp
: inf
->non_exited_threads ())
5463 /* If there is no available thread, the event would
5464 have to be appended to a per-inferior event list,
5465 which does not exist (and if it did, we'd have
5466 to adjust run control command to be able to
5467 resume such an inferior). We assert here instead
5468 of going into an infinite loop. */
5469 gdb_assert (t
!= nullptr);
5472 ("using %s", t
->ptid
.to_string ().c_str ());
/* Exact ptid reported: find (or, below, add) the thread.  */
5476 t
= event
.target
->find_thread (event
.ptid
);
5477 /* Check if this is the first time we see this thread.
5478 Don't bother adding if it individually exited. */
5480 && event
.ws
.kind () != TARGET_WAITKIND_THREAD_EXITED
)
5481 t
= add_thread (event
.target
, event
.ptid
);
5486 /* Set the threads as non-executing to avoid
5487 another stop attempt on them. */
5488 switch_to_thread_no_regs (t
);
5489 mark_non_executing_threads (event
.target
, event
.ptid
,
5491 save_waitstatus (t
, event
.ws
);
5492 t
->stop_requested
= false;
5494 if (event
.ws
.kind () == TARGET_WAITKIND_THREAD_EXITED
)
/* An exited thread cannot have a displaced step left half-done;
   treat a non-OK finish as an internal error.  */
5496 if (displaced_step_finish (t
, event
.ws
)
5497 != DISPLACED_STEP_FINISH_STATUS_OK
)
5499 gdb_assert_not_reached ("displaced_step_finish on "
5500 "exited thread failed");
/* Non-exit event path: ensure the thread exists and mark it
   stopped/not-resumed before recording the event.  */
5507 thread_info
*t
= event
.target
->find_thread (event
.ptid
);
5509 t
= add_thread (event
.target
, event
.ptid
);
5511 t
->stop_requested
= false;
5512 t
->set_executing (false);
5513 t
->set_resumed (false);
5514 t
->control
.may_range_step
= 0;
5516 /* This may be the first time we see the inferior report
5518 if (t
->inf
->needs_setup
)
5520 switch_to_thread_no_regs (t
);
5524 if (event
.ws
.kind () == TARGET_WAITKIND_STOPPED
5525 && event
.ws
.sig () == GDB_SIGNAL_0
)
5527 /* We caught the event that we intended to catch, so
5528 there's no event to save as pending. */
5530 if (displaced_step_finish (t
, event
.ws
)
5531 == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED
)
5533 /* Add it back to the step-over queue. */
5535 ("displaced-step of %s canceled",
5536 t
->ptid
.to_string ().c_str ());
5538 t
->control
.trap_expected
= 0;
5539 if (!t
->inf
->detaching
)
5540 global_thread_step_over_chain_enqueue (t
);
/* Any other (interesting) event: save it as pending on the thread,
   finishing/cancelling a displaced step the same way.  */
5545 struct regcache
*regcache
;
5548 ("target_wait %s, saving status for %s",
5549 event
.ws
.to_string ().c_str (),
5550 t
->ptid
.to_string ().c_str ());
5552 /* Record for later. */
5553 save_waitstatus (t
, event
.ws
);
5555 if (displaced_step_finish (t
, event
.ws
)
5556 == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED
)
5558 /* Add it back to the step-over queue. */
5559 t
->control
.trap_expected
= 0;
5560 if (!t
->inf
->detaching
)
5561 global_thread_step_over_chain_enqueue (t
);
/* Cache the thread's stop PC from its registers.  */
5564 regcache
= get_thread_regcache (t
);
5565 t
->set_stop_pc (regcache_read_pc (regcache
));
5567 infrun_debug_printf ("saved stop_pc=%s for %s "
5568 "(currently_stepping=%d)",
5569 paddress (current_inferior ()->arch (),
5571 t
->ptid
.to_string ().c_str (),
5572 currently_stepping (t
));
/* NOTE(review): lines elided in this extract; the target_async(true)
   call that this function's name implies is among the missing lines —
   confirm against the full source.  */
5579 /* Helper for stop_all_threads. wait_one waits for events until it
5580 sees a TARGET_WAITKIND_NO_RESUMED event. When it sees one, it
5581 disables target_async for the target to stop waiting for events
5582 from it. TARGET_WAITKIND_NO_RESUMED can be delayed though,
5583 consider, debugging against gdbserver:
5585 #1 - Threads 1-5 are running, and thread 1 hits a breakpoint.
5587 #2 - gdb processes the breakpoint hit for thread 1, stops all
5588 threads, and steps thread 1 over the breakpoint. while
5589 stopping threads, some other threads reported interesting
5590 events, which were left pending in the thread's objects
5593 #2 - Thread 1 exits (it stepped an exit syscall), and gdbserver
5594 reports the thread exit for thread 1. The event ends up in
5595 remote's stop reply queue.
5597 #3 - That was the last resumed thread, so gdbserver reports
5598 no-resumed, and that event also ends up in remote's stop
5599 reply queue, queued after the thread exit from #2.
5601 #4 - gdb processes the thread exit event, which finishes the
5602 step-over, and so gdb restarts all threads (threads with
5603 pending events are left marked resumed, but aren't set
5604 executing). The no-resumed event is still left pending in
5605 the remote stop reply queue.
5607 #5 - Since there are now resumed threads with pending breakpoint
5608 hits, gdb picks one at random to process next.
5610 #5 - gdb picks the breakpoint hit for thread 2 this time, and that
5611 breakpoint also needs to be stepped over, so gdb stops all
5614 #6 - stop_all_threads counts number of expected stops and calls
5615 wait_one once for each.
5617 #7 - The first wait_one call collects the no-resumed event from #3
5620 #9 - Seeing the no-resumed event, wait_one disables target async
5621 for the remote target, to stop waiting for events from it.
5622 wait_one from here on always return no-resumed directly
5623 without reaching the target.
5625 #10 - stop_all_threads still hasn't seen all the stops it expects,
5626 so it does another pass.
5628 #11 - Since the remote target is not async (disabled in #9),
5629 wait_one doesn't wait on it, so it won't see the expected
5630 stops, and instead returns no-resumed directly.
5632 #12 - stop_all_threads still haven't seen all the stops, so it
5633 does another pass. goto #11, looping forever.
5635 To handle this, we explicitly (re-)enable target async on all
5636 targets that can async every time stop_all_threads goes wait for
5637 the expected stops. */
5640 reenable_target_async ()
/* Visit every inferior's process target; those with executing
   threads that can async but currently aren't get re-enabled.  */
5642 for (inferior
*inf
: all_inferiors ())
5644 process_stratum_target
*target
= inf
->process_target ();
5645 if (target
!= nullptr
5646 && target
->threads_executing
5647 && target
->can_async_p ()
5648 && !target
->is_async_p ())
5650 switch_to_inferior_no_thread (inf
);
/* NOTE(review): many lines elided in this extract (original numbering
   skips) — the pass/iterations declarations, SCOPE_EXIT wiring around
   the thread-events enable/disable pair, and several braces are
   missing.  Comments below describe only the visible structure.  */
/* Stop all threads (of all inferiors, or only of INF when non-null),
   requesting stops and waiting until two consecutive passes find
   nothing left executing.  */
5659 stop_all_threads (const char *reason
, inferior
*inf
)
5661 /* We may need multiple passes to discover all threads. */
5665 gdb_assert (exists_non_stop_target ());
5667 INFRUN_SCOPED_DEBUG_START_END ("reason=%s, inf=%d", reason
,
5668 inf
!= nullptr ? inf
->num
: -1);
5670 infrun_debug_show_threads ("non-exited threads",
5671 all_non_exited_threads ());
5673 scoped_restore_current_thread restore_thread
;
5675 /* Enable thread events on relevant targets. */
5676 for (auto *target
: all_non_exited_process_targets ())
5678 if (inf
!= nullptr && inf
->process_target () != target
)
5681 switch_to_target_no_thread (target
);
5682 target_thread_events (true);
5687 /* Disable thread events on relevant targets. */
5688 for (auto *target
: all_non_exited_process_targets ())
5690 if (inf
!= nullptr && inf
->process_target () != target
)
5693 switch_to_target_no_thread (target
);
5694 target_thread_events (false);
5697 /* Use debug_prefixed_printf directly to get a meaningful function
5700 debug_prefixed_printf ("infrun", "stop_all_threads", "done");
5703 /* Request threads to stop, and then wait for the stops. Because
5704 threads we already know about can spawn more threads while we're
5705 trying to stop them, and we only learn about new threads when we
5706 update the thread list, do this in a loop, and keep iterating
5707 until two passes find no threads that need to be stopped. */
5708 for (pass
= 0; pass
< 2; pass
++, iterations
++)
5710 infrun_debug_printf ("pass=%d, iterations=%d", pass
, iterations
);
5713 int waits_needed
= 0;
/* Refresh the thread list of each relevant target first.  */
5715 for (auto *target
: all_non_exited_process_targets ())
5717 if (inf
!= nullptr && inf
->process_target () != target
)
5720 switch_to_target_no_thread (target
);
5721 update_thread_list ();
5724 /* Go through all threads looking for threads that we need
5725 to tell the target to stop. */
5726 for (thread_info
*t
: all_non_exited_threads ())
5728 if (inf
!= nullptr && t
->inf
!= inf
)
5731 /* For a single-target setting with an all-stop target,
5732 we would not even arrive here. For a multi-target
5733 setting, until GDB is able to handle a mixture of
5734 all-stop and non-stop targets, simply skip all-stop
5735 targets' threads. This should be fine due to the
5736 protection of 'check_multi_target_resumption'. */
5738 switch_to_thread_no_regs (t
);
5739 if (!target_is_non_stop_p ())
5742 if (t
->executing ())
5744 /* If already stopping, don't request a stop again.
5745 We just haven't seen the notification yet. */
5746 if (!t
->stop_requested
)
5748 infrun_debug_printf (" %s executing, need stop",
5749 t
->ptid
.to_string ().c_str ());
5750 target_stop (t
->ptid
);
5751 t
->stop_requested
= true;
5755 infrun_debug_printf (" %s executing, already stopping",
5756 t
->ptid
.to_string ().c_str ());
5759 if (t
->stop_requested
)
5764 infrun_debug_printf (" %s not executing",
5765 t
->ptid
.to_string ().c_str ());
5767 /* The thread may be not executing, but still be
5768 resumed with a pending status to process. */
5769 t
->set_resumed (false);
5773 if (waits_needed
== 0)
5776 /* If we find new threads on the second iteration, restart
5777 over. We want to see two iterations in a row with all
/* Guard against the stale-no-resumed livelock documented above
   reenable_target_async.  */
5782 reenable_target_async ();
/* Collect exactly the number of stop notifications we asked for.  */
5784 for (int i
= 0; i
< waits_needed
; i
++)
5786 wait_one_event event
= wait_one ();
5787 if (handle_one (event
))
/* NOTE(review): lines elided in this extract (return statements and
   some condition lines are missing).  */
5794 /* Handle a TARGET_WAITKIND_NO_RESUMED event. Return true if we
5795 handled the event and should continue waiting. Return false if we
5796 should stop and report the event to the user. */
5799 handle_no_resumed (struct execution_control_state
*ecs
)
5801 if (target_can_async_p ())
/* Determine whether any UI is synchronously blocked on execution;
   if none is, the no-resumed event can be ignored in the background.  */
5803 bool any_sync
= false;
5805 for (ui
*ui
: all_uis ())
5807 if (ui
->prompt_state
== PROMPT_BLOCKED
)
5815 /* There were no unwaited-for children left in the target, but,
5816 we're not synchronously waiting for events either. Just
5819 infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED (ignoring: bg)");
5820 prepare_to_wait (ecs
);
5825 /* Otherwise, if we were running a synchronous execution command, we
5826 may need to cancel it and give the user back the terminal.
5828 In non-stop mode, the target can't tell whether we've already
5829 consumed previous stop events, so it can end up sending us a
5830 no-resumed event like so:
5832 #0 - thread 1 is left stopped
5834 #1 - thread 2 is resumed and hits breakpoint
5835 -> TARGET_WAITKIND_STOPPED
5837 #2 - thread 3 is resumed and exits
5838 this is the last resumed thread, so
5839 -> TARGET_WAITKIND_NO_RESUMED
5841 #3 - gdb processes stop for thread 2 and decides to re-resume
5844 #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
5845 thread 2 is now resumed, so the event should be ignored.
5847 IOW, if the stop for thread 2 doesn't end a foreground command,
5848 then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
5849 event. But it could be that the event meant that thread 2 itself
5850 (or whatever other thread was the last resumed thread) exited.
5852 To address this we refresh the thread list and check whether we
5853 have resumed threads _now_. In the example above, this removes
5854 thread 3 from the thread list. If thread 2 was re-resumed, we
5855 ignore this event. If we find no thread resumed, then we cancel
5856 the synchronous command and show "no unwaited-for " to the
5859 inferior
*curr_inf
= current_inferior ();
5861 scoped_restore_current_thread restore_thread
;
5862 update_thread_list ();
5866 - the current target has no thread executing, and
5867 - the current inferior is native, and
5868 - the current inferior is the one which has the terminal, and
5871 then a Ctrl-C from this point on would remain stuck in the
5872 kernel, until a thread resumes and dequeues it. That would
5873 result in the GDB CLI not reacting to Ctrl-C, not able to
5874 interrupt the program. To address this, if the current inferior
5875 no longer has any thread executing, we give the terminal to some
5876 other inferior that has at least one thread executing. */
5877 bool swap_terminal
= true;
5879 /* Whether to ignore this TARGET_WAITKIND_NO_RESUMED event, or
5880 whether to report it to the user. */
5881 bool ignore_event
= false;
/* Single scan decides both questions: terminal ownership and whether
   any thread is still resumed.  */
5883 for (thread_info
*thread
: all_non_exited_threads ())
5885 if (swap_terminal
&& thread
->executing ())
5887 if (thread
->inf
!= curr_inf
)
5889 target_terminal::ours ();
5891 switch_to_thread (thread
);
5892 target_terminal::inferior ();
5894 swap_terminal
= false;
5897 if (!ignore_event
&& thread
->resumed ())
5899 /* Either there were no unwaited-for children left in the
5900 target at some point, but there are now, or some target
5901 other than the eventing one has unwaited-for children
5902 left. Just ignore. */
5903 infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED "
5904 "(ignoring: found resumed)");
5906 ignore_event
= true;
5909 if (ignore_event
&& !swap_terminal
)
/* Ignore the event: restore the current inferior and keep waiting.  */
5915 switch_to_inferior_no_thread (curr_inf
);
5916 prepare_to_wait (ecs
);
5920 /* Go ahead and report the event. */
/* NOTE(review): many lines elided in this extract (original numbering
   skips) — conditions around abort_cmd, restart logic and several
   return statements are missing.  Comments describe only the visible
   calls.  */
5924 /* Handle a TARGET_WAITKIND_THREAD_EXITED event. Return true if we
5925 handled the event and should continue waiting. Return false if we
5926 should stop and report the event to the user. */
5929 handle_thread_exited (execution_control_state
*ecs
)
5931 context_switch (ecs
);
5933 /* Clear these so we don't re-start the thread stepping over a
5934 breakpoint/watchpoint. */
5935 ecs
->event_thread
->stepping_over_breakpoint
= 0;
5936 ecs
->event_thread
->stepping_over_watchpoint
= 0;
5938 /* If the thread had an FSM, then abort the command. But only after
5939 finishing the step over, as in non-stop mode, aborting this
5940 thread's command should not interfere with other threads. We
5941 must check this before finish_step over, however, which may
5942 update the thread list and delete the event thread. */
5943 bool abort_cmd
= (ecs
->event_thread
->thread_fsm () != nullptr);
5945 /* Mark the thread exited right now, because finish_step_over may
5946 update the thread list and that may delete the thread silently
5947 (depending on target), while we always want to emit the "[Thread
5948 ... exited]" notification. Don't actually delete the thread yet,
5949 because we need to pass its pointer down to finish_step_over. */
5950 set_thread_exited (ecs
->event_thread
);
5952 /* Maybe the thread was doing a step-over, if so release
5953 resources and start any further pending step-overs.
5955 If we are on a non-stop target and the thread was doing an
5956 in-line step, this also restarts the other threads. */
5957 int ret
= finish_step_over (ecs
);
5959 /* finish_step_over returns true if it moves ecs' wait status
5960 back into the thread, so that we go handle another pending
5961 event before this one. But we know it never does that if
5962 the event thread has exited. */
5963 gdb_assert (ret
== 0);
5967 /* We're stopping for the thread exit event. Switch to the
5968 event thread again, as finish_step_over may have switched
5970 switch_to_thread (ecs
->event_thread
);
5971 ecs
->event_thread
= nullptr;
5975 /* If finish_step_over started a new in-line step-over, don't
5976 try to restart anything else. */
5977 if (step_over_info_valid_p ())
5979 delete_thread (ecs
->event_thread
);
5983 /* Maybe we are on an all-stop target and we got this event
5984 while doing a step-like command on another thread. If so,
5985 go back to doing that. If this thread was stepping,
5986 switch_back_to_stepped_thread will consider that the thread
5987 was interrupted mid-step and will try keep stepping it. We
5988 don't want that, the thread is gone. So clear the proceed
5989 status so it doesn't do that. */
5990 clear_proceed_status_thread (ecs
->event_thread
);
5991 if (switch_back_to_stepped_thread (ecs
))
5993 delete_thread (ecs
->event_thread
);
/* Remember the inferior and whether scheduler locking applied before
   the thread object is deleted.  */
5997 inferior
*inf
= ecs
->event_thread
->inf
;
5998 bool slock_applies
= schedlock_applies (ecs
->event_thread
);
6000 delete_thread (ecs
->event_thread
);
6001 ecs
->event_thread
= nullptr;
6003 /* Continue handling the event as if we had gotten a
6004 TARGET_WAITKIND_NO_RESUMED. */
6005 auto handle_as_no_resumed
= [ecs
] ()
6007 /* handle_no_resumed doesn't really look at the event kind, but
6008 normal_stop does. */
6009 ecs
->ws
.set_no_resumed ();
6010 ecs
->event_thread
= nullptr;
6011 ecs
->ptid
= minus_one_ptid
;
6013 /* Re-record the last target status. */
6014 set_last_target_status (ecs
->target
, ecs
->ptid
, ecs
->ws
);
6016 return handle_no_resumed (ecs
);
6019 /* If we are on an all-stop target, the target has stopped all
6020 threads to report the event. We don't actually want to
6021 stop, so restart the threads. */
6022 if (!target_is_non_stop_p ())
6026 /* Since the target is !non-stop, then everything is stopped
6027 at this point, and we can't assume we'll get further
6028 events until we resume the target again. Handle this
6029 event like if it were a TARGET_WAITKIND_NO_RESUMED. Note
6030 this refreshes the thread list and checks whether there
6031 are other resumed threads before deciding whether to
6032 print "no-unwaited-for left". This is important because
6033 the user could have done:
6035 (gdb) set scheduler-locking on
6041 ... and only one of the threads exited. */
6042 return handle_as_no_resumed ();
6046 /* Switch to the first non-exited thread we can find, and
6048 auto range
= inf
->non_exited_threads ();
6049 if (range
.begin () == range
.end ())
6051 /* Looks like the target reported a
6052 TARGET_WAITKIND_THREAD_EXITED for its last known
6054 return handle_as_no_resumed ();
6056 thread_info
*non_exited_thread
= *range
.begin ();
6057 switch_to_thread (non_exited_thread
);
6058 insert_breakpoints ();
6059 resume (GDB_SIGNAL_0
);
/* Keep waiting for further events rather than reporting this one.  */
6063 prepare_to_wait (ecs
);
6067 /* Given an execution control state that has been freshly filled in by
6068 an event from the inferior, figure out what it means and take
6071 The alternatives are:
6073 1) stop_waiting and return; to really stop and return to the
6076 2) keep_going and return; to wait for the next event (set
6077 ecs->event_thread->stepping_over_breakpoint to 1 to single step
6081 handle_inferior_event (struct execution_control_state
*ecs
)
6083 /* Make sure that all temporary struct value objects that were
6084 created during the handling of the event get deleted at the
6086 scoped_value_mark free_values
;
6088 infrun_debug_printf ("%s", ecs
->ws
.to_string ().c_str ());
6090 if (ecs
->ws
.kind () == TARGET_WAITKIND_IGNORE
)
6092 /* We had an event in the inferior, but we are not interested in
6093 handling it at this level. The lower layers have already
6094 done what needs to be done, if anything.
6096 One of the possible circumstances for this is when the
6097 inferior produces output for the console. The inferior has
6098 not stopped, and we are ignoring the event. Another possible
6099 circumstance is any event which the lower level knows will be
6100 reported multiple times without an intervening resume. */
6101 prepare_to_wait (ecs
);
6105 if (ecs
->ws
.kind () == TARGET_WAITKIND_NO_RESUMED
6106 && handle_no_resumed (ecs
))
6109 /* Cache the last target/ptid/waitstatus. */
6110 set_last_target_status (ecs
->target
, ecs
->ptid
, ecs
->ws
);
6112 /* Always clear state belonging to the previous time we stopped. */
6113 stop_stack_dummy
= STOP_NONE
;
6115 if (ecs
->ws
.kind () == TARGET_WAITKIND_NO_RESUMED
)
6117 /* No unwaited-for children left. IOW, all resumed children
6123 if (ecs
->ws
.kind () != TARGET_WAITKIND_EXITED
6124 && ecs
->ws
.kind () != TARGET_WAITKIND_SIGNALLED
)
6126 ecs
->event_thread
= ecs
->target
->find_thread (ecs
->ptid
);
6127 /* If it's a new thread, add it to the thread database. */
6128 if (ecs
->event_thread
== nullptr)
6129 ecs
->event_thread
= add_thread (ecs
->target
, ecs
->ptid
);
6131 /* Disable range stepping. If the next step request could use a
6132 range, this will be end up re-enabled then. */
6133 ecs
->event_thread
->control
.may_range_step
= 0;
6136 /* Dependent on valid ECS->EVENT_THREAD. */
6137 adjust_pc_after_break (ecs
->event_thread
, ecs
->ws
);
6139 /* Dependent on the current PC value modified by adjust_pc_after_break. */
6140 reinit_frame_cache ();
6142 breakpoint_retire_moribund ();
6144 /* First, distinguish signals caused by the debugger from signals
6145 that have to do with the program's own actions. Note that
6146 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
6147 on the operating system version. Here we detect when a SIGILL or
6148 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
6149 something similar for SIGSEGV, since a SIGSEGV will be generated
6150 when we're trying to execute a breakpoint instruction on a
6151 non-executable stack. This happens for call dummy breakpoints
6152 for architectures like SPARC that place call dummies on the
6154 if (ecs
->ws
.kind () == TARGET_WAITKIND_STOPPED
6155 && (ecs
->ws
.sig () == GDB_SIGNAL_ILL
6156 || ecs
->ws
.sig () == GDB_SIGNAL_SEGV
6157 || ecs
->ws
.sig () == GDB_SIGNAL_EMT
))
6159 struct regcache
*regcache
= get_thread_regcache (ecs
->event_thread
);
6161 if (breakpoint_inserted_here_p (ecs
->event_thread
->inf
->aspace
.get (),
6162 regcache_read_pc (regcache
)))
6164 infrun_debug_printf ("Treating signal as SIGTRAP");
6165 ecs
->ws
.set_stopped (GDB_SIGNAL_TRAP
);
6169 mark_non_executing_threads (ecs
->target
, ecs
->ptid
, ecs
->ws
);
6171 switch (ecs
->ws
.kind ())
6173 case TARGET_WAITKIND_LOADED
:
6175 context_switch (ecs
);
6176 /* Ignore gracefully during startup of the inferior, as it might
6177 be the shell which has just loaded some objects, otherwise
6178 add the symbols for the newly loaded objects. Also ignore at
6179 the beginning of an attach or remote session; we will query
6180 the full list of libraries once the connection is
6183 stop_kind stop_soon
= get_inferior_stop_soon (ecs
);
6184 if (stop_soon
== NO_STOP_QUIETLY
)
6186 struct regcache
*regcache
;
6188 regcache
= get_thread_regcache (ecs
->event_thread
);
6190 handle_solib_event ();
6192 ecs
->event_thread
->set_stop_pc (regcache_read_pc (regcache
));
6193 address_space
*aspace
= ecs
->event_thread
->inf
->aspace
.get ();
6194 ecs
->event_thread
->control
.stop_bpstat
6195 = bpstat_stop_status_nowatch (aspace
,
6196 ecs
->event_thread
->stop_pc (),
6197 ecs
->event_thread
, ecs
->ws
);
6199 if (handle_stop_requested (ecs
))
6202 if (bpstat_causes_stop (ecs
->event_thread
->control
.stop_bpstat
))
6204 /* A catchpoint triggered. */
6205 process_event_stop_test (ecs
);
6209 /* If requested, stop when the dynamic linker notifies
6210 gdb of events. This allows the user to get control
6211 and place breakpoints in initializer routines for
6212 dynamically loaded objects (among other things). */
6213 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
6214 if (stop_on_solib_events
)
6216 /* Make sure we print "Stopped due to solib-event" in
6218 stop_print_frame
= true;
6225 /* If we are skipping through a shell, or through shared library
6226 loading that we aren't interested in, resume the program. If
6227 we're running the program normally, also resume. */
6228 if (stop_soon
== STOP_QUIETLY
|| stop_soon
== NO_STOP_QUIETLY
)
6230 /* Loading of shared libraries might have changed breakpoint
6231 addresses. Make sure new breakpoints are inserted. */
6232 if (stop_soon
== NO_STOP_QUIETLY
)
6233 insert_breakpoints ();
6234 resume (GDB_SIGNAL_0
);
6235 prepare_to_wait (ecs
);
6239 /* But stop if we're attaching or setting up a remote
6241 if (stop_soon
== STOP_QUIETLY_NO_SIGSTOP
6242 || stop_soon
== STOP_QUIETLY_REMOTE
)
6244 infrun_debug_printf ("quietly stopped");
6249 internal_error (_("unhandled stop_soon: %d"), (int) stop_soon
);
6252 case TARGET_WAITKIND_SPURIOUS
:
6253 if (handle_stop_requested (ecs
))
6255 context_switch (ecs
);
6256 resume (GDB_SIGNAL_0
);
6257 prepare_to_wait (ecs
);
6260 case TARGET_WAITKIND_THREAD_CREATED
:
6261 if (handle_stop_requested (ecs
))
6263 context_switch (ecs
);
6264 if (!switch_back_to_stepped_thread (ecs
))
6268 case TARGET_WAITKIND_THREAD_EXITED
:
6269 if (handle_thread_exited (ecs
))
6274 case TARGET_WAITKIND_EXITED
:
6275 case TARGET_WAITKIND_SIGNALLED
:
6277 /* Depending on the system, ecs->ptid may point to a thread or
6278 to a process. On some targets, target_mourn_inferior may
6279 need to have access to the just-exited thread. That is the
6280 case of GNU/Linux's "checkpoint" support, for example.
6281 Call the switch_to_xxx routine as appropriate. */
6282 thread_info
*thr
= ecs
->target
->find_thread (ecs
->ptid
);
6284 switch_to_thread (thr
);
6287 inferior
*inf
= find_inferior_ptid (ecs
->target
, ecs
->ptid
);
6288 switch_to_inferior_no_thread (inf
);
6291 handle_vfork_child_exec_or_exit (0);
6292 target_terminal::ours (); /* Must do this before mourn anyway. */
6294 /* Clearing any previous state of convenience variables. */
6295 clear_exit_convenience_vars ();
6297 if (ecs
->ws
.kind () == TARGET_WAITKIND_EXITED
)
6299 /* Record the exit code in the convenience variable $_exitcode, so
6300 that the user can inspect this again later. */
6301 set_internalvar_integer (lookup_internalvar ("_exitcode"),
6302 (LONGEST
) ecs
->ws
.exit_status ());
6304 /* Also record this in the inferior itself. */
6305 current_inferior ()->has_exit_code
= true;
6306 current_inferior ()->exit_code
= (LONGEST
) ecs
->ws
.exit_status ();
6308 /* Support the --return-child-result option. */
6309 return_child_result_value
= ecs
->ws
.exit_status ();
6311 interps_notify_exited (ecs
->ws
.exit_status ());
6315 struct gdbarch
*gdbarch
= current_inferior ()->arch ();
6317 if (gdbarch_gdb_signal_to_target_p (gdbarch
))
6319 /* Set the value of the internal variable $_exitsignal,
6320 which holds the signal uncaught by the inferior. */
6321 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
6322 gdbarch_gdb_signal_to_target (gdbarch
,
6327 /* We don't have access to the target's method used for
6328 converting between signal numbers (GDB's internal
6329 representation <-> target's representation).
6330 Therefore, we cannot do a good job at displaying this
6331 information to the user. It's better to just warn
6332 her about it (if infrun debugging is enabled), and
6334 infrun_debug_printf ("Cannot fill $_exitsignal with the correct "
6338 interps_notify_signal_exited (ecs
->ws
.sig ());
6341 gdb_flush (gdb_stdout
);
6342 target_mourn_inferior (inferior_ptid
);
6343 stop_print_frame
= false;
6347 case TARGET_WAITKIND_FORKED
:
6348 case TARGET_WAITKIND_VFORKED
:
6349 case TARGET_WAITKIND_THREAD_CLONED
:
6351 displaced_step_finish (ecs
->event_thread
, ecs
->ws
);
6353 /* Start a new step-over in another thread if there's one that
6357 context_switch (ecs
);
6359 /* Immediately detach breakpoints from the child before there's
6360 any chance of letting the user delete breakpoints from the
6361 breakpoint lists. If we don't do this early, it's easy to
6362 leave left over traps in the child, vis: "break foo; catch
6363 fork; c; <fork>; del; c; <child calls foo>". We only follow
6364 the fork on the last `continue', and by that time the
6365 breakpoint at "foo" is long gone from the breakpoint table.
6366 If we vforked, then we don't need to unpatch here, since both
6367 parent and child are sharing the same memory pages; we'll
6368 need to unpatch at follow/detach time instead to be certain
6369 that new breakpoints added between catchpoint hit time and
6370 vfork follow are detached. */
6371 if (ecs
->ws
.kind () == TARGET_WAITKIND_FORKED
)
6373 /* This won't actually modify the breakpoint list, but will
6374 physically remove the breakpoints from the child. */
6375 detach_breakpoints (ecs
->ws
.child_ptid ());
6378 delete_just_stopped_threads_single_step_breakpoints ();
6380 /* In case the event is caught by a catchpoint, remember that
6381 the event is to be followed at the next resume of the thread,
6382 and not immediately. */
6383 ecs
->event_thread
->pending_follow
= ecs
->ws
;
6385 ecs
->event_thread
->set_stop_pc
6386 (regcache_read_pc (get_thread_regcache (ecs
->event_thread
)));
6388 ecs
->event_thread
->control
.stop_bpstat
6389 = bpstat_stop_status_nowatch (ecs
->event_thread
->inf
->aspace
.get (),
6390 ecs
->event_thread
->stop_pc (),
6391 ecs
->event_thread
, ecs
->ws
);
6393 if (handle_stop_requested (ecs
))
6396 /* If no catchpoint triggered for this, then keep going. Note
6397 that we're interested in knowing the bpstat actually causes a
6398 stop, not just if it may explain the signal. Software
6399 watchpoints, for example, always appear in the bpstat. */
6400 if (!bpstat_causes_stop (ecs
->event_thread
->control
.stop_bpstat
))
6403 = (ecs
->ws
.kind () != TARGET_WAITKIND_THREAD_CLONED
6404 && follow_fork_mode_string
== follow_fork_mode_child
);
6406 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
6408 process_stratum_target
*targ
6409 = ecs
->event_thread
->inf
->process_target ();
6412 if (ecs
->ws
.kind () != TARGET_WAITKIND_THREAD_CLONED
)
6413 should_resume
= follow_fork ();
6416 should_resume
= true;
6417 inferior
*inf
= ecs
->event_thread
->inf
;
6418 inf
->top_target ()->follow_clone (ecs
->ws
.child_ptid ());
6419 ecs
->event_thread
->pending_follow
.set_spurious ();
6422 /* Note that one of these may be an invalid pointer,
6423 depending on detach_fork. */
6424 thread_info
*parent
= ecs
->event_thread
;
6425 thread_info
*child
= targ
->find_thread (ecs
->ws
.child_ptid ());
6427 /* At this point, the parent is marked running, and the
6428 child is marked stopped. */
6430 /* If not resuming the parent, mark it stopped. */
6431 if (ecs
->ws
.kind () != TARGET_WAITKIND_THREAD_CLONED
6432 && follow_child
&& !detach_fork
&& !non_stop
&& !sched_multi
)
6433 parent
->set_running (false);
6435 /* If resuming the child, mark it running. */
6436 if ((ecs
->ws
.kind () == TARGET_WAITKIND_THREAD_CLONED
6437 && !schedlock_applies (ecs
->event_thread
))
6438 || (ecs
->ws
.kind () != TARGET_WAITKIND_THREAD_CLONED
6440 || (!detach_fork
&& (non_stop
|| sched_multi
)))))
6441 child
->set_running (true);
6443 /* In non-stop mode, also resume the other branch. */
6444 if ((ecs
->ws
.kind () == TARGET_WAITKIND_THREAD_CLONED
6445 && target_is_non_stop_p ()
6446 && !schedlock_applies (ecs
->event_thread
))
6447 || (ecs
->ws
.kind () != TARGET_WAITKIND_THREAD_CLONED
6448 && (!detach_fork
&& (non_stop
6450 && target_is_non_stop_p ())))))
6453 switch_to_thread (parent
);
6455 switch_to_thread (child
);
6457 ecs
->event_thread
= inferior_thread ();
6458 ecs
->ptid
= inferior_ptid
;
6463 switch_to_thread (child
);
6465 switch_to_thread (parent
);
6467 ecs
->event_thread
= inferior_thread ();
6468 ecs
->ptid
= inferior_ptid
;
6472 /* Never call switch_back_to_stepped_thread if we are waiting for
6473 vfork-done (waiting for an external vfork child to exec or
6474 exit). We will resume only the vforking thread for the purpose
6475 of collecting the vfork-done event, and we will restart any
6476 step once the critical shared address space window is done. */
6479 && parent
->inf
->thread_waiting_for_vfork_done
!= nullptr)
6480 || !switch_back_to_stepped_thread (ecs
))
6487 process_event_stop_test (ecs
);
6490 case TARGET_WAITKIND_VFORK_DONE
:
6491 /* Done with the shared memory region. Re-insert breakpoints in
6492 the parent, and keep going. */
6494 context_switch (ecs
);
6496 handle_vfork_done (ecs
->event_thread
);
6497 gdb_assert (inferior_thread () == ecs
->event_thread
);
6499 if (handle_stop_requested (ecs
))
6502 if (!switch_back_to_stepped_thread (ecs
))
6504 gdb_assert (inferior_thread () == ecs
->event_thread
);
6505 /* This also takes care of reinserting breakpoints in the
6506 previously locked inferior. */
6511 case TARGET_WAITKIND_EXECD
:
6513 /* Note we can't read registers yet (the stop_pc), because we
6514 don't yet know the inferior's post-exec architecture.
6515 'stop_pc' is explicitly read below instead. */
6516 switch_to_thread_no_regs (ecs
->event_thread
);
6518 /* Do whatever is necessary to the parent branch of the vfork. */
6519 handle_vfork_child_exec_or_exit (1);
6521 /* This causes the eventpoints and symbol table to be reset.
6522 Must do this now, before trying to determine whether to
6524 follow_exec (inferior_ptid
, ecs
->ws
.execd_pathname ());
6526 /* In follow_exec we may have deleted the original thread and
6527 created a new one. Make sure that the event thread is the
6528 execd thread for that case (this is a nop otherwise). */
6529 ecs
->event_thread
= inferior_thread ();
6531 ecs
->event_thread
->set_stop_pc
6532 (regcache_read_pc (get_thread_regcache (ecs
->event_thread
)));
6534 ecs
->event_thread
->control
.stop_bpstat
6535 = bpstat_stop_status_nowatch (ecs
->event_thread
->inf
->aspace
.get (),
6536 ecs
->event_thread
->stop_pc (),
6537 ecs
->event_thread
, ecs
->ws
);
6539 if (handle_stop_requested (ecs
))
6542 /* If no catchpoint triggered for this, then keep going. */
6543 if (!bpstat_causes_stop (ecs
->event_thread
->control
.stop_bpstat
))
6545 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
6549 process_event_stop_test (ecs
);
6552 /* Be careful not to try to gather much state about a thread
6553 that's in a syscall. It's frequently a losing proposition. */
6554 case TARGET_WAITKIND_SYSCALL_ENTRY
:
6555 /* Getting the current syscall number. */
6556 if (handle_syscall_event (ecs
) == 0)
6557 process_event_stop_test (ecs
);
6560 /* Before examining the threads further, step this thread to
6561 get it entirely out of the syscall. (We get notice of the
6562 event when the thread is just on the verge of exiting a
6563 syscall. Stepping one instruction seems to get it back
6565 case TARGET_WAITKIND_SYSCALL_RETURN
:
6566 if (handle_syscall_event (ecs
) == 0)
6567 process_event_stop_test (ecs
);
6570 case TARGET_WAITKIND_STOPPED
:
6571 handle_signal_stop (ecs
);
6574 case TARGET_WAITKIND_NO_HISTORY
:
6575 /* Reverse execution: target ran out of history info. */
6577 /* Switch to the stopped thread. */
6578 context_switch (ecs
);
6579 infrun_debug_printf ("stopped");
6581 delete_just_stopped_threads_single_step_breakpoints ();
6582 ecs
->event_thread
->set_stop_pc
6583 (regcache_read_pc (get_thread_regcache (inferior_thread ())));
6585 if (handle_stop_requested (ecs
))
6588 interps_notify_no_history ();
6594 /* Restart threads back to what they were trying to do back when we
6595 paused them (because of an in-line step-over or vfork, for example).
6596 The EVENT_THREAD thread is ignored (not restarted).
6598 If INF is non-nullptr, only resume threads from INF. */
6601 restart_threads (struct thread_info
*event_thread
, inferior
*inf
)
/* Emit scoped begin/end debug output naming the event thread and the
   inferior filter (-1 when no INF filter was given).  */
6603 INFRUN_SCOPED_DEBUG_START_END ("event_thread=%s, inf=%d",
6604 event_thread
->ptid
.to_string ().c_str (),
6605 inf
!= nullptr ? inf
->num
: -1);
/* Restarting threads while a step-over is still in progress would
   defeat the step-over; the caller must have finished it first.  */
6607 gdb_assert (!step_over_info_valid_p ());
6609 /* In case the instruction just stepped spawned a new thread. */
6610 update_thread_list ();
/* Walk every live thread and decide, case by case, whether and how to
   get it running again.  */
6612 for (thread_info
*tp
: all_non_exited_threads ())
/* Skip threads belonging to a different inferior than the requested
   INF filter.  */
6614 if (inf
!= nullptr && tp
->inf
!= inf
)
/* Leave threads of a detaching inferior untouched.  */
6617 if (tp
->inf
->detaching
)
6619 infrun_debug_printf ("restart threads: [%s] inferior detaching",
6620 tp
->ptid
.to_string ().c_str ());
6624 switch_to_thread_no_regs (tp
);
/* The event thread itself is deliberately not restarted.  */
6626 if (tp
== event_thread
)
6628 infrun_debug_printf ("restart threads: [%s] is event thread",
6629 tp
->ptid
.to_string ().c_str ());
/* Only restart threads that are supposed to be running: either in
   THREAD_RUNNING state or executing an inferior function call.  */
6633 if (!(tp
->state
== THREAD_RUNNING
|| tp
->control
.in_infcall
))
6635 infrun_debug_printf ("restart threads: [%s] not meant to be running",
6636 tp
->ptid
.to_string ().c_str ())
/* A thread already marked resumed must either be executing in the
   target or have an event waiting to be reported -- nothing to do.  */
6642 infrun_debug_printf ("restart threads: [%s] resumed",
6643 tp
->ptid
.to_string ().c_str ());
6644 gdb_assert (tp
->executing () || tp
->has_pending_waitstatus ());
/* Threads queued for a step-over are resumed by the step-over
   machinery itself; they must not be resumed here.  */
6648 if (thread_is_in_step_over_chain (tp
))
6650 infrun_debug_printf ("restart threads: [%s] needs step-over",
6651 tp
->ptid
.to_string ().c_str ());
6652 gdb_assert (!tp
->resumed ());
/* A thread with an event already collected only needs to be re-marked
   resumed so the pending event is eventually reported.  */
6657 if (tp
->has_pending_waitstatus ())
6659 infrun_debug_printf ("restart threads: [%s] has pending status",
6660 tp
->ptid
.to_string ().c_str ());
6661 tp
->set_resumed (true);
6665 gdb_assert (!tp
->stop_requested
);
6667 /* If some thread needs to start a step-over at this point, it
6668 should still be in the step-over queue, and thus skipped
above -- reaching here with such a thread is an internal error.  */
6670 if (thread_still_needs_step_over (tp
))
6672 internal_error ("thread [%s] needs a step-over, but not in "
6673 "step-over queue\n",
6674 tp
->ptid
.to_string ().c_str ());
/* A thread that was in the middle of a step resumes stepping.  */
6677 if (currently_stepping (tp
))
6679 infrun_debug_printf ("restart threads: [%s] was stepping",
6680 tp
->ptid
.to_string ().c_str ());
6681 keep_going_stepped_thread (tp
);
/* Otherwise, simply let the thread continue.  */
6685 infrun_debug_printf ("restart threads: [%s] continuing",
6686 tp
->ptid
.to_string ().c_str ());
6687 execution_control_state
ecs (tp
);
6688 switch_to_thread (tp
);
6689 keep_going_pass_signal (&ecs
);
6694 /* Callback for iterate_over_threads. Find a resumed thread that has
6695 a pending waitstatus. */
6698 resumed_thread_with_pending_status (struct thread_info
*tp
)
/* True iff TP is marked resumed and already has a wait status queued
   for later reporting.  */
6700 return tp
->resumed () && tp
->has_pending_waitstatus ();
6703 /* Called when we get an event that may finish an in-line or
6704 out-of-line (displaced stepping) step-over started previously.
6705 Return true if the event is processed and we should go back to the
6706 event loop; false if the caller should continue processing the
event.  */
6710 finish_step_over (struct execution_control_state
*ecs
)
/* Complete any displaced (out-of-line) step the event thread was
   doing; this may restore the thread's original PC.  */
6712 displaced_step_finish (ecs
->event_thread
, ecs
->ws
);
/* Remember whether an in-line step-over was in progress before we
   clear its bookkeeping below.  */
6714 bool had_step_over_info
= step_over_info_valid_p ();
6716 if (had_step_over_info
)
6718 /* If we're stepping over a breakpoint with all threads locked,
6719 then only the thread that was stepped should be reporting
events.  */
6721 gdb_assert (ecs
->event_thread
->control
.trap_expected
);
6723 update_thread_events_after_step_over (ecs
->event_thread
, ecs
->ws
);
6725 clear_step_over_info ();
6728 if (!target_is_non_stop_p ())
6731 /* Start a new step-over in another thread if there's one that
needs one.  */
6735 /* If we were stepping over a breakpoint before, and haven't started
6736 a new in-line step-over sequence, then restart all other threads
6737 (except the event thread). We can't do this in all-stop, as then
6738 e.g., we wouldn't be able to issue any other remote packet until
6739 these other threads stop. */
6740 if (had_step_over_info
&& !step_over_info_valid_p ())
6742 struct thread_info
*pending
;
6744 /* If we only have threads with pending statuses, the restart
6745 below won't restart any thread and so nothing re-inserts the
6746 breakpoint we just stepped over. But we need it inserted
6747 when we later process the pending events, otherwise if
6748 another thread has a pending event for this breakpoint too,
6749 we'd discard its event (because the breakpoint that
6750 originally caused the event was no longer inserted). */
6751 context_switch (ecs
);
6752 insert_breakpoints ();
6754 restart_threads (ecs
->event_thread
);
6756 /* If we have events pending, go through handle_inferior_event
6757 again, picking up a pending event at random. This avoids
6758 thread starvation. */
6760 /* But not if we just stepped over a watchpoint in order to let
6761 the instruction execute so we can evaluate its expression.
6762 The set of watchpoints that triggered is recorded in the
6763 breakpoint objects themselves (see bp->watchpoint_triggered).
6764 If we processed another event first, that other event could
6765 clobber this info. */
6766 if (ecs
->event_thread
->stepping_over_watchpoint
)
6769 /* The code below is meant to avoid one thread hogging the event
6770 loop by doing constant in-line step overs. If the stepping
6771 thread exited, there's no risk for this to happen, so we can
6772 safely let our caller process the event immediately. */
6773 if (ecs
->ws
.kind () == TARGET_WAITKIND_THREAD_EXITED
)
/* Look for some other resumed thread that already has a queued
   event; if found, defer the current event in its favor.  */
6776 pending
= iterate_over_threads (resumed_thread_with_pending_status
);
6777 if (pending
!= nullptr)
6779 struct thread_info
*tp
= ecs
->event_thread
;
6780 struct regcache
*regcache
;
6782 infrun_debug_printf ("found resumed threads with "
6783 "pending events, saving status");
6785 gdb_assert (pending
!= tp
);
6787 /* Record the event thread's event for later. */
6788 save_waitstatus (tp
, ecs
->ws
);
6789 /* This was cleared early, by handle_inferior_event. Set it
6790 so this pending event is considered by
do_target_wait.  */
6792 tp
->set_resumed (true);
6794 gdb_assert (!tp
->executing ());
/* Save the thread's current stop PC alongside the deferred event.  */
6796 regcache
= get_thread_regcache (tp
);
6797 tp
->set_stop_pc (regcache_read_pc (regcache
));
6799 infrun_debug_printf ("saved stop_pc=%s for %s "
6800 "(currently_stepping=%d)",
6801 paddress (current_inferior ()->arch (),
6803 tp
->ptid
.to_string ().c_str (),
6804 currently_stepping (tp
));
6806 /* This in-line step-over finished; clear this so we won't
6807 start a new one. This is what handle_signal_stop would
6808 do, if we returned false. */
6809 tp
->stepping_over_breakpoint
= 0;
6811 /* Wake up the event loop again. */
6812 mark_async_event_handler (infrun_async_inferior_event_token
);
6814 prepare_to_wait (ecs
);
/* Tell all interpreters and all registered observers that the
   inferior received signal SIG.  */
6825 notify_signal_received (gdb_signal sig
)
6827 interps_notify_signal_received (sig
);
6828 gdb::observers::signal_received
.notify (sig
);
/* Tell all interpreters and all registered observers about a normal
   stop.  BS is the bpstat describing why we stopped; PRINT_FRAME says
   whether the stop frame should be printed.  */
6834 notify_normal_stop (bpstat
*bs
, int print_frame
)
6836 interps_notify_normal_stop (bs
, print_frame
);
6837 gdb::observers::normal_stop
.notify (bs
, print_frame
);
/* Tell all interpreters and all registered observers that the user
   selected a different context (thread/frame/inferior, per
   SELECTION).  */
6842 void notify_user_selected_context_changed (user_selected_what selection
)
6844 interps_notify_user_selected_context_changed (selection
);
6845 gdb::observers::user_selected_context_changed
.notify (selection
);
6848 /* Come here when the program has stopped with a signal. */
6851 handle_signal_stop (struct execution_control_state
*ecs
)
6853 frame_info_ptr frame
;
6854 struct gdbarch
*gdbarch
;
6855 int stopped_by_watchpoint
;
6856 enum stop_kind stop_soon
;
6859 gdb_assert (ecs
->ws
.kind () == TARGET_WAITKIND_STOPPED
);
6861 ecs
->event_thread
->set_stop_signal (ecs
->ws
.sig ());
6863 /* Do we need to clean up the state of a thread that has
6864 completed a displaced single-step? (Doing so usually affects
6865 the PC, so do it here, before we set stop_pc.) */
6866 if (finish_step_over (ecs
))
6869 /* If we either finished a single-step or hit a breakpoint, but
6870 the user wanted this thread to be stopped, pretend we got a
6871 SIG0 (generic unsignaled stop). */
6872 if (ecs
->event_thread
->stop_requested
6873 && ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
)
6874 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
6876 ecs
->event_thread
->set_stop_pc
6877 (regcache_read_pc (get_thread_regcache (ecs
->event_thread
)));
6879 context_switch (ecs
);
6881 if (deprecated_context_hook
)
6882 deprecated_context_hook (ecs
->event_thread
->global_num
);
6886 struct regcache
*regcache
= get_thread_regcache (ecs
->event_thread
);
6887 struct gdbarch
*reg_gdbarch
= regcache
->arch ();
6890 ("stop_pc=%s", paddress (reg_gdbarch
, ecs
->event_thread
->stop_pc ()));
6891 if (target_stopped_by_watchpoint ())
6895 infrun_debug_printf ("stopped by watchpoint");
6897 if (target_stopped_data_address (current_inferior ()->top_target (),
6899 infrun_debug_printf ("stopped data address=%s",
6900 paddress (reg_gdbarch
, addr
));
6902 infrun_debug_printf ("(no data address available)");
6906 /* This is originated from start_remote(), start_inferior() and
6907 shared libraries hook functions. */
6908 stop_soon
= get_inferior_stop_soon (ecs
);
6909 if (stop_soon
== STOP_QUIETLY
|| stop_soon
== STOP_QUIETLY_REMOTE
)
6911 infrun_debug_printf ("quietly stopped");
6912 stop_print_frame
= true;
6917 /* This originates from attach_command(). We need to overwrite
6918 the stop_signal here, because some kernels don't ignore a
6919 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
6920 See more comments in inferior.h. On the other hand, if we
6921 get a non-SIGSTOP, report it to the user - assume the backend
6922 will handle the SIGSTOP if it should show up later.
6924 Also consider that the attach is complete when we see a
6925 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
6926 target extended-remote report it instead of a SIGSTOP
6927 (e.g. gdbserver). We already rely on SIGTRAP being our
6928 signal, so this is no exception.
6930 Also consider that the attach is complete when we see a
6931 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
6932 the target to stop all threads of the inferior, in case the
6933 low level attach operation doesn't stop them implicitly. If
6934 they weren't stopped implicitly, then the stub will report a
6935 GDB_SIGNAL_0, meaning: stopped for no particular reason
6936 other than GDB's request. */
6937 if (stop_soon
== STOP_QUIETLY_NO_SIGSTOP
6938 && (ecs
->event_thread
->stop_signal () == GDB_SIGNAL_STOP
6939 || ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
6940 || ecs
->event_thread
->stop_signal () == GDB_SIGNAL_0
))
6942 stop_print_frame
= true;
6944 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
6948 /* At this point, get hold of the now-current thread's frame. */
6949 frame
= get_current_frame ();
6950 gdbarch
= get_frame_arch (frame
);
6952 /* Pull the single step breakpoints out of the target. */
6953 if (ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
)
6955 struct regcache
*regcache
;
6958 regcache
= get_thread_regcache (ecs
->event_thread
);
6959 const address_space
*aspace
= ecs
->event_thread
->inf
->aspace
.get ();
6961 pc
= regcache_read_pc (regcache
);
6963 /* However, before doing so, if this single-step breakpoint was
6964 actually for another thread, set this thread up for moving
6966 if (!thread_has_single_step_breakpoint_here (ecs
->event_thread
,
6969 if (single_step_breakpoint_inserted_here_p (aspace
, pc
))
6971 infrun_debug_printf ("[%s] hit another thread's single-step "
6973 ecs
->ptid
.to_string ().c_str ());
6974 ecs
->hit_singlestep_breakpoint
= 1;
6979 infrun_debug_printf ("[%s] hit its single-step breakpoint",
6980 ecs
->ptid
.to_string ().c_str ());
6983 delete_just_stopped_threads_single_step_breakpoints ();
6985 if (ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
6986 && ecs
->event_thread
->control
.trap_expected
6987 && ecs
->event_thread
->stepping_over_watchpoint
)
6988 stopped_by_watchpoint
= 0;
6990 stopped_by_watchpoint
= watchpoints_triggered (ecs
->ws
);
6992 /* If necessary, step over this watchpoint. We'll be back to display
6994 if (stopped_by_watchpoint
6995 && (target_have_steppable_watchpoint ()
6996 || gdbarch_have_nonsteppable_watchpoint (gdbarch
)))
6998 /* At this point, we are stopped at an instruction which has
6999 attempted to write to a piece of memory under control of
7000 a watchpoint. The instruction hasn't actually executed
7001 yet. If we were to evaluate the watchpoint expression
7002 now, we would get the old value, and therefore no change
7003 would seem to have occurred.
7005 In order to make watchpoints work `right', we really need
7006 to complete the memory write, and then evaluate the
7007 watchpoint expression. We do this by single-stepping the
7010 It may not be necessary to disable the watchpoint to step over
7011 it. For example, the PA can (with some kernel cooperation)
7012 single step over a watchpoint without disabling the watchpoint.
7014 It is far more common to need to disable a watchpoint to step
7015 the inferior over it. If we have non-steppable watchpoints,
7016 we must disable the current watchpoint; it's simplest to
7017 disable all watchpoints.
7019 Any breakpoint at PC must also be stepped over -- if there's
7020 one, it will have already triggered before the watchpoint
7021 triggered, and we either already reported it to the user, or
7022 it didn't cause a stop and we called keep_going. In either
7023 case, if there was a breakpoint at PC, we must be trying to
7025 ecs
->event_thread
->stepping_over_watchpoint
= 1;
7030 ecs
->event_thread
->stepping_over_breakpoint
= 0;
7031 ecs
->event_thread
->stepping_over_watchpoint
= 0;
7032 bpstat_clear (&ecs
->event_thread
->control
.stop_bpstat
);
7033 ecs
->event_thread
->control
.stop_step
= 0;
7034 stop_print_frame
= true;
7035 stopped_by_random_signal
= 0;
7036 bpstat
*stop_chain
= nullptr;
7038 /* Hide inlined functions starting here, unless we just performed stepi or
7039 nexti. After stepi and nexti, always show the innermost frame (not any
7040 inline function call sites). */
7041 if (ecs
->event_thread
->control
.step_range_end
!= 1)
7043 const address_space
*aspace
= ecs
->event_thread
->inf
->aspace
.get ();
7045 /* skip_inline_frames is expensive, so we avoid it if we can
7046 determine that the address is one where functions cannot have
7047 been inlined. This improves performance with inferiors that
7048 load a lot of shared libraries, because the solib event
7049 breakpoint is defined as the address of a function (i.e. not
7050 inline). Note that we have to check the previous PC as well
7051 as the current one to catch cases when we have just
7052 single-stepped off a breakpoint prior to reinstating it.
7053 Note that we're assuming that the code we single-step to is
7054 not inline, but that's not definitive: there's nothing
7055 preventing the event breakpoint function from containing
7056 inlined code, and the single-step ending up there. If the
7057 user had set a breakpoint on that inlined code, the missing
7058 skip_inline_frames call would break things. Fortunately
7059 that's an extremely unlikely scenario. */
7060 if (!pc_at_non_inline_function (aspace
,
7061 ecs
->event_thread
->stop_pc (),
7063 && !(ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
7064 && ecs
->event_thread
->control
.trap_expected
7065 && pc_at_non_inline_function (aspace
,
7066 ecs
->event_thread
->prev_pc
,
7069 stop_chain
= build_bpstat_chain (aspace
,
7070 ecs
->event_thread
->stop_pc (),
7072 skip_inline_frames (ecs
->event_thread
, stop_chain
);
7076 if (ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
7077 && ecs
->event_thread
->control
.trap_expected
7078 && gdbarch_single_step_through_delay_p (gdbarch
)
7079 && currently_stepping (ecs
->event_thread
))
7081 /* We're trying to step off a breakpoint. Turns out that we're
7082 also on an instruction that needs to be stepped multiple
7083 times before it's been fully executing. E.g., architectures
7084 with a delay slot. It needs to be stepped twice, once for
7085 the instruction and once for the delay slot. */
7086 int step_through_delay
7087 = gdbarch_single_step_through_delay (gdbarch
, frame
);
7089 if (step_through_delay
)
7090 infrun_debug_printf ("step through delay");
7092 if (ecs
->event_thread
->control
.step_range_end
== 0
7093 && step_through_delay
)
7095 /* The user issued a continue when stopped at a breakpoint.
7096 Set up for another trap and get out of here. */
7097 ecs
->event_thread
->stepping_over_breakpoint
= 1;
7101 else if (step_through_delay
)
7103 /* The user issued a step when stopped at a breakpoint.
7104 Maybe we should stop, maybe we should not - the delay
7105 slot *might* correspond to a line of source. In any
7106 case, don't decide that here, just set
7107 ecs->stepping_over_breakpoint, making sure we
7108 single-step again before breakpoints are re-inserted. */
7109 ecs
->event_thread
->stepping_over_breakpoint
= 1;
7113 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
7114 handles this event. */
7115 ecs
->event_thread
->control
.stop_bpstat
7116 = bpstat_stop_status (ecs
->event_thread
->inf
->aspace
.get (),
7117 ecs
->event_thread
->stop_pc (),
7118 ecs
->event_thread
, ecs
->ws
, stop_chain
);
7120 /* Following in case break condition called a
7122 stop_print_frame
= true;
7124 /* This is where we handle "moribund" watchpoints. Unlike
7125 software breakpoints traps, hardware watchpoint traps are
7126 always distinguishable from random traps. If no high-level
7127 watchpoint is associated with the reported stop data address
7128 anymore, then the bpstat does not explain the signal ---
7129 simply make sure to ignore it if `stopped_by_watchpoint' is
7132 if (ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
7133 && !bpstat_explains_signal (ecs
->event_thread
->control
.stop_bpstat
,
7135 && stopped_by_watchpoint
)
7137 infrun_debug_printf ("no user watchpoint explains watchpoint SIGTRAP, "
7141 /* NOTE: cagney/2003-03-29: These checks for a random signal
7142 at one stage in the past included checks for an inferior
7143 function call's call dummy's return breakpoint. The original
7144 comment, that went with the test, read:
7146 ``End of a stack dummy. Some systems (e.g. Sony news) give
7147 another signal besides SIGTRAP, so check here as well as
7150 If someone ever tries to get call dummys on a
7151 non-executable stack to work (where the target would stop
7152 with something like a SIGSEGV), then those tests might need
7153 to be re-instated. Given, however, that the tests were only
7154 enabled when momentary breakpoints were not being used, I
7155 suspect that it won't be the case.
7157 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
7158 be necessary for call dummies on a non-executable stack on
7161 /* See if the breakpoints module can explain the signal. */
7163 = !bpstat_explains_signal (ecs
->event_thread
->control
.stop_bpstat
,
7164 ecs
->event_thread
->stop_signal ());
7166 /* Maybe this was a trap for a software breakpoint that has since
7168 if (random_signal
&& target_stopped_by_sw_breakpoint ())
7170 if (gdbarch_program_breakpoint_here_p (gdbarch
,
7171 ecs
->event_thread
->stop_pc ()))
7173 struct regcache
*regcache
;
7176 /* Re-adjust PC to what the program would see if GDB was not
7178 regcache
= get_thread_regcache (ecs
->event_thread
);
7179 decr_pc
= gdbarch_decr_pc_after_break (gdbarch
);
7182 std::optional
<scoped_restore_tmpl
<int>>
7183 restore_operation_disable
;
7185 if (record_full_is_used ())
7186 restore_operation_disable
.emplace
7187 (record_full_gdb_operation_disable_set ());
7189 regcache_write_pc (regcache
,
7190 ecs
->event_thread
->stop_pc () + decr_pc
);
7195 /* A delayed software breakpoint event. Ignore the trap. */
7196 infrun_debug_printf ("delayed software breakpoint trap, ignoring");
7201 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
7202 has since been removed. */
7203 if (random_signal
&& target_stopped_by_hw_breakpoint ())
7205 /* A delayed hardware breakpoint event. Ignore the trap. */
7206 infrun_debug_printf ("delayed hardware breakpoint/watchpoint "
7211 /* If not, perhaps stepping/nexting can. */
7213 random_signal
= !(ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
7214 && currently_stepping (ecs
->event_thread
));
7216 /* Perhaps the thread hit a single-step breakpoint of _another_
7217 thread. Single-step breakpoints are transparent to the
7218 breakpoints module. */
7220 random_signal
= !ecs
->hit_singlestep_breakpoint
;
7222 /* No? Perhaps we got a moribund watchpoint. */
7224 random_signal
= !stopped_by_watchpoint
;
7226 /* Always stop if the user explicitly requested this thread to
7228 if (ecs
->event_thread
->stop_requested
)
7231 infrun_debug_printf ("user-requested stop");
7234 /* For the program's own signals, act according to
7235 the signal handling tables. */
7239 /* Signal not for debugging purposes. */
7240 enum gdb_signal stop_signal
= ecs
->event_thread
->stop_signal ();
7242 infrun_debug_printf ("random signal (%s)",
7243 gdb_signal_to_symbol_string (stop_signal
));
7245 stopped_by_random_signal
= 1;
7247 /* Always stop on signals if we're either just gaining control
7248 of the program, or the user explicitly requested this thread
7249 to remain stopped. */
7250 if (stop_soon
!= NO_STOP_QUIETLY
7251 || ecs
->event_thread
->stop_requested
7252 || signal_stop_state (ecs
->event_thread
->stop_signal ()))
7258 /* Notify observers the signal has "handle print" set. Note we
7259 returned early above if stopping; normal_stop handles the
7260 printing in that case. */
7261 if (signal_print
[ecs
->event_thread
->stop_signal ()])
7263 /* The signal table tells us to print about this signal. */
7264 target_terminal::ours_for_output ();
7265 notify_signal_received (ecs
->event_thread
->stop_signal ());
7266 target_terminal::inferior ();
7269 /* Clear the signal if it should not be passed. */
7270 if (signal_program
[ecs
->event_thread
->stop_signal ()] == 0)
7271 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
7273 if (ecs
->event_thread
->prev_pc
== ecs
->event_thread
->stop_pc ()
7274 && ecs
->event_thread
->control
.trap_expected
7275 && ecs
->event_thread
->control
.step_resume_breakpoint
== nullptr)
7277 /* We were just starting a new sequence, attempting to
7278 single-step off of a breakpoint and expecting a SIGTRAP.
7279 Instead this signal arrives. This signal will take us out
7280 of the stepping range so GDB needs to remember to, when
7281 the signal handler returns, resume stepping off that
7283 /* To simplify things, "continue" is forced to use the same
7284 code paths as single-step - set a breakpoint at the
7285 signal return address and then, once hit, step off that
7287 infrun_debug_printf ("signal arrived while stepping over breakpoint");
7289 insert_hp_step_resume_breakpoint_at_frame (frame
);
7290 ecs
->event_thread
->step_after_step_resume_breakpoint
= 1;
7291 /* Reset trap_expected to ensure breakpoints are re-inserted. */
7292 ecs
->event_thread
->control
.trap_expected
= 0;
7294 /* If we were nexting/stepping some other thread, switch to
7295 it, so that we don't continue it, losing control. */
7296 if (!switch_back_to_stepped_thread (ecs
))
7301 if (ecs
->event_thread
->stop_signal () != GDB_SIGNAL_0
7302 && (pc_in_thread_step_range (ecs
->event_thread
->stop_pc (),
7304 || ecs
->event_thread
->control
.step_range_end
== 1)
7305 && (get_stack_frame_id (frame
)
7306 == ecs
->event_thread
->control
.step_stack_frame_id
)
7307 && ecs
->event_thread
->control
.step_resume_breakpoint
== nullptr)
7309 /* The inferior is about to take a signal that will take it
7310 out of the single step range. Set a breakpoint at the
7311 current PC (which is presumably where the signal handler
7312 will eventually return) and then allow the inferior to
7315 Note that this is only needed for a signal delivered
7316 while in the single-step range. Nested signals aren't a
7317 problem as they eventually all return. */
7318 infrun_debug_printf ("signal may take us out of single-step range");
7320 clear_step_over_info ();
7321 insert_hp_step_resume_breakpoint_at_frame (frame
);
7322 ecs
->event_thread
->step_after_step_resume_breakpoint
= 1;
7323 /* Reset trap_expected to ensure breakpoints are re-inserted. */
7324 ecs
->event_thread
->control
.trap_expected
= 0;
7329 /* Note: step_resume_breakpoint may be non-NULL. This occurs
7330 when either there's a nested signal, or when there's a
7331 pending signal enabled just as the signal handler returns
7332 (leaving the inferior at the step-resume-breakpoint without
7333 actually executing it). Either way continue until the
7334 breakpoint is really hit. */
7336 if (!switch_back_to_stepped_thread (ecs
))
7338 infrun_debug_printf ("random signal, keep going");
7345 process_event_stop_test (ecs
);
7348 /* Return the address for the beginning of the line. */
7351 update_line_range_start (CORE_ADDR pc
, struct execution_control_state
*ecs
)
7353 /* The line table may have multiple entries for the same source code line.
7354 Given the PC, check the line table and return the PC that corresponds
7355 to the line table entry for the source line that PC is in. */
7356 CORE_ADDR start_line_pc
= ecs
->event_thread
->control
.step_range_start
;
7357 std::optional
<CORE_ADDR
> real_range_start
;
7359 /* Call find_line_range_start to get the smallest address in the
7360 linetable for multiple Line X entries in the line table. */
7361 real_range_start
= find_line_range_start (pc
);
7363 if (real_range_start
.has_value ())
7364 start_line_pc
= *real_range_start
;
7366 return start_line_pc
;
/* Helper class for process_event_stop_test implementing lazy evaluation.  */
template<typename T>
class lazy_loader
{
public:
  using fetcher_t = std::function<T ()>;

  /* F is invoked (at most once) to produce the value the first time
     the loader is dereferenced.  */
  explicit lazy_loader (fetcher_t &&f)
    : m_fetch (std::move (f))
  {
  }

  /* Return the value, computing and caching it on first use.
     Subsequent dereferences return the cached value without calling
     the fetcher again.  */
  T &operator* ()
  {
    if (!m_cached.has_value ())
      m_cached.emplace (m_fetch ());
    return *m_cached;
  }

private:
  /* Callback that computes the value on demand.  */
  fetcher_t m_fetch;

  /* Holds the value once it has been computed.  */
  std::optional<T> m_cached;
};
7400 /* Come here when we've got some debug event / signal we can explain
7401 (IOW, not a random signal), and test whether it should cause a
7402 stop, or whether we should resume the inferior (transparently).
7403 E.g., could be a breakpoint whose condition evaluates false; we
7404 could be still stepping within the line; etc. */
7407 process_event_stop_test (struct execution_control_state
*ecs
)
7409 struct symtab_and_line stop_pc_sal
;
7410 frame_info_ptr frame
;
7411 struct gdbarch
*gdbarch
;
7412 CORE_ADDR jmp_buf_pc
;
7413 struct bpstat_what what
;
7415 /* Handle cases caused by hitting a breakpoint. */
7417 frame
= get_current_frame ();
7418 gdbarch
= get_frame_arch (frame
);
7420 what
= bpstat_what (ecs
->event_thread
->control
.stop_bpstat
);
7422 if (what
.call_dummy
)
7424 stop_stack_dummy
= what
.call_dummy
;
7427 /* A few breakpoint types have callbacks associated (e.g.,
7428 bp_jit_event). Run them now. */
7429 bpstat_run_callbacks (ecs
->event_thread
->control
.stop_bpstat
);
7431 /* Shorthand to make if statements smaller. */
7432 struct frame_id original_frame_id
7433 = ecs
->event_thread
->control
.step_frame_id
;
7434 lazy_loader
<frame_id
> curr_frame_id
7435 ([] () { return get_frame_id (get_current_frame ()); });
7437 switch (what
.main_action
)
7439 case BPSTAT_WHAT_SET_LONGJMP_RESUME
:
7440 /* If we hit the breakpoint at longjmp while stepping, we
7441 install a momentary breakpoint at the target of the
7444 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME");
7446 ecs
->event_thread
->stepping_over_breakpoint
= 1;
7448 if (what
.is_longjmp
)
7450 struct value
*arg_value
;
7452 /* If we set the longjmp breakpoint via a SystemTap probe,
7453 then use it to extract the arguments. The destination PC
7454 is the third argument to the probe. */
7455 arg_value
= probe_safe_evaluate_at_pc (frame
, 2);
7458 jmp_buf_pc
= value_as_address (arg_value
);
7459 jmp_buf_pc
= gdbarch_addr_bits_remove (gdbarch
, jmp_buf_pc
);
7461 else if (!gdbarch_get_longjmp_target_p (gdbarch
)
7462 || !gdbarch_get_longjmp_target (gdbarch
,
7463 frame
, &jmp_buf_pc
))
7465 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME "
7466 "(!gdbarch_get_longjmp_target)");
7471 /* Insert a breakpoint at resume address. */
7472 insert_longjmp_resume_breakpoint (gdbarch
, jmp_buf_pc
);
7475 check_exception_resume (ecs
, frame
);
7479 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME
:
7481 frame_info_ptr init_frame
;
7483 /* There are several cases to consider.
7485 1. The initiating frame no longer exists. In this case we
7486 must stop, because the exception or longjmp has gone too
7489 2. The initiating frame exists, and is the same as the
7490 current frame. We stop, because the exception or longjmp
7493 3. The initiating frame exists and is different from the
7494 current frame. This means the exception or longjmp has
7495 been caught beneath the initiating frame, so keep going.
7497 4. longjmp breakpoint has been placed just to protect
7498 against stale dummy frames and user is not interested in
7499 stopping around longjmps. */
7501 infrun_debug_printf ("BPSTAT_WHAT_CLEAR_LONGJMP_RESUME");
7503 gdb_assert (ecs
->event_thread
->control
.exception_resume_breakpoint
7505 delete_exception_resume_breakpoint (ecs
->event_thread
);
7507 if (what
.is_longjmp
)
7509 check_longjmp_breakpoint_for_call_dummy (ecs
->event_thread
);
7511 if (!frame_id_p (ecs
->event_thread
->initiating_frame
))
7519 init_frame
= frame_find_by_id (ecs
->event_thread
->initiating_frame
);
7523 if (*curr_frame_id
== ecs
->event_thread
->initiating_frame
)
7525 /* Case 2. Fall through. */
7535 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
7537 delete_step_resume_breakpoint (ecs
->event_thread
);
7539 end_stepping_range (ecs
);
7543 case BPSTAT_WHAT_SINGLE
:
7544 infrun_debug_printf ("BPSTAT_WHAT_SINGLE");
7545 ecs
->event_thread
->stepping_over_breakpoint
= 1;
7546 /* Still need to check other stuff, at least the case where we
7547 are stepping and step out of the right range. */
7550 case BPSTAT_WHAT_STEP_RESUME
:
7551 infrun_debug_printf ("BPSTAT_WHAT_STEP_RESUME");
7553 delete_step_resume_breakpoint (ecs
->event_thread
);
7554 if (ecs
->event_thread
->control
.proceed_to_finish
7555 && execution_direction
== EXEC_REVERSE
)
7557 struct thread_info
*tp
= ecs
->event_thread
;
7559 /* We are finishing a function in reverse, and just hit the
7560 step-resume breakpoint at the start address of the
7561 function, and we're almost there -- just need to back up
7562 by one more single-step, which should take us back to the
7564 tp
->control
.step_range_start
= tp
->control
.step_range_end
= 1;
7568 fill_in_stop_func (gdbarch
, ecs
);
7569 if (ecs
->event_thread
->stop_pc () == ecs
->stop_func_start
7570 && execution_direction
== EXEC_REVERSE
)
7572 /* We are stepping over a function call in reverse, and just
7573 hit the step-resume breakpoint at the start address of
7574 the function. Go back to single-stepping, which should
7575 take us back to the function call. */
7576 ecs
->event_thread
->stepping_over_breakpoint
= 1;
7582 case BPSTAT_WHAT_STOP_NOISY
:
7583 infrun_debug_printf ("BPSTAT_WHAT_STOP_NOISY");
7584 stop_print_frame
= true;
7586 /* Assume the thread stopped for a breakpoint. We'll still check
7587 whether a/the breakpoint is there when the thread is next
7589 ecs
->event_thread
->stepping_over_breakpoint
= 1;
7594 case BPSTAT_WHAT_STOP_SILENT
:
7595 infrun_debug_printf ("BPSTAT_WHAT_STOP_SILENT");
7596 stop_print_frame
= false;
7598 /* Assume the thread stopped for a breakpoint. We'll still check
7599 whether a/the breakpoint is there when the thread is next
7601 ecs
->event_thread
->stepping_over_breakpoint
= 1;
7605 case BPSTAT_WHAT_HP_STEP_RESUME
:
7606 infrun_debug_printf ("BPSTAT_WHAT_HP_STEP_RESUME");
7608 delete_step_resume_breakpoint (ecs
->event_thread
);
7609 if (ecs
->event_thread
->step_after_step_resume_breakpoint
)
7611 /* Back when the step-resume breakpoint was inserted, we
7612 were trying to single-step off a breakpoint. Go back to
7614 ecs
->event_thread
->step_after_step_resume_breakpoint
= 0;
7615 ecs
->event_thread
->stepping_over_breakpoint
= 1;
7621 case BPSTAT_WHAT_KEEP_CHECKING
:
7625 /* If we stepped a permanent breakpoint and we had a high priority
7626 step-resume breakpoint for the address we stepped, but we didn't
7627 hit it, then we must have stepped into the signal handler. The
7628 step-resume was only necessary to catch the case of _not_
7629 stepping into the handler, so delete it, and fall through to
7630 checking whether the step finished. */
7631 if (ecs
->event_thread
->stepped_breakpoint
)
7633 struct breakpoint
*sr_bp
7634 = ecs
->event_thread
->control
.step_resume_breakpoint
;
7636 if (sr_bp
!= nullptr
7637 && sr_bp
->first_loc ().permanent
7638 && sr_bp
->type
== bp_hp_step_resume
7639 && sr_bp
->first_loc ().address
== ecs
->event_thread
->prev_pc
)
7641 infrun_debug_printf ("stepped permanent breakpoint, stopped in handler");
7642 delete_step_resume_breakpoint (ecs
->event_thread
);
7643 ecs
->event_thread
->step_after_step_resume_breakpoint
= 0;
7647 /* We come here if we hit a breakpoint but should not stop for it.
7648 Possibly we also were stepping and should stop for that. So fall
7649 through and test for stepping. But, if not stepping, do not
7652 /* In all-stop mode, if we're currently stepping but have stopped in
7653 some other thread, we need to switch back to the stepped thread. */
7654 if (switch_back_to_stepped_thread (ecs
))
7657 if (ecs
->event_thread
->control
.step_resume_breakpoint
)
7659 infrun_debug_printf ("step-resume breakpoint is inserted");
7661 /* Having a step-resume breakpoint overrides anything
7662 else having to do with stepping commands until
7663 that breakpoint is reached. */
7668 if (ecs
->event_thread
->control
.step_range_end
== 0)
7670 infrun_debug_printf ("no stepping, continue");
7671 /* Likewise if we aren't even stepping. */
7676 fill_in_stop_func (gdbarch
, ecs
);
7678 /* If stepping through a line, keep going if still within it.
7680 Note that step_range_end is the address of the first instruction
7681 beyond the step range, and NOT the address of the last instruction
7684 Note also that during reverse execution, we may be stepping
7685 through a function epilogue and therefore must detect when
7686 the current-frame changes in the middle of a line. */
7688 if (pc_in_thread_step_range (ecs
->event_thread
->stop_pc (),
7690 && (execution_direction
!= EXEC_REVERSE
7691 || *curr_frame_id
== original_frame_id
))
7694 ("stepping inside range [%s-%s]",
7695 paddress (gdbarch
, ecs
->event_thread
->control
.step_range_start
),
7696 paddress (gdbarch
, ecs
->event_thread
->control
.step_range_end
));
7698 /* Tentatively re-enable range stepping; `resume' disables it if
7699 necessary (e.g., if we're stepping over a breakpoint or we
7700 have software watchpoints). */
7701 ecs
->event_thread
->control
.may_range_step
= 1;
7703 /* When stepping backward, stop at beginning of line range
7704 (unless it's the function entry point, in which case
7705 keep going back to the call point). */
7706 CORE_ADDR stop_pc
= ecs
->event_thread
->stop_pc ();
7707 if (stop_pc
== ecs
->event_thread
->control
.step_range_start
7708 && stop_pc
!= ecs
->stop_func_start
7709 && execution_direction
== EXEC_REVERSE
)
7710 end_stepping_range (ecs
);
7717 /* We stepped out of the stepping range. */
7719 /* If we are stepping at the source level and entered the runtime
7720 loader dynamic symbol resolution code...
7722 EXEC_FORWARD: we keep on single stepping until we exit the run
7723 time loader code and reach the callee's address.
7725 EXEC_REVERSE: we've already executed the callee (backward), and
7726 the runtime loader code is handled just like any other
7727 undebuggable function call. Now we need only keep stepping
7728 backward through the trampoline code, and that's handled further
7729 down, so there is nothing for us to do here. */
7731 if (execution_direction
!= EXEC_REVERSE
7732 && ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_UNDEBUGGABLE
7733 && in_solib_dynsym_resolve_code (ecs
->event_thread
->stop_pc ())
7734 && (ecs
->event_thread
->control
.step_start_function
== nullptr
7735 || !in_solib_dynsym_resolve_code (
7736 ecs
->event_thread
->control
.step_start_function
->value_block ()
7739 CORE_ADDR pc_after_resolver
=
7740 gdbarch_skip_solib_resolver (gdbarch
, ecs
->event_thread
->stop_pc ());
7742 infrun_debug_printf ("stepped into dynsym resolve code");
7744 if (pc_after_resolver
)
7746 /* Set up a step-resume breakpoint at the address
7747 indicated by SKIP_SOLIB_RESOLVER. */
7748 symtab_and_line sr_sal
;
7749 sr_sal
.pc
= pc_after_resolver
;
7750 sr_sal
.pspace
= get_frame_program_space (frame
);
7752 insert_step_resume_breakpoint_at_sal (gdbarch
,
7753 sr_sal
, null_frame_id
);
7760 /* Step through an indirect branch thunk. */
7761 if (ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_NONE
7762 && gdbarch_in_indirect_branch_thunk (gdbarch
,
7763 ecs
->event_thread
->stop_pc ()))
7765 infrun_debug_printf ("stepped into indirect branch thunk");
7770 if (ecs
->event_thread
->control
.step_range_end
!= 1
7771 && (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_UNDEBUGGABLE
7772 || ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_ALL
)
7773 && get_frame_type (frame
) == SIGTRAMP_FRAME
)
7775 infrun_debug_printf ("stepped into signal trampoline");
7776 /* The inferior, while doing a "step" or "next", has ended up in
7777 a signal trampoline (either by a signal being delivered or by
7778 the signal handler returning). Just single-step until the
7779 inferior leaves the trampoline (either by calling the handler
7785 /* If we're in the return path from a shared library trampoline,
7786 we want to proceed through the trampoline when stepping. */
7787 /* macro/2012-04-25: This needs to come before the subroutine
7788 call check below as on some targets return trampolines look
7789 like subroutine calls (MIPS16 return thunks). */
7790 if (gdbarch_in_solib_return_trampoline (gdbarch
,
7791 ecs
->event_thread
->stop_pc (),
7792 ecs
->stop_func_name
)
7793 && ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_NONE
)
7795 /* Determine where this trampoline returns. */
7796 CORE_ADDR stop_pc
= ecs
->event_thread
->stop_pc ();
7797 CORE_ADDR real_stop_pc
7798 = gdbarch_skip_trampoline_code (gdbarch
, frame
, stop_pc
);
7800 infrun_debug_printf ("stepped into solib return tramp");
7802 /* Only proceed through if we know where it's going. */
7805 /* And put the step-breakpoint there and go until there. */
7806 symtab_and_line sr_sal
;
7807 sr_sal
.pc
= real_stop_pc
;
7808 sr_sal
.section
= find_pc_overlay (sr_sal
.pc
);
7809 sr_sal
.pspace
= get_frame_program_space (frame
);
7811 /* Do not specify what the fp should be when we stop since
7812 on some machines the prologue is where the new fp value
7814 insert_step_resume_breakpoint_at_sal (gdbarch
,
7815 sr_sal
, null_frame_id
);
7817 /* Restart without fiddling with the step ranges or
7824 /* Check for subroutine calls. The check for the current frame
7825 equalling the step ID is not necessary - the check of the
7826 previous frame's ID is sufficient - but it is a common case and
7827 cheaper than checking the previous frame's ID.
7829 NOTE: frame_id::operator== will never report two invalid frame IDs as
7830 being equal, so to get into this block, both the current and
7831 previous frame must have valid frame IDs. */
7832 /* The outer_frame_id check is a heuristic to detect stepping
7833 through startup code. If we step over an instruction which
7834 sets the stack pointer from an invalid value to a valid value,
7835 we may detect that as a subroutine call from the mythical
7836 "outermost" function. This could be fixed by marking
7837 outermost frames as !stack_p,code_p,special_p. Then the
7838 initial outermost frame, before sp was valid, would
7839 have code_addr == &_start. See the comment in frame_id::operator==
7842 /* We want "nexti" to step into, not over, signal handlers invoked
7843 by the kernel, therefore this subroutine check should not trigger
7844 for a signal handler invocation. On most platforms, this is already
7845 not the case, as the kernel puts a signal trampoline frame onto the
7846 stack to handle proper return after the handler, and therefore at this
7847 point, the current frame is a grandchild of the step frame, not a
7848 child. However, on some platforms, the kernel actually uses a
7849 trampoline to handle *invocation* of the handler. In that case,
7850 when executing the first instruction of the trampoline, this check
7851 would erroneously detect the trampoline invocation as a subroutine
7852 call. Fix this by checking for SIGTRAMP_FRAME. */
7853 if ((get_stack_frame_id (frame
)
7854 != ecs
->event_thread
->control
.step_stack_frame_id
)
7855 && get_frame_type (frame
) != SIGTRAMP_FRAME
7856 && ((frame_unwind_caller_id (frame
)
7857 == ecs
->event_thread
->control
.step_stack_frame_id
)
7858 && ((ecs
->event_thread
->control
.step_stack_frame_id
7860 || (ecs
->event_thread
->control
.step_start_function
7861 != find_pc_function (ecs
->event_thread
->stop_pc ())))))
7863 CORE_ADDR stop_pc
= ecs
->event_thread
->stop_pc ();
7864 CORE_ADDR real_stop_pc
;
7866 infrun_debug_printf ("stepped into subroutine");
7868 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_NONE
)
7870 /* I presume that step_over_calls is only 0 when we're
7871 supposed to be stepping at the assembly language level
7872 ("stepi"). Just stop. */
7873 /* And this works the same backward as frontward. MVS */
7874 end_stepping_range (ecs
);
7878 /* Reverse stepping through solib trampolines. */
7880 if (execution_direction
== EXEC_REVERSE
7881 && ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_NONE
7882 && (gdbarch_skip_trampoline_code (gdbarch
, frame
, stop_pc
)
7883 || (ecs
->stop_func_start
== 0
7884 && in_solib_dynsym_resolve_code (stop_pc
))))
7886 /* Any solib trampoline code can be handled in reverse
7887 by simply continuing to single-step. We have already
7888 executed the solib function (backwards), and a few
7889 steps will take us back through the trampoline to the
7895 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_ALL
)
7897 /* We're doing a "next".
7899 Normal (forward) execution: set a breakpoint at the
7900 callee's return address (the address at which the caller
7903 Reverse (backward) execution. set the step-resume
7904 breakpoint at the start of the function that we just
7905 stepped into (backwards), and continue to there. When we
7906 get there, we'll need to single-step back to the caller. */
7908 if (execution_direction
== EXEC_REVERSE
)
7910 /* If we're already at the start of the function, we've either
7911 just stepped backward into a single instruction function,
7912 or stepped back out of a signal handler to the first instruction
7913 of the function. Just keep going, which will single-step back
7915 if (ecs
->stop_func_start
!= stop_pc
&& ecs
->stop_func_start
!= 0)
7917 /* Normal function call return (static or dynamic). */
7918 symtab_and_line sr_sal
;
7919 sr_sal
.pc
= ecs
->stop_func_start
;
7920 sr_sal
.pspace
= get_frame_program_space (frame
);
7921 insert_step_resume_breakpoint_at_sal (gdbarch
,
7922 sr_sal
, get_stack_frame_id (frame
));
7926 insert_step_resume_breakpoint_at_caller (frame
);
7932 /* If we are in a function call trampoline (a stub between the
7933 calling routine and the real function), locate the real
7934 function. That's what tells us (a) whether we want to step
7935 into it at all, and (b) what prologue we want to run to the
7936 end of, if we do step into it. */
7937 real_stop_pc
= skip_language_trampoline (frame
, stop_pc
);
7938 if (real_stop_pc
== 0)
7939 real_stop_pc
= gdbarch_skip_trampoline_code (gdbarch
, frame
, stop_pc
);
7940 if (real_stop_pc
!= 0)
7941 ecs
->stop_func_start
= real_stop_pc
;
7943 if (real_stop_pc
!= 0 && in_solib_dynsym_resolve_code (real_stop_pc
))
7945 symtab_and_line sr_sal
;
7946 sr_sal
.pc
= ecs
->stop_func_start
;
7947 sr_sal
.pspace
= get_frame_program_space (frame
);
7949 insert_step_resume_breakpoint_at_sal (gdbarch
,
7950 sr_sal
, null_frame_id
);
7955 /* If we have line number information for the function we are
7956 thinking of stepping into and the function isn't on the skip
7959 If there are several symtabs at that PC (e.g. with include
7960 files), just want to know whether *any* of them have line
7961 numbers. find_pc_line handles this. */
7963 struct symtab_and_line tmp_sal
;
7965 tmp_sal
= find_pc_line (ecs
->stop_func_start
, 0);
7966 if (tmp_sal
.line
!= 0
7967 && !function_name_is_marked_for_skip (ecs
->stop_func_name
,
7969 && !inline_frame_is_marked_for_skip (true, ecs
->event_thread
))
7971 if (execution_direction
== EXEC_REVERSE
)
7972 handle_step_into_function_backward (gdbarch
, ecs
);
7974 handle_step_into_function (gdbarch
, ecs
);
7979 /* If we have no line number and the step-stop-if-no-debug is
7980 set, we stop the step so that the user has a chance to switch
7981 in assembly mode. */
7982 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_UNDEBUGGABLE
7983 && step_stop_if_no_debug
)
7985 end_stepping_range (ecs
);
7989 if (execution_direction
== EXEC_REVERSE
)
7991 /* If we're already at the start of the function, we've either just
7992 stepped backward into a single instruction function without line
7993 number info, or stepped back out of a signal handler to the first
7994 instruction of the function without line number info. Just keep
7995 going, which will single-step back to the caller. */
7996 if (ecs
->stop_func_start
!= stop_pc
)
7998 /* Set a breakpoint at callee's start address.
7999 From there we can step once and be back in the caller. */
8000 symtab_and_line sr_sal
;
8001 sr_sal
.pc
= ecs
->stop_func_start
;
8002 sr_sal
.pspace
= get_frame_program_space (frame
);
8003 insert_step_resume_breakpoint_at_sal (gdbarch
,
8004 sr_sal
, null_frame_id
);
8008 /* Set a breakpoint at callee's return address (the address
8009 at which the caller will resume). */
8010 insert_step_resume_breakpoint_at_caller (frame
);
8016 /* Reverse stepping through solib trampolines. */
8018 if (execution_direction
== EXEC_REVERSE
8019 && ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_NONE
)
8021 CORE_ADDR stop_pc
= ecs
->event_thread
->stop_pc ();
8023 if (gdbarch_skip_trampoline_code (gdbarch
, frame
, stop_pc
)
8024 || (ecs
->stop_func_start
== 0
8025 && in_solib_dynsym_resolve_code (stop_pc
)))
8027 /* Any solib trampoline code can be handled in reverse
8028 by simply continuing to single-step. We have already
8029 executed the solib function (backwards), and a few
8030 steps will take us back through the trampoline to the
8035 else if (in_solib_dynsym_resolve_code (stop_pc
))
8037 /* Stepped backward into the solib dynsym resolver.
8038 Set a breakpoint at its start and continue, then
8039 one more step will take us out. */
8040 symtab_and_line sr_sal
;
8041 sr_sal
.pc
= ecs
->stop_func_start
;
8042 sr_sal
.pspace
= get_frame_program_space (frame
);
8043 insert_step_resume_breakpoint_at_sal (gdbarch
,
8044 sr_sal
, null_frame_id
);
8050 /* This always returns the sal for the inner-most frame when we are in a
8051 stack of inlined frames, even if GDB actually believes that it is in a
8052 more outer frame. This is checked for below by calls to
8053 inline_skipped_frames. */
8054 stop_pc_sal
= find_pc_line (ecs
->event_thread
->stop_pc (), 0);
8056 /* NOTE: tausq/2004-05-24: This if block used to be done before all
8057 the trampoline processing logic, however, there are some trampolines
8058 that have no names, so we should do trampoline handling first. */
8059 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_UNDEBUGGABLE
8060 && ecs
->stop_func_name
== nullptr
8061 && stop_pc_sal
.line
== 0)
8063 infrun_debug_printf ("stepped into undebuggable function");
8065 /* The inferior just stepped into, or returned to, an
8066 undebuggable function (where there is no debugging information
8067 and no line number corresponding to the address where the
8068 inferior stopped). Since we want to skip this kind of code,
8069 we keep going until the inferior returns from this
8070 function - unless the user has asked us not to (via
8071 set step-mode) or we no longer know how to get back
8072 to the call site. */
8073 if (step_stop_if_no_debug
8074 || !frame_id_p (frame_unwind_caller_id (frame
)))
8076 /* If we have no line number and the step-stop-if-no-debug
8077 is set, we stop the step so that the user has a chance to
8078 switch in assembly mode. */
8079 end_stepping_range (ecs
);
8084 /* Set a breakpoint at callee's return address (the address
8085 at which the caller will resume). */
8086 insert_step_resume_breakpoint_at_caller (frame
);
8092 if (execution_direction
== EXEC_REVERSE
8093 && ecs
->event_thread
->control
.proceed_to_finish
8094 && ecs
->event_thread
->stop_pc () >= ecs
->stop_func_alt_start
8095 && ecs
->event_thread
->stop_pc () < ecs
->stop_func_start
)
8097 /* We are executing the reverse-finish command.
8098 If the system supports multiple entry points and we are finishing a
8099 function in reverse. If we are between the entry points single-step
8100 back to the alternate entry point. If we are at the alternate entry
8101 point -- just need to back up by one more single-step, which
8102 should take us back to the function call. */
8103 ecs
->event_thread
->control
.step_range_start
8104 = ecs
->event_thread
->control
.step_range_end
= 1;
8110 if (ecs
->event_thread
->control
.step_range_end
== 1)
8112 /* It is stepi or nexti. We always want to stop stepping after
8114 infrun_debug_printf ("stepi/nexti");
8115 end_stepping_range (ecs
);
8119 if (stop_pc_sal
.line
== 0)
8121 /* We have no line number information. That means to stop
8122 stepping (does this always happen right after one instruction,
8123 when we do "s" in a function with no line numbers,
8124 or can this happen as a result of a return or longjmp?). */
8125 infrun_debug_printf ("line number info");
8126 end_stepping_range (ecs
);
8130 /* Handle the case when subroutines have multiple ranges. When we step
8131 from one part to the next part of the same subroutine, all subroutine
8132 levels are skipped again which begin here. Compensate for this by
8133 removing all skipped subroutines, which were already executing from
8134 the user's perspective. */
8136 if (get_stack_frame_id (frame
)
8137 == ecs
->event_thread
->control
.step_stack_frame_id
8138 && inline_skipped_frames (ecs
->event_thread
) > 0
8139 && ecs
->event_thread
->control
.step_frame_id
.artificial_depth
> 0
8140 && ecs
->event_thread
->control
.step_frame_id
.code_addr_p
)
8143 const struct block
*prev
8144 = block_for_pc (ecs
->event_thread
->control
.step_frame_id
.code_addr
);
8145 const struct block
*curr
= block_for_pc (ecs
->event_thread
->stop_pc ());
8146 while (curr
!= nullptr && !curr
->contains (prev
))
8148 if (curr
->inlined_p ())
8150 else if (curr
->function () != nullptr)
8152 curr
= curr
->superblock ();
8154 while (inline_skipped_frames (ecs
->event_thread
) > depth
)
8155 step_into_inline_frame (ecs
->event_thread
);
8158 /* Look for "calls" to inlined functions, part one. If the inline
8159 frame machinery detected some skipped call sites, we have entered
8160 a new inline function. */
8162 if ((*curr_frame_id
== original_frame_id
)
8163 && inline_skipped_frames (ecs
->event_thread
))
8165 infrun_debug_printf ("stepped into inlined function");
8167 symtab_and_line call_sal
= find_frame_sal (frame
);
8169 if (ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_ALL
)
8171 /* For "step", we're going to stop. But if the call site
8172 for this inlined function is on the same source line as
8173 we were previously stepping, go down into the function
8174 first. Otherwise stop at the call site. */
8176 if (call_sal
.line
== ecs
->event_thread
->current_line
8177 && call_sal
.symtab
== ecs
->event_thread
->current_symtab
)
8179 step_into_inline_frame (ecs
->event_thread
);
8180 if (inline_frame_is_marked_for_skip (false, ecs
->event_thread
))
8187 end_stepping_range (ecs
);
8192 /* For "next", we should stop at the call site if it is on a
8193 different source line. Otherwise continue through the
8194 inlined function. */
8195 if (call_sal
.line
== ecs
->event_thread
->current_line
8196 && call_sal
.symtab
== ecs
->event_thread
->current_symtab
)
8199 end_stepping_range (ecs
);
8204 /* Look for "calls" to inlined functions, part two. If we are still
8205 in the same real function we were stepping through, but we have
8206 to go further up to find the exact frame ID, we are stepping
8207 through a more inlined call beyond its call site. */
8209 if (get_frame_type (frame
) == INLINE_FRAME
8210 && (*curr_frame_id
!= original_frame_id
)
8211 && stepped_in_from (frame
, original_frame_id
))
8213 infrun_debug_printf ("stepping through inlined function");
8215 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_ALL
8216 || inline_frame_is_marked_for_skip (false, ecs
->event_thread
))
8219 end_stepping_range (ecs
);
8223 bool refresh_step_info
= true;
8224 if ((ecs
->event_thread
->stop_pc () == stop_pc_sal
.pc
)
8225 && (ecs
->event_thread
->current_line
!= stop_pc_sal
.line
8226 || ecs
->event_thread
->current_symtab
!= stop_pc_sal
.symtab
))
8228 /* We are at a different line. */
8230 if (stop_pc_sal
.is_stmt
)
8232 if (execution_direction
== EXEC_REVERSE
)
8234 /* We are stepping backwards make sure we have reached the
8235 beginning of the line. */
8236 CORE_ADDR stop_pc
= ecs
->event_thread
->stop_pc ();
8237 CORE_ADDR start_line_pc
8238 = update_line_range_start (stop_pc
, ecs
);
8240 if (stop_pc
!= start_line_pc
)
8242 /* Have not reached the beginning of the source code line.
8243 Set a step range. Execution should stop in any function
8244 calls we execute back into before reaching the beginning
8246 ecs
->event_thread
->control
.step_range_start
8248 ecs
->event_thread
->control
.step_range_end
= stop_pc
;
8249 set_step_info (ecs
->event_thread
, frame
, stop_pc_sal
);
8255 /* We are at the start of a statement.
8257 So stop. Note that we don't stop if we step into the middle of a
8258 statement. That is said to make things like for (;;) statements
8260 infrun_debug_printf ("stepped to a different line");
8261 end_stepping_range (ecs
);
8264 else if (*curr_frame_id
== original_frame_id
)
8266 /* We are not at the start of a statement, and we have not changed
8269 We ignore this line table entry, and continue stepping forward,
8270 looking for a better place to stop. */
8271 refresh_step_info
= false;
8272 infrun_debug_printf ("stepped to a different line, but "
8273 "it's not the start of a statement");
8277 /* We are not the start of a statement, and we have changed frame.
8279 We ignore this line table entry, and continue stepping forward,
8280 looking for a better place to stop. Keep refresh_step_info at
8281 true to note that the frame has changed, but ignore the line
8282 number to make sure we don't ignore a subsequent entry with the
8283 same line number. */
8284 stop_pc_sal
.line
= 0;
8285 infrun_debug_printf ("stepped to a different frame, but "
8286 "it's not the start of a statement");
8290 if (execution_direction
== EXEC_REVERSE
8291 && *curr_frame_id
!= original_frame_id
8292 && original_frame_id
.code_addr_p
&& curr_frame_id
->code_addr_p
8293 && original_frame_id
.code_addr
== curr_frame_id
->code_addr
)
8295 /* If we enter here, we're leaving a recursive function call. In this
8296 situation, we shouldn't refresh the step information, because if we
8297 do, we'll lose the frame_id of when we started stepping, and this
8298 will make GDB not know we need to print frame information. */
8299 refresh_step_info
= false;
8300 infrun_debug_printf ("reverse stepping, left a recursive call, don't "
8301 "update step info so we remember we left a frame");
8304 /* We aren't done stepping.
8306 Optimize by setting the stepping range to the line.
8307 (We might not be in the original line, but if we entered a
8308 new line in mid-statement, we continue stepping. This makes
8309 things like for(;;) statements work better.)
8311 If we entered a SAL that indicates a non-statement line table entry,
8312 then we update the stepping range, but we don't update the step info,
8313 which includes things like the line number we are stepping away from.
8314 This means we will stop when we find a line table entry that is marked
8315 as is-statement, even if it matches the non-statement one we just
8318 ecs
->event_thread
->control
.step_range_start
= stop_pc_sal
.pc
;
8319 ecs
->event_thread
->control
.step_range_end
= stop_pc_sal
.end
;
8320 ecs
->event_thread
->control
.may_range_step
= 1;
8322 ("updated step range, start = %s, end = %s, may_range_step = %d",
8323 paddress (gdbarch
, ecs
->event_thread
->control
.step_range_start
),
8324 paddress (gdbarch
, ecs
->event_thread
->control
.step_range_end
),
8325 ecs
->event_thread
->control
.may_range_step
);
8326 if (refresh_step_info
)
8327 set_step_info (ecs
->event_thread
, frame
, stop_pc_sal
);
8329 infrun_debug_printf ("keep going");
8331 if (execution_direction
== EXEC_REVERSE
)
8333 CORE_ADDR stop_pc
= ecs
->event_thread
->stop_pc ();
8335 /* Make sure the stop_pc is set to the beginning of the line. */
8336 if (stop_pc
!= ecs
->event_thread
->control
.step_range_start
)
8337 ecs
->event_thread
->control
.step_range_start
8338 = update_line_range_start (stop_pc
, ecs
);
8344 static bool restart_stepped_thread (process_stratum_target
*resume_target
,
8345 ptid_t resume_ptid
);
8347 /* In all-stop mode, if we're currently stepping but have stopped in
8348 some other thread, we may need to switch back to the stepped
8349 thread. Returns true we set the inferior running, false if we left
8350 it stopped (and the event needs further processing). */
8353 switch_back_to_stepped_thread (struct execution_control_state
*ecs
)
8355 if (!target_is_non_stop_p ())
8357 /* If any thread is blocked on some internal breakpoint, and we
8358 simply need to step over that breakpoint to get it going
8359 again, do that first. */
8361 /* However, if we see an event for the stepping thread, then we
8362 know all other threads have been moved past their breakpoints
8363 already. Let the caller check whether the step is finished,
8364 etc., before deciding to move it past a breakpoint. */
8365 if (ecs
->event_thread
->control
.step_range_end
!= 0)
8368 /* Check if the current thread is blocked on an incomplete
8369 step-over, interrupted by a random signal. */
8370 if (ecs
->event_thread
->control
.trap_expected
8371 && ecs
->event_thread
->stop_signal () != GDB_SIGNAL_TRAP
)
8374 ("need to finish step-over of [%s]",
8375 ecs
->event_thread
->ptid
.to_string ().c_str ());
8380 /* Check if the current thread is blocked by a single-step
8381 breakpoint of another thread. */
8382 if (ecs
->hit_singlestep_breakpoint
)
8384 infrun_debug_printf ("need to step [%s] over single-step breakpoint",
8385 ecs
->ptid
.to_string ().c_str ());
8390 /* If this thread needs yet another step-over (e.g., stepping
8391 through a delay slot), do it first before moving on to
8393 if (thread_still_needs_step_over (ecs
->event_thread
))
8396 ("thread [%s] still needs step-over",
8397 ecs
->event_thread
->ptid
.to_string ().c_str ());
8402 /* If scheduler locking applies even if not stepping, there's no
8403 need to walk over threads. Above we've checked whether the
8404 current thread is stepping. If some other thread not the
8405 event thread is stepping, then it must be that scheduler
8406 locking is not in effect. */
8407 if (schedlock_applies (ecs
->event_thread
))
8410 /* Otherwise, we no longer expect a trap in the current thread.
8411 Clear the trap_expected flag before switching back -- this is
8412 what keep_going does as well, if we call it. */
8413 ecs
->event_thread
->control
.trap_expected
= 0;
8415 /* Likewise, clear the signal if it should not be passed. */
8416 if (!signal_program
[ecs
->event_thread
->stop_signal ()])
8417 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
8419 if (restart_stepped_thread (ecs
->target
, ecs
->ptid
))
8421 prepare_to_wait (ecs
);
8425 switch_to_thread (ecs
->event_thread
);
8431 /* Look for the thread that was stepping, and resume it.
8432 RESUME_TARGET / RESUME_PTID indicate the set of threads the caller
8433 is resuming. Return true if a thread was started, false
8437 restart_stepped_thread (process_stratum_target
*resume_target
,
8440 /* Do all pending step-overs before actually proceeding with
8442 if (start_step_over ())
8445 for (thread_info
*tp
: all_threads_safe ())
8447 if (tp
->state
== THREAD_EXITED
)
8450 if (tp
->has_pending_waitstatus ())
8453 /* Ignore threads of processes the caller is not
8456 && (tp
->inf
->process_target () != resume_target
8457 || tp
->inf
->pid
!= resume_ptid
.pid ()))
8460 if (tp
->control
.trap_expected
)
8462 infrun_debug_printf ("switching back to stepped thread (step-over)");
8464 if (keep_going_stepped_thread (tp
))
8469 for (thread_info
*tp
: all_threads_safe ())
8471 if (tp
->state
== THREAD_EXITED
)
8474 if (tp
->has_pending_waitstatus ())
8477 /* Ignore threads of processes the caller is not
8480 && (tp
->inf
->process_target () != resume_target
8481 || tp
->inf
->pid
!= resume_ptid
.pid ()))
8484 /* Did we find the stepping thread? */
8485 if (tp
->control
.step_range_end
)
8487 infrun_debug_printf ("switching back to stepped thread (stepping)");
8489 if (keep_going_stepped_thread (tp
))
8500 restart_after_all_stop_detach (process_stratum_target
*proc_target
)
8502 /* Note we don't check target_is_non_stop_p() here, because the
8503 current inferior may no longer have a process_stratum target
8504 pushed, as we just detached. */
8506 /* See if we have a THREAD_RUNNING thread that need to be
8507 re-resumed. If we have any thread that is already executing,
8508 then we don't need to resume the target -- it is already been
8509 resumed. With the remote target (in all-stop), it's even
8510 impossible to issue another resumption if the target is already
8511 resumed, until the target reports a stop. */
8512 for (thread_info
*thr
: all_threads (proc_target
))
8514 if (thr
->state
!= THREAD_RUNNING
)
8517 /* If we have any thread that is already executing, then we
8518 don't need to resume the target -- it is already been
8520 if (thr
->executing ())
8523 /* If we have a pending event to process, skip resuming the
8524 target and go straight to processing it. */
8525 if (thr
->resumed () && thr
->has_pending_waitstatus ())
8529 /* Alright, we need to re-resume the target. If a thread was
8530 stepping, we need to restart it stepping. */
8531 if (restart_stepped_thread (proc_target
, minus_one_ptid
))
8534 /* Otherwise, find the first THREAD_RUNNING thread and resume
8536 for (thread_info
*thr
: all_threads (proc_target
))
8538 if (thr
->state
!= THREAD_RUNNING
)
8541 execution_control_state
ecs (thr
);
8542 switch_to_thread (thr
);
8548 /* Set a previously stepped thread back to stepping. Returns true on
8549 success, false if the resume is not possible (e.g., the thread
8553 keep_going_stepped_thread (struct thread_info
*tp
)
8555 frame_info_ptr frame
;
8557 /* If the stepping thread exited, then don't try to switch back and
8558 resume it, which could fail in several different ways depending
8559 on the target. Instead, just keep going.
8561 We can find a stepping dead thread in the thread list in two
8564 - The target supports thread exit events, and when the target
8565 tries to delete the thread from the thread list, inferior_ptid
8566 pointed at the exiting thread. In such case, calling
8567 delete_thread does not really remove the thread from the list;
8568 instead, the thread is left listed, with 'exited' state.
8570 - The target's debug interface does not support thread exit
8571 events, and so we have no idea whatsoever if the previously
8572 stepping thread is still alive. For that reason, we need to
8573 synchronously query the target now. */
8575 if (tp
->state
== THREAD_EXITED
|| !target_thread_alive (tp
->ptid
))
8577 infrun_debug_printf ("not resuming previously stepped thread, it has "
8584 infrun_debug_printf ("resuming previously stepped thread");
8586 execution_control_state
ecs (tp
);
8587 switch_to_thread (tp
);
8589 tp
->set_stop_pc (regcache_read_pc (get_thread_regcache (tp
)));
8590 frame
= get_current_frame ();
8592 /* If the PC of the thread we were trying to single-step has
8593 changed, then that thread has trapped or been signaled, but the
8594 event has not been reported to GDB yet. Re-poll the target
8595 looking for this particular thread's event (i.e. temporarily
8596 enable schedlock) by:
8598 - setting a break at the current PC
8599 - resuming that particular thread, only (by setting trap
8602 This prevents us continuously moving the single-step breakpoint
8603 forward, one instruction at a time, overstepping. */
8605 if (tp
->stop_pc () != tp
->prev_pc
)
8609 infrun_debug_printf ("expected thread advanced also (%s -> %s)",
8610 paddress (current_inferior ()->arch (), tp
->prev_pc
),
8611 paddress (current_inferior ()->arch (),
8614 /* Clear the info of the previous step-over, as it's no longer
8615 valid (if the thread was trying to step over a breakpoint, it
8616 has already succeeded). It's what keep_going would do too,
8617 if we called it. Do this before trying to insert the sss
8618 breakpoint, otherwise if we were previously trying to step
8619 over this exact address in another thread, the breakpoint is
8621 clear_step_over_info ();
8622 tp
->control
.trap_expected
= 0;
8624 insert_single_step_breakpoint (get_frame_arch (frame
),
8625 get_frame_address_space (frame
),
8628 tp
->set_resumed (true);
8629 resume_ptid
= internal_resume_ptid (tp
->control
.stepping_command
);
8630 do_target_resume (resume_ptid
, false, GDB_SIGNAL_0
);
8634 infrun_debug_printf ("expected thread still hasn't advanced");
8636 keep_going_pass_signal (&ecs
);
8642 /* Is thread TP in the middle of (software or hardware)
8643 single-stepping? (Note the result of this function must never be
8644 passed directly as target_resume's STEP parameter.) */
8647 currently_stepping (struct thread_info
*tp
)
8649 return ((tp
->control
.step_range_end
8650 && tp
->control
.step_resume_breakpoint
== nullptr)
8651 || tp
->control
.trap_expected
8652 || tp
->stepped_breakpoint
8653 || bpstat_should_step ());
8656 /* Inferior has stepped into a subroutine call with source code that
8657 we should not step over. Do step to the first line of code in
8661 handle_step_into_function (struct gdbarch
*gdbarch
,
8662 struct execution_control_state
*ecs
)
8664 fill_in_stop_func (gdbarch
, ecs
);
8666 compunit_symtab
*cust
8667 = find_pc_compunit_symtab (ecs
->event_thread
->stop_pc ());
8668 if (cust
!= nullptr && cust
->language () != language_asm
)
8669 ecs
->stop_func_start
8670 = gdbarch_skip_prologue_noexcept (gdbarch
, ecs
->stop_func_start
);
8672 symtab_and_line stop_func_sal
= find_pc_line (ecs
->stop_func_start
, 0);
8673 /* Use the step_resume_break to step until the end of the prologue,
8674 even if that involves jumps (as it seems to on the vax under
8676 /* If the prologue ends in the middle of a source line, continue to
8677 the end of that source line (if it is still within the function).
8678 Otherwise, just go to end of prologue. */
8679 if (stop_func_sal
.end
8680 && stop_func_sal
.pc
!= ecs
->stop_func_start
8681 && stop_func_sal
.end
< ecs
->stop_func_end
)
8682 ecs
->stop_func_start
= stop_func_sal
.end
;
8684 /* Architectures which require breakpoint adjustment might not be able
8685 to place a breakpoint at the computed address. If so, the test
8686 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
8687 ecs->stop_func_start to an address at which a breakpoint may be
8688 legitimately placed.
8690 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
8691 made, GDB will enter an infinite loop when stepping through
8692 optimized code consisting of VLIW instructions which contain
8693 subinstructions corresponding to different source lines. On
8694 FR-V, it's not permitted to place a breakpoint on any but the
8695 first subinstruction of a VLIW instruction. When a breakpoint is
8696 set, GDB will adjust the breakpoint address to the beginning of
8697 the VLIW instruction. Thus, we need to make the corresponding
8698 adjustment here when computing the stop address. */
8700 if (gdbarch_adjust_breakpoint_address_p (gdbarch
))
8702 ecs
->stop_func_start
8703 = gdbarch_adjust_breakpoint_address (gdbarch
,
8704 ecs
->stop_func_start
);
8707 if (ecs
->stop_func_start
== ecs
->event_thread
->stop_pc ())
8709 /* We are already there: stop now. */
8710 end_stepping_range (ecs
);
8715 /* Put the step-breakpoint there and go until there. */
8716 symtab_and_line sr_sal
;
8717 sr_sal
.pc
= ecs
->stop_func_start
;
8718 sr_sal
.section
= find_pc_overlay (ecs
->stop_func_start
);
8719 sr_sal
.pspace
= get_frame_program_space (get_current_frame ());
8721 /* Do not specify what the fp should be when we stop since on
8722 some machines the prologue is where the new fp value is
8724 insert_step_resume_breakpoint_at_sal (gdbarch
, sr_sal
, null_frame_id
);
8726 /* And make sure stepping stops right away then. */
8727 ecs
->event_thread
->control
.step_range_end
8728 = ecs
->event_thread
->control
.step_range_start
;
8733 /* Inferior has stepped backward into a subroutine call with source
8734 code that we should not step over. Do step to the beginning of the
8735 last line of code in it. */
8738 handle_step_into_function_backward (struct gdbarch
*gdbarch
,
8739 struct execution_control_state
*ecs
)
8741 struct compunit_symtab
*cust
;
8742 struct symtab_and_line stop_func_sal
;
8744 fill_in_stop_func (gdbarch
, ecs
);
8746 cust
= find_pc_compunit_symtab (ecs
->event_thread
->stop_pc ());
8747 if (cust
!= nullptr && cust
->language () != language_asm
)
8748 ecs
->stop_func_start
8749 = gdbarch_skip_prologue_noexcept (gdbarch
, ecs
->stop_func_start
);
8751 stop_func_sal
= find_pc_line (ecs
->event_thread
->stop_pc (), 0);
8753 /* OK, we're just going to keep stepping here. */
8754 if (stop_func_sal
.pc
== ecs
->event_thread
->stop_pc ())
8756 /* We're there already. Just stop stepping now. */
8757 end_stepping_range (ecs
);
8761 /* Else just reset the step range and keep going.
8762 No step-resume breakpoint, they don't work for
8763 epilogues, which can have multiple entry paths. */
8764 ecs
->event_thread
->control
.step_range_start
= stop_func_sal
.pc
;
8765 ecs
->event_thread
->control
.step_range_end
= stop_func_sal
.end
;
8771 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
8772 This is used to both functions and to skip over code. */
8775 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch
*gdbarch
,
8776 struct symtab_and_line sr_sal
,
8777 struct frame_id sr_id
,
8778 enum bptype sr_type
)
8780 /* There should never be more than one step-resume or longjmp-resume
8781 breakpoint per thread, so we should never be setting a new
8782 step_resume_breakpoint when one is already active. */
8783 gdb_assert (inferior_thread ()->control
.step_resume_breakpoint
== nullptr);
8784 gdb_assert (sr_type
== bp_step_resume
|| sr_type
== bp_hp_step_resume
);
8786 infrun_debug_printf ("inserting step-resume breakpoint at %s",
8787 paddress (gdbarch
, sr_sal
.pc
));
8789 inferior_thread ()->control
.step_resume_breakpoint
8790 = set_momentary_breakpoint (gdbarch
, sr_sal
, sr_id
, sr_type
).release ();
8794 insert_step_resume_breakpoint_at_sal (struct gdbarch
*gdbarch
,
8795 struct symtab_and_line sr_sal
,
8796 struct frame_id sr_id
)
8798 insert_step_resume_breakpoint_at_sal_1 (gdbarch
,
8803 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
8804 This is used to skip a potential signal handler.
8806 This is called with the interrupted function's frame. The signal
8807 handler, when it returns, will resume the interrupted function at
8811 insert_hp_step_resume_breakpoint_at_frame (const frame_info_ptr
&return_frame
)
8813 gdb_assert (return_frame
!= nullptr);
8815 struct gdbarch
*gdbarch
= get_frame_arch (return_frame
);
8817 symtab_and_line sr_sal
;
8818 sr_sal
.pc
= gdbarch_addr_bits_remove (gdbarch
, get_frame_pc (return_frame
));
8819 sr_sal
.section
= find_pc_overlay (sr_sal
.pc
);
8820 sr_sal
.pspace
= get_frame_program_space (return_frame
);
8822 insert_step_resume_breakpoint_at_sal_1 (gdbarch
, sr_sal
,
8823 get_stack_frame_id (return_frame
),
8827 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
8828 is used to skip a function after stepping into it (for "next" or if
8829 the called function has no debugging information).
8831 The current function has almost always been reached by single
8832 stepping a call or return instruction. NEXT_FRAME belongs to the
8833 current function, and the breakpoint will be set at the caller's
8836 This is a separate function rather than reusing
8837 insert_hp_step_resume_breakpoint_at_frame in order to avoid
8838 get_prev_frame, which may stop prematurely (see the implementation
8839 of frame_unwind_caller_id for an example). */
8842 insert_step_resume_breakpoint_at_caller (const frame_info_ptr
&next_frame
)
8844 /* We shouldn't have gotten here if we don't know where the call site
8846 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame
)));
8848 struct gdbarch
*gdbarch
= frame_unwind_caller_arch (next_frame
);
8850 symtab_and_line sr_sal
;
8851 sr_sal
.pc
= gdbarch_addr_bits_remove (gdbarch
,
8852 frame_unwind_caller_pc (next_frame
));
8853 sr_sal
.section
= find_pc_overlay (sr_sal
.pc
);
8854 sr_sal
.pspace
= frame_unwind_program_space (next_frame
);
8856 insert_step_resume_breakpoint_at_sal (gdbarch
, sr_sal
,
8857 frame_unwind_caller_id (next_frame
));
8860 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
8861 new breakpoint at the target of a jmp_buf. The handling of
8862 longjmp-resume uses the same mechanisms used for handling
8863 "step-resume" breakpoints. */
8866 insert_longjmp_resume_breakpoint (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
8868 /* There should never be more than one longjmp-resume breakpoint per
8869 thread, so we should never be setting a new
8870 longjmp_resume_breakpoint when one is already active. */
8871 gdb_assert (inferior_thread ()->control
.exception_resume_breakpoint
== nullptr);
8873 infrun_debug_printf ("inserting longjmp-resume breakpoint at %s",
8874 paddress (gdbarch
, pc
));
8876 inferior_thread ()->control
.exception_resume_breakpoint
=
8877 set_momentary_breakpoint_at_pc (gdbarch
, pc
, bp_longjmp_resume
).release ();
8880 /* Insert an exception resume breakpoint. TP is the thread throwing
8881 the exception. The block B is the block of the unwinder debug hook
8882 function. FRAME is the frame corresponding to the call to this
8883 function. SYM is the symbol of the function argument holding the
8884 target PC of the exception. */
8887 insert_exception_resume_breakpoint (struct thread_info
*tp
,
8888 const struct block
*b
,
8889 const frame_info_ptr
&frame
,
8894 struct block_symbol vsym
;
8895 struct value
*value
;
8897 struct breakpoint
*bp
;
8899 vsym
= lookup_symbol_search_name (sym
->search_name (),
8900 b
, SEARCH_VAR_DOMAIN
);
8901 value
= read_var_value (vsym
.symbol
, vsym
.block
, frame
);
8902 /* If the value was optimized out, revert to the old behavior. */
8903 if (! value
->optimized_out ())
8905 handler
= value_as_address (value
);
8907 infrun_debug_printf ("exception resume at %lx",
8908 (unsigned long) handler
);
8910 /* set_momentary_breakpoint_at_pc creates a thread-specific
8911 breakpoint for the current inferior thread. */
8912 gdb_assert (tp
== inferior_thread ());
8913 bp
= set_momentary_breakpoint_at_pc (get_frame_arch (frame
),
8915 bp_exception_resume
).release ();
8917 tp
->control
.exception_resume_breakpoint
= bp
;
8920 catch (const gdb_exception_error
&e
)
8922 /* We want to ignore errors here. */
8926 /* A helper for check_exception_resume that sets an
8927 exception-breakpoint based on a SystemTap probe. */
8930 insert_exception_resume_from_probe (struct thread_info
*tp
,
8931 const struct bound_probe
*probe
,
8932 const frame_info_ptr
&frame
)
8934 struct value
*arg_value
;
8936 struct breakpoint
*bp
;
8938 arg_value
= probe_safe_evaluate_at_pc (frame
, 1);
8942 handler
= value_as_address (arg_value
);
8944 infrun_debug_printf ("exception resume at %s",
8945 paddress (probe
->objfile
->arch (), handler
));
8947 /* set_momentary_breakpoint_at_pc creates a thread-specific breakpoint
8948 for the current inferior thread. */
8949 gdb_assert (tp
== inferior_thread ());
8950 bp
= set_momentary_breakpoint_at_pc (get_frame_arch (frame
),
8951 handler
, bp_exception_resume
).release ();
8952 tp
->control
.exception_resume_breakpoint
= bp
;
8955 /* This is called when an exception has been intercepted. Check to
8956 see whether the exception's destination is of interest, and if so,
8957 set an exception resume breakpoint there. */
8960 check_exception_resume (struct execution_control_state
*ecs
,
8961 const frame_info_ptr
&frame
)
8963 struct bound_probe probe
;
8964 struct symbol
*func
;
8966 /* First see if this exception unwinding breakpoint was set via a
8967 SystemTap probe point. If so, the probe has two arguments: the
8968 CFA and the HANDLER. We ignore the CFA, extract the handler, and
8969 set a breakpoint there. */
8970 probe
= find_probe_by_pc (get_frame_pc (frame
));
8973 insert_exception_resume_from_probe (ecs
->event_thread
, &probe
, frame
);
8977 func
= get_frame_function (frame
);
8983 const struct block
*b
;
8986 /* The exception breakpoint is a thread-specific breakpoint on
8987 the unwinder's debug hook, declared as:
8989 void _Unwind_DebugHook (void *cfa, void *handler);
8991 The CFA argument indicates the frame to which control is
8992 about to be transferred. HANDLER is the destination PC.
8994 We ignore the CFA and set a temporary breakpoint at HANDLER.
8995 This is not extremely efficient but it avoids issues in gdb
8996 with computing the DWARF CFA, and it also works even in weird
8997 cases such as throwing an exception from inside a signal
9000 b
= func
->value_block ();
9001 for (struct symbol
*sym
: block_iterator_range (b
))
9003 if (!sym
->is_argument ())
9010 insert_exception_resume_breakpoint (ecs
->event_thread
,
9016 catch (const gdb_exception_error
&e
)
9022 stop_waiting (struct execution_control_state
*ecs
)
9024 infrun_debug_printf ("stop_waiting");
9026 /* Let callers know we don't want to wait for the inferior anymore. */
9027 ecs
->wait_some_more
= 0;
9030 /* Like keep_going, but passes the signal to the inferior, even if the
9031 signal is set to nopass. */
9034 keep_going_pass_signal (struct execution_control_state
*ecs
)
9036 gdb_assert (ecs
->event_thread
->ptid
== inferior_ptid
);
9037 gdb_assert (!ecs
->event_thread
->resumed ());
9039 /* Save the pc before execution, to compare with pc after stop. */
9040 ecs
->event_thread
->prev_pc
9041 = regcache_read_pc_protected (get_thread_regcache (ecs
->event_thread
));
9043 if (ecs
->event_thread
->control
.trap_expected
)
9045 struct thread_info
*tp
= ecs
->event_thread
;
9047 infrun_debug_printf ("%s has trap_expected set, "
9048 "resuming to collect trap",
9049 tp
->ptid
.to_string ().c_str ());
9051 /* We haven't yet gotten our trap, and either: intercepted a
9052 non-signal event (e.g., a fork); or took a signal which we
9053 are supposed to pass through to the inferior. Simply
9055 resume (ecs
->event_thread
->stop_signal ());
9057 else if (step_over_info_valid_p ())
9059 /* Another thread is stepping over a breakpoint in-line. If
9060 this thread needs a step-over too, queue the request. In
9061 either case, this resume must be deferred for later. */
9062 struct thread_info
*tp
= ecs
->event_thread
;
9064 if (ecs
->hit_singlestep_breakpoint
9065 || thread_still_needs_step_over (tp
))
9067 infrun_debug_printf ("step-over already in progress: "
9068 "step-over for %s deferred",
9069 tp
->ptid
.to_string ().c_str ());
9070 global_thread_step_over_chain_enqueue (tp
);
9073 infrun_debug_printf ("step-over in progress: resume of %s deferred",
9074 tp
->ptid
.to_string ().c_str ());
9078 regcache
*regcache
= get_thread_regcache (ecs
->event_thread
);
9081 step_over_what step_what
;
9083 /* Either the trap was not expected, but we are continuing
9084 anyway (if we got a signal, the user asked it be passed to
9087 We got our expected trap, but decided we should resume from
9090 We're going to run this baby now!
9092 Note that insert_breakpoints won't try to re-insert
9093 already inserted breakpoints. Therefore, we don't
9094 care if breakpoints were already inserted, or not. */
9096 /* If we need to step over a breakpoint, and we're not using
9097 displaced stepping to do so, insert all breakpoints
9098 (watchpoints, etc.) but the one we're stepping over, step one
9099 instruction, and then re-insert the breakpoint when that step
9102 step_what
= thread_still_needs_step_over (ecs
->event_thread
);
9104 remove_bp
= (ecs
->hit_singlestep_breakpoint
9105 || (step_what
& STEP_OVER_BREAKPOINT
));
9106 remove_wps
= (step_what
& STEP_OVER_WATCHPOINT
);
9108 /* We can't use displaced stepping if we need to step past a
9109 watchpoint. The instruction copied to the scratch pad would
9110 still trigger the watchpoint. */
9112 && (remove_wps
|| !use_displaced_stepping (ecs
->event_thread
)))
9114 set_step_over_info (ecs
->event_thread
->inf
->aspace
.get (),
9115 regcache_read_pc (regcache
), remove_wps
,
9116 ecs
->event_thread
->global_num
);
9118 else if (remove_wps
)
9119 set_step_over_info (nullptr, 0, remove_wps
, -1);
9121 /* If we now need to do an in-line step-over, we need to stop
9122 all other threads. Note this must be done before
9123 insert_breakpoints below, because that removes the breakpoint
9124 we're about to step over, otherwise other threads could miss
9126 if (step_over_info_valid_p () && target_is_non_stop_p ())
9127 stop_all_threads ("starting in-line step-over");
9129 /* Stop stepping if inserting breakpoints fails. */
9132 insert_breakpoints ();
9134 catch (const gdb_exception_error
&e
)
9136 exception_print (gdb_stderr
, e
);
9138 clear_step_over_info ();
9142 ecs
->event_thread
->control
.trap_expected
= (remove_bp
|| remove_wps
);
9144 resume (ecs
->event_thread
->stop_signal ());
9147 prepare_to_wait (ecs
);
9150 /* Called when we should continue running the inferior, because the
9151 current event doesn't cause a user visible stop. This does the
9152 resuming part; waiting for the next event is done elsewhere. */
9155 keep_going (struct execution_control_state
*ecs
)
9157 if (ecs
->event_thread
->control
.trap_expected
9158 && ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
)
9159 ecs
->event_thread
->control
.trap_expected
= 0;
9161 if (!signal_program
[ecs
->event_thread
->stop_signal ()])
9162 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
9163 keep_going_pass_signal (ecs
);
9166 /* This function normally comes after a resume, before
9167 handle_inferior_event exits. It takes care of any last bits of
9168 housekeeping, and sets the all-important wait_some_more flag. */
9171 prepare_to_wait (struct execution_control_state
*ecs
)
9173 infrun_debug_printf ("prepare_to_wait");
9175 ecs
->wait_some_more
= 1;
9177 /* If the target can't async, emulate it by marking the infrun event
9178 handler such that as soon as we get back to the event-loop, we
9179 immediately end up in fetch_inferior_event again calling
9181 if (!target_can_async_p ())
9182 mark_infrun_async_event_handler ();
9185 /* We are done with the step range of a step/next/si/ni command.
9186 Called once for each n of a "step n" operation. */
9189 end_stepping_range (struct execution_control_state
*ecs
)
9191 ecs
->event_thread
->control
.stop_step
= 1;
9195 /* Several print_*_reason functions to print why the inferior has stopped.
9196 We always print something when the inferior exits, or receives a signal.
9197 The rest of the cases are dealt with later on in normal_stop and
9198 print_it_typical. Ideally there should be a call to one of these
9199 print_*_reason functions functions from handle_inferior_event each time
9200 stop_waiting is called.
9202 Note that we don't call these directly, instead we delegate that to
9203 the interpreters, through observers. Interpreters then call these
9204 with whatever uiout is right. */
9207 print_signal_exited_reason (struct ui_out
*uiout
, enum gdb_signal siggnal
)
9209 annotate_signalled ();
9210 if (uiout
->is_mi_like_p ())
9212 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED
));
9213 uiout
->text ("\nProgram terminated with signal ");
9214 annotate_signal_name ();
9215 uiout
->field_string ("signal-name",
9216 gdb_signal_to_name (siggnal
));
9217 annotate_signal_name_end ();
9219 annotate_signal_string ();
9220 uiout
->field_string ("signal-meaning",
9221 gdb_signal_to_string (siggnal
));
9222 annotate_signal_string_end ();
9223 uiout
->text (".\n");
9224 uiout
->text ("The program no longer exists.\n");
9228 print_exited_reason (struct ui_out
*uiout
, int exitstatus
)
9230 struct inferior
*inf
= current_inferior ();
9231 std::string pidstr
= target_pid_to_str (ptid_t (inf
->pid
));
9233 annotate_exited (exitstatus
);
9236 if (uiout
->is_mi_like_p ())
9237 uiout
->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED
));
9238 std::string exit_code_str
9239 = string_printf ("0%o", (unsigned int) exitstatus
);
9240 uiout
->message ("[Inferior %s (%s) exited with code %pF]\n",
9241 plongest (inf
->num
), pidstr
.c_str (),
9242 string_field ("exit-code", exit_code_str
.c_str ()));
9246 if (uiout
->is_mi_like_p ())
9248 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY
));
9249 uiout
->message ("[Inferior %s (%s) exited normally]\n",
9250 plongest (inf
->num
), pidstr
.c_str ());
9255 print_signal_received_reason (struct ui_out
*uiout
, enum gdb_signal siggnal
)
9257 struct thread_info
*thr
= inferior_thread ();
9259 infrun_debug_printf ("signal = %s", gdb_signal_to_string (siggnal
));
9263 if (uiout
->is_mi_like_p ())
9265 else if (show_thread_that_caused_stop ())
9267 uiout
->text ("\nThread ");
9268 uiout
->field_string ("thread-id", print_thread_id (thr
));
9270 const char *name
= thread_name (thr
);
9271 if (name
!= nullptr)
9273 uiout
->text (" \"");
9274 uiout
->field_string ("name", name
);
9279 uiout
->text ("\nProgram");
9281 if (siggnal
== GDB_SIGNAL_0
&& !uiout
->is_mi_like_p ())
9282 uiout
->text (" stopped");
9285 uiout
->text (" received signal ");
9286 annotate_signal_name ();
9287 if (uiout
->is_mi_like_p ())
9289 ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED
));
9290 uiout
->field_string ("signal-name", gdb_signal_to_name (siggnal
));
9291 annotate_signal_name_end ();
9293 annotate_signal_string ();
9294 uiout
->field_string ("signal-meaning", gdb_signal_to_string (siggnal
));
9296 regcache
*regcache
= get_thread_regcache (thr
);
9297 struct gdbarch
*gdbarch
= regcache
->arch ();
9298 if (gdbarch_report_signal_info_p (gdbarch
))
9299 gdbarch_report_signal_info (gdbarch
, uiout
, siggnal
);
9301 annotate_signal_string_end ();
9303 uiout
->text (".\n");
9307 print_no_history_reason (struct ui_out
*uiout
)
9309 if (uiout
->is_mi_like_p ())
9310 uiout
->field_string ("reason", async_reason_lookup (EXEC_ASYNC_NO_HISTORY
));
9311 else if (execution_direction
== EXEC_FORWARD
)
9312 uiout
->text ("\nReached end of recorded history; stopping.\nFollowing "
9313 "forward execution will be added to history.\n");
9316 gdb_assert (execution_direction
== EXEC_REVERSE
);
9317 uiout
->text ("\nReached end of recorded history; stopping.\nBackward "
9318 "execution from here not possible.\n");
9322 /* Print current location without a level number, if we have changed
9323 functions or hit a breakpoint. Print source line if we have one.
9324 bpstat_print contains the logic deciding in detail what to print,
9325 based on the event(s) that just occurred. */
9328 print_stop_location (const target_waitstatus
&ws
)
9331 enum print_what source_flag
;
9332 int do_frame_printing
= 1;
9333 struct thread_info
*tp
= inferior_thread ();
9335 bpstat_ret
= bpstat_print (tp
->control
.stop_bpstat
, ws
.kind ());
9339 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
9340 should) carry around the function and does (or should) use
9341 that when doing a frame comparison. */
9342 if (tp
->control
.stop_step
9343 && (tp
->control
.step_frame_id
9344 == get_frame_id (get_current_frame ()))
9345 && (tp
->control
.step_start_function
9346 == find_pc_function (tp
->stop_pc ())))
9348 symtab_and_line sal
= find_frame_sal (get_selected_frame (nullptr));
9349 if (sal
.symtab
!= tp
->current_symtab
)
9351 /* Finished step in same frame but into different file, print
9352 location and source line. */
9353 source_flag
= SRC_AND_LOC
;
9357 /* Finished step in same frame and same file, just print source
9359 source_flag
= SRC_LINE
;
9364 /* Finished step into different frame, print location and source
9366 source_flag
= SRC_AND_LOC
;
9369 case PRINT_SRC_AND_LOC
:
9370 /* Print location and source line. */
9371 source_flag
= SRC_AND_LOC
;
9373 case PRINT_SRC_ONLY
:
9374 source_flag
= SRC_LINE
;
9377 /* Something bogus. */
9378 source_flag
= SRC_LINE
;
9379 do_frame_printing
= 0;
9382 internal_error (_("Unknown value."));
9385 /* The behavior of this routine with respect to the source
9387 SRC_LINE: Print only source line
9388 LOCATION: Print only location
9389 SRC_AND_LOC: Print location and source line. */
9390 if (do_frame_printing
)
9391 print_stack_frame (get_selected_frame (nullptr), 0, source_flag
, 1);
9394 /* See `print_stop_event` in infrun.h. */
9397 do_print_stop_event (struct ui_out
*uiout
, bool displays
)
9399 struct target_waitstatus last
;
9400 struct thread_info
*tp
;
9402 get_last_target_status (nullptr, nullptr, &last
);
9405 scoped_restore save_uiout
= make_scoped_restore (¤t_uiout
, uiout
);
9407 print_stop_location (last
);
9409 /* Display the auto-display expressions. */
9414 tp
= inferior_thread ();
9415 if (tp
->thread_fsm () != nullptr
9416 && tp
->thread_fsm ()->finished_p ())
9418 struct return_value_info
*rv
;
9420 rv
= tp
->thread_fsm ()->return_value ();
9422 print_return_value (uiout
, rv
);
9426 /* See infrun.h. This function itself sets up buffered output for the
9427 duration of do_print_stop_event, which performs the actual event
9431 print_stop_event (struct ui_out
*uiout
, bool displays
)
9433 do_with_buffered_output (do_print_stop_event
, uiout
, displays
);
9439 maybe_remove_breakpoints (void)
9441 if (!breakpoints_should_be_inserted_now () && target_has_execution ())
9443 if (remove_breakpoints ())
9445 target_terminal::ours_for_output ();
9446 gdb_printf (_("Cannot remove breakpoints because "
9447 "program is no longer writable.\nFurther "
9448 "execution is probably impossible.\n"));
9453 /* The execution context that just caused a normal stop. */
9459 DISABLE_COPY_AND_ASSIGN (stop_context
);
9461 bool changed () const;
9466 /* The event PTID. */
9470 /* If stopped for a thread event, this is the thread that caused the
9472 thread_info_ref thread
;
9474 /* The inferior that caused the stop. */
9478 /* Initializes a new stop context. If stopped for a thread event, this
9479 takes a strong reference to the thread. */
9481 stop_context::stop_context ()
9483 stop_id
= get_stop_id ();
9484 ptid
= inferior_ptid
;
9485 inf_num
= current_inferior ()->num
;
9487 if (inferior_ptid
!= null_ptid
)
9489 /* Take a strong reference so that the thread can't be deleted
9491 thread
= thread_info_ref::new_reference (inferior_thread ());
9495 /* Return true if the current context no longer matches the saved stop
9499 stop_context::changed () const
9501 if (ptid
!= inferior_ptid
)
9503 if (inf_num
!= current_inferior ()->num
)
9505 if (thread
!= nullptr && thread
->state
!= THREAD_STOPPED
)
9507 if (get_stop_id () != stop_id
)
9517 struct target_waitstatus last
;
9519 get_last_target_status (nullptr, nullptr, &last
);
9523 /* If an exception is thrown from this point on, make sure to
9524 propagate GDB's knowledge of the executing state to the
9525 frontend/user running state. A QUIT is an easy exception to see
9526 here, so do this before any filtered output. */
9528 ptid_t finish_ptid
= null_ptid
;
9531 finish_ptid
= minus_one_ptid
;
9532 else if (last
.kind () == TARGET_WAITKIND_SIGNALLED
9533 || last
.kind () == TARGET_WAITKIND_EXITED
)
9535 /* On some targets, we may still have live threads in the
9536 inferior when we get a process exit event. E.g., for
9537 "checkpoint", when the current checkpoint/fork exits,
9538 linux-fork.c automatically switches to another fork from
9539 within target_mourn_inferior. */
9540 if (inferior_ptid
!= null_ptid
)
9541 finish_ptid
= ptid_t (inferior_ptid
.pid ());
9543 else if (last
.kind () != TARGET_WAITKIND_NO_RESUMED
9544 && last
.kind () != TARGET_WAITKIND_THREAD_EXITED
)
9545 finish_ptid
= inferior_ptid
;
9547 std::optional
<scoped_finish_thread_state
> maybe_finish_thread_state
;
9548 if (finish_ptid
!= null_ptid
)
9550 maybe_finish_thread_state
.emplace
9551 (user_visible_resume_target (finish_ptid
), finish_ptid
);
9554 /* As we're presenting a stop, and potentially removing breakpoints,
9555 update the thread list so we can tell whether there are threads
9556 running on the target. With target remote, for example, we can
9557 only learn about new threads when we explicitly update the thread
9558 list. Do this before notifying the interpreters about signal
9559 stops, end of stepping ranges, etc., so that the "new thread"
9560 output is emitted before e.g., "Program received signal FOO",
9561 instead of after. */
9562 update_thread_list ();
9564 if (last
.kind () == TARGET_WAITKIND_STOPPED
&& stopped_by_random_signal
)
9565 notify_signal_received (inferior_thread ()->stop_signal ());
9567 /* As with the notification of thread events, we want to delay
9568 notifying the user that we've switched thread context until
9569 the inferior actually stops.
9571 There's no point in saying anything if the inferior has exited.
9572 Note that SIGNALLED here means "exited with a signal", not
9573 "received a signal".
9575 Also skip saying anything in non-stop mode. In that mode, as we
9576 don't want GDB to switch threads behind the user's back, to avoid
9577 races where the user is typing a command to apply to thread x,
9578 but GDB switches to thread y before the user finishes entering
9579 the command, fetch_inferior_event installs a cleanup to restore
9580 the current thread back to the thread the user had selected right
9581 after this event is handled, so we're not really switching, only
9582 informing of a stop. */
9585 if ((last
.kind () != TARGET_WAITKIND_SIGNALLED
9586 && last
.kind () != TARGET_WAITKIND_EXITED
9587 && last
.kind () != TARGET_WAITKIND_NO_RESUMED
9588 && last
.kind () != TARGET_WAITKIND_THREAD_EXITED
)
9589 && target_has_execution ()
9590 && previous_thread
!= inferior_thread ())
9592 SWITCH_THRU_ALL_UIS ()
9594 target_terminal::ours_for_output ();
9595 gdb_printf (_("[Switching to %s]\n"),
9596 target_pid_to_str (inferior_ptid
).c_str ());
9597 annotate_thread_changed ();
9601 update_previous_thread ();
9604 if (last
.kind () == TARGET_WAITKIND_NO_RESUMED
9605 || last
.kind () == TARGET_WAITKIND_THREAD_EXITED
)
9607 stop_print_frame
= false;
9609 SWITCH_THRU_ALL_UIS ()
9610 if (current_ui
->prompt_state
== PROMPT_BLOCKED
)
9612 target_terminal::ours_for_output ();
9613 if (last
.kind () == TARGET_WAITKIND_NO_RESUMED
)
9614 gdb_printf (_("No unwaited-for children left.\n"));
9615 else if (last
.kind () == TARGET_WAITKIND_THREAD_EXITED
)
9616 gdb_printf (_("Command aborted, thread exited.\n"));
9618 gdb_assert_not_reached ("unhandled");
9622 /* Note: this depends on the update_thread_list call above. */
9623 maybe_remove_breakpoints ();
9625 /* If an auto-display called a function and that got a signal,
9626 delete that auto-display to avoid an infinite recursion. */
9628 if (stopped_by_random_signal
)
9629 disable_current_display ();
9631 SWITCH_THRU_ALL_UIS ()
9633 async_enable_stdin ();
9636 /* Let the user/frontend see the threads as stopped. */
9637 maybe_finish_thread_state
.reset ();
9639 /* Select innermost stack frame - i.e., current frame is frame 0,
9640 and current location is based on that. Handle the case where the
9641 dummy call is returning after being stopped. E.g. the dummy call
9642 previously hit a breakpoint. (If the dummy call returns
9643 normally, we won't reach here.) Do this before the stop hook is
9644 run, so that it doesn't get to see the temporary dummy frame,
9645 which is not where we'll present the stop. */
9646 if (has_stack_frames ())
9648 if (stop_stack_dummy
== STOP_STACK_DUMMY
)
9650 /* Pop the empty frame that contains the stack dummy. This
9651 also restores inferior state prior to the call (struct
9652 infcall_suspend_state). */
9653 frame_info_ptr frame
= get_current_frame ();
9655 gdb_assert (get_frame_type (frame
) == DUMMY_FRAME
);
9657 /* frame_pop calls reinit_frame_cache as the last thing it
9658 does which means there's now no selected frame. */
9661 select_frame (get_current_frame ());
9663 /* Set the current source location. */
9664 set_current_sal_from_frame (get_current_frame ());
9667 /* Look up the hook_stop and run it (CLI internally handles problem
9668 of stop_command's pre-hook not existing). */
9669 stop_context saved_context
;
9673 execute_cmd_pre_hook (stop_command
);
9675 catch (const gdb_exception_error
&ex
)
9677 exception_fprintf (gdb_stderr
, ex
,
9678 "Error while running hook_stop:\n");
9681 /* If the stop hook resumes the target, then there's no point in
9682 trying to notify about the previous stop; its context is
9683 gone. Likewise if the command switches thread or inferior --
9684 the observers would print a stop for the wrong
9686 if (saved_context
.changed ())
9689 /* Notify observers about the stop. This is where the interpreters
9690 print the stop event. */
9691 notify_normal_stop ((inferior_ptid
!= null_ptid
9692 ? inferior_thread ()->control
.stop_bpstat
9695 annotate_stopped ();
9697 if (target_has_execution ())
9699 if (last
.kind () != TARGET_WAITKIND_SIGNALLED
9700 && last
.kind () != TARGET_WAITKIND_EXITED
9701 && last
.kind () != TARGET_WAITKIND_NO_RESUMED
9702 && last
.kind () != TARGET_WAITKIND_THREAD_EXITED
)
9703 /* Delete the breakpoint we stopped at, if it wants to be deleted.
9704 Delete any breakpoint that is to be deleted at the next stop. */
9705 breakpoint_auto_delete (inferior_thread ()->control
.stop_bpstat
);
9712 signal_stop_state (int signo
)
9714 return signal_stop
[signo
];
9718 signal_print_state (int signo
)
9720 return signal_print
[signo
];
9724 signal_pass_state (int signo
)
9726 return signal_program
[signo
];
9730 signal_cache_update (int signo
)
9734 for (signo
= 0; signo
< (int) GDB_SIGNAL_LAST
; signo
++)
9735 signal_cache_update (signo
);
9740 signal_pass
[signo
] = (signal_stop
[signo
] == 0
9741 && signal_print
[signo
] == 0
9742 && signal_program
[signo
] == 1
9743 && signal_catch
[signo
] == 0);
9747 signal_stop_update (int signo
, int state
)
9749 int ret
= signal_stop
[signo
];
9751 signal_stop
[signo
] = state
;
9752 signal_cache_update (signo
);
9757 signal_print_update (int signo
, int state
)
9759 int ret
= signal_print
[signo
];
9761 signal_print
[signo
] = state
;
9762 signal_cache_update (signo
);
9767 signal_pass_update (int signo
, int state
)
9769 int ret
= signal_program
[signo
];
9771 signal_program
[signo
] = state
;
9772 signal_cache_update (signo
);
9776 /* Update the global 'signal_catch' from INFO and notify the
9780 signal_catch_update (const unsigned int *info
)
9784 for (i
= 0; i
< GDB_SIGNAL_LAST
; ++i
)
9785 signal_catch
[i
] = info
[i
] > 0;
9786 signal_cache_update (-1);
9787 target_pass_signals (signal_pass
);
9791 sig_print_header (void)
9793 gdb_printf (_("Signal Stop\tPrint\tPass "
9794 "to program\tDescription\n"));
9798 sig_print_info (enum gdb_signal oursig
)
9800 const char *name
= gdb_signal_to_name (oursig
);
9801 int name_padding
= 13 - strlen (name
);
9803 if (name_padding
<= 0)
9806 gdb_printf ("%s", name
);
9807 gdb_printf ("%*.*s ", name_padding
, name_padding
, " ");
9808 gdb_printf ("%s\t", signal_stop
[oursig
] ? "Yes" : "No");
9809 gdb_printf ("%s\t", signal_print
[oursig
] ? "Yes" : "No");
9810 gdb_printf ("%s\t\t", signal_program
[oursig
] ? "Yes" : "No");
9811 gdb_printf ("%s\n", gdb_signal_to_string (oursig
));
9814 /* Specify how various signals in the inferior should be handled. */
9817 handle_command (const char *args
, int from_tty
)
9819 int digits
, wordlen
;
9820 int sigfirst
, siglast
;
9821 enum gdb_signal oursig
;
9824 if (args
== nullptr)
9826 error_no_arg (_("signal to handle"));
9829 /* Allocate and zero an array of flags for which signals to handle. */
9831 const size_t nsigs
= GDB_SIGNAL_LAST
;
9832 unsigned char sigs
[nsigs
] {};
9834 /* Break the command line up into args. */
9836 gdb_argv
built_argv (args
);
9838 /* Walk through the args, looking for signal oursigs, signal names, and
9839 actions. Signal numbers and signal names may be interspersed with
9840 actions, with the actions being performed for all signals cumulatively
9841 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
9843 for (char *arg
: built_argv
)
9845 wordlen
= strlen (arg
);
9846 for (digits
= 0; isdigit (arg
[digits
]); digits
++)
9850 sigfirst
= siglast
= -1;
9852 if (wordlen
>= 1 && !strncmp (arg
, "all", wordlen
))
9854 /* Apply action to all signals except those used by the
9855 debugger. Silently skip those. */
9858 siglast
= nsigs
- 1;
9860 else if (wordlen
>= 1 && !strncmp (arg
, "stop", wordlen
))
9862 SET_SIGS (nsigs
, sigs
, signal_stop
);
9863 SET_SIGS (nsigs
, sigs
, signal_print
);
9865 else if (wordlen
>= 1 && !strncmp (arg
, "ignore", wordlen
))
9867 UNSET_SIGS (nsigs
, sigs
, signal_program
);
9869 else if (wordlen
>= 2 && !strncmp (arg
, "print", wordlen
))
9871 SET_SIGS (nsigs
, sigs
, signal_print
);
9873 else if (wordlen
>= 2 && !strncmp (arg
, "pass", wordlen
))
9875 SET_SIGS (nsigs
, sigs
, signal_program
);
9877 else if (wordlen
>= 3 && !strncmp (arg
, "nostop", wordlen
))
9879 UNSET_SIGS (nsigs
, sigs
, signal_stop
);
9881 else if (wordlen
>= 3 && !strncmp (arg
, "noignore", wordlen
))
9883 SET_SIGS (nsigs
, sigs
, signal_program
);
9885 else if (wordlen
>= 4 && !strncmp (arg
, "noprint", wordlen
))
9887 UNSET_SIGS (nsigs
, sigs
, signal_print
);
9888 UNSET_SIGS (nsigs
, sigs
, signal_stop
);
9890 else if (wordlen
>= 4 && !strncmp (arg
, "nopass", wordlen
))
9892 UNSET_SIGS (nsigs
, sigs
, signal_program
);
9894 else if (digits
> 0)
9896 /* It is numeric. The numeric signal refers to our own
9897 internal signal numbering from target.h, not to host/target
9898 signal number. This is a feature; users really should be
9899 using symbolic names anyway, and the common ones like
9900 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
9902 sigfirst
= siglast
= (int)
9903 gdb_signal_from_command (atoi (arg
));
9904 if (arg
[digits
] == '-')
9907 gdb_signal_from_command (atoi (arg
+ digits
+ 1));
9909 if (sigfirst
> siglast
)
9911 /* Bet he didn't figure we'd think of this case... */
9912 std::swap (sigfirst
, siglast
);
9917 oursig
= gdb_signal_from_name (arg
);
9918 if (oursig
!= GDB_SIGNAL_UNKNOWN
)
9920 sigfirst
= siglast
= (int) oursig
;
9924 /* Not a number and not a recognized flag word => complain. */
9925 error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg
);
9929 /* If any signal numbers or symbol names were found, set flags for
9930 which signals to apply actions to. */
9932 for (int signum
= sigfirst
; signum
>= 0 && signum
<= siglast
; signum
++)
9934 switch ((enum gdb_signal
) signum
)
9936 case GDB_SIGNAL_TRAP
:
9937 case GDB_SIGNAL_INT
:
9938 if (!allsigs
&& !sigs
[signum
])
9940 if (query (_("%s is used by the debugger.\n\
9941 Are you sure you want to change it? "),
9942 gdb_signal_to_name ((enum gdb_signal
) signum
)))
9947 gdb_printf (_("Not confirmed, unchanged.\n"));
9951 case GDB_SIGNAL_DEFAULT
:
9952 case GDB_SIGNAL_UNKNOWN
:
9953 /* Make sure that "all" doesn't print these. */
9962 for (int signum
= 0; signum
< nsigs
; signum
++)
9965 signal_cache_update (-1);
9966 target_pass_signals (signal_pass
);
9967 target_program_signals (signal_program
);
9971 /* Show the results. */
9972 sig_print_header ();
9973 for (; signum
< nsigs
; signum
++)
9975 sig_print_info ((enum gdb_signal
) signum
);
9982 /* Complete the "handle" command. */
9985 handle_completer (struct cmd_list_element
*ignore
,
9986 completion_tracker
&tracker
,
9987 const char *text
, const char *word
)
9989 static const char * const keywords
[] =
10003 signal_completer (ignore
, tracker
, text
, word
);
10004 complete_on_enum (tracker
, keywords
, word
, word
);
10008 gdb_signal_from_command (int num
)
10010 if (num
>= 1 && num
<= 15)
10011 return (enum gdb_signal
) num
;
10012 error (_("Only signals 1-15 are valid as numeric signals.\n\
10013 Use \"info signals\" for a list of symbolic signals."));
10016 /* Print current contents of the tables set by the handle command.
10017 It is possible we should just be printing signals actually used
10018 by the current target (but for things to work right when switching
10019 targets, all signals should be in the signal tables). */
10022 info_signals_command (const char *signum_exp
, int from_tty
)
10024 enum gdb_signal oursig
;
10026 sig_print_header ();
10030 /* First see if this is a symbol name. */
10031 oursig
= gdb_signal_from_name (signum_exp
);
10032 if (oursig
== GDB_SIGNAL_UNKNOWN
)
10034 /* No, try numeric. */
10036 gdb_signal_from_command (parse_and_eval_long (signum_exp
));
10038 sig_print_info (oursig
);
10043 /* These ugly casts brought to you by the native VAX compiler. */
10044 for (oursig
= GDB_SIGNAL_FIRST
;
10045 (int) oursig
< (int) GDB_SIGNAL_LAST
;
10046 oursig
= (enum gdb_signal
) ((int) oursig
+ 1))
10050 if (oursig
!= GDB_SIGNAL_UNKNOWN
10051 && oursig
!= GDB_SIGNAL_DEFAULT
&& oursig
!= GDB_SIGNAL_0
)
10052 sig_print_info (oursig
);
10055 gdb_printf (_("\nUse the \"%ps\" command to change these tables.\n"),
10056 styled_string (command_style
.style (), "handle"));
10059 /* The $_siginfo convenience variable is a bit special. We don't know
10060 for sure the type of the value until we actually have a chance to
10061 fetch the data. The type can change depending on gdbarch, so it is
10062 also dependent on which thread you have selected.
10064 1. making $_siginfo be an internalvar that creates a new value on
10067 2. making the value of $_siginfo be an lval_computed value. */
10069 /* This function implements the lval_computed support for reading a
10070 $_siginfo value. */
10073 siginfo_value_read (struct value
*v
)
10075 LONGEST transferred
;
10077 /* If we can access registers, so can we access $_siginfo. Likewise
10079 validate_registers_access ();
10082 target_read (current_inferior ()->top_target (),
10083 TARGET_OBJECT_SIGNAL_INFO
,
10085 v
->contents_all_raw ().data (),
10087 v
->type ()->length ());
10089 if (transferred
!= v
->type ()->length ())
10090 error (_("Unable to read siginfo"));
10093 /* This function implements the lval_computed support for writing a
10094 $_siginfo value. */
10097 siginfo_value_write (struct value
*v
, struct value
*fromval
)
10099 LONGEST transferred
;
10101 /* If we can access registers, so can we access $_siginfo. Likewise
10103 validate_registers_access ();
10105 transferred
= target_write (current_inferior ()->top_target (),
10106 TARGET_OBJECT_SIGNAL_INFO
,
10108 fromval
->contents_all_raw ().data (),
10110 fromval
->type ()->length ());
10112 if (transferred
!= fromval
->type ()->length ())
10113 error (_("Unable to write siginfo"));
10116 static const struct lval_funcs siginfo_value_funcs
=
10118 siginfo_value_read
,
10119 siginfo_value_write
10122 /* Return a new value with the correct type for the siginfo object of
10123 the current thread using architecture GDBARCH. Return a void value
10124 if there's no object available. */
10126 static struct value
*
10127 siginfo_make_value (struct gdbarch
*gdbarch
, struct internalvar
*var
,
10130 if (target_has_stack ()
10131 && inferior_ptid
!= null_ptid
10132 && gdbarch_get_siginfo_type_p (gdbarch
))
10134 struct type
*type
= gdbarch_get_siginfo_type (gdbarch
);
10136 return value::allocate_computed (type
, &siginfo_value_funcs
, nullptr);
10139 return value::allocate (builtin_type (gdbarch
)->builtin_void
);
10143 /* infcall_suspend_state contains state about the program itself like its
10144 registers and any signal it received when it last stopped.
10145 This state must be restored regardless of how the inferior function call
10146 ends (either successfully, or after it hits a breakpoint or signal)
10147 if the program is to properly continue where it left off. */
10149 class infcall_suspend_state
10152 /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
10153 once the inferior function call has finished. */
10154 infcall_suspend_state (struct gdbarch
*gdbarch
,
10155 const struct thread_info
*tp
,
10156 struct regcache
*regcache
)
10157 : m_registers (new readonly_detached_regcache (*regcache
))
10159 tp
->save_suspend_to (m_thread_suspend
);
10161 gdb::unique_xmalloc_ptr
<gdb_byte
> siginfo_data
;
10163 if (gdbarch_get_siginfo_type_p (gdbarch
))
10165 struct type
*type
= gdbarch_get_siginfo_type (gdbarch
);
10166 size_t len
= type
->length ();
10168 siginfo_data
.reset ((gdb_byte
*) xmalloc (len
));
10170 if (target_read (current_inferior ()->top_target (),
10171 TARGET_OBJECT_SIGNAL_INFO
, nullptr,
10172 siginfo_data
.get (), 0, len
) != len
)
10174 /* Errors ignored. */
10175 siginfo_data
.reset (nullptr);
10181 m_siginfo_gdbarch
= gdbarch
;
10182 m_siginfo_data
= std::move (siginfo_data
);
10186 /* Return a pointer to the stored register state. */
10188 readonly_detached_regcache
*registers () const
10190 return m_registers
.get ();
10193 /* Restores the stored state into GDBARCH, TP, and REGCACHE. */
10195 void restore (struct gdbarch
*gdbarch
,
10196 struct thread_info
*tp
,
10197 struct regcache
*regcache
) const
10199 tp
->restore_suspend_from (m_thread_suspend
);
10201 if (m_siginfo_gdbarch
== gdbarch
)
10203 struct type
*type
= gdbarch_get_siginfo_type (gdbarch
);
10205 /* Errors ignored. */
10206 target_write (current_inferior ()->top_target (),
10207 TARGET_OBJECT_SIGNAL_INFO
, nullptr,
10208 m_siginfo_data
.get (), 0, type
->length ());
10211 /* The inferior can be gone if the user types "print exit(0)"
10212 (and perhaps other times). */
10213 if (target_has_execution ())
10214 /* NB: The register write goes through to the target. */
10215 regcache
->restore (registers ());
10219 /* How the current thread stopped before the inferior function call was
10221 struct thread_suspend_state m_thread_suspend
;
10223 /* The registers before the inferior function call was executed. */
10224 std::unique_ptr
<readonly_detached_regcache
> m_registers
;
10226 /* Format of SIGINFO_DATA or NULL if it is not present. */
10227 struct gdbarch
*m_siginfo_gdbarch
= nullptr;
10229 /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
10230 gdbarch_get_siginfo_type ()->length (). For different gdbarch the
10231 content would be invalid. */
10232 gdb::unique_xmalloc_ptr
<gdb_byte
> m_siginfo_data
;
10235 infcall_suspend_state_up
10236 save_infcall_suspend_state ()
10238 struct thread_info
*tp
= inferior_thread ();
10239 regcache
*regcache
= get_thread_regcache (tp
);
10240 struct gdbarch
*gdbarch
= regcache
->arch ();
10242 infcall_suspend_state_up inf_state
10243 (new struct infcall_suspend_state (gdbarch
, tp
, regcache
));
10245 /* Having saved the current state, adjust the thread state, discarding
10246 any stop signal information. The stop signal is not useful when
10247 starting an inferior function call, and run_inferior_call will not use
10248 the signal due to its `proceed' call with GDB_SIGNAL_0. */
10249 tp
->set_stop_signal (GDB_SIGNAL_0
);
10254 /* Restore inferior session state to INF_STATE. */
10257 restore_infcall_suspend_state (struct infcall_suspend_state
*inf_state
)
10259 struct thread_info
*tp
= inferior_thread ();
10260 regcache
*regcache
= get_thread_regcache (inferior_thread ());
10261 struct gdbarch
*gdbarch
= regcache
->arch ();
10263 inf_state
->restore (gdbarch
, tp
, regcache
);
10264 discard_infcall_suspend_state (inf_state
);
/* Dispose of INF_STATE without restoring it.  */

void
discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  delete inf_state;
}
10273 readonly_detached_regcache
*
10274 get_infcall_suspend_state_regcache (struct infcall_suspend_state
*inf_state
)
10276 return inf_state
->registers ();
10279 /* infcall_control_state contains state regarding gdb's control of the
10280 inferior itself like stepping control. It also contains session state like
10281 the user's currently selected frame. */
10283 struct infcall_control_state
10285 struct thread_control_state thread_control
;
10286 struct inferior_control_state inferior_control
;
10288 /* Other fields: */
10289 enum stop_stack_kind stop_stack_dummy
= STOP_NONE
;
10290 int stopped_by_random_signal
= 0;
10292 /* ID and level of the selected frame when the inferior function
10294 struct frame_id selected_frame_id
{};
10295 int selected_frame_level
= -1;
10298 /* Save all of the information associated with the inferior<==>gdb
10301 infcall_control_state_up
10302 save_infcall_control_state ()
10304 infcall_control_state_up
inf_status (new struct infcall_control_state
);
10305 struct thread_info
*tp
= inferior_thread ();
10306 struct inferior
*inf
= current_inferior ();
10308 inf_status
->thread_control
= tp
->control
;
10309 inf_status
->inferior_control
= inf
->control
;
10311 tp
->control
.step_resume_breakpoint
= nullptr;
10312 tp
->control
.exception_resume_breakpoint
= nullptr;
10314 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
10315 chain. If caller's caller is walking the chain, they'll be happier if we
10316 hand them back the original chain when restore_infcall_control_state is
10318 tp
->control
.stop_bpstat
= bpstat_copy (tp
->control
.stop_bpstat
);
10320 /* Other fields: */
10321 inf_status
->stop_stack_dummy
= stop_stack_dummy
;
10322 inf_status
->stopped_by_random_signal
= stopped_by_random_signal
;
10324 save_selected_frame (&inf_status
->selected_frame_id
,
10325 &inf_status
->selected_frame_level
);
10330 /* Restore inferior session state to INF_STATUS. */
10333 restore_infcall_control_state (struct infcall_control_state
*inf_status
)
10335 struct thread_info
*tp
= inferior_thread ();
10336 struct inferior
*inf
= current_inferior ();
10338 if (tp
->control
.step_resume_breakpoint
)
10339 tp
->control
.step_resume_breakpoint
->disposition
= disp_del_at_next_stop
;
10341 if (tp
->control
.exception_resume_breakpoint
)
10342 tp
->control
.exception_resume_breakpoint
->disposition
10343 = disp_del_at_next_stop
;
10345 /* Handle the bpstat_copy of the chain. */
10346 bpstat_clear (&tp
->control
.stop_bpstat
);
10348 tp
->control
= inf_status
->thread_control
;
10349 inf
->control
= inf_status
->inferior_control
;
10351 /* Other fields: */
10352 stop_stack_dummy
= inf_status
->stop_stack_dummy
;
10353 stopped_by_random_signal
= inf_status
->stopped_by_random_signal
;
10355 if (target_has_stack ())
10357 restore_selected_frame (inf_status
->selected_frame_id
,
10358 inf_status
->selected_frame_level
);
10365 discard_infcall_control_state (struct infcall_control_state
*inf_status
)
10367 if (inf_status
->thread_control
.step_resume_breakpoint
)
10368 inf_status
->thread_control
.step_resume_breakpoint
->disposition
10369 = disp_del_at_next_stop
;
10371 if (inf_status
->thread_control
.exception_resume_breakpoint
)
10372 inf_status
->thread_control
.exception_resume_breakpoint
->disposition
10373 = disp_del_at_next_stop
;
10375 /* See save_infcall_control_state for info on stop_bpstat. */
10376 bpstat_clear (&inf_status
->thread_control
.stop_bpstat
);
10381 /* See infrun.h. */
10384 clear_exit_convenience_vars (void)
10386 clear_internalvar (lookup_internalvar ("_exitsignal"));
10387 clear_internalvar (lookup_internalvar ("_exitcode"));
10391 /* User interface for reverse debugging:
10392 Set exec-direction / show exec-direction commands
10393 (returns error unless target implements to_set_exec_direction method). */
10395 enum exec_direction_kind execution_direction
= EXEC_FORWARD
;
10396 static const char exec_forward
[] = "forward";
10397 static const char exec_reverse
[] = "reverse";
10398 static const char *exec_direction
= exec_forward
;
10399 static const char *const exec_direction_names
[] = {
10406 set_exec_direction_func (const char *args
, int from_tty
,
10407 struct cmd_list_element
*cmd
)
10409 if (target_can_execute_reverse ())
10411 if (!strcmp (exec_direction
, exec_forward
))
10412 execution_direction
= EXEC_FORWARD
;
10413 else if (!strcmp (exec_direction
, exec_reverse
))
10414 execution_direction
= EXEC_REVERSE
;
10418 exec_direction
= exec_forward
;
10419 error (_("Target does not support this operation."));
10424 show_exec_direction_func (struct ui_file
*out
, int from_tty
,
10425 struct cmd_list_element
*cmd
, const char *value
)
10427 switch (execution_direction
) {
10429 gdb_printf (out
, _("Forward.\n"));
10432 gdb_printf (out
, _("Reverse.\n"));
10435 internal_error (_("bogus execution_direction value: %d"),
10436 (int) execution_direction
);
10441 show_schedule_multiple (struct ui_file
*file
, int from_tty
,
10442 struct cmd_list_element
*c
, const char *value
)
10444 gdb_printf (file
, _("Resuming the execution of threads "
10445 "of all processes is %s.\n"), value
);
/* Function table backing the $_siginfo convenience variable;
   siginfo_make_value produces the variable's value lazily.
   NOTE(review): the remaining initializer members and closing brace
   are not visible in this extraction -- confirm against the full
   source.  */
10448 /* Implementation of `siginfo' variable. */
10450 static const struct internalvar_funcs siginfo_funcs
=
10452 siginfo_make_value
,
10456 /* Callback for infrun's target events source. This is marked when a
10457 thread has a pending status to process. */
10460 infrun_async_inferior_event_handler (gdb_client_data data
)
10462 clear_async_event_handler (infrun_async_inferior_event_token
);
10463 inferior_event_handler (INF_REG_EVENT
);
10467 namespace selftests
10470 /* Verify that when two threads with the same ptid exist (from two different
10471 targets) and one of them changes ptid, we only update inferior_ptid if
10472 it is appropriate. */
10475 infrun_thread_ptid_changed ()
10477 gdbarch
*arch
= current_inferior ()->arch ();
10479 /* The thread which inferior_ptid represents changes ptid. */
10481 scoped_restore_current_pspace_and_thread restore
;
10483 scoped_mock_context
<test_target_ops
> target1 (arch
);
10484 scoped_mock_context
<test_target_ops
> target2 (arch
);
10486 ptid_t
old_ptid (111, 222);
10487 ptid_t
new_ptid (111, 333);
10489 target1
.mock_inferior
.pid
= old_ptid
.pid ();
10490 target1
.mock_thread
.ptid
= old_ptid
;
10491 target1
.mock_inferior
.ptid_thread_map
.clear ();
10492 target1
.mock_inferior
.ptid_thread_map
[old_ptid
] = &target1
.mock_thread
;
10494 target2
.mock_inferior
.pid
= old_ptid
.pid ();
10495 target2
.mock_thread
.ptid
= old_ptid
;
10496 target2
.mock_inferior
.ptid_thread_map
.clear ();
10497 target2
.mock_inferior
.ptid_thread_map
[old_ptid
] = &target2
.mock_thread
;
10499 auto restore_inferior_ptid
= make_scoped_restore (&inferior_ptid
, old_ptid
);
10500 set_current_inferior (&target1
.mock_inferior
);
10502 thread_change_ptid (&target1
.mock_target
, old_ptid
, new_ptid
);
10504 gdb_assert (inferior_ptid
== new_ptid
);
10507 /* A thread with the same ptid as inferior_ptid, but from another target,
10510 scoped_restore_current_pspace_and_thread restore
;
10512 scoped_mock_context
<test_target_ops
> target1 (arch
);
10513 scoped_mock_context
<test_target_ops
> target2 (arch
);
10515 ptid_t
old_ptid (111, 222);
10516 ptid_t
new_ptid (111, 333);
10518 target1
.mock_inferior
.pid
= old_ptid
.pid ();
10519 target1
.mock_thread
.ptid
= old_ptid
;
10520 target1
.mock_inferior
.ptid_thread_map
.clear ();
10521 target1
.mock_inferior
.ptid_thread_map
[old_ptid
] = &target1
.mock_thread
;
10523 target2
.mock_inferior
.pid
= old_ptid
.pid ();
10524 target2
.mock_thread
.ptid
= old_ptid
;
10525 target2
.mock_inferior
.ptid_thread_map
.clear ();
10526 target2
.mock_inferior
.ptid_thread_map
[old_ptid
] = &target2
.mock_thread
;
10528 auto restore_inferior_ptid
= make_scoped_restore (&inferior_ptid
, old_ptid
);
10529 set_current_inferior (&target2
.mock_inferior
);
10531 thread_change_ptid (&target1
.mock_target
, old_ptid
, new_ptid
);
10533 gdb_assert (inferior_ptid
== old_ptid
);
10537 } /* namespace selftests */
10539 #endif /* GDB_SELF_TEST */
10541 INIT_GDB_FILE (infrun
)
10543 struct cmd_list_element
*c
;
10545 /* Register extra event sources in the event loop. */
10546 infrun_async_inferior_event_token
10547 = create_async_event_handler (infrun_async_inferior_event_handler
, nullptr,
10550 cmd_list_element
*info_signals_cmd
10551 = add_info ("signals", info_signals_command
, _("\
10552 What debugger does when program gets various signals.\n\
10553 Specify a signal as argument to print info on that signal only."));
10554 add_info_alias ("handle", info_signals_cmd
, 0);
10556 c
= add_com ("handle", class_run
, handle_command
, _("\
10557 Specify how to handle signals.\n\
10558 Usage: handle SIGNAL [ACTIONS]\n\
10559 Args are signals and actions to apply to those signals.\n\
10560 If no actions are specified, the current settings for the specified signals\n\
10561 will be displayed instead.\n\
10563 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
10564 from 1-15 are allowed for compatibility with old versions of GDB.\n\
10565 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
10566 The special arg \"all\" is recognized to mean all signals except those\n\
10567 used by the debugger, typically SIGTRAP and SIGINT.\n\
10569 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
10570 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
10571 Stop means reenter debugger if this signal happens (implies print).\n\
10572 Print means print a message if this signal happens.\n\
10573 Pass means let program see this signal; otherwise program doesn't know.\n\
10574 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
10575 Pass and Stop may be combined.\n\
10577 Multiple signals may be specified. Signal numbers and signal names\n\
10578 may be interspersed with actions, with the actions being performed for\n\
10579 all signals cumulatively specified."));
10580 set_cmd_completer (c
, handle_completer
);
10582 stop_command
= add_cmd ("stop", class_obscure
,
10583 not_just_help_class_command
, _("\
10584 There is no `stop' command, but you can set a hook on `stop'.\n\
10585 This allows you to set a list of commands to be run each time execution\n\
10586 of the program stops."), &cmdlist
);
10588 add_setshow_boolean_cmd
10589 ("infrun", class_maintenance
, &debug_infrun
,
10590 _("Set inferior debugging."),
10591 _("Show inferior debugging."),
10592 _("When non-zero, inferior specific debugging is enabled."),
10593 nullptr, show_debug_infrun
, &setdebuglist
, &showdebuglist
);
10595 add_setshow_boolean_cmd ("non-stop", no_class
,
10597 Set whether gdb controls the inferior in non-stop mode."), _("\
10598 Show whether gdb controls the inferior in non-stop mode."), _("\
10599 When debugging a multi-threaded program and this setting is\n\
10600 off (the default, also called all-stop mode), when one thread stops\n\
10601 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
10602 all other threads in the program while you interact with the thread of\n\
10603 interest. When you continue or step a thread, you can allow the other\n\
10604 threads to run, or have them remain stopped, but while you inspect any\n\
10605 thread's state, all threads stop.\n\
10607 In non-stop mode, when one thread stops, other threads can continue\n\
10608 to run freely. You'll be able to step each thread independently,\n\
10609 leave it stopped or free to run as needed."),
10615 for (size_t i
= 0; i
< GDB_SIGNAL_LAST
; i
++)
10617 signal_stop
[i
] = 1;
10618 signal_print
[i
] = 1;
10619 signal_program
[i
] = 1;
10620 signal_catch
[i
] = 0;
10623 /* Signals caused by debugger's own actions should not be given to
10624 the program afterwards.
10626 Do not deliver GDB_SIGNAL_TRAP by default, except when the user
10627 explicitly specifies that it should be delivered to the target
10628 program. Typically, that would occur when a user is debugging a
10629 target monitor on a simulator: the target monitor sets a
10630 breakpoint; the simulator encounters this breakpoint and halts
10631 the simulation handing control to GDB; GDB, noting that the stop
10632 address doesn't map to any known breakpoint, returns control back
10633 to the simulator; the simulator then delivers the hardware
10634 equivalent of a GDB_SIGNAL_TRAP to the program being
10636 signal_program
[GDB_SIGNAL_TRAP
] = 0;
10637 signal_program
[GDB_SIGNAL_INT
] = 0;
10639 /* Signals that are not errors should not normally enter the debugger. */
10640 signal_stop
[GDB_SIGNAL_ALRM
] = 0;
10641 signal_print
[GDB_SIGNAL_ALRM
] = 0;
10642 signal_stop
[GDB_SIGNAL_VTALRM
] = 0;
10643 signal_print
[GDB_SIGNAL_VTALRM
] = 0;
10644 signal_stop
[GDB_SIGNAL_PROF
] = 0;
10645 signal_print
[GDB_SIGNAL_PROF
] = 0;
10646 signal_stop
[GDB_SIGNAL_CHLD
] = 0;
10647 signal_print
[GDB_SIGNAL_CHLD
] = 0;
10648 signal_stop
[GDB_SIGNAL_IO
] = 0;
10649 signal_print
[GDB_SIGNAL_IO
] = 0;
10650 signal_stop
[GDB_SIGNAL_POLL
] = 0;
10651 signal_print
[GDB_SIGNAL_POLL
] = 0;
10652 signal_stop
[GDB_SIGNAL_URG
] = 0;
10653 signal_print
[GDB_SIGNAL_URG
] = 0;
10654 signal_stop
[GDB_SIGNAL_WINCH
] = 0;
10655 signal_print
[GDB_SIGNAL_WINCH
] = 0;
10656 signal_stop
[GDB_SIGNAL_PRIO
] = 0;
10657 signal_print
[GDB_SIGNAL_PRIO
] = 0;
10659 /* These signals are used internally by user-level thread
10660 implementations. (See signal(5) on Solaris.) Like the above
10661 signals, a healthy program receives and handles them as part of
10662 its normal operation. */
10663 signal_stop
[GDB_SIGNAL_LWP
] = 0;
10664 signal_print
[GDB_SIGNAL_LWP
] = 0;
10665 signal_stop
[GDB_SIGNAL_WAITING
] = 0;
10666 signal_print
[GDB_SIGNAL_WAITING
] = 0;
10667 signal_stop
[GDB_SIGNAL_CANCEL
] = 0;
10668 signal_print
[GDB_SIGNAL_CANCEL
] = 0;
10669 signal_stop
[GDB_SIGNAL_LIBRT
] = 0;
10670 signal_print
[GDB_SIGNAL_LIBRT
] = 0;
10672 /* Update cached state. */
10673 signal_cache_update (-1);
10675 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support
,
10676 &stop_on_solib_events
, _("\
10677 Set stopping for shared library events."), _("\
10678 Show stopping for shared library events."), _("\
10679 If nonzero, gdb will give control to the user when the dynamic linker\n\
10680 notifies gdb of shared library events. The most common event of interest\n\
10681 to the user would be loading/unloading of a new library."),
10682 set_stop_on_solib_events
,
10683 show_stop_on_solib_events
,
10684 &setlist
, &showlist
);
10686 add_setshow_enum_cmd ("follow-fork-mode", class_run
,
10687 follow_fork_mode_kind_names
,
10688 &follow_fork_mode_string
, _("\
10689 Set debugger response to a program call of fork or vfork."), _("\
10690 Show debugger response to a program call of fork or vfork."), _("\
10691 A fork or vfork creates a new process. follow-fork-mode can be:\n\
10692 parent - the original process is debugged after a fork\n\
10693 child - the new process is debugged after a fork\n\
10694 The unfollowed process will continue to run.\n\
10695 By default, the debugger will follow the parent process."),
10697 show_follow_fork_mode_string
,
10698 &setlist
, &showlist
);
10700 add_setshow_enum_cmd ("follow-exec-mode", class_run
,
10701 follow_exec_mode_names
,
10702 &follow_exec_mode_string
, _("\
10703 Set debugger response to a program call of exec."), _("\
10704 Show debugger response to a program call of exec."), _("\
10705 An exec call replaces the program image of a process.\n\
10707 follow-exec-mode can be:\n\
10709 new - the debugger creates a new inferior and rebinds the process\n\
10710 to this new inferior. The program the process was running before\n\
10711 the exec call can be restarted afterwards by restarting the original\n\
10714 same - the debugger keeps the process bound to the same inferior.\n\
10715 The new executable image replaces the previous executable loaded in\n\
10716 the inferior. Restarting the inferior after the exec call restarts\n\
10717 the executable the process was running after the exec call.\n\
10719 By default, the debugger will use the same inferior."),
10721 show_follow_exec_mode_string
,
10722 &setlist
, &showlist
);
10724 add_setshow_enum_cmd ("scheduler-locking", class_run
,
10725 scheduler_enums
, &scheduler_mode
, _("\
10726 Set mode for locking scheduler during execution."), _("\
10727 Show mode for locking scheduler during execution."), _("\
10728 off == no locking (threads may preempt at any time)\n\
10729 on == full locking (no thread except the current thread may run)\n\
10730 This applies to both normal execution and replay mode.\n\
10731 step == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
10732 In this mode, other threads may run during other commands.\n\
10733 This applies to both normal execution and replay mode.\n\
10734 replay == scheduler locked in replay mode and unlocked during normal execution."),
10735 set_schedlock_func
, /* traps on target vector */
10736 show_scheduler_mode
,
10737 &setlist
, &showlist
);
10739 add_setshow_boolean_cmd ("schedule-multiple", class_run
, &sched_multi
, _("\
10740 Set mode for resuming threads of all processes."), _("\
10741 Show mode for resuming threads of all processes."), _("\
10742 When on, execution commands (such as 'continue' or 'next') resume all\n\
10743 threads of all processes. When off (which is the default), execution\n\
10744 commands only resume the threads of the current process. The set of\n\
10745 threads that are resumed is further refined by the scheduler-locking\n\
10746 mode (see help set scheduler-locking)."),
10748 show_schedule_multiple
,
10749 &setlist
, &showlist
);
10751 add_setshow_boolean_cmd ("step-mode", class_run
, &step_stop_if_no_debug
, _("\
10752 Set mode of the step operation."), _("\
10753 Show mode of the step operation."), _("\
10754 When set, doing a step over a function without debug line information\n\
10755 will stop at the first instruction of that function. Otherwise, the\n\
10756 function is skipped and the step command stops at a different source line."),
10758 show_step_stop_if_no_debug
,
10759 &setlist
, &showlist
);
10761 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run
,
10762 &can_use_displaced_stepping
, _("\
10763 Set debugger's willingness to use displaced stepping."), _("\
10764 Show debugger's willingness to use displaced stepping."), _("\
10765 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
10766 supported by the target architecture. If off, gdb will not use displaced\n\
10767 stepping to step over breakpoints, even if such is supported by the target\n\
10768 architecture. If auto (which is the default), gdb will use displaced stepping\n\
10769 if the target architecture supports it and non-stop mode is active, but will not\n\
10770 use it in all-stop mode (see help set non-stop)."),
10772 show_can_use_displaced_stepping
,
10773 &setlist
, &showlist
);
10775 add_setshow_enum_cmd ("exec-direction", class_run
, exec_direction_names
,
10776 &exec_direction
, _("Set direction of execution.\n\
10777 Options are 'forward' or 'reverse'."),
10778 _("Show direction of execution (forward/reverse)."),
10779 _("Tells gdb whether to execute forward or backward."),
10780 set_exec_direction_func
, show_exec_direction_func
,
10781 &setlist
, &showlist
);
10783 /* Set/show detach-on-fork: user-settable mode. */
10785 add_setshow_boolean_cmd ("detach-on-fork", class_run
, &detach_fork
, _("\
10786 Set whether gdb will detach the child of a fork."), _("\
10787 Show whether gdb will detach the child of a fork."), _("\
10788 Tells gdb whether to detach the child of a fork."),
10789 nullptr, nullptr, &setlist
, &showlist
);
10791 /* Set/show disable address space randomization mode. */
10793 add_setshow_boolean_cmd ("disable-randomization", class_support
,
10794 &disable_randomization
, _("\
10795 Set disabling of debuggee's virtual address space randomization."), _("\
10796 Show disabling of debuggee's virtual address space randomization."), _("\
10797 When this mode is on (which is the default), randomization of the virtual\n\
10798 address space is disabled. Standalone programs run with the randomization\n\
10799 enabled by default on some platforms."),
10800 &set_disable_randomization
,
10801 &show_disable_randomization
,
10802 &setlist
, &showlist
);
10804 /* ptid initializations */
10805 inferior_ptid
= null_ptid
;
10806 target_last_wait_ptid
= minus_one_ptid
;
10808 gdb::observers::thread_ptid_changed
.attach (infrun_thread_ptid_changed
,
10810 gdb::observers::thread_stop_requested
.attach (infrun_thread_stop_requested
,
10812 gdb::observers::inferior_exit
.attach (infrun_inferior_exit
, "infrun");
10813 gdb::observers::inferior_execd
.attach (infrun_inferior_execd
, "infrun");
10815 /* Explicitly create without lookup, since that tries to create a
10816 value with a void typed value, and when we get here, gdbarch
10817 isn't initialized yet. At this point, we're quite sure there
10818 isn't another convenience variable of the same name. */
10819 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs
, nullptr);
10821 add_setshow_boolean_cmd ("observer", no_class
,
10822 &observer_mode_1
, _("\
10823 Set whether gdb controls the inferior in observer mode."), _("\
10824 Show whether gdb controls the inferior in observer mode."), _("\
10825 In observer mode, GDB can get data from the inferior, but not\n\
10826 affect its execution. Registers and memory may not be changed,\n\
10827 breakpoints may not be set, and the program cannot be interrupted\n\
10830 show_observer_mode
,
10835 selftests::register_test ("infrun_thread_ptid_changed",
10836 selftests::infrun_thread_ptid_changed
);