]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame_incremental - gdb/infrun.c
Automatic date update in version.in
[thirdparty/binutils-gdb.git] / gdb / infrun.c
... / ...
CommitLineData
1/* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2025 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21#include "cli/cli-cmds.h"
22#include "cli/cli-style.h"
23#include "displaced-stepping.h"
24#include "infrun.h"
25#include <ctype.h>
26#include "exceptions.h"
27#include "symtab.h"
28#include "frame.h"
29#include "inferior.h"
30#include "breakpoint.h"
31#include "gdbcore.h"
32#include "target.h"
33#include "target-connection.h"
34#include "gdbthread.h"
35#include "annotate.h"
36#include "symfile.h"
37#include "top.h"
38#include "ui.h"
39#include "inf-loop.h"
40#include "regcache.h"
41#include "value.h"
42#include "observable.h"
43#include "language.h"
44#include "solib.h"
45#include "main.h"
46#include "block.h"
47#include "mi/mi-common.h"
48#include "event-top.h"
49#include "record.h"
50#include "record-full.h"
51#include "inline-frame.h"
52#include "jit.h"
53#include "tracepoint.h"
54#include "skip.h"
55#include "probe.h"
56#include "objfiles.h"
57#include "completer.h"
58#include "target-descriptions.h"
59#include "target-dcache.h"
60#include "terminal.h"
61#include "gdbsupport/event-loop.h"
62#include "thread-fsm.h"
63#include "gdbsupport/enum-flags.h"
64#include "progspace-and-thread.h"
65#include <optional>
66#include "arch-utils.h"
67#include "gdbsupport/scope-exit.h"
68#include "gdbsupport/forward-scope-exit.h"
69#include "gdbsupport/gdb_select.h"
70#include <unordered_map>
71#include "async-event.h"
72#include "gdbsupport/selftest.h"
73#include "scoped-mock-context.h"
74#include "test-target.h"
75#include "gdbsupport/common-debug.h"
76#include "gdbsupport/buildargv.h"
77#include "extension.h"
78#include "disasm.h"
79#include "interps.h"
80
/* Prototypes for local functions */

static void sig_print_info (enum gdb_signal);

static void sig_print_header (void);

static void follow_inferior_reset_breakpoints (void);

static bool currently_stepping (struct thread_info *tp);

static void insert_hp_step_resume_breakpoint_at_frame (const frame_info_ptr &);

static void insert_step_resume_breakpoint_at_caller (const frame_info_ptr &);

static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);

static bool maybe_software_singlestep (struct gdbarch *gdbarch);

static void resume (gdb_signal sig);

static void wait_for_inferior (inferior *inf);

static void restart_threads (struct thread_info *event_thread,
			     inferior *inf = nullptr);

static bool start_step_over (void);

static bool step_over_info_valid_p (void);

static bool schedlock_applies (struct thread_info *tp);

/* Asynchronous signal handler registered as event loop source for
   when we have pending events ready to be passed to the core.  */
static struct async_event_handler *infrun_async_inferior_event_token;

/* Stores whether infrun_async was previously enabled or disabled.
   Starts off as -1, indicating "never enabled/disabled".  */
static int infrun_is_async = -1;
/* Forward declaration, needed before the definition appears later in
   this file.  */
static CORE_ADDR update_line_range_start (CORE_ADDR pc,
					  struct execution_control_state *ecs);
121
122/* See infrun.h. */
123
124void
125infrun_async (int enable)
126{
127 if (infrun_is_async != enable)
128 {
129 infrun_is_async = enable;
130
131 infrun_debug_printf ("enable=%d", enable);
132
133 if (enable)
134 mark_async_event_handler (infrun_async_inferior_event_token);
135 else
136 clear_async_event_handler (infrun_async_inferior_event_token);
137 }
138}
139
140/* See infrun.h. */
141
void
mark_infrun_async_event_handler (void)
{
  /* Schedule the infrun inferior-event handler to run on the next
     event-loop iteration.  */
  mark_async_event_handler (infrun_async_inferior_event_token);
}
147
/* When set, stop the 'step' command if we enter a function which has
   no line number information.  The normal behavior is that we step
   over such function.  */
bool step_stop_if_no_debug = false;

/* Implement "show step-mode".  VALUE is the pretty-printed current
   setting, supplied by the CLI core.  */
static void
show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Mode of the step operation is %s.\n"), value);
}
158
159/* proceed and normal_stop use this to notify the user when the
160 inferior stopped in a different thread than it had been running in.
161 It can also be used to find for which thread normal_stop last
162 reported a stop. */
163static thread_info_ref previous_thread;
164
165/* See infrun.h. */
166
167void
168update_previous_thread ()
169{
170 if (inferior_ptid == null_ptid)
171 previous_thread = nullptr;
172 else
173 previous_thread = thread_info_ref::new_reference (inferior_thread ());
174}
175
176/* See infrun.h. */
177
thread_info *
get_previous_thread ()
{
  /* May be nullptr if no stop has been reported yet (or the previous
     thread went away).  */
  return previous_thread.get ();
}
183
/* If set (default for legacy reasons), when following a fork, GDB
   will detach from one of the fork branches, child or parent.
   Exactly which branch is detached depends on 'set follow-fork-mode'
   setting.  */

static bool detach_fork = true;

/* Flag controlling "set debug infrun" logging.  */
bool debug_infrun = false;

/* Implement "show debug infrun".  */
static void
show_debug_infrun (struct ui_file *file, int from_tty,
		   struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Inferior debugging is %s.\n"), value);
}
198
199/* Support for disabling address space randomization. */
200
201bool disable_randomization = true;
202
203static void
204show_disable_randomization (struct ui_file *file, int from_tty,
205 struct cmd_list_element *c, const char *value)
206{
207 if (target_supports_disable_randomization ())
208 gdb_printf (file,
209 _("Disabling randomization of debuggee's "
210 "virtual address space is %s.\n"),
211 value);
212 else
213 gdb_puts (_("Disabling randomization of debuggee's "
214 "virtual address space is unsupported on\n"
215 "this platform.\n"), file);
216}
217
218static void
219set_disable_randomization (const char *args, int from_tty,
220 struct cmd_list_element *c)
221{
222 if (!target_supports_disable_randomization ())
223 error (_("Disabling randomization of debuggee's "
224 "virtual address space is unsupported on\n"
225 "this platform."));
226}
227
228/* User interface for non-stop mode. */
229
230bool non_stop = false;
231static bool non_stop_1 = false;
232
233static void
234set_non_stop (const char *args, int from_tty,
235 struct cmd_list_element *c)
236{
237 if (target_has_execution ())
238 {
239 non_stop_1 = non_stop;
240 error (_("Cannot change this setting while the inferior is running."));
241 }
242
243 non_stop = non_stop_1;
244}
245
/* Implement "show non-stop".  */
static void
show_non_stop (struct ui_file *file, int from_tty,
	       struct cmd_list_element *c, const char *value)
{
  gdb_printf (file,
	      _("Controlling the inferior in non-stop mode is %s.\n"),
	      value);
}
254
255/* "Observer mode" is somewhat like a more extreme version of
256 non-stop, in which all GDB operations that might affect the
257 target's execution have been disabled. */
258
259static bool observer_mode = false;
260static bool observer_mode_1 = false;
261
262static void
263set_observer_mode (const char *args, int from_tty,
264 struct cmd_list_element *c)
265{
266 if (target_has_execution ())
267 {
268 observer_mode_1 = observer_mode;
269 error (_("Cannot change this setting while the inferior is running."));
270 }
271
272 observer_mode = observer_mode_1;
273
274 may_write_registers = !observer_mode;
275 may_write_memory = !observer_mode;
276 may_insert_breakpoints = !observer_mode;
277 may_insert_tracepoints = !observer_mode;
278 /* We can insert fast tracepoints in or out of observer mode,
279 but enable them if we're going into this mode. */
280 if (observer_mode)
281 may_insert_fast_tracepoints = true;
282 may_stop = !observer_mode;
283 update_target_permissions ();
284
285 /* Going *into* observer mode we must force non-stop, then
286 going out we leave it that way. */
287 if (observer_mode)
288 {
289 pagination_enabled = false;
290 non_stop = non_stop_1 = true;
291 }
292
293 if (from_tty)
294 gdb_printf (_("Observer mode is now %s.\n"),
295 (observer_mode ? "on" : "off"));
296}
297
/* Implement "show observer".  */
static void
show_observer_mode (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Observer mode is %s.\n"), value);
}
304
305/* This updates the value of observer mode based on changes in
306 permissions. Note that we are deliberately ignoring the values of
307 may-write-registers and may-write-memory, since the user may have
308 reason to enable these during a session, for instance to turn on a
309 debugging-related global. */
310
311void
312update_observer_mode (void)
313{
314 bool newval = (!may_insert_breakpoints
315 && !may_insert_tracepoints
316 && may_insert_fast_tracepoints
317 && !may_stop
318 && non_stop);
319
320 /* Let the user know if things change. */
321 if (newval != observer_mode)
322 gdb_printf (_("Observer mode is now %s.\n"),
323 (newval ? "on" : "off"));
324
325 observer_mode = observer_mode_1 = newval;
326}
327
/* Tables of how to react to signals; the user sets them.  */

static unsigned char signal_stop[GDB_SIGNAL_LAST];
static unsigned char signal_print[GDB_SIGNAL_LAST];
static unsigned char signal_program[GDB_SIGNAL_LAST];

/* Table of signals that are registered with "catch signal".  A
   non-zero entry indicates that the signal is caught by some "catch
   signal" command.  */
static unsigned char signal_catch[GDB_SIGNAL_LAST];

/* Table of signals that the target may silently handle.
   This is automatically determined from the flags above,
   and simply cached here.  */
static unsigned char signal_pass[GDB_SIGNAL_LAST];

/* For each of the NSIGS signals marked in SIGS, set the corresponding
   entry in FLAGS to 1.  */
#define SET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
	(flags)[signum] = 1; \
  } while (0)

/* For each of the NSIGS signals marked in SIGS, clear the
   corresponding entry in FLAGS.  */
#define UNSET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
	(flags)[signum] = 0; \
  } while (0)
359
360/* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
361 this function is to avoid exporting `signal_program'. */
362
void
update_signals_program_target (void)
{
  /* Push the cached SIGNAL_PROGRAM table down to the target.  */
  target_program_signals (signal_program);
}
368
/* Value to pass to target_resume() to cause all threads to resume.  */

#define RESUME_ALL minus_one_ptid

/* Command list pointer for the "stop" placeholder.  */

static struct cmd_list_element *stop_command;

/* Nonzero if we want to give control to the user when we're notified
   of shared library events by the dynamic linker.  */
int stop_on_solib_events;
380
381/* Enable or disable optional shared library event breakpoints
382 as appropriate when the above flag is changed. */
383
static void
set_stop_on_solib_events (const char *args,
			  int from_tty, struct cmd_list_element *c)
{
  /* Re-sync the optional solib event breakpoints with the new
     setting.  */
  update_solib_breakpoints ();
}
390
/* Implement "show stop-on-solib-events".  */
static void
show_stop_on_solib_events (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Stopping for shared library events is %s.\n"),
	      value);
}
398
/* True after stop if current stack frame should be printed.  */

static bool stop_print_frame;

/* This is a cached copy of the target/ptid/waitstatus of the last
   event returned by target_wait().
   This information is returned by get_last_target_status().  */
static process_stratum_target *target_last_proc_target;
static ptid_t target_last_wait_ptid;
static struct target_waitstatus target_last_waitstatus;

void init_thread_stepping_state (struct thread_info *tss);

/* Valid values for "set follow-fork-mode"; pointer identity against
   these strings is used to test the current mode.  */
static const char follow_fork_mode_child[] = "child";
static const char follow_fork_mode_parent[] = "parent";

static const char *const follow_fork_mode_kind_names[] = {
  follow_fork_mode_child,
  follow_fork_mode_parent,
  nullptr
};

/* Current "set follow-fork-mode" value; defaults to following the
   parent.  */
static const char *follow_fork_mode_string = follow_fork_mode_parent;
/* Implement "show follow-fork-mode".  */
static void
show_follow_fork_mode_string (struct ui_file *file, int from_tty,
			      struct cmd_list_element *c, const char *value)
{
  gdb_printf (file,
	      _("Debugger response to a program "
		"call of fork or vfork is \"%s\".\n"),
	      value);
}
431\f
432
433/* Handle changes to the inferior list based on the type of fork,
434 which process is being followed, and whether the other process
435 should be detached. On entry inferior_ptid must be the ptid of
436 the fork parent. At return inferior_ptid is the ptid of the
437 followed inferior. */
438
static bool
follow_fork_inferior (bool follow_child, bool detach_fork)
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  infrun_debug_printf ("follow_child = %d, detach_fork = %d",
		       follow_child, detach_fork);

  target_waitkind fork_kind = inferior_thread ()->pending_follow.kind ();
  gdb_assert (fork_kind == TARGET_WAITKIND_FORKED
	      || fork_kind == TARGET_WAITKIND_VFORKED);
  bool has_vforked = fork_kind == TARGET_WAITKIND_VFORKED;
  ptid_t parent_ptid = inferior_ptid;
  ptid_t child_ptid = inferior_thread ()->pending_follow.child_ptid ();

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && current_ui->prompt_state == PROMPT_BLOCKED
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
	 child execs or exits.  If we don't let the child run, then
	 the parent stays blocked.  If we're telling the parent to run
	 in the foreground, the user will not be able to ctrl-c to get
	 back the terminal, effectively hanging the debug session.  */
      gdb_printf (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped. Try \"set %ps\" or \"%ps\".\n"),
		  styled_string (command_style.style (), "set detach-on-fork"),
		  styled_string (command_style.style (),
				 "set schedule-multiple"));
      return true;
    }

  inferior *parent_inf = current_inferior ();
  inferior *child_inf = nullptr;
  /* Set true when the child gets a program space of its own (as
     opposed to sharing or inheriting the parent's).  */
  bool child_has_new_pspace = false;

  gdb_assert (parent_inf->thread_waiting_for_vfork_done == nullptr);

  if (!follow_child)
    {
      /* Detach new forked process?  */
      if (detach_fork)
	{
	  /* Before detaching from the child, remove all breakpoints
	     from it.  If we forked, then this has already been taken
	     care of by infrun.c.  If we vforked however, any
	     breakpoint inserted in the parent is visible in the
	     child, even those added while stopped in a vfork
	     catchpoint.  This will remove the breakpoints from the
	     parent also, but they'll be reinserted below.  */
	  if (has_vforked)
	    {
	      /* Keep breakpoints list in sync.  */
	      remove_breakpoints_inf (current_inferior ());
	    }

	  if (print_inferior_events)
	    {
	      /* Ensure that we have a process ptid.  */
	      ptid_t process_ptid = ptid_t (child_ptid.pid ());

	      target_terminal::ours_for_output ();
	      gdb_printf (_("[Detaching after %s from child %s]\n"),
			  has_vforked ? "vfork" : "fork",
			  target_pid_to_str (process_ptid).c_str ());
	    }
	}
      else
	{
	  /* Add process to GDB's tables.  */
	  child_inf = add_inferior (child_ptid.pid ());

	  child_inf->attach_flag = parent_inf->attach_flag;
	  copy_terminal_info (child_inf, parent_inf);
	  child_inf->set_arch (parent_inf->arch ());
	  child_inf->tdesc_info = parent_inf->tdesc_info;

	  child_inf->symfile_flags = SYMFILE_NO_READ;

	  /* If this is a vfork child, then the address-space is
	     shared with the parent.  */
	  if (has_vforked)
	    {
	      child_inf->pspace = parent_inf->pspace;
	      child_inf->aspace = parent_inf->aspace;

	      exec_on_vfork (child_inf);

	      /* The parent will be frozen until the child is done
		 with the shared region.  Keep track of the
		 parent.  */
	      child_inf->vfork_parent = parent_inf;
	      child_inf->pending_detach = false;
	      parent_inf->vfork_child = child_inf;
	      parent_inf->pending_detach = false;
	    }
	  else
	    {
	      child_inf->pspace = new program_space (new_address_space ());
	      child_has_new_pspace = true;
	      child_inf->aspace = child_inf->pspace->aspace;
	      child_inf->removable = true;
	      clone_program_space (child_inf->pspace, parent_inf->pspace);
	    }
	}

      if (has_vforked)
	{
	  /* If we detached from the child, then we have to be careful
	     to not insert breakpoints in the parent until the child
	     is done with the shared memory region.  However, if we're
	     staying attached to the child, then we can and should
	     insert breakpoints, so that we can debug it.  A
	     subsequent child exec or exit is enough to know when the
	     child stops using the parent's address space.  */
	  parent_inf->thread_waiting_for_vfork_done
	    = detach_fork ? inferior_thread () : nullptr;
	  parent_inf->pspace->breakpoints_not_allowed = detach_fork;

	  infrun_debug_printf
	    ("parent_inf->thread_waiting_for_vfork_done == %s",
	     (parent_inf->thread_waiting_for_vfork_done == nullptr
	      ? "nullptr"
	      : (parent_inf->thread_waiting_for_vfork_done
		 ->ptid.to_string ().c_str ())));
	}
    }
  else
    {
      /* Follow the child.  */

      if (print_inferior_events)
	{
	  std::string parent_pid = target_pid_to_str (parent_ptid);
	  std::string child_pid = target_pid_to_str (child_ptid);

	  target_terminal::ours_for_output ();
	  gdb_printf (_("[Attaching after %s %s to child %s]\n"),
		      parent_pid.c_str (),
		      has_vforked ? "vfork" : "fork",
		      child_pid.c_str ());
	}

      /* Add the new inferior first, so that the target_detach below
	 doesn't unpush the target.  */

      child_inf = add_inferior (child_ptid.pid ());

      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);
      child_inf->set_arch (parent_inf->arch ());
      child_inf->tdesc_info = parent_inf->tdesc_info;

      if (has_vforked)
	{
	  /* If this is a vfork child, then the address-space is shared
	     with the parent.  */
	  child_inf->aspace = parent_inf->aspace;
	  child_inf->pspace = parent_inf->pspace;

	  exec_on_vfork (child_inf);
	}
      else if (detach_fork)
	{
	  /* We follow the child and detach from the parent: move the parent's
	     program space to the child.  This simplifies some things, like
	     doing "next" over fork() and landing on the expected line in the
	     child (note, that is broken with "set detach-on-fork off").

	     Before assigning brand new spaces for the parent, remove
	     breakpoints from it: because the new pspace won't match
	     currently inserted locations, the normal detach procedure
	     wouldn't remove them, and we would leave them inserted when
	     detaching.  */
	  remove_breakpoints_inf (parent_inf);

	  child_inf->aspace = parent_inf->aspace;
	  child_inf->pspace = parent_inf->pspace;
	  parent_inf->pspace = new program_space (new_address_space ());
	  parent_inf->aspace = parent_inf->pspace->aspace;
	  clone_program_space (parent_inf->pspace, child_inf->pspace);

	  /* The parent inferior is still the current one, so keep things
	     in sync.  */
	  set_current_program_space (parent_inf->pspace);
	}
      else
	{
	  child_inf->pspace = new program_space (new_address_space ());
	  child_has_new_pspace = true;
	  child_inf->aspace = child_inf->pspace->aspace;
	  child_inf->removable = true;
	  child_inf->symfile_flags = SYMFILE_NO_READ;
	  clone_program_space (child_inf->pspace, parent_inf->pspace);
	}
    }

  gdb_assert (current_inferior () == parent_inf);

  /* If we are setting up an inferior for the child, target_follow_fork is
     responsible for pushing the appropriate targets on the new inferior's
     target stack and adding the initial thread (with ptid CHILD_PTID).

     If we are not setting up an inferior for the child (because following
     the parent and detach_fork is true), it is responsible for detaching
     from CHILD_PTID.  */
  target_follow_fork (child_inf, child_ptid, fork_kind, follow_child,
		      detach_fork);

  gdb::observers::inferior_forked.notify (parent_inf, child_inf, fork_kind);

  /* target_follow_fork must leave the parent as the current inferior.  If we
     want to follow the child, we make it the current one below.  */
  gdb_assert (current_inferior () == parent_inf);

  /* If there is a child inferior, target_follow_fork must have created a thread
     for it.  */
  if (child_inf != nullptr)
    gdb_assert (!child_inf->thread_list.empty ());

  /* Clear the parent thread's pending follow field.  Do this before calling
     target_detach, so that the target can differentiate the two following
     cases:

     - We continue past a fork with "follow-fork-mode == child" &&
       "detach-on-fork on", and therefore detach the parent.  In that
       case the target should not detach the fork child.
     - We run to a fork catchpoint and the user types "detach".  In that
       case, the target should detach the fork child in addition to the
       parent.

     The former case will have pending_follow cleared, the later will have
     pending_follow set.  */
  thread_info *parent_thread = parent_inf->find_thread (parent_ptid);
  gdb_assert (parent_thread != nullptr);
  parent_thread->pending_follow.set_spurious ();

  /* Detach the parent if needed.  */
  if (follow_child)
    {
      /* If we're vforking, we want to hold on to the parent until
	 the child exits or execs.  At child exec or exit time we
	 can remove the old breakpoints from the parent and detach
	 or resume debugging it.  Otherwise, detach the parent now;
	 we'll want to reuse it's program/address spaces, but we
	 can't set them to the child before removing breakpoints
	 from the parent, otherwise, the breakpoints module could
	 decide to remove breakpoints from the wrong process (since
	 they'd be assigned to the same address space).  */

      if (has_vforked)
	{
	  gdb_assert (child_inf->vfork_parent == nullptr);
	  gdb_assert (parent_inf->vfork_child == nullptr);
	  child_inf->vfork_parent = parent_inf;
	  child_inf->pending_detach = false;
	  parent_inf->vfork_child = child_inf;
	  parent_inf->pending_detach = detach_fork;
	}
      else if (detach_fork)
	{
	  if (print_inferior_events)
	    {
	      /* Ensure that we have a process ptid.  */
	      ptid_t process_ptid = ptid_t (parent_ptid.pid ());

	      target_terminal::ours_for_output ();
	      gdb_printf (_("[Detaching after fork from "
			    "parent %s]\n"),
			  target_pid_to_str (process_ptid).c_str ());
	    }

	  target_detach (parent_inf, 0);
	}
    }

  /* If we ended up creating a new inferior, call post_create_inferior to inform
     the various subcomponents.  */
  if (child_inf != nullptr)
    {
      /* If FOLLOW_CHILD, we leave CHILD_INF as the current inferior
	 (do not restore the parent as the current inferior).  */
      std::optional<scoped_restore_current_thread> maybe_restore;

      if (!follow_child && !sched_multi)
	maybe_restore.emplace ();

      switch_to_thread (*child_inf->threads ().begin ());

      post_create_inferior (0, child_has_new_pspace);
    }

  return false;
}
735
736/* Set the last target status as TP having stopped. */
737
static void
set_last_target_status_stopped (thread_info *tp)
{
  /* Record TP's target/ptid with a "stopped with GDB_SIGNAL_0" wait
     status as the last reported target event.  */
  set_last_target_status (tp->inf->process_target (), tp->ptid,
			  target_waitstatus {}.set_stopped (GDB_SIGNAL_0));
}
744
745/* Tell the target to follow the fork we're stopped at. Returns true
746 if the inferior should be resumed; false, if the target for some
747 reason decided it's best not to resume. */
748
749static bool
750follow_fork ()
751{
752 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
753
754 bool follow_child = (follow_fork_mode_string == follow_fork_mode_child);
755 bool should_resume = true;
756
757 /* Copy user stepping state to the new inferior thread. FIXME: the
758 followed fork child thread should have a copy of most of the
759 parent thread structure's run control related fields, not just these.
760 Initialized to avoid "may be used uninitialized" warnings from gcc. */
761 struct breakpoint *step_resume_breakpoint = nullptr;
762 struct breakpoint *exception_resume_breakpoint = nullptr;
763 CORE_ADDR step_range_start = 0;
764 CORE_ADDR step_range_end = 0;
765 int current_line = 0;
766 symtab *current_symtab = nullptr;
767 struct frame_id step_frame_id = { 0 };
768
769 if (!non_stop)
770 {
771 thread_info *cur_thr = inferior_thread ();
772
773 ptid_t resume_ptid
774 = user_visible_resume_ptid (cur_thr->control.stepping_command);
775 process_stratum_target *resume_target
776 = user_visible_resume_target (resume_ptid);
777
778 /* Check if there's a thread that we're about to resume, other
779 than the current, with an unfollowed fork/vfork. If so,
780 switch back to it, to tell the target to follow it (in either
781 direction). We'll afterwards refuse to resume, and inform
782 the user what happened. */
783 for (thread_info *tp : all_non_exited_threads (resume_target,
784 resume_ptid))
785 {
786 if (tp == cur_thr)
787 continue;
788
789 /* follow_fork_inferior clears tp->pending_follow, and below
790 we'll need the value after the follow_fork_inferior
791 call. */
792 target_waitkind kind = tp->pending_follow.kind ();
793
794 if (kind != TARGET_WAITKIND_SPURIOUS)
795 {
796 infrun_debug_printf ("need to follow-fork [%s] first",
797 tp->ptid.to_string ().c_str ());
798
799 switch_to_thread (tp);
800
801 /* Set up inferior(s) as specified by the caller, and
802 tell the target to do whatever is necessary to follow
803 either parent or child. */
804 if (follow_child)
805 {
806 /* The thread that started the execution command
807 won't exist in the child. Abort the command and
808 immediately stop in this thread, in the child,
809 inside fork. */
810 should_resume = false;
811 }
812 else
813 {
814 /* Following the parent, so let the thread fork its
815 child freely, it won't influence the current
816 execution command. */
817 if (follow_fork_inferior (follow_child, detach_fork))
818 {
819 /* Target refused to follow, or there's some
820 other reason we shouldn't resume. */
821 switch_to_thread (cur_thr);
822 set_last_target_status_stopped (cur_thr);
823 return false;
824 }
825
826 /* If we're following a vfork, when we need to leave
827 the just-forked thread as selected, as we need to
828 solo-resume it to collect the VFORK_DONE event.
829 If we're following a fork, however, switch back
830 to the original thread that we continue stepping
831 it, etc. */
832 if (kind != TARGET_WAITKIND_VFORKED)
833 {
834 gdb_assert (kind == TARGET_WAITKIND_FORKED);
835 switch_to_thread (cur_thr);
836 }
837 }
838
839 break;
840 }
841 }
842 }
843
844 thread_info *tp = inferior_thread ();
845
846 /* If there were any forks/vforks that were caught and are now to be
847 followed, then do so now. */
848 switch (tp->pending_follow.kind ())
849 {
850 case TARGET_WAITKIND_FORKED:
851 case TARGET_WAITKIND_VFORKED:
852 {
853 ptid_t parent, child;
854 std::unique_ptr<struct thread_fsm> thread_fsm;
855
856 /* If the user did a next/step, etc, over a fork call,
857 preserve the stepping state in the fork child. */
858 if (follow_child && should_resume)
859 {
860 step_resume_breakpoint = clone_momentary_breakpoint
861 (tp->control.step_resume_breakpoint);
862 step_range_start = tp->control.step_range_start;
863 step_range_end = tp->control.step_range_end;
864 current_line = tp->current_line;
865 current_symtab = tp->current_symtab;
866 step_frame_id = tp->control.step_frame_id;
867 exception_resume_breakpoint
868 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
869 thread_fsm = tp->release_thread_fsm ();
870
871 /* For now, delete the parent's sr breakpoint, otherwise,
872 parent/child sr breakpoints are considered duplicates,
873 and the child version will not be installed. Remove
874 this when the breakpoints module becomes aware of
875 inferiors and address spaces. */
876 delete_step_resume_breakpoint (tp);
877 tp->control.step_range_start = 0;
878 tp->control.step_range_end = 0;
879 tp->control.step_frame_id = null_frame_id;
880 delete_exception_resume_breakpoint (tp);
881 }
882
883 parent = inferior_ptid;
884 child = tp->pending_follow.child_ptid ();
885
886 /* If handling a vfork, stop all the inferior's threads, they will be
887 restarted when the vfork shared region is complete. */
888 if (tp->pending_follow.kind () == TARGET_WAITKIND_VFORKED
889 && target_is_non_stop_p ())
890 stop_all_threads ("handling vfork", tp->inf);
891
892 process_stratum_target *parent_targ = tp->inf->process_target ();
893 /* Set up inferior(s) as specified by the caller, and tell the
894 target to do whatever is necessary to follow either parent
895 or child. */
896 if (follow_fork_inferior (follow_child, detach_fork))
897 {
898 /* Target refused to follow, or there's some other reason
899 we shouldn't resume. */
900 should_resume = 0;
901 }
902 else
903 {
904 /* If we followed the child, switch to it... */
905 if (follow_child)
906 {
907 tp = parent_targ->find_thread (child);
908 switch_to_thread (tp);
909
910 /* ... and preserve the stepping state, in case the
911 user was stepping over the fork call. */
912 if (should_resume)
913 {
914 tp->control.step_resume_breakpoint
915 = step_resume_breakpoint;
916 tp->control.step_range_start = step_range_start;
917 tp->control.step_range_end = step_range_end;
918 tp->current_line = current_line;
919 tp->current_symtab = current_symtab;
920 tp->control.step_frame_id = step_frame_id;
921 tp->control.exception_resume_breakpoint
922 = exception_resume_breakpoint;
923 tp->set_thread_fsm (std::move (thread_fsm));
924 }
925 else
926 {
927 /* If we get here, it was because we're trying to
928 resume from a fork catchpoint, but, the user
929 has switched threads away from the thread that
930 forked. In that case, the resume command
931 issued is most likely not applicable to the
932 child, so just warn, and refuse to resume. */
933 warning (_("Not resuming: switched threads "
934 "before following fork child."));
935 }
936
937 /* Reset breakpoints in the child as appropriate. */
938 follow_inferior_reset_breakpoints ();
939 }
940 }
941 }
942 break;
943 case TARGET_WAITKIND_SPURIOUS:
944 /* Nothing to follow. */
945 break;
946 default:
947 internal_error ("Unexpected pending_follow.kind %d\n",
948 tp->pending_follow.kind ());
949 break;
950 }
951
952 if (!should_resume)
953 set_last_target_status_stopped (tp);
954 return should_resume;
955}
956
/* Re-associate and re-insert breakpoints after switching to a
   newly-followed fork child.  */
static void
follow_inferior_reset_breakpoints (void)
{
  struct thread_info *tp = inferior_thread ();

  /* Was there a step_resume breakpoint?  (There was if the user
     did a "next" at the fork() call.)  If so, explicitly reset its
     thread number.  Cloned step_resume breakpoints are disabled on
     creation, so enable it here now that it is associated with the
     correct thread.

     step_resumes are a form of bp that are made to be per-thread.
     Since we created the step_resume bp when the parent process
     was being debugged, and now are switching to the child process,
     from the breakpoint package's viewpoint, that's a switch of
     "threads".  We must update the bp's notion of which thread
     it is for, or it'll be ignored when it triggers.  */

  if (tp->control.step_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
      tp->control.step_resume_breakpoint->first_loc ().enabled = 1;
    }

  /* Treat exception_resume breakpoints like step_resume breakpoints.  */
  if (tp->control.exception_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
      tp->control.exception_resume_breakpoint->first_loc ().enabled = 1;
    }

  /* Reinsert all breakpoints in the child.  The user may have set
     breakpoints after catching the fork, in which case those
     were never set in the child, but only in the parent.  This makes
     sure the inserted breakpoints match the breakpoint list.  */

  breakpoint_re_set ();
  insert_breakpoints ();
}
996
997/* The child has exited or execed: resume THREAD, a thread of the parent,
998 if it was meant to be executing. */
999
1000static void
1001proceed_after_vfork_done (thread_info *thread)
1002{
1003 if (thread->state == THREAD_RUNNING
1004 && !thread->executing ()
1005 && !thread->stop_requested
1006 && thread->stop_signal () == GDB_SIGNAL_0)
1007 {
1008 infrun_debug_printf ("resuming vfork parent thread %s",
1009 thread->ptid.to_string ().c_str ());
1010
1011 switch_to_thread (thread);
1012 clear_proceed_status (0);
1013 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
1014 }
1015}
1016
/* Called whenever we notice an exec or exit event, to handle
   detaching or resuming a vfork parent.  EXEC is non-zero for an exec
   event, zero for an exit event.  */

static void
handle_vfork_child_exec_or_exit (int exec)
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  struct inferior *inf = current_inferior ();

  if (inf->vfork_parent)
    {
      inferior *resume_parent = nullptr;

      /* This exec or exit marks the end of the shared memory region
	 between the parent and the child.  Break the bonds.  */
      inferior *vfork_parent = inf->vfork_parent;
      inf->vfork_parent->vfork_child = nullptr;
      inf->vfork_parent = nullptr;

      /* If the user wanted to detach from the parent, now is the
	 time.  */
      if (vfork_parent->pending_detach)
	{
	  struct program_space *pspace;

	  /* follow-fork child, detach-on-fork on.  */

	  vfork_parent->pending_detach = false;

	  scoped_restore_current_pspace_and_thread restore_thread;

	  /* We're letting loose of the parent.  */
	  thread_info *tp = any_live_thread_of_inferior (vfork_parent);
	  switch_to_thread (tp);

	  /* We're about to detach from the parent, which implicitly
	     removes breakpoints from its address space.  There's a
	     catch here: we want to reuse the spaces for the child,
	     but, parent/child are still sharing the pspace at this
	     point, although the exec in reality makes the kernel give
	     the child a fresh set of new pages.  The problem here is
	     that the breakpoints module being unaware of this, would
	     likely choose the child process to write to the parent
	     address space.  Swapping the child temporarily away from
	     the spaces has the desired effect.  Yes, this is "sort
	     of" a hack.  */

	  pspace = inf->pspace;
	  inf->pspace = nullptr;
	  address_space_ref_ptr aspace = std::move (inf->aspace);

	  if (print_inferior_events)
	    {
	      std::string pidstr
		= target_pid_to_str (ptid_t (vfork_parent->pid));

	      target_terminal::ours_for_output ();

	      if (exec)
		{
		  gdb_printf (_("[Detaching vfork parent %s "
				"after child exec]\n"), pidstr.c_str ());
		}
	      else
		{
		  gdb_printf (_("[Detaching vfork parent %s "
				"after child exit]\n"), pidstr.c_str ());
		}
	    }

	  target_detach (vfork_parent, 0);

	  /* Put it back.  */
	  inf->pspace = pspace;
	  inf->aspace = aspace;
	}
      else if (exec)
	{
	  /* We're staying attached to the parent, so, really give the
	     child a new address space.  */
	  inf->pspace = new program_space (maybe_new_address_space ());
	  inf->aspace = inf->pspace->aspace;
	  inf->removable = true;
	  set_current_program_space (inf->pspace);

	  resume_parent = vfork_parent;
	}
      else
	{
	  /* If this is a vfork child exiting, then the pspace and
	     aspaces were shared with the parent.  Since we're
	     reporting the process exit, we'll be mourning all that is
	     found in the address space, and switching to null_ptid,
	     preparing to start a new inferior.  But, since we don't
	     want to clobber the parent's address/program spaces, we
	     go ahead and create a new one for this exiting
	     inferior.  */

	  scoped_restore_current_thread restore_thread;

	  /* Temporarily switch to the vfork parent, to facilitate ptrace
	     calls done during maybe_new_address_space.  */
	  switch_to_thread (any_live_thread_of_inferior (vfork_parent));
	  address_space_ref_ptr aspace = maybe_new_address_space ();

	  /* Switch back to the vfork child inferior.  Switch to no-thread
	     while running clone_program_space, so that clone_program_space
	     doesn't want to read the selected frame of a dead process.  */
	  switch_to_inferior_no_thread (inf);

	  inf->pspace = new program_space (std::move (aspace));
	  inf->aspace = inf->pspace->aspace;
	  set_current_program_space (inf->pspace);
	  inf->removable = true;
	  inf->symfile_flags = SYMFILE_NO_READ;
	  clone_program_space (inf->pspace, vfork_parent->pspace);

	  resume_parent = vfork_parent;
	}

      gdb_assert (current_program_space == inf->pspace);

      if (non_stop && resume_parent != nullptr)
	{
	  /* If the user wanted the parent to be running, let it go
	     free now.  */
	  scoped_restore_current_thread restore_thread;

	  infrun_debug_printf ("resuming vfork parent process %d",
			       resume_parent->pid);

	  for (thread_info *thread : resume_parent->threads ())
	    proceed_after_vfork_done (thread);
	}
    }
}
1154
/* Handle TARGET_WAITKIND_VFORK_DONE, reported by EVENT_THREAD.  */

static void
handle_vfork_done (thread_info *event_thread)
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  /* We only care about this event if inferior::thread_waiting_for_vfork_done is
     set, that is if we are waiting for a vfork child not under our control
     (because we detached it) to exec or exit.

     If an inferior has vforked and we are debugging the child, we don't use
     the vfork-done event to get notified about the end of the shared address
     space window.  We rely instead on the child's exec or exit event, and the
     inferior::vfork_{parent,child} fields are used instead.  See
     handle_vfork_child_exec_or_exit for that.  */
  if (event_thread->inf->thread_waiting_for_vfork_done == nullptr)
    {
      infrun_debug_printf ("not waiting for a vfork-done event");
      return;
    }

  /* We stopped all threads (other than the vforking thread) of the inferior in
     follow_fork and kept them stopped until now.  It should therefore not be
     possible for another thread to have reported a vfork during that window.
     If THREAD_WAITING_FOR_VFORK_DONE is set, it has to be the same thread whose
     vfork-done we are handling right now.  */
  gdb_assert (event_thread->inf->thread_waiting_for_vfork_done == event_thread);

  event_thread->inf->thread_waiting_for_vfork_done = nullptr;

  /* The shared address space window is over, so it is safe to place
     breakpoints in this program space again.  */
  event_thread->inf->pspace->breakpoints_not_allowed = 0;

  /* On non-stop targets, we stopped all the inferior's threads in follow_fork,
     resume them now.  On all-stop targets, everything that needs to be resumed
     will be when we resume the event thread.  */
  if (target_is_non_stop_p ())
    {
      /* restart_threads and start_step_over may change the current thread, make
	 sure we leave the event thread as the current thread.  */
      scoped_restore_current_thread restore_thread;

      insert_breakpoints ();
      start_step_over ();

      if (!step_over_info_valid_p ())
	restart_threads (event_thread, event_thread->inf);
    }
}
1203
/* Enum strings for "set|show follow-exec-mode".  */

static const char follow_exec_mode_new[] = "new";
static const char follow_exec_mode_same[] = "same";
static const char *const follow_exec_mode_names[] =
{
  follow_exec_mode_new,
  follow_exec_mode_same,
  nullptr,
};

/* The current "follow-exec-mode" setting; one of the strings above.  */
static const char *follow_exec_mode_string = follow_exec_mode_same;

/* Implementation of "show follow-exec-mode".  */
static void
show_follow_exec_mode_string (struct ui_file *file, int from_tty,
			      struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Follow exec mode is \"%s\".\n"), value);
}
1222
/* Follow an exec event reported for a thread of process PTID.
   EXEC_FILE_TARGET is the pathname of the new executable image, as
   seen in the target filesystem, and is assumed to be non-NULL.  */

static void
follow_exec (ptid_t ptid, const char *exec_file_target)
{
  int pid = ptid.pid ();
  ptid_t process_ptid;

  /* Switch terminal for any messages produced e.g. by
     breakpoint_re_set.  */
  target_terminal::ours_for_output ();

  /* This is an exec event that we actually wish to pay attention to.
     Refresh our symbol table to the newly exec'd program, remove any
     momentary bp's, etc.

     If there are breakpoints, they aren't really inserted now,
     since the exec() transformed our inferior into a fresh set
     of instructions.

     We want to preserve symbolic breakpoints on the list, since
     we have hopes that they can be reset after the new a.out's
     symbol table is read.

     However, any "raw" breakpoints must be removed from the list
     (e.g., the solib bp's), since their address is probably invalid
     now.

     And, we DON'T want to call delete_breakpoints() here, since
     that may write the bp's "shadow contents" (the instruction
     value that was overwritten with a TRAP instruction).  Since
     we now have a new a.out, those shadow contents aren't valid.  */

  mark_breakpoints_out (current_program_space);

  /* The target reports the exec event to the main thread, even if
     some other thread does the exec, and even if the main thread was
     stopped or already gone.  We may still have non-leader threads of
     the process on our list.  E.g., on targets that don't have thread
     exit events (like remote) and nothing forces an update of the
     thread list up to here.  When debugging remotely, it's best to
     avoid extra traffic, when possible, so avoid syncing the thread
     list with the target, and instead go ahead and delete all threads
     of the process but the one that reported the event.  Note this must
     be done before calling update_breakpoints_after_exec, as
     otherwise clearing the threads' resources would reference stale
     thread breakpoints -- it may have been one of these threads that
     stepped across the exec.  We could just clear their stepping
     states, but as long as we're iterating, might as well delete
     them.  Deleting them now rather than at the next user-visible
     stop provides a nicer sequence of events for user and MI
     notifications.  */
  for (thread_info *th : all_threads_safe ())
    if (th->ptid.pid () == pid && th->ptid != ptid)
      delete_thread (th);

  /* We also need to clear any left over stale state for the
     leader/event thread.  E.g., if there was any step-resume
     breakpoint or similar, it's gone now.  We cannot truly
     step-to-next statement through an exec().  */
  thread_info *th = inferior_thread ();
  th->control.step_resume_breakpoint = nullptr;
  th->control.exception_resume_breakpoint = nullptr;
  th->control.single_step_breakpoints = nullptr;
  th->control.step_range_start = 0;
  th->control.step_range_end = 0;

  /* The user may have had the main thread held stopped in the
     previous image (e.g., schedlock on, or non-stop).  Release
     it now.  */
  th->stop_requested = false;

  update_breakpoints_after_exec ();

  /* What is this a.out's name?  */
  process_ptid = ptid_t (pid);
  gdb_printf (_("%s is executing new program: %s\n"),
	      target_pid_to_str (process_ptid).c_str (),
	      exec_file_target);

  /* We've followed the inferior through an exec.  Therefore, the
     inferior has essentially been killed & reborn.  */

  breakpoint_init_inferior (current_inferior (), inf_execd);

  gdb::unique_xmalloc_ptr<char> exec_file_host
    = exec_file_find (exec_file_target, nullptr);

  /* If we were unable to map the executable target pathname onto a host
     pathname, tell the user that.  Otherwise GDB's subsequent behavior
     is confusing.  Maybe it would even be better to stop at this point
     so that the user can specify a file manually before continuing.  */
  if (exec_file_host == nullptr)
    warning (_("Could not load symbols for executable %s.\n"
	       "Do you need \"%ps\"?"),
	     exec_file_target,
	     styled_string (command_style.style (), "set sysroot"));

  /* Reset the shared library package.  This ensures that we get a
     shlib event when the child reaches "_start", at which point the
     dld will have had a chance to initialize the child.  */
  /* Also, loading a symbol file below may trigger symbol lookups, and
     we don't want those to be satisfied by the libraries of the
     previous incarnation of this process.  */
  no_shared_libraries (current_program_space);
  current_program_space->unset_solib_ops ();

  inferior *execing_inferior = current_inferior ();
  inferior *following_inferior;

  if (follow_exec_mode_string == follow_exec_mode_new)
    {
      /* The user wants to keep the old inferior and program spaces
	 around.  Create a new fresh one, and switch to it.  */

      /* Do exit processing for the original inferior before setting the new
	 inferior's pid.  Having two inferiors with the same pid would confuse
	 find_inferior_p(t)id.  Transfer the terminal state and info from the
	 old to the new inferior.  */
      following_inferior = add_inferior_with_spaces ();

      swap_terminal_info (following_inferior, execing_inferior);
      exit_inferior (execing_inferior);

      following_inferior->pid = pid;
    }
  else
    {
      /* follow-exec-mode is "same", we continue execution in the execing
	 inferior.  */
      following_inferior = execing_inferior;

      /* The old description may no longer be fit for the new image.
	 E.g, a 64-bit process exec'ed a 32-bit process.  Clear the
	 old description; we'll read a new one below.  No need to do
	 this on "follow-exec-mode new", as the old inferior stays
	 around (its description is later cleared/refetched on
	 restart).  */
      target_clear_description ();
    }

  target_follow_exec (following_inferior, ptid, exec_file_target);

  gdb_assert (current_inferior () == following_inferior);
  gdb_assert (current_program_space == following_inferior->pspace);

  /* Attempt to open the exec file.  SYMFILE_DEFER_BP_RESET is used
     because the proper displacement for a PIE (Position Independent
     Executable) main symbol file will only be computed by
     solib_create_inferior_hook below.  breakpoint_re_set would fail
     to insert the breakpoints with the zero displacement.  */
  try_open_exec_file (exec_file_host.get (), following_inferior,
		      SYMFILE_DEFER_BP_RESET);

  /* If the target can specify a description, read it.  Must do this
     after flipping to the new executable (because the target supplied
     description must be compatible with the executable's
     architecture, and the old executable may e.g., be 32-bit, while
     the new one 64-bit), and before anything involving memory or
     registers.  */
  target_find_description ();

  current_program_space->set_solib_ops
    (gdbarch_make_solib_ops (following_inferior->arch ()));
  gdb::observers::inferior_execd.notify (execing_inferior, following_inferior);

  breakpoint_re_set ();

  /* Reinsert all breakpoints.  (Those which were symbolic have
     been reset to the proper address in the new a.out, thanks
     to symbol_file_command...).  */
  insert_breakpoints ();

  /* The next resume of this inferior should bring it to the shlib
     startup breakpoints.  (If the user had also set bp's on
     "main" from the old (parent) process, then they'll auto-
     matically get reset there in the new process.).  */
}
1401
/* The chain of threads that need to do a step-over operation to get
   past e.g., a breakpoint.  What technique is used to step over the
   breakpoint/watchpoint does not matter -- all threads end up in the
   same queue, to maintain rough temporal order of execution, in order
   to avoid starvation, otherwise, we could e.g., find ourselves
   constantly stepping the same couple of threads past their
   breakpoints over and over, if the single-step finishes fast
   enough.  */
thread_step_over_list global_thread_step_over_list;
1410
/* Bit flags indicating what the thread needs to step over.  */

enum step_over_what_flag
  {
    /* Step over a breakpoint.  */
    STEP_OVER_BREAKPOINT = 1,

    /* Step past a non-continuable watchpoint, in order to let the
       instruction execute so we can evaluate the watchpoint
       expression.  */
    STEP_OVER_WATCHPOINT = 2
  };

/* Flag-set of the above, so multiple step-over reasons can be
   combined.  */
DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag, step_over_what);
1424
/* Info about an instruction that is being stepped over.  */

struct step_over_info
{
  /* If we're stepping past a breakpoint, this is the address space
     and address of the instruction the breakpoint is set at.  We'll
     skip inserting all breakpoints here.  Valid iff ASPACE is
     non-NULL.  */
  const address_space *aspace = nullptr;
  CORE_ADDR address = 0;

  /* The instruction being stepped over triggers a nonsteppable
     watchpoint.  If true, we'll skip inserting watchpoints.  */
  int nonsteppable_watchpoint_p = 0;

  /* The global number of the thread doing the step-over, or -1 when
     no step-over is in progress.  */
  int thread = -1;
};
1443
/* The step-over info of the location that is being stepped over.

   Note that with async/breakpoint always-inserted mode, a user might
   set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
   being stepped over.  As setting a new breakpoint inserts all
   breakpoints, we need to make sure the breakpoint being stepped over
   isn't inserted then.  We do that by only clearing the step-over
   info when the step-over is actually finished (or aborted).

   Presently GDB can only step over one breakpoint at any given time.
   Given threads that can't run code in the same address space as the
   breakpoint's can't really miss the breakpoint, GDB could be taught
   to step-over at most one breakpoint per address space (so this info
   could move to the address space object if/when GDB is extended).
   The set of breakpoints being stepped over will normally be much
   smaller than the set of all breakpoints, so a flag in the
   breakpoint location structure would be wasteful.  A separate list
   also saves complexity and run-time, as otherwise we'd have to go
   through all breakpoint locations clearing their flag whenever we
   start a new sequence.  Similar considerations weigh against storing
   this info in the thread object.  Plus, not all step overs actually
   have breakpoint locations -- e.g., stepping past a single-step
   breakpoint, or stepping to complete a non-continuable
   watchpoint.  */
static struct step_over_info step_over_info;
1469
1470/* Record the address of the breakpoint/instruction we're currently
1471 stepping over.
1472 N.B. We record the aspace and address now, instead of say just the thread,
1473 because when we need the info later the thread may be running. */
1474
1475static void
1476set_step_over_info (const address_space *aspace, CORE_ADDR address,
1477 int nonsteppable_watchpoint_p,
1478 int thread)
1479{
1480 step_over_info.aspace = aspace;
1481 step_over_info.address = address;
1482 step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
1483 step_over_info.thread = thread;
1484}
1485
1486/* Called when we're not longer stepping over a breakpoint / an
1487 instruction, so all breakpoints are free to be (re)inserted. */
1488
1489static void
1490clear_step_over_info (void)
1491{
1492 infrun_debug_printf ("clearing step over info");
1493 step_over_info.aspace = nullptr;
1494 step_over_info.address = 0;
1495 step_over_info.nonsteppable_watchpoint_p = 0;
1496 step_over_info.thread = -1;
1497}
1498
1499/* See infrun.h. */
1500
1501int
1502stepping_past_instruction_at (struct address_space *aspace,
1503 CORE_ADDR address)
1504{
1505 return (step_over_info.aspace != nullptr
1506 && breakpoint_address_match (aspace, address,
1507 step_over_info.aspace,
1508 step_over_info.address));
1509}
1510
1511/* See infrun.h. */
1512
1513int
1514thread_is_stepping_over_breakpoint (int thread)
1515{
1516 return (step_over_info.thread != -1
1517 && thread == step_over_info.thread);
1518}
1519
/* See infrun.h.  Accessor for the global step-over state's
   nonsteppable-watchpoint flag.  */

int
stepping_past_nonsteppable_watchpoint (void)
{
  return step_over_info.nonsteppable_watchpoint_p;
}
1527
1528/* Returns true if step-over info is valid. */
1529
1530static bool
1531step_over_info_valid_p (void)
1532{
1533 return (step_over_info.aspace != nullptr
1534 || stepping_past_nonsteppable_watchpoint ());
1535}
1536
1537\f
1538/* Displaced stepping. */
1539
1540/* In non-stop debugging mode, we must take special care to manage
1541 breakpoints properly; in particular, the traditional strategy for
1542 stepping a thread past a breakpoint it has hit is unsuitable.
1543 'Displaced stepping' is a tactic for stepping one thread past a
1544 breakpoint it has hit while ensuring that other threads running
1545 concurrently will hit the breakpoint as they should.
1546
1547 The traditional way to step a thread T off a breakpoint in a
1548 multi-threaded program in all-stop mode is as follows:
1549
1550 a0) Initially, all threads are stopped, and breakpoints are not
1551 inserted.
1552 a1) We single-step T, leaving breakpoints uninserted.
1553 a2) We insert breakpoints, and resume all threads.
1554
1555 In non-stop debugging, however, this strategy is unsuitable: we
1556 don't want to have to stop all threads in the system in order to
1557 continue or step T past a breakpoint. Instead, we use displaced
1558 stepping:
1559
1560 n0) Initially, T is stopped, other threads are running, and
1561 breakpoints are inserted.
1562 n1) We copy the instruction "under" the breakpoint to a separate
1563 location, outside the main code stream, making any adjustments
1564 to the instruction, register, and memory state as directed by
1565 T's architecture.
1566 n2) We single-step T over the instruction at its new location.
1567 n3) We adjust the resulting register and memory state as directed
1568 by T's architecture. This includes resetting T's PC to point
1569 back into the main instruction stream.
1570 n4) We resume T.
1571
1572 This approach depends on the following gdbarch methods:
1573
1574 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1575 indicate where to copy the instruction, and how much space must
1576 be reserved there. We use these in step n1.
1577
   - gdbarch_displaced_step_copy_insn copies an instruction to a new
1579 address, and makes any necessary adjustments to the instruction,
1580 register contents, and memory. We use this in step n1.
1581
1582 - gdbarch_displaced_step_fixup adjusts registers and memory after
1583 we have successfully single-stepped the instruction, to yield the
1584 same effect the instruction would have had if we had executed it
1585 at its original address. We use this in step n3.
1586
1587 The gdbarch_displaced_step_copy_insn and
1588 gdbarch_displaced_step_fixup functions must be written so that
1589 copying an instruction with gdbarch_displaced_step_copy_insn,
1590 single-stepping across the copied instruction, and then applying
   gdbarch_displaced_step_fixup should have the same effects on the
1592 thread's memory and registers as stepping the instruction in place
1593 would have. Exactly which responsibilities fall to the copy and
1594 which fall to the fixup is up to the author of those functions.
1595
1596 See the comments in gdbarch.sh for details.
1597
1598 Note that displaced stepping and software single-step cannot
1599 currently be used in combination, although with some care I think
1600 they could be made to. Software single-step works by placing
1601 breakpoints on all possible subsequent instructions; if the
1602 displaced instruction is a PC-relative jump, those breakpoints
1603 could fall in very strange places --- on pages that aren't
1604 executable, or at addresses that are not proper instruction
1605 boundaries. (We do generally let other threads run while we wait
1606 to hit the software single-step breakpoint, and they might
1607 encounter such a corrupted instruction.) One way to work around
1608 this would be to have gdbarch_displaced_step_copy_insn fully
1609 simulate the effect of PC-relative instructions (and return NULL)
1610 on architectures that use software single-stepping.
1611
1612 In non-stop mode, we can have independent and simultaneous step
1613 requests, so more than one thread may need to simultaneously step
1614 over a breakpoint. The current implementation assumes there is
1615 only one scratch space per process. In this case, we have to
1616 serialize access to the scratch space. If thread A wants to step
1617 over a breakpoint, but we are currently waiting for some other
1618 thread to complete a displaced step, we leave thread A stopped and
1619 place it in the displaced_step_request_queue. Whenever a displaced
1620 step finishes, we pick the next thread in the queue and start a new
1621 displaced step operation on it. See displaced_step_prepare and
1622 displaced_step_finish for details. */
1623
1624/* Return true if THREAD is doing a displaced step. */
1625
1626static bool
1627displaced_step_in_progress_thread (thread_info *thread)
1628{
1629 gdb_assert (thread != nullptr);
1630
1631 return thread->displaced_step_state.in_progress ();
1632}
1633
1634/* Return true if INF has a thread doing a displaced step. */
1635
1636static bool
1637displaced_step_in_progress (inferior *inf)
1638{
1639 return inf->displaced_step_state.in_progress_count > 0;
1640}
1641
1642/* Return true if any thread is doing a displaced step. */
1643
1644static bool
1645displaced_step_in_progress_any_thread ()
1646{
1647 for (inferior *inf : all_non_exited_inferiors ())
1648 {
1649 if (displaced_step_in_progress (inf))
1650 return true;
1651 }
1652
1653 return false;
1654}
1655
/* Discard infrun state associated with INF when it exits: any
   per-inferior displaced-stepping state, and any record of a thread
   waiting for vfork-done.  */

static void
infrun_inferior_exit (struct inferior *inf)
{
  inf->displaced_step_state.reset ();
  inf->thread_waiting_for_vfork_done = nullptr;
}
1662
/* Reset infrun state after an exec.  EXEC_INF is the inferior that
   execed, FOLLOW_INF the inferior in which debugging continues (they
   may be the same, depending on "follow-exec-mode").  */

static void
infrun_inferior_execd (inferior *exec_inf, inferior *follow_inf)
{
  /* If any threads were doing a displaced step in this inferior at the
     moment of the exec, they no longer exist.  Even if the exec'ing
     thread itself was doing a displaced step, we don't want to do any
     fixup nor restore displaced stepping buffer bytes.  */
  follow_inf->displaced_step_state.reset ();

  for (thread_info *thread : follow_inf->threads ())
    thread->displaced_step_state.reset ();

  /* Since an in-line step is done with everything else stopped, if there was
     one in progress at the time of the exec, it must have been the exec'ing
     thread.  */
  clear_step_over_info ();

  follow_inf->thread_waiting_for_vfork_done = nullptr;
}
1682
/* Setting of "set displaced-stepping".  If ON, and the architecture
   supports it, GDB will use displaced stepping to step over
   breakpoints.  If OFF, or if the architecture doesn't support it,
   GDB will instead use the traditional hold-and-step approach.  If
   AUTO (which is the default), GDB will decide which technique to use
   to step over breakpoints depending on whether the target works in a
   non-stop way (see use_displaced_stepping).  */

static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1691
1692static void
1693show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1694 struct cmd_list_element *c,
1695 const char *value)
1696{
1697 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
1698 gdb_printf (file,
1699 _("Debugger's willingness to use displaced stepping "
1700 "to step over breakpoints is %s (currently %s).\n"),
1701 value, target_is_non_stop_p () ? "on" : "off");
1702 else
1703 gdb_printf (file,
1704 _("Debugger's willingness to use displaced stepping "
1705 "to step over breakpoints is %s.\n"), value);
1706}
1707
1708/* Return true if the target behind THREAD supports displaced stepping. */
1709
1710static bool
1711target_supports_displaced_stepping (thread_info *thread)
1712{
1713 return thread->inf->top_target ()->supports_displaced_step (thread);
1714}
1715
1716/* Return non-zero if displaced stepping can/should be used to step
1717 over breakpoints of thread TP. */
1718
1719static bool
1720use_displaced_stepping (thread_info *tp)
1721{
1722 /* If the user disabled it explicitly, don't use displaced stepping. */
1723 if (can_use_displaced_stepping == AUTO_BOOLEAN_FALSE)
1724 return false;
1725
1726 /* If "auto", only use displaced stepping if the target operates in a non-stop
1727 way. */
1728 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
1729 && !target_is_non_stop_p ())
1730 return false;
1731
1732 /* If the target doesn't support displaced stepping, don't use it. */
1733 if (!target_supports_displaced_stepping (tp))
1734 return false;
1735
1736 /* If recording, don't use displaced stepping. */
1737 if (find_record_target () != nullptr)
1738 return false;
1739
1740 /* If displaced stepping failed before for this inferior, don't bother trying
1741 again. */
1742 if (tp->inf->displaced_step_state.failed_before)
1743 return false;
1744
1745 return true;
1746}
1747
/* Simple function wrapper around displaced_step_thread_state::reset,
   so it can be named by FORWARD_SCOPE_EXIT below.  */

static void
displaced_step_reset (displaced_step_thread_state *displaced)
{
  displaced->reset ();
}

/* A cleanup that wraps displaced_step_reset.  We use this instead of, say,
   SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()".  */

using displaced_step_reset_cleanup = FORWARD_SCOPE_EXIT (displaced_step_reset);
1760
1761/* Prepare to single-step, using displaced stepping.
1762
1763 Note that we cannot use displaced stepping when we have a signal to
1764 deliver. If we have a signal to deliver and an instruction to step
1765 over, then after the step, there will be no indication from the
1766 target whether the thread entered a signal handler or ignored the
1767 signal and stepped over the instruction successfully --- both cases
1768 result in a simple SIGTRAP. In the first case we mustn't do a
1769 fixup, and in the second case we must --- but we can't tell which.
1770 Comments in the code for 'random signals' in handle_inferior_event
1771 explain how we handle this case instead.
1772
1773 Returns DISPLACED_STEP_PREPARE_STATUS_OK if preparing was successful -- this
1774 thread is going to be stepped now; DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
1775 if displaced stepping this thread got queued; or
1776 DISPLACED_STEP_PREPARE_STATUS_CANT if this instruction can't be displaced
1777 stepped. */
1778
static displaced_step_prepare_status
displaced_step_prepare_throw (thread_info *tp)
{
  regcache *regcache = get_thread_regcache (tp);
  struct gdbarch *gdbarch = regcache->arch ();
  displaced_step_thread_state &disp_step_thread_state
    = tp->displaced_step_state;

  /* We should never reach this function if the target does not
     support displaced stepping.  */
  gdb_assert (target_supports_displaced_stepping (tp));

  /* Nor if the thread isn't meant to step over a breakpoint.  */
  gdb_assert (tp->control.trap_expected);

  /* Disable range stepping while executing in the scratch pad.  We
     want a single-step even if executing the displaced instruction in
     the scratch buffer lands within the stepping range (e.g., a
     jump/branch).  */
  tp->control.may_range_step = 0;

  /* We are about to start a displaced step for this thread.  If one is already
     in progress, something's wrong.  */
  gdb_assert (!disp_step_thread_state.in_progress ());

  if (tp->inf->displaced_step_state.unavailable)
    {
      /* The gdbarch tells us it's not worth asking to try a prepare because
	 it is likely that it will return unavailable, so don't bother asking.
	 Queue the thread instead; it will be retried later.  */

      displaced_debug_printf ("deferring step of %s",
			      tp->ptid.to_string ().c_str ());

      global_thread_step_over_chain_enqueue (tp);
      return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
    }

  displaced_debug_printf ("displaced-stepping %s now",
			  tp->ptid.to_string ().c_str ());

  /* The memory reads and the target prepare call below operate on the
     current thread, so temporarily make TP current; restored on scope
     exit.  */
  scoped_restore_current_thread restore_thread;

  switch_to_thread (tp);

  CORE_ADDR original_pc = regcache_read_pc (regcache);
  CORE_ADDR displaced_pc;

  /* Display the instruction we are going to displaced step.  */
  if (debug_displaced)
    {
      string_file tmp_stream;
      int dislen = gdb_print_insn (gdbarch, original_pc, &tmp_stream,
				   nullptr);

      if (dislen > 0)
	{
	  gdb::byte_vector insn_buf (dislen);
	  read_memory (original_pc, insn_buf.data (), insn_buf.size ());

	  std::string insn_bytes = bytes_to_string (insn_buf);

	  displaced_debug_printf ("original insn %s: %s \t %s",
				  paddress (gdbarch, original_pc),
				  insn_bytes.c_str (),
				  tmp_stream.string ().c_str ());
	}
      else
	displaced_debug_printf ("original insn %s: invalid length: %d",
				paddress (gdbarch, original_pc), dislen);
    }

  /* Ask the target to prepare the displaced step; on success,
     DISPLACED_PC is set to the address of the instruction copy (see
     the "replacement insn" dump below).  */
  auto status
    = tp->inf->top_target ()->displaced_step_prepare (tp, displaced_pc);

  if (status == DISPLACED_STEP_PREPARE_STATUS_CANT)
    {
      displaced_debug_printf ("failed to prepare (%s)",
			      tp->ptid.to_string ().c_str ());

      return DISPLACED_STEP_PREPARE_STATUS_CANT;
    }
  else if (status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
    {
      /* Not enough displaced stepping resources available, defer this
	 request by placing it in the queue.  */

      displaced_debug_printf ("not enough resources available, "
			      "deferring step of %s",
			      tp->ptid.to_string ().c_str ());

      global_thread_step_over_chain_enqueue (tp);

      return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
    }

  gdb_assert (status == DISPLACED_STEP_PREPARE_STATUS_OK);

  /* Save the information we need to fix things up if the step
     succeeds.  */
  disp_step_thread_state.set (gdbarch);

  tp->inf->displaced_step_state.in_progress_count++;

  displaced_debug_printf ("prepared successfully thread=%s, "
			  "original_pc=%s, displaced_pc=%s",
			  tp->ptid.to_string ().c_str (),
			  paddress (gdbarch, original_pc),
			  paddress (gdbarch, displaced_pc));

  /* Display the new displaced instruction(s).  */
  if (debug_displaced)
    {
      string_file tmp_stream;
      CORE_ADDR addr = displaced_pc;

      /* If displaced stepping is going to use h/w single step then we know
	 that the replacement instruction can only be a single instruction,
	 in that case set the end address at the next byte.

	 Otherwise the displaced stepping copy instruction routine could
	 have generated multiple instructions, and all we know is that they
	 must fit within the LEN bytes of the buffer.  */
      CORE_ADDR end
	= addr + (gdbarch_displaced_step_hw_singlestep (gdbarch)
		  ? 1 : gdbarch_displaced_step_buffer_length (gdbarch));

      while (addr < end)
	{
	  int dislen = gdb_print_insn (gdbarch, addr, &tmp_stream, nullptr);
	  if (dislen <= 0)
	    {
	      displaced_debug_printf
		("replacement insn %s: invalid length: %d",
		 paddress (gdbarch, addr), dislen);
	      break;
	    }

	  gdb::byte_vector insn_buf (dislen);
	  read_memory (addr, insn_buf.data (), insn_buf.size ());

	  std::string insn_bytes = bytes_to_string (insn_buf);
	  std::string insn_str = tmp_stream.release ();
	  displaced_debug_printf ("replacement insn %s: %s \t %s",
				  paddress (gdbarch, addr),
				  insn_bytes.c_str (),
				  insn_str.c_str ());
	  addr += dislen;
	}
    }

  return DISPLACED_STEP_PREPARE_STATUS_OK;
}
1931
/* Wrapper for displaced_step_prepare_throw that disables further
   attempts at displaced stepping if we get a memory error.  */

static displaced_step_prepare_status
displaced_step_prepare (thread_info *thread)
{
  displaced_step_prepare_status status
    = DISPLACED_STEP_PREPARE_STATUS_CANT;

  try
    {
      status = displaced_step_prepare_throw (thread);
    }
  catch (const gdb_exception_error &ex)
    {
      /* Only memory errors and explicit lack of support disable
	 displaced stepping; any other error propagates to the
	 caller.  */
      if (ex.error != MEMORY_ERROR
	  && ex.error != NOT_SUPPORTED_ERROR)
	throw;

      infrun_debug_printf ("caught exception, disabling displaced stepping: %s",
			   ex.what ());

      /* Be verbose if "set displaced-stepping" is "on", silent if
	 "auto".  */
      if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
	{
	  warning (_("disabling displaced stepping: %s"),
		   ex.what ());
	}

      /* Disable further displaced stepping attempts for this
	 inferior.  */
      thread->inf->displaced_step_state.failed_before = 1;
    }

  return status;
}
1968
1969/* True if any thread of TARGET that matches RESUME_PTID requires
1970 target_thread_events enabled. This assumes TARGET does not support
1971 target thread options. */
1972
1973static bool
1974any_thread_needs_target_thread_events (process_stratum_target *target,
1975 ptid_t resume_ptid)
1976{
1977 for (thread_info *tp : all_non_exited_threads (target, resume_ptid))
1978 if (displaced_step_in_progress_thread (tp)
1979 || schedlock_applies (tp)
1980 || tp->thread_fsm () != nullptr)
1981 return true;
1982 return false;
1983}
1984
1985/* Maybe disable thread-{cloned,created,exited} event reporting after
1986 a step-over (either in-line or displaced) finishes. */
1987
1988static void
1989update_thread_events_after_step_over (thread_info *event_thread,
1990 const target_waitstatus &event_status)
1991{
1992 if (schedlock_applies (event_thread))
1993 {
1994 /* If scheduler-locking applies, continue reporting
1995 thread-created/thread-cloned events. */
1996 return;
1997 }
1998 else if (target_supports_set_thread_options (0))
1999 {
2000 /* We can control per-thread options. Disable events for the
2001 event thread, unless the thread is gone. */
2002 if (event_status.kind () != TARGET_WAITKIND_THREAD_EXITED)
2003 event_thread->set_thread_options (0);
2004 }
2005 else
2006 {
2007 /* We can only control the target-wide target_thread_events
2008 setting. Disable it, but only if other threads in the target
2009 don't need it enabled. */
2010 process_stratum_target *target = event_thread->inf->process_target ();
2011 if (!any_thread_needs_target_thread_events (target, minus_one_ptid))
2012 target_thread_events (false);
2013 }
2014}
2015
2016/* If we displaced stepped an instruction successfully, adjust registers and
2017 memory to yield the same effect the instruction would have had if we had
2018 executed it at its original address, and return
2019 DISPLACED_STEP_FINISH_STATUS_OK. If the instruction didn't complete,
2020 relocate the PC and return DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED.
2021
2022 If the thread wasn't displaced stepping, return
2023 DISPLACED_STEP_FINISH_STATUS_OK as well. */
2024
static displaced_step_finish_status
displaced_step_finish (thread_info *event_thread,
		       const target_waitstatus &event_status)
{
  /* Check whether the parent is displaced stepping.  */
  inferior *parent_inf = event_thread->inf;
  target_ops *top_target = parent_inf->top_target ();

  /* If this was a fork/vfork/clone, this event indicates that the
     displaced stepping of the syscall instruction has been done, so
     we perform cleanup for parent here.  Also note that this
     operation also cleans up the child for vfork, because their pages
     are shared.  */

  /* If this is a fork (child gets its own address space copy) and
     some displaced step buffers were in use at the time of the fork,
     restore the displaced step buffer bytes in the child process.

     Architectures which support displaced stepping and fork events
     must supply an implementation of
     gdbarch_displaced_step_restore_all_in_ptid.  This is not enforced
     during gdbarch validation to support architectures which support
     displaced stepping but not forks.  */
  if (event_status.kind () == TARGET_WAITKIND_FORKED
      && target_supports_displaced_stepping (event_thread))
    top_target->displaced_step_restore_all_in_ptid
      (parent_inf, event_status.child_ptid ());

  displaced_step_thread_state *displaced = &event_thread->displaced_step_state;

  /* Was this thread performing a displaced step?  */
  if (!displaced->in_progress ())
    return DISPLACED_STEP_FINISH_STATUS_OK;

  update_thread_events_after_step_over (event_thread, event_status);

  gdb_assert (event_thread->inf->displaced_step_state.in_progress_count > 0);
  event_thread->inf->displaced_step_state.in_progress_count--;

  /* Fixup may need to read memory/registers.  Switch to the thread
     that we're fixing up.  Also, target_stopped_by_watchpoint checks
     the current thread, and displaced_step_restore performs ptid-dependent
     memory accesses using current_inferior().  */
  switch_to_thread (event_thread);

  /* Ensure the thread's displaced-step state is reset on every exit
     path below (the cleanup wraps displaced_step_reset).  */
  displaced_step_reset_cleanup cleanup (displaced);

  /* Do the fixup, and release the resources acquired to do the displaced
     step.  */
  auto status = top_target->displaced_step_finish (event_thread, event_status);

  if (event_status.kind () == TARGET_WAITKIND_FORKED
      || event_status.kind () == TARGET_WAITKIND_VFORKED
      || event_status.kind () == TARGET_WAITKIND_THREAD_CLONED)
    {
      /* Since the vfork/fork/clone syscall instruction was executed
	 in the scratchpad, the child's PC is also within the
	 scratchpad.  Set the child's PC to the parent's PC value,
	 which has already been fixed up.  Note: we use the parent's
	 aspace here, although we're touching the child, because the
	 child hasn't been added to the inferior list yet at this
	 point.  */

      struct regcache *parent_regcache = get_thread_regcache (event_thread);
      struct gdbarch *gdbarch = parent_regcache->arch ();
      struct regcache *child_regcache
	= get_thread_arch_regcache (parent_inf, event_status.child_ptid (),
				    gdbarch);
      /* Read PC value of parent.  */
      CORE_ADDR parent_pc = regcache_read_pc (parent_regcache);

      displaced_debug_printf ("write child pc from %s to %s",
			      paddress (gdbarch,
					regcache_read_pc (child_regcache)),
			      paddress (gdbarch, parent_pc));

      regcache_write_pc (child_regcache, parent_pc);
    }

  return status;
}
2106
2107/* Data to be passed around while handling an event. This data is
2108 discarded between events. */
struct execution_control_state
{
  /* Construct state for THR's event; with no thread, PTID starts out
     as null_ptid.  */
  explicit execution_control_state (thread_info *thr = nullptr)
    : ptid (thr == nullptr ? null_ptid : thr->ptid),
      event_thread (thr)
  {
  }

  /* Target the event came from.  NOTE(review): filled in by the
     event-handling code, which is not visible here -- confirm.  */
  process_stratum_target *target = nullptr;

  /* Ptid of the thread the event is about (null_ptid if none).  */
  ptid_t ptid;

  /* The thread that got the event, if this was a thread event; NULL
     otherwise.  */
  struct thread_info *event_thread;

  /* The wait status reported for the event.  */
  struct target_waitstatus ws;

  /* Non-zero once the stop_func_* fields below have been filled in
     for this event.  */
  int stop_func_filled_in = 0;

  /* Bounds and name of the function the inferior stopped in;
     presumably computed on demand, guarded by STOP_FUNC_FILLED_IN --
     confirm against the fill-in code.  */
  CORE_ADDR stop_func_alt_start = 0;
  CORE_ADDR stop_func_start = 0;
  CORE_ADDR stop_func_end = 0;
  const char *stop_func_name = nullptr;

  /* Non-zero if event handling should keep waiting for more events
     rather than stopping (e.g., start_step_over errors out with
     "Command aborted." when this stays clear).  */
  int wait_some_more = 0;

  /* True if the event thread hit the single-step breakpoint of
     another thread.  Thus the event doesn't cause a stop, the thread
     needs to be single-stepped past the single-step breakpoint before
     we can switch back to the original stepping thread.  */
  int hit_singlestep_breakpoint = 0;
};
2137
2138static void keep_going_pass_signal (struct execution_control_state *ecs);
2139static void prepare_to_wait (struct execution_control_state *ecs);
2140static bool keep_going_stepped_thread (struct thread_info *tp);
2141static step_over_what thread_still_needs_step_over (struct thread_info *tp);
2142
2143/* Are there any pending step-over requests? If so, run all we can
2144 now and return true. Otherwise, return false. */
2145
static bool
start_step_over (void)
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  /* Don't start a new step-over if we already have an in-line
     step-over operation ongoing.  */
  if (step_over_info_valid_p ())
    return false;

  /* Steal the global thread step over chain.  As we try to initiate displaced
     steps, threads will be enqueued in the global chain if no buffers are
     available.  If we iterated on the global chain directly, we might iterate
     indefinitely.  */
  thread_step_over_list threads_to_step
    = std::move (global_thread_step_over_list);

  infrun_debug_printf ("stealing global queue of threads to step, length = %d",
		       thread_step_over_chain_length (threads_to_step));

  bool started = false;

  /* On scope exit (whatever the reason, return or exception), if there are
     threads left in the THREADS_TO_STEP chain, put back these threads in the
     global list.  */
  SCOPE_EXIT
    {
      if (threads_to_step.empty ())
	infrun_debug_printf ("step-over queue now empty");
      else
	{
	  infrun_debug_printf ("putting back %d threads to step in global queue",
			       thread_step_over_chain_length (threads_to_step));

	  global_thread_step_over_chain_enqueue_chain
	    (std::move (threads_to_step));
	}
    };

  /* Use a "safe" range so erasing TP inside the loop below doesn't
     invalidate the iteration.  */
  thread_step_over_list_safe_range range
    = make_thread_step_over_list_safe_range (threads_to_step);

  for (thread_info *tp : range)
    {
      step_over_what step_what;
      int must_be_in_line;

      gdb_assert (!tp->stop_requested);

      if (tp->inf->displaced_step_state.unavailable)
	{
	  /* The arch told us to not even try preparing another displaced step
	     for this inferior.  Just leave the thread in THREADS_TO_STEP, it
	     will get moved to the global chain on scope exit.  */
	  continue;
	}

      if (tp->inf->thread_waiting_for_vfork_done != nullptr)
	{
	  /* When we stop all threads, handling a vfork, any thread in the step
	     over chain remains there.  A user could also try to continue a
	     thread stopped at a breakpoint while another thread is waiting for
	     a vfork-done event.  In any case, we don't want to start a step
	     over right now.  */
	  continue;
	}

      /* Remove thread from the THREADS_TO_STEP chain.  If anything goes wrong
	 while we try to prepare the displaced step, we don't add it back to
	 the global step over chain.  This is to avoid a thread staying in the
	 step over chain indefinitely if something goes wrong when resuming it.
	 If the error is intermittent and it still needs a step over, it will
	 get enqueued again when we try to resume it normally.  */
      threads_to_step.erase (threads_to_step.iterator_to (*tp));

      step_what = thread_still_needs_step_over (tp);
      must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT)
			 || ((step_what & STEP_OVER_BREAKPOINT)
			     && !use_displaced_stepping (tp)));

      /* We currently stop all threads of all processes to step-over
	 in-line.  If we need to start a new in-line step-over, let
	 any pending displaced steps finish first.  */
      if (must_be_in_line && displaced_step_in_progress_any_thread ())
	{
	  global_thread_step_over_chain_enqueue (tp);
	  continue;
	}

      if (tp->control.trap_expected
	  || tp->resumed ()
	  || tp->executing ())
	{
	  internal_error ("[%s] has inconsistent state: "
			  "trap_expected=%d, resumed=%d, executing=%d\n",
			  tp->ptid.to_string ().c_str (),
			  tp->control.trap_expected,
			  tp->resumed (),
			  tp->executing ());
	}

      infrun_debug_printf ("resuming [%s] for step-over",
			   tp->ptid.to_string ().c_str ());

      /* keep_going_pass_signal skips the step-over if the breakpoint
	 is no longer inserted.  In all-stop, we want to keep looking
	 for a thread that needs a step-over instead of resuming TP,
	 because we wouldn't be able to resume anything else until the
	 target stops again.  In non-stop, the resume always resumes
	 only TP, so it's OK to let the thread resume freely.  */
      if (!target_is_non_stop_p () && !step_what)
	continue;

      switch_to_thread (tp);
      execution_control_state ecs (tp);
      keep_going_pass_signal (&ecs);

      if (!ecs.wait_some_more)
	error (_("Command aborted."));

      /* If the thread's step over could not be initiated because no buffers
	 were available, it was re-added to the global step over chain.  */
      if (tp->resumed ())
	{
	  infrun_debug_printf ("[%s] was resumed.",
			       tp->ptid.to_string ().c_str ());
	  gdb_assert (!thread_is_in_step_over_chain (tp));
	}
      else
	{
	  infrun_debug_printf ("[%s] was NOT resumed.",
			       tp->ptid.to_string ().c_str ());
	  gdb_assert (thread_is_in_step_over_chain (tp));
	}

      /* If we started a new in-line step-over, we're done.  */
      if (step_over_info_valid_p ())
	{
	  gdb_assert (tp->control.trap_expected);
	  started = true;
	  break;
	}

      if (!target_is_non_stop_p ())
	{
	  /* On all-stop, shouldn't have resumed unless we needed a
	     step over.  */
	  gdb_assert (tp->control.trap_expected
		      || tp->step_after_step_resume_breakpoint);

	  /* With remote targets (at least), in all-stop, we can't
	     issue any further remote commands until the program stops
	     again.  */
	  started = true;
	  break;
	}

      /* Either the thread no longer needed a step-over, or a new
	 displaced stepping sequence started.  Even in the latter
	 case, continue looking.  Maybe we can also start another
	 displaced step on a thread of other process.  */
    }

  return started;
}
2311
2312/* Update global variables holding ptids to hold NEW_PTID if they were
2313 holding OLD_PTID. */
2314static void
2315infrun_thread_ptid_changed (process_stratum_target *target,
2316 ptid_t old_ptid, ptid_t new_ptid)
2317{
2318 if (inferior_ptid == old_ptid
2319 && current_inferior ()->process_target () == target)
2320 inferior_ptid = new_ptid;
2321}
2322
2323\f
2324
/* Possible values of the "scheduler-locking" setting.  */
static const char schedlock_off[] = "off";
static const char schedlock_on[] = "on";
static const char schedlock_step[] = "step";
static const char schedlock_replay[] = "replay";

/* All valid values, as a nullptr-terminated list.  */
static const char *const scheduler_enums[] = {
  schedlock_off,
  schedlock_on,
  schedlock_step,
  schedlock_replay,
  nullptr
};

/* The current scheduler-locking mode; defaults to "replay".  */
static const char *scheduler_mode = schedlock_replay;
/* Print the current scheduler-locking mode VALUE to FILE.
   NOTE(review): presumably the show-hook of the "scheduler-locking"
   setting -- confirm where it is registered.  */
static void
show_scheduler_mode (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  gdb_printf (file,
	      _("Mode for locking scheduler "
		"during execution is \"%s\".\n"),
	      value);
}
2346
2347static void
2348set_schedlock_func (const char *args, int from_tty, struct cmd_list_element *c)
2349{
2350 if (!target_can_lock_scheduler ())
2351 {
2352 scheduler_mode = schedlock_off;
2353 error (_("Target '%s' cannot support this command."),
2354 target_shortname ());
2355 }
2356}
2357
2358/* True if execution commands resume all threads of all processes by
2359 default; otherwise, resume only threads of the current inferior
2360 process. */
2361bool sched_multi = false;
2362
2363/* Try to setup for software single stepping. Return true if target_resume()
2364 should use hardware single step.
2365
2366 GDBARCH the current gdbarch. */
2367
2368static bool
2369maybe_software_singlestep (struct gdbarch *gdbarch)
2370{
2371 bool hw_step = true;
2372
2373 if (execution_direction == EXEC_FORWARD
2374 && gdbarch_software_single_step_p (gdbarch))
2375 hw_step = !insert_single_step_breakpoints (gdbarch);
2376
2377 return hw_step;
2378}
2379
2380/* See infrun.h. */
2381
2382ptid_t
2383user_visible_resume_ptid (int step)
2384{
2385 ptid_t resume_ptid;
2386
2387 if (non_stop)
2388 {
2389 /* With non-stop mode on, threads are always handled
2390 individually. */
2391 resume_ptid = inferior_ptid;
2392 }
2393 else if ((scheduler_mode == schedlock_on)
2394 || (scheduler_mode == schedlock_step && step))
2395 {
2396 /* User-settable 'scheduler' mode requires solo thread
2397 resume. */
2398 resume_ptid = inferior_ptid;
2399 }
2400 else if ((scheduler_mode == schedlock_replay)
2401 && target_record_will_replay (minus_one_ptid, execution_direction))
2402 {
2403 /* User-settable 'scheduler' mode requires solo thread resume in replay
2404 mode. */
2405 resume_ptid = inferior_ptid;
2406 }
2407 else if (inferior_ptid != null_ptid
2408 && inferior_thread ()->control.in_cond_eval)
2409 {
2410 /* The inferior thread is evaluating a BP condition. Other threads
2411 might be stopped or running and we do not want to change their
2412 state, thus, resume only the current thread. */
2413 resume_ptid = inferior_ptid;
2414 }
2415 else if (!sched_multi && target_supports_multi_process ())
2416 {
2417 /* Resume all threads of the current process (and none of other
2418 processes). */
2419 resume_ptid = ptid_t (inferior_ptid.pid ());
2420 }
2421 else
2422 {
2423 /* Resume all threads of all processes. */
2424 resume_ptid = RESUME_ALL;
2425 }
2426
2427 return resume_ptid;
2428}
2429
2430/* See infrun.h. */
2431
2432process_stratum_target *
2433user_visible_resume_target (ptid_t resume_ptid)
2434{
2435 return (resume_ptid == minus_one_ptid && sched_multi
2436 ? nullptr
2437 : current_inferior ()->process_target ());
2438}
2439
2440/* Find a thread from the inferiors that we'll resume that is waiting
2441 for a vfork-done event. */
2442
2443static thread_info *
2444find_thread_waiting_for_vfork_done ()
2445{
2446 gdb_assert (!target_is_non_stop_p ());
2447
2448 if (sched_multi)
2449 {
2450 for (inferior *inf : all_non_exited_inferiors ())
2451 if (inf->thread_waiting_for_vfork_done != nullptr)
2452 return inf->thread_waiting_for_vfork_done;
2453 }
2454 else
2455 {
2456 inferior *cur_inf = current_inferior ();
2457 if (cur_inf->thread_waiting_for_vfork_done != nullptr)
2458 return cur_inf->thread_waiting_for_vfork_done;
2459 }
2460 return nullptr;
2461}
2462
2463/* Return a ptid representing the set of threads that we will resume,
2464 in the perspective of the target, assuming run control handling
2465 does not require leaving some threads stopped (e.g., stepping past
2466 breakpoint). USER_STEP indicates whether we're about to start the
2467 target for a stepping command. */
2468
static ptid_t
internal_resume_ptid (int user_step)
{
  /* In non-stop, we always control threads individually.  Note that
     the target may always work in non-stop mode even with "set
     non-stop off", in which case user_visible_resume_ptid could
     return a wildcard ptid.  */
  if (target_is_non_stop_p ())
    return inferior_ptid;

  /* The rest of the function assumes non-stop==off and
     target-non-stop==off.

     If a thread is waiting for a vfork-done event, it means breakpoints are out
     for this inferior (well, program space in fact).  We don't want to resume
     any thread other than the one waiting for vfork done, otherwise these other
     threads could miss breakpoints.  So if a thread in the resumption set is
     waiting for a vfork-done event, resume only that thread.

     The resumption set width depends on whether schedule-multiple is on or off.

     Note that if the target_resume interface was more flexible, we could be
     smarter here when schedule-multiple is on.  For example, imagine 3
     inferiors with 2 threads each (1.1, 1.2, 2.1, 2.2, 3.1 and 3.2).  Threads
     2.1 and 3.2 are both waiting for a vfork-done event.  Then we could ask the
     target(s) to resume:

     - All threads of inferior 1
     - Thread 2.1
     - Thread 3.2

     Since we don't have that flexibility (we can only pass one ptid), just
     resume the first thread waiting for a vfork-done event we find (e.g. thread
     2.1).  */
  thread_info *thr = find_thread_waiting_for_vfork_done ();
  if (thr != nullptr)
    {
      /* If we have a thread that is waiting for a vfork-done event,
	 then we should have switched to it earlier.  Calling
	 target_resume with thread scope is only possible when the
	 current thread matches the thread scope.  */
      gdb_assert (thr->ptid == inferior_ptid);
      gdb_assert (thr->inf->process_target ()
		  == inferior_thread ()->inf->process_target ());
      return thr->ptid;
    }

  /* No vfork waiter; fall back to the user-visible resumption set.  */
  return user_visible_resume_ptid (user_step);
}
2518
/* Wrapper for target_resume, that handles infrun-specific
   bookkeeping.  RESUME_PTID is the set of threads to resume, STEP
   says whether to request a single-step, and SIG is the signal to
   deliver (GDB_SIGNAL_0 for none).  The current thread must not have
   a pending stop request.  */

static void
do_target_resume (ptid_t resume_ptid, bool step, enum gdb_signal sig)
{
  struct thread_info *tp = inferior_thread ();

  gdb_assert (!tp->stop_requested);

  /* Install inferior's terminal modes.  */
  target_terminal::inferior ();

  /* Avoid confusing the next resume, if the next stop/resume
     happens to apply to another thread.  */
  tp->set_stop_signal (GDB_SIGNAL_0);

  /* Advise target which signals may be handled silently.

     If we have removed breakpoints because we are stepping over one
     in-line (in any thread), we need to receive all signals to avoid
     accidentally skipping a breakpoint during execution of a signal
     handler.

     Likewise if we're displaced stepping, otherwise a trap for a
     breakpoint in a signal handler might be confused with the
     displaced step finishing.  We don't make the displaced_step_finish
     step distinguish the cases instead, because:

     - a backtrace while stopped in the signal handler would show the
       scratch pad as frame older than the signal handler, instead of
       the real mainline code.

     - when the thread is later resumed, the signal handler would
       return to the scratch pad area, which would no longer be
       valid.  */
  if (step_over_info_valid_p ()
      || displaced_step_in_progress (tp->inf))
    target_pass_signals ({});
  else
    target_pass_signals (signal_pass);

  /* Request that the target report thread-{created,cloned,exited}
     events in the following situations:

     - If we are performing an in-line step-over-breakpoint, then we
       will remove a breakpoint from the target and only run the
       current thread.  We don't want any new thread (spawned by the
       step) to start running, as it might miss the breakpoint.  We
       need to clear the step-over state if the stepped thread exits,
       so we also enable thread-exit events.

     - If we are stepping over a breakpoint out of line (displaced
       stepping) then we won't remove a breakpoint from the target,
       but, if the step spawns a new clone thread, then we will need
       to fixup the $pc address in the clone child too, so we need it
       to start stopped.  We need to release the displaced stepping
       buffer if the stepped thread exits, so we also enable
       thread-exit events.

     - If scheduler-locking applies, threads that the current thread
       spawns should remain halted.  It's not strictly necessary to
       enable thread-exit events in this case, but it doesn't hurt.
     */
  if (step_over_info_valid_p ()
      || displaced_step_in_progress_thread (tp)
      || schedlock_applies (tp))
    {
      gdb_thread_options options
	= GDB_THREAD_OPTION_CLONE | GDB_THREAD_OPTION_EXIT;
      if (target_supports_set_thread_options (options))
	tp->set_thread_options (options);
      else
	target_thread_events (true);
    }
  else if (tp->thread_fsm () != nullptr)
    {
      /* A thread FSM is in charge of this thread; it needs to know if
	 the thread exits, but not about clones/creations.  */
      gdb_thread_options options = GDB_THREAD_OPTION_EXIT;
      if (target_supports_set_thread_options (options))
	tp->set_thread_options (options);
      else
	target_thread_events (true);
    }
  else
    {
      /* No special thread events needed for TP; clear its options, or
	 disable the target-wide setting if nothing else needs it.  */
      if (target_supports_set_thread_options (0))
	tp->set_thread_options (0);
      else
	{
	  process_stratum_target *resume_target = tp->inf->process_target ();
	  if (!any_thread_needs_target_thread_events (resume_target,
						      resume_ptid))
	    target_thread_events (false);
	}
    }

  /* If we're resuming more than one thread simultaneously, then any
     thread other than the leader is being set to run free.  Clear any
     previous thread option for those threads.  */
  if (resume_ptid != inferior_ptid && target_supports_set_thread_options (0))
    {
      process_stratum_target *resume_target = tp->inf->process_target ();
      for (thread_info *thr_iter : all_non_exited_threads (resume_target,
							   resume_ptid))
	if (thr_iter != tp)
	  thr_iter->set_thread_options (0);
    }

  infrun_debug_printf ("resume_ptid=%s, step=%d, sig=%s",
		       resume_ptid.to_string ().c_str (),
		       step, gdb_signal_to_symbol_string (sig));

  /* Hand off to the target proper.  */
  target_resume (resume_ptid, step, sig);
}
2633
/* Resume the inferior.  SIG is the signal to give the inferior
   (GDB_SIGNAL_0 for none).  Note: don't call this directly; instead
   call 'resume', which handles exceptions.  */

static void
resume_1 (enum gdb_signal sig)
{
  struct thread_info *tp = inferior_thread ();
  regcache *regcache = get_thread_regcache (tp);
  struct gdbarch *gdbarch = regcache->arch ();
  ptid_t resume_ptid;
  /* This represents the user's step vs continue request.  When
     deciding whether "set scheduler-locking step" applies, it's the
     user's intention that counts.  */
  const int user_step = tp->control.stepping_command;
  /* This represents what we'll actually request the target to do.
     This can decay from a step to a continue, if e.g., we need to
     implement single-stepping with breakpoints (software
     single-step).  */
  bool step;

  /* Callers must have resolved any stop request and taken this thread
     out of the step-over chain before resuming it.  */
  gdb_assert (!tp->stop_requested);
  gdb_assert (!thread_is_in_step_over_chain (tp));

  /* If the thread already has an event to report, don't resume it at
     all -- just mark it resumed and poke the event loop so the event
     gets consumed.  */
  if (tp->has_pending_waitstatus ())
    {
      infrun_debug_printf
	("thread %s has pending wait "
	 "status %s (currently_stepping=%d).",
	 tp->ptid.to_string ().c_str (),
	 tp->pending_waitstatus ().to_string ().c_str (),
	 currently_stepping (tp));

      tp->inf->process_target ()->threads_executing = true;
      tp->set_resumed (true);

      /* FIXME: What should we do if we are supposed to resume this
	 thread with a signal?  Maybe we should maintain a queue of
	 pending signals to deliver.  */
      if (sig != GDB_SIGNAL_0)
	{
	  warning (_("Couldn't deliver signal %s to %s."),
		   gdb_signal_to_name (sig),
		   tp->ptid.to_string ().c_str ());
	}

      tp->set_stop_signal (GDB_SIGNAL_0);

      if (target_can_async_p ())
	{
	  target_async (true);
	  /* Tell the event loop we have an event to process.  */
	  mark_async_event_handler (infrun_async_inferior_event_token);
	}
      return;
    }

  tp->stepped_breakpoint = 0;

  /* Depends on stepped_breakpoint.  */
  step = currently_stepping (tp);

  if (current_inferior ()->thread_waiting_for_vfork_done != nullptr)
    {
      /* Don't try to single-step a vfork parent that is waiting for
	 the child to get out of the shared memory region (by exec'ing
	 or exiting).  This is particularly important on software
	 single-step archs, as the child process would trip on the
	 software single step breakpoint inserted for the parent
	 process.  Since the parent will not actually execute any
	 instruction until the child is out of the shared region (such
	 are vfork's semantics), it is safe to simply continue it.
	 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
	 the parent, and tell it to `keep_going', which automatically
	 re-sets it stepping.  */
      infrun_debug_printf ("resume : clear step");
      step = false;
    }

  CORE_ADDR pc = regcache_read_pc (regcache);

  infrun_debug_printf ("step=%d, signal=%s, trap_expected=%d, "
		       "current thread [%s] at %s",
		       step, gdb_signal_to_symbol_string (sig),
		       tp->control.trap_expected,
		       inferior_ptid.to_string ().c_str (),
		       paddress (gdbarch, pc));

  const address_space *aspace = tp->inf->aspace.get ();

  /* Normally, by the time we reach `resume', the breakpoints are either
     removed or inserted, as appropriate.  The exception is if we're sitting
     at a permanent breakpoint; we need to step over it, but permanent
     breakpoints can't be removed.  So we have to test for it here.  */
  if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
    {
      if (sig != GDB_SIGNAL_0)
	{
	  /* We have a signal to pass to the inferior.  The resume
	     may, or may not take us to the signal handler.  If this
	     is a step, we'll need to stop in the signal handler, if
	     there's one, (if the target supports stepping into
	     handlers), or in the next mainline instruction, if
	     there's no handler.  If this is a continue, we need to be
	     sure to run the handler with all breakpoints inserted.
	     In all cases, set a breakpoint at the current address
	     (where the handler returns to), and once that breakpoint
	     is hit, resume skipping the permanent breakpoint.  If
	     that breakpoint isn't hit, then we've stepped into the
	     signal handler (or hit some other event).  We'll delete
	     the step-resume breakpoint then.  */

	  infrun_debug_printf ("resume: skipping permanent breakpoint, "
			       "deliver signal first");

	  clear_step_over_info ();
	  tp->control.trap_expected = 0;

	  if (tp->control.step_resume_breakpoint == nullptr)
	    {
	      /* Set a "high-priority" step-resume, as we don't want
		 user breakpoints at PC to trigger (again) when this
		 hits.  */
	      insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
	      gdb_assert (tp->control.step_resume_breakpoint->first_loc ()
			  .permanent);

	      tp->step_after_step_resume_breakpoint = step;
	    }

	  insert_breakpoints ();
	}
      else
	{
	  /* There's no signal to pass, we can go ahead and skip the
	     permanent breakpoint manually.  */
	  infrun_debug_printf ("skipping permanent breakpoint");
	  gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
	  /* Update pc to reflect the new address from which we will
	     execute instructions.  */
	  pc = regcache_read_pc (regcache);

	  if (step)
	    {
	      /* We've already advanced the PC, so the stepping part
		 is done.  Now we need to arrange for a trap to be
		 reported to handle_inferior_event.  Set a breakpoint
		 at the current PC, and run to it.  Don't update
		 prev_pc, because if we end in
		 switch_back_to_stepped_thread, we want the "expected
		 thread advanced also" branch to be taken.  IOW, we
		 don't want this thread to step further from PC
		 (overstep).  */
	      gdb_assert (!step_over_info_valid_p ());
	      insert_single_step_breakpoint (gdbarch, aspace, pc);
	      insert_breakpoints ();

	      resume_ptid = internal_resume_ptid (user_step);
	      do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
	      tp->set_resumed (true);
	      return;
	    }
	}
    }

  /* If we have a breakpoint to step over, make sure to do a single
     step only.  Same if we have software watchpoints.  */
  if (tp->control.trap_expected || bpstat_should_step ())
    tp->control.may_range_step = 0;

  /* If displaced stepping is enabled, step over breakpoints by executing a
     copy of the instruction at a different address.

     We can't use displaced stepping when we have a signal to deliver;
     the comments for displaced_step_prepare explain why.  The
     comments in the handle_inferior event for dealing with 'random
     signals' explain what we do instead.

     We can't use displaced stepping when we are waiting for vfork_done
     event, displaced stepping breaks the vfork child similarly as single
     step software breakpoint.  */
  if (tp->control.trap_expected
      && use_displaced_stepping (tp)
      && !step_over_info_valid_p ()
      && sig == GDB_SIGNAL_0
      && current_inferior ()->thread_waiting_for_vfork_done == nullptr)
    {
      displaced_step_prepare_status prepare_status
	= displaced_step_prepare (tp);

      if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
	{
	  /* No scratch buffer available right now; the thread was
	     queued for a later step-over.  Don't resume it yet.  */
	  infrun_debug_printf ("Got placed in step-over queue");

	  tp->control.trap_expected = 0;
	  return;
	}
      else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_CANT)
	{
	  /* Fallback to stepping over the breakpoint in-line.  */

	  if (target_is_non_stop_p ())
	    stop_all_threads ("displaced stepping falling back on inline stepping");

	  set_step_over_info (aspace, regcache_read_pc (regcache), 0,
			      tp->global_num);

	  step = maybe_software_singlestep (gdbarch);

	  insert_breakpoints ();
	}
      else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_OK)
	{
	  /* Update pc to reflect the new address from which we will
	     execute instructions due to displaced stepping.  */
	  pc = regcache_read_pc (get_thread_regcache (tp));

	  step = gdbarch_displaced_step_hw_singlestep (gdbarch);
	}
      else
	gdb_assert_not_reached ("Invalid displaced_step_prepare_status "
				"value.");
    }

  /* Do we need to do it the hard way, w/temp breakpoints?  */
  else if (step)
    step = maybe_software_singlestep (gdbarch);

  /* Currently, our software single-step implementation leads to different
     results than hardware single-stepping in one situation: when stepping
     into delivering a signal which has an associated signal handler,
     hardware single-step will stop at the first instruction of the handler,
     while software single-step will simply skip execution of the handler.

     For now, this difference in behavior is accepted since there is no
     easy way to actually implement single-stepping into a signal handler
     without kernel support.

     However, there is one scenario where this difference leads to follow-on
     problems: if we're stepping off a breakpoint by removing all breakpoints
     and then single-stepping.  In this case, the software single-step
     behavior means that even if there is a *breakpoint* in the signal
     handler, GDB still would not stop.

     Fortunately, we can at least fix this particular issue.  We detect
     here the case where we are about to deliver a signal while software
     single-stepping with breakpoints removed.  In this situation, we
     revert the decisions to remove all breakpoints and insert single-
     step breakpoints, and instead we install a step-resume breakpoint
     at the current address, deliver the signal without stepping, and
     once we arrive back at the step-resume breakpoint, actually step
     over the breakpoint we originally wanted to step over.  */
  if (thread_has_single_step_breakpoints_set (tp)
      && sig != GDB_SIGNAL_0
      && step_over_info_valid_p ())
    {
      /* If we have nested signals or a pending signal is delivered
	 immediately after a handler returns, might already have
	 a step-resume breakpoint set on the earlier handler.  We cannot
	 set another step-resume breakpoint; just continue on until the
	 original breakpoint is hit.  */
      if (tp->control.step_resume_breakpoint == nullptr)
	{
	  insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
	  tp->step_after_step_resume_breakpoint = 1;
	}

      delete_single_step_breakpoints (tp);

      clear_step_over_info ();
      tp->control.trap_expected = 0;

      insert_breakpoints ();
    }

  /* If STEP is set, it's a request to use hardware stepping
     facilities.  But in that case, we should never
     use singlestep breakpoint.  */
  gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));

  /* Decide the set of threads to ask the target to resume.  */
  if (tp->control.trap_expected)
    {
      /* We're allowing a thread to run past a breakpoint it has
	 hit, either by single-stepping the thread with the breakpoint
	 removed, or by displaced stepping, with the breakpoint inserted.
	 In the former case, we need to single-step only this thread,
	 and keep others stopped, as they can miss this breakpoint if
	 allowed to run.  That's not really a problem for displaced
	 stepping, but, we still keep other threads stopped, in case
	 another thread is also stopped for a breakpoint waiting for
	 its turn in the displaced stepping queue.  */
      resume_ptid = inferior_ptid;
    }
  else
    resume_ptid = internal_resume_ptid (user_step);

  if (execution_direction != EXEC_REVERSE
      && step && breakpoint_inserted_here_p (aspace, pc))
    {
      /* There are two cases where we currently need to step a
	 breakpoint instruction when we have a signal to deliver:

	 - See handle_signal_stop where we handle random signals that
	 could take out us out of the stepping range.  Normally, in
	 that case we end up continuing (instead of stepping) over the
	 signal handler with a breakpoint at PC, but there are cases
	 where we should _always_ single-step, even if we have a
	 step-resume breakpoint, like when a software watchpoint is
	 set.  Assuming single-stepping and delivering a signal at the
	 same time would takes us to the signal handler, then we could
	 have removed the breakpoint at PC to step over it.  However,
	 some hardware step targets (like e.g., Mac OS) can't step
	 into signal handlers, and for those, we need to leave the
	 breakpoint at PC inserted, as otherwise if the handler
	 recurses and executes PC again, it'll miss the breakpoint.
	 So we leave the breakpoint inserted anyway, but we need to
	 record that we tried to step a breakpoint instruction, so
	 that adjust_pc_after_break doesn't end up confused.

	 - In non-stop if we insert a breakpoint (e.g., a step-resume)
	 in one thread after another thread that was stepping had been
	 momentarily paused for a step-over.  When we re-resume the
	 stepping thread, it may be resumed from that address with a
	 breakpoint that hasn't trapped yet.  Seen with
	 gdb.threads/non-stop-fair-events.exp, on targets that don't
	 do displaced stepping.  */

      infrun_debug_printf ("resume: [%s] stepped breakpoint",
			   tp->ptid.to_string ().c_str ());

      tp->stepped_breakpoint = 1;

      /* Most targets can step a breakpoint instruction, thus
	 executing it normally.  But if this one cannot, just
	 continue and we will hit it anyway.  */
      if (gdbarch_cannot_step_breakpoint (gdbarch))
	step = false;
    }

  if (tp->control.may_range_step)
    {
      /* If we're resuming a thread with the PC out of the step
	 range, then we're doing some nested/finer run control
	 operation, like stepping the thread out of the dynamic
	 linker or the displaced stepping scratch pad.  We
	 shouldn't have allowed a range step then.  */
      gdb_assert (pc_in_thread_step_range (pc, tp));
    }

  /* All decisions made: hand the (possibly decayed) step request and
     the signal to the target.  */
  do_target_resume (resume_ptid, step, sig);
  tp->set_resumed (true);
}
2987
2988/* Resume the inferior. SIG is the signal to give the inferior
2989 (GDB_SIGNAL_0 for none). This is a wrapper around 'resume_1' that
2990 rolls back state on error. */
2991
2992static void
2993resume (gdb_signal sig)
2994{
2995 try
2996 {
2997 resume_1 (sig);
2998 }
2999 catch (const gdb_exception &ex)
3000 {
3001 /* If resuming is being aborted for any reason, delete any
3002 single-step breakpoint resume_1 may have created, to avoid
3003 confusing the following resumption, and to avoid leaving
3004 single-step breakpoints perturbing other threads, in case
3005 we're running in non-stop mode. */
3006 if (inferior_ptid != null_ptid)
3007 delete_single_step_breakpoints (inferior_thread ());
3008 throw;
3009 }
3010}
3011
3012\f
3013/* Proceeding. */
3014
3015/* See infrun.h. */
3016
3017/* Counter that tracks number of user visible stops. This can be used
3018 to tell whether a command has proceeded the inferior past the
3019 current location. This allows e.g., inferior function calls in
3020 breakpoint commands to not interrupt the command list. When the
3021 call finishes successfully, the inferior is standing at the same
3022 breakpoint as if nothing happened (and so we don't call
3023 normal_stop). */
3024static ULONGEST current_stop_id;
3025
3026/* See infrun.h. */
3027
3028ULONGEST
3029get_stop_id (void)
3030{
3031 return current_stop_id;
3032}
3033
3034/* Called when we report a user visible stop. */
3035
3036static void
3037new_stop_id (void)
3038{
3039 current_stop_id++;
3040}
3041
3042/* Clear out all variables saying what to do when inferior is continued.
3043 First do this, then set the ones you want, then call `proceed'. */
3044
3045static void
3046clear_proceed_status_thread (struct thread_info *tp)
3047{
3048 infrun_debug_printf ("%s", tp->ptid.to_string ().c_str ());
3049
3050 /* If we're starting a new sequence, then the previous finished
3051 single-step is no longer relevant. */
3052 if (tp->has_pending_waitstatus ())
3053 {
3054 if (tp->stop_reason () == TARGET_STOPPED_BY_SINGLE_STEP)
3055 {
3056 infrun_debug_printf ("pending event of %s was a finished step. "
3057 "Discarding.",
3058 tp->ptid.to_string ().c_str ());
3059
3060 tp->clear_pending_waitstatus ();
3061 tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
3062 }
3063 else
3064 {
3065 infrun_debug_printf
3066 ("thread %s has pending wait status %s (currently_stepping=%d).",
3067 tp->ptid.to_string ().c_str (),
3068 tp->pending_waitstatus ().to_string ().c_str (),
3069 currently_stepping (tp));
3070 }
3071 }
3072
3073 /* If this signal should not be seen by program, give it zero.
3074 Used for debugging signals. */
3075 if (!signal_pass_state (tp->stop_signal ()))
3076 tp->set_stop_signal (GDB_SIGNAL_0);
3077
3078 tp->release_thread_fsm ();
3079
3080 tp->control.trap_expected = 0;
3081 tp->control.step_range_start = 0;
3082 tp->control.step_range_end = 0;
3083 tp->control.may_range_step = 0;
3084 tp->control.step_frame_id = null_frame_id;
3085 tp->control.step_stack_frame_id = null_frame_id;
3086 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
3087 tp->control.step_start_function = nullptr;
3088 tp->stop_requested = false;
3089
3090 tp->control.stop_step = 0;
3091
3092 tp->control.proceed_to_finish = 0;
3093
3094 tp->control.stepping_command = 0;
3095
3096 /* Discard any remaining commands or status from previous stop. */
3097 bpstat_clear (&tp->control.stop_bpstat);
3098}
3099
3100/* Notify the current interpreter and observers that the target is about to
3101 proceed. */
3102
3103static void
3104notify_about_to_proceed ()
3105{
3106 top_level_interpreter ()->on_about_to_proceed ();
3107 gdb::observers::about_to_proceed.notify ();
3108}
3109
3110void
3111clear_proceed_status (int step)
3112{
3113 /* With scheduler-locking replay, stop replaying other threads if we're
3114 not replaying the user-visible resume ptid.
3115
3116 This is a convenience feature to not require the user to explicitly
3117 stop replaying the other threads. We're assuming that the user's
3118 intent is to resume tracing the recorded process. */
3119 if (!non_stop && scheduler_mode == schedlock_replay
3120 && target_record_is_replaying (minus_one_ptid)
3121 && !target_record_will_replay (user_visible_resume_ptid (step),
3122 execution_direction))
3123 target_record_stop_replaying ();
3124
3125 if (!non_stop && inferior_ptid != null_ptid)
3126 {
3127 ptid_t resume_ptid = user_visible_resume_ptid (step);
3128 process_stratum_target *resume_target
3129 = user_visible_resume_target (resume_ptid);
3130
3131 /* In all-stop mode, delete the per-thread status of all threads
3132 we're about to resume, implicitly and explicitly. */
3133 for (thread_info *tp : all_non_exited_threads (resume_target, resume_ptid))
3134 clear_proceed_status_thread (tp);
3135 }
3136
3137 if (inferior_ptid != null_ptid)
3138 {
3139 struct inferior *inferior;
3140
3141 if (non_stop)
3142 {
3143 /* If in non-stop mode, only delete the per-thread status of
3144 the current thread. */
3145 clear_proceed_status_thread (inferior_thread ());
3146 }
3147
3148 inferior = current_inferior ();
3149 inferior->control.stop_soon = NO_STOP_QUIETLY;
3150 }
3151
3152 notify_about_to_proceed ();
3153}
3154
3155/* Returns true if TP is still stopped at a breakpoint that needs
3156 stepping-over in order to make progress. If the breakpoint is gone
3157 meanwhile, we can skip the whole step-over dance. */
3158
3159static bool
3160thread_still_needs_step_over_bp (struct thread_info *tp)
3161{
3162 if (tp->stepping_over_breakpoint)
3163 {
3164 struct regcache *regcache = get_thread_regcache (tp);
3165
3166 if (breakpoint_here_p (tp->inf->aspace.get (),
3167 regcache_read_pc (regcache))
3168 == ordinary_breakpoint_here)
3169 return true;
3170
3171 tp->stepping_over_breakpoint = 0;
3172 }
3173
3174 return false;
3175}
3176
3177/* Check whether thread TP still needs to start a step-over in order
3178 to make progress when resumed. Returns an bitwise or of enum
3179 step_over_what bits, indicating what needs to be stepped over. */
3180
3181static step_over_what
3182thread_still_needs_step_over (struct thread_info *tp)
3183{
3184 step_over_what what = 0;
3185
3186 if (thread_still_needs_step_over_bp (tp))
3187 what |= STEP_OVER_BREAKPOINT;
3188
3189 if (tp->stepping_over_watchpoint
3190 && !target_have_steppable_watchpoint ())
3191 what |= STEP_OVER_WATCHPOINT;
3192
3193 return what;
3194}
3195
3196/* Returns true if scheduler locking applies. STEP indicates whether
3197 we're about to do a step/next-like command to a thread. */
3198
3199static bool
3200schedlock_applies (struct thread_info *tp)
3201{
3202 return (scheduler_mode == schedlock_on
3203 || (scheduler_mode == schedlock_step
3204 && tp->control.stepping_command)
3205 || (scheduler_mode == schedlock_replay
3206 && target_record_will_replay (minus_one_ptid,
3207 execution_direction)));
3208}
3209
3210/* When FORCE_P is false, set process_stratum_target::COMMIT_RESUMED_STATE
3211 in all target stacks that have threads executing and don't have threads
3212 with pending events.
3213
3214 When FORCE_P is true, set process_stratum_target::COMMIT_RESUMED_STATE
3215 in all target stacks that have threads executing regardless of whether
3216 there are pending events or not.
3217
3218 Passing FORCE_P as false makes sense when GDB is going to wait for
3219 events from all threads and will therefore spot the pending events.
3220 However, if GDB is only going to wait for events from select threads
3221 (i.e. when performing an inferior call) then a pending event on some
3222 other thread will not be spotted, and if we fail to commit the resume
3223 state for the thread performing the inferior call, then the inferior
3224 call will never complete (or even start). */
3225
3226static void
3227maybe_set_commit_resumed_all_targets (bool force_p)
3228{
3229 scoped_restore_current_thread restore_thread;
3230
3231 for (inferior *inf : all_non_exited_inferiors ())
3232 {
3233 process_stratum_target *proc_target = inf->process_target ();
3234
3235 if (proc_target->commit_resumed_state)
3236 {
3237 /* We already set this in a previous iteration, via another
3238 inferior sharing the process_stratum target. */
3239 continue;
3240 }
3241
3242 /* If the target has no resumed threads, it would be useless to
3243 ask it to commit the resumed threads. */
3244 if (!proc_target->threads_executing)
3245 {
3246 infrun_debug_printf ("not requesting commit-resumed for target "
3247 "%s, no resumed threads",
3248 proc_target->shortname ());
3249 continue;
3250 }
3251
3252 /* As an optimization, if a thread from this target has some
3253 status to report, handle it before requiring the target to
3254 commit its resumed threads: handling the status might lead to
3255 resuming more threads. */
3256 if (!force_p && proc_target->has_resumed_with_pending_wait_status ())
3257 {
3258 infrun_debug_printf ("not requesting commit-resumed for target %s, a"
3259 " thread has a pending waitstatus",
3260 proc_target->shortname ());
3261 continue;
3262 }
3263
3264 switch_to_inferior_no_thread (inf);
3265
3266 if (!force_p && target_has_pending_events ())
3267 {
3268 infrun_debug_printf ("not requesting commit-resumed for target %s, "
3269 "target has pending events",
3270 proc_target->shortname ());
3271 continue;
3272 }
3273
3274 infrun_debug_printf ("enabling commit-resumed for target %s",
3275 proc_target->shortname ());
3276
3277 proc_target->commit_resumed_state = true;
3278 }
3279}
3280
3281/* See infrun.h. */
3282
3283void
3284maybe_call_commit_resumed_all_targets ()
3285{
3286 scoped_restore_current_thread restore_thread;
3287
3288 for (inferior *inf : all_non_exited_inferiors ())
3289 {
3290 process_stratum_target *proc_target = inf->process_target ();
3291
3292 if (!proc_target->commit_resumed_state)
3293 continue;
3294
3295 switch_to_inferior_no_thread (inf);
3296
3297 infrun_debug_printf ("calling commit_resumed for target %s",
3298 proc_target->shortname());
3299
3300 target_commit_resumed ();
3301 }
3302}
3303
/* To track nesting of scoped_disable_commit_resumed objects, ensuring
   that only the outermost one attempts to re-enable
   commit-resumed.  True while no scoped_disable_commit_resumed
   instance is live.  */
static bool enable_commit_resumed = true;
3308
3309/* See infrun.h. */
3310
3311scoped_disable_commit_resumed::scoped_disable_commit_resumed
3312 (const char *reason)
3313 : m_reason (reason),
3314 m_prev_enable_commit_resumed (enable_commit_resumed)
3315{
3316 infrun_debug_printf ("reason=%s", m_reason);
3317
3318 enable_commit_resumed = false;
3319
3320 for (inferior *inf : all_non_exited_inferiors ())
3321 {
3322 process_stratum_target *proc_target = inf->process_target ();
3323
3324 if (m_prev_enable_commit_resumed)
3325 {
3326 /* This is the outermost instance: force all
3327 COMMIT_RESUMED_STATE to false. */
3328 proc_target->commit_resumed_state = false;
3329 }
3330 else
3331 {
3332 /* This is not the outermost instance, we expect
3333 COMMIT_RESUMED_STATE to have been cleared by the
3334 outermost instance. */
3335 gdb_assert (!proc_target->commit_resumed_state);
3336 }
3337 }
3338}
3339
3340/* See infrun.h. */
3341
3342void
3343scoped_disable_commit_resumed::reset ()
3344{
3345 if (m_reset)
3346 return;
3347 m_reset = true;
3348
3349 infrun_debug_printf ("reason=%s", m_reason);
3350
3351 gdb_assert (!enable_commit_resumed);
3352
3353 enable_commit_resumed = m_prev_enable_commit_resumed;
3354
3355 if (m_prev_enable_commit_resumed)
3356 {
3357 /* This is the outermost instance, re-enable
3358 COMMIT_RESUMED_STATE on the targets where it's possible. */
3359 maybe_set_commit_resumed_all_targets (false);
3360 }
3361 else
3362 {
3363 /* This is not the outermost instance, we expect
3364 COMMIT_RESUMED_STATE to still be false. */
3365 for (inferior *inf : all_non_exited_inferiors ())
3366 {
3367 process_stratum_target *proc_target = inf->process_target ();
3368 gdb_assert (!proc_target->commit_resumed_state);
3369 }
3370 }
3371}
3372
/* See infrun.h.  */

scoped_disable_commit_resumed::~scoped_disable_commit_resumed ()
{
  /* Ensure the disable is undone even if reset/reset_and_commit was
     never called explicitly; reset itself is a no-op when it was.  */
  reset ();
}
3379
/* See infrun.h.  */

void
scoped_disable_commit_resumed::reset_and_commit ()
{
  reset ();
  /* Now that commit-resumed may have been re-enabled, ask the
     targets to actually commit their resumed threads.  */
  maybe_call_commit_resumed_all_targets ();
}
3388
3389/* See infrun.h. */
3390
3391scoped_enable_commit_resumed::scoped_enable_commit_resumed
3392 (const char *reason, bool force_p)
3393 : m_reason (reason),
3394 m_prev_enable_commit_resumed (enable_commit_resumed)
3395{
3396 infrun_debug_printf ("reason=%s", m_reason);
3397
3398 if (!enable_commit_resumed)
3399 {
3400 enable_commit_resumed = true;
3401
3402 /* Re-enable COMMIT_RESUMED_STATE on the targets where it's
3403 possible. */
3404 maybe_set_commit_resumed_all_targets (force_p);
3405
3406 maybe_call_commit_resumed_all_targets ();
3407 }
3408}
3409
3410/* See infrun.h. */
3411
3412scoped_enable_commit_resumed::~scoped_enable_commit_resumed ()
3413{
3414 infrun_debug_printf ("reason=%s", m_reason);
3415
3416 gdb_assert (enable_commit_resumed);
3417
3418 enable_commit_resumed = m_prev_enable_commit_resumed;
3419
3420 if (!enable_commit_resumed)
3421 {
3422 /* Force all COMMIT_RESUMED_STATE back to false. */
3423 for (inferior *inf : all_non_exited_inferiors ())
3424 {
3425 process_stratum_target *proc_target = inf->process_target ();
3426 proc_target->commit_resumed_state = false;
3427 }
3428 }
3429}
3430
3431/* Check that all the targets we're about to resume are in non-stop
3432 mode. Ideally, we'd only care whether all targets support
3433 target-async, but we're not there yet. E.g., stop_all_threads
3434 doesn't know how to handle all-stop targets. Also, the remote
3435 protocol in all-stop mode is synchronous, irrespective of
3436 target-async, which means that things like a breakpoint re-set
3437 triggered by one target would try to read memory from all targets
3438 and fail. */
3439
3440static void
3441check_multi_target_resumption (process_stratum_target *resume_target)
3442{
3443 if (!non_stop && resume_target == nullptr)
3444 {
3445 scoped_restore_current_thread restore_thread;
3446
3447 /* This is used to track whether we're resuming more than one
3448 target. */
3449 process_stratum_target *first_connection = nullptr;
3450
3451 /* The first inferior we see with a target that does not work in
3452 always-non-stop mode. */
3453 inferior *first_not_non_stop = nullptr;
3454
3455 for (inferior *inf : all_non_exited_inferiors ())
3456 {
3457 switch_to_inferior_no_thread (inf);
3458
3459 if (!target_has_execution ())
3460 continue;
3461
3462 process_stratum_target *proc_target
3463 = current_inferior ()->process_target();
3464
3465 if (!target_is_non_stop_p ())
3466 first_not_non_stop = inf;
3467
3468 if (first_connection == nullptr)
3469 first_connection = proc_target;
3470 else if (first_connection != proc_target
3471 && first_not_non_stop != nullptr)
3472 {
3473 switch_to_inferior_no_thread (first_not_non_stop);
3474
3475 proc_target = current_inferior ()->process_target();
3476
3477 error (_("Connection %d (%s) does not support "
3478 "multi-target resumption."),
3479 proc_target->connection_number,
3480 make_target_connection_string (proc_target).c_str ());
3481 }
3482 }
3483 }
3484}
3485
/* Helper function for `proceed`.  Check if thread TP is suitable for
   resuming, and, if it is, switch to the thread and call
   `keep_going_pass_signal`.  If TP is not suitable for resuming then this
   function will just return without switching threads.  */

static void
proceed_resume_thread_checked (thread_info *tp)
{
  /* Guard checks, in order; each bail-out logs why TP was skipped.  */
  if (!tp->inf->has_execution ())
    {
      infrun_debug_printf ("[%s] target has no execution",
			   tp->ptid.to_string ().c_str ());
      return;
    }

  if (tp->resumed ())
    {
      infrun_debug_printf ("[%s] resumed",
			   tp->ptid.to_string ().c_str ());
      gdb_assert (tp->executing () || tp->has_pending_waitstatus ());
      return;
    }

  if (thread_is_in_step_over_chain (tp))
    {
      infrun_debug_printf ("[%s] needs step-over",
			   tp->ptid.to_string ().c_str ());
      return;
    }

  /* When handling a vfork GDB removes all breakpoints from the program
     space in which the vfork is being handled.  If we are following the
     parent then GDB will set the thread_waiting_for_vfork_done member of
     the parent inferior.  In this case we should take care to only resume
     the vfork parent thread, the kernel will hold this thread suspended
     until the vfork child has exited or execd, at which point the parent
     will be resumed and a VFORK_DONE event sent to GDB.  */
  if (tp->inf->thread_waiting_for_vfork_done != nullptr)
    {
      if (target_is_non_stop_p ())
	{
	  /* For non-stop targets, regardless of whether GDB is using
	     all-stop or non-stop mode, threads are controlled
	     individually.

	     When a thread is handling a vfork, breakpoints are removed
	     from the inferior (well, program space in fact), so it is
	     critical that we don't try to resume any thread other than the
	     vfork parent.  */
	  if (tp != tp->inf->thread_waiting_for_vfork_done)
	    {
	      infrun_debug_printf ("[%s] thread %s of this inferior is "
				   "waiting for vfork-done",
				   tp->ptid.to_string ().c_str (),
				   tp->inf->thread_waiting_for_vfork_done
				     ->ptid.to_string ().c_str ());
	      return;
	    }
	}
      else
	{
	  /* For all-stop targets, when we attempt to resume the inferior,
	     we will only resume the vfork parent thread, this is handled
	     in internal_resume_ptid.

	     Additionally, we will always be called with the vfork parent
	     thread as the current thread (TP) thanks to follow_fork, as
	     such the following assertion should hold.

	     Beyond this there is nothing more that needs to be done
	     here.  */
	  gdb_assert (tp == tp->inf->thread_waiting_for_vfork_done);
	}
    }

  /* When handling a vfork GDB removes all breakpoints from the program
     space in which the vfork is being handled.  If we are following the
     child then GDB will set vfork_child member of the vfork parent
     inferior.  Once the child has either exited or execd then GDB will
     detach from the parent process.  Until that point GDB should not
     resume any thread in the parent process.  */
  if (tp->inf->vfork_child != nullptr)
    {
      infrun_debug_printf ("[%s] thread is part of a vfork parent, child is %d",
			   tp->ptid.to_string ().c_str (),
			   tp->inf->vfork_child->pid);
      return;
    }

  /* All checks passed: switch to TP and set it going.  */
  infrun_debug_printf ("resuming %s",
		       tp->ptid.to_string ().c_str ());

  execution_control_state ecs (tp);
  switch_to_thread (tp);
  keep_going_pass_signal (&ecs);
  /* If the thread wasn't actually set going, abort the command.  */
  if (!ecs.wait_some_more)
    error (_("Command aborted."));
}
3584
/* Basic routine for continuing the program in various fashions.

   ADDR is the address to resume at, or -1 for resume where stopped.
   SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
   or GDB_SIGNAL_DEFAULT for act according to how it stopped.

   You should call clear_proceed_status before calling proceed. */

void
proceed (CORE_ADDR addr, enum gdb_signal siggnal)
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  struct gdbarch *gdbarch;
  CORE_ADDR pc;

  /* If we're stopped at a fork/vfork, switch to either the parent or child
     thread as defined by the "set follow-fork-mode" command, or, if both
     the parent and child are controlled by GDB, and schedule-multiple is
     on, follow the child. If none of the above apply then we just proceed
     resuming the current thread. */
  if (!follow_fork ())
    {
      /* The target for some reason decided not to resume. */
      normal_stop ();
      if (target_can_async_p ())
	inferior_event_handler (INF_EXEC_COMPLETE);
      return;
    }

  /* We'll update this if & when we switch to a new thread. */
  update_previous_thread ();

  thread_info *cur_thr = inferior_thread ();
  infrun_debug_printf ("cur_thr = %s", cur_thr->ptid.to_string ().c_str ());

  regcache *regcache = get_thread_regcache (cur_thr);
  gdbarch = regcache->arch ();
  pc = regcache_read_pc_protected (regcache);

  /* Fill in with reasonable starting values. */
  init_thread_stepping_state (cur_thr);

  /* Threads queued for a step-over are resumed by the step-over
     machinery, not by proceed; the current thread must not be
     queued.  */
  gdb_assert (!thread_is_in_step_over_chain (cur_thr));

  ptid_t resume_ptid
    = user_visible_resume_ptid (cur_thr->control.stepping_command);
  process_stratum_target *resume_target
    = user_visible_resume_target (resume_ptid);

  /* NOTE(review): presumably errors out if this resumption would span
     multiple target connections in an unsupported way — confirm
     against check_multi_target_resumption's definition.  */
  check_multi_target_resumption (resume_target);

  if (addr == (CORE_ADDR) -1)
    {
      /* Resuming from where we stopped.  */
      const address_space *aspace = cur_thr->inf->aspace.get ();

      if (cur_thr->stop_pc_p ()
	  && pc == cur_thr->stop_pc ()
	  && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
	  && execution_direction != EXEC_REVERSE)
	/* There is a breakpoint at the address we will resume at,
	   step one instruction before inserting breakpoints so that
	   we do not stop right away (and report a second hit at this
	   breakpoint).

	   Note, we don't do this in reverse, because we won't
	   actually be executing the breakpoint insn anyway.
	   We'll be (un-)executing the previous instruction. */
	cur_thr->stepping_over_breakpoint = 1;
      else if (gdbarch_single_step_through_delay_p (gdbarch)
	       && gdbarch_single_step_through_delay (gdbarch,
						     get_current_frame ()))
	/* We stepped onto an instruction that needs to be stepped
	   again before re-inserting the breakpoint, do so. */
	cur_thr->stepping_over_breakpoint = 1;
    }
  else
    {
      regcache_write_pc (regcache, addr);
    }

  /* Per the function comment, GDB_SIGNAL_DEFAULT means "deliver
     whatever the thread stopped with", so only override the stop
     signal for any other value.  */
  if (siggnal != GDB_SIGNAL_DEFAULT)
    cur_thr->set_stop_signal (siggnal);

  /* If an exception is thrown from this point on, make sure to
     propagate GDB's knowledge of the executing state to the
     frontend/user running state. */
  scoped_finish_thread_state finish_state (resume_target, resume_ptid);

  /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
     threads (e.g., we might need to set threads stepping over
     breakpoints first), from the user/frontend's point of view, all
     threads in RESUME_PTID are now running. Unless we're calling an
     inferior function, as in that case we pretend the inferior
     doesn't run at all. */
  if (!cur_thr->control.in_infcall)
    set_running (resume_target, resume_ptid, true);

  infrun_debug_printf ("addr=%s, signal=%s, resume_ptid=%s",
		       paddress (gdbarch, addr),
		       gdb_signal_to_symbol_string (siggnal),
		       resume_ptid.to_string ().c_str ());

  annotate_starting ();

  /* Make sure that output from GDB appears before output from the
     inferior. */
  gdb_flush (gdb_stdout);

  /* Since we've marked the inferior running, give it the terminal. A
     QUIT/Ctrl-C from here on is forwarded to the target (which can
     still detect attempts to unblock a stuck connection with repeated
     Ctrl-C from within target_pass_ctrlc). */
  target_terminal::inferior ();

  /* In a multi-threaded task we may select another thread and
     then continue or step.

     But if a thread that we're resuming had stopped at a breakpoint,
     it will immediately cause another breakpoint stop without any
     execution (i.e. it will report a breakpoint hit incorrectly). So
     we must step over it first.

     Look for threads other than the current (TP) that reported a
     breakpoint hit and haven't been resumed yet since. */

  /* If scheduler locking applies, we can avoid iterating over all
     threads. */
  if (!non_stop && !schedlock_applies (cur_thr))
    {
      for (thread_info *tp : all_non_exited_threads (resume_target,
						     resume_ptid))
	{
	  switch_to_thread_no_regs (tp);

	  /* Ignore the current thread here. It's handled
	     afterwards. */
	  if (tp == cur_thr)
	    continue;

	  if (!thread_still_needs_step_over (tp))
	    continue;

	  gdb_assert (!thread_is_in_step_over_chain (tp));

	  infrun_debug_printf ("need to step-over [%s] first",
			       tp->ptid.to_string ().c_str ());

	  global_thread_step_over_chain_enqueue (tp);
	}

      switch_to_thread (cur_thr);
    }

  /* Enqueue the current thread last, so that we move all other
     threads over their breakpoints first. */
  if (cur_thr->stepping_over_breakpoint)
    global_thread_step_over_chain_enqueue (cur_thr);

  /* If the thread isn't started, we'll still need to set its prev_pc,
     so that switch_back_to_stepped_thread knows the thread hasn't
     advanced. Must do this before resuming any thread, as in
     all-stop/remote, once we resume we can't send any other packet
     until the target stops again. */
  cur_thr->prev_pc = regcache_read_pc_protected (regcache);

  {
    /* Batch the resumptions below into a single commit to the
       target, released at the end of this scope.  */
    scoped_disable_commit_resumed disable_commit_resumed ("proceeding");
    bool step_over_started = start_step_over ();

    if (step_over_info_valid_p ())
      {
	/* Either this thread started a new in-line step over, or some
	   other thread was already doing one. In either case, don't
	   resume anything else until the step-over is finished. */
      }
    else if (step_over_started && !target_is_non_stop_p ())
      {
	/* A new displaced stepping sequence was started. In all-stop,
	   we can't talk to the target anymore until it next stops. */
      }
    else if (!non_stop && target_is_non_stop_p ())
      {
	INFRUN_SCOPED_DEBUG_START_END
	  ("resuming threads, all-stop-on-top-of-non-stop");

	/* In all-stop, but the target is always in non-stop mode.
	   Start all other threads that are implicitly resumed too. */
	for (thread_info *tp : all_non_exited_threads (resume_target,
						       resume_ptid))
	  {
	    switch_to_thread_no_regs (tp);
	    proceed_resume_thread_checked (tp);
	  }
      }
    else
      proceed_resume_thread_checked (cur_thr);

    disable_commit_resumed.reset_and_commit ();
  }

  finish_state.release ();

  /* If we've switched threads above, switch back to the previously
     current thread. We don't want the user to see a different
     selected thread. */
  switch_to_thread (cur_thr);

  /* Tell the event loop to wait for it to stop. If the target
     supports asynchronous execution, it'll do this from within
     target_resume. */
  if (!target_can_async_p ())
    mark_async_event_handler (infrun_async_inferior_event_token);
}
3799\f
3800
/* Start remote-debugging of a machine over a serial link.  FROM_TTY
   is forwarded to post_create_inferior and controls how chatty the
   post-connection bookkeeping is.  Blocks until the target reports a
   stop, then presents that stop to the user. */

void
start_remote (int from_tty)
{
  inferior *inf = current_inferior ();
  /* NOTE(review): presumably suppresses the usual stop announcements
     while the initial connection settles — confirm the exact effect
     of STOP_QUIETLY_REMOTE where stop_soon is consumed.  */
  inf->control.stop_soon = STOP_QUIETLY_REMOTE;

  /* Always go on waiting for the target, regardless of the mode. */
  /* FIXME: cagney/1999-09-23: At present it isn't possible to
     indicate to wait_for_inferior that a target should timeout if
     nothing is returned (instead of just blocking).  Because of this,
     targets expecting an immediate response need to, internally, set
     things up so that the target_wait() is forced to eventually
     timeout. */
  /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
     differentiate to its caller what the state of the target is after
     the initial open has been performed.  Here we're assuming that
     the target has stopped.  It should be possible to eventually have
     target_open() return to the caller an indication that the target
     is currently running and GDB state should be set to the same as
     for an async run. */
  wait_for_inferior (inf);

  /* Now that the inferior has stopped, do any bookkeeping like
     loading shared libraries.  We want to do this before normal_stop,
     so that the displayed frame is up to date. */
  post_create_inferior (from_tty, true);

  normal_stop ();
}
3832
/* Initialize static vars when a new inferior begins.  Called before
   any events have been waited for on the new inferior; resets
   breakpoint state, proceed status, the last-wait ptid cache, and the
   previous-thread record. */

void
init_wait_for_inferior (void)
{
  /* These are meaningless until the first time through wait_for_inferior. */

  breakpoint_init_inferior (current_inferior (), inf_starting);

  clear_proceed_status (0);

  nullify_last_target_wait_ptid ();

  update_previous_thread ();
}
3848
3849\f
3850
3851static void handle_inferior_event (struct execution_control_state *ecs);
3852
3853static void handle_step_into_function (struct gdbarch *gdbarch,
3854 struct execution_control_state *ecs);
3855static void handle_step_into_function_backward (struct gdbarch *gdbarch,
3856 struct execution_control_state *ecs);
3857static void handle_signal_stop (struct execution_control_state *ecs);
3858static void check_exception_resume (struct execution_control_state *,
3859 const frame_info_ptr &);
3860
3861static void end_stepping_range (struct execution_control_state *ecs);
3862static void stop_waiting (struct execution_control_state *ecs);
3863static void keep_going (struct execution_control_state *ecs);
3864static void process_event_stop_test (struct execution_control_state *ecs);
3865static bool switch_back_to_stepped_thread (struct execution_control_state *ecs);
3866
/* This function is attached as a "thread_stop_requested" observer.
   Cleanup local state that assumed the PTID was to be resumed, and
   report the stop to the frontend. */

static void
infrun_thread_stop_requested (ptid_t ptid)
{
  process_stratum_target *curr_target = current_inferior ()->process_target ();

  /* PTID was requested to stop.  If the thread was already stopped,
     but the user/frontend doesn't know about that yet (e.g., the
     thread had been temporarily paused for some step-over), set up
     for reporting the stop now. */
  for (thread_info *tp : all_threads (curr_target, ptid))
    {
      /* Only threads that the frontend believes are running, but
	 that are not actually executing on the target, need this
	 treatment — they are stopped internally without the user
	 knowing.  */
      if (tp->state != THREAD_RUNNING)
	continue;
      if (tp->executing ())
	continue;

      /* Remove matching threads from the step-over queue, so
	 start_step_over doesn't try to resume them
	 automatically. */
      if (thread_is_in_step_over_chain (tp))
	global_thread_step_over_chain_remove (tp);

      /* If the thread is stopped, but the user/frontend doesn't
	 know about that yet, queue a pending event, as if the
	 thread had just stopped now.  Unless the thread already had
	 a pending event. */
      if (!tp->has_pending_waitstatus ())
	{
	  target_waitstatus ws;
	  ws.set_stopped (GDB_SIGNAL_0);
	  tp->set_pending_waitstatus (ws);
	}

      /* Clear the inline-frame state, since we're re-processing the
	 stop. */
      clear_inline_frame_state (tp);

      /* If this thread was paused because some other thread was
	 doing an inline-step over, let that finish first.  Once
	 that happens, we'll restart all threads and consume pending
	 stop events then. */
      if (step_over_info_valid_p ())
	continue;

      /* Otherwise we can process the (new) pending event now.  Set
	 it so this pending event is considered by
	 do_target_wait. */
      tp->set_resumed (true);
    }
}
3921
/* Delete the step resume, single-step and longjmp/exception resume
   breakpoints of TP.  These are infrun-internal breakpoints that are
   only meaningful while TP is being stepped/resumed, so they are torn
   down together once TP has stopped. */

static void
delete_thread_infrun_breakpoints (struct thread_info *tp)
{
  delete_step_resume_breakpoint (tp);
  delete_exception_resume_breakpoint (tp);
  delete_single_step_breakpoints (tp);
}
3932
3933/* If the target still has execution, call FUNC for each thread that
3934 just stopped. In all-stop, that's all the non-exited threads; in
3935 non-stop, that's the current thread, only. */
3936
3937typedef void (*for_each_just_stopped_thread_callback_func)
3938 (struct thread_info *tp);
3939
3940static void
3941for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
3942{
3943 if (!target_has_execution () || inferior_ptid == null_ptid)
3944 return;
3945
3946 if (target_is_non_stop_p ())
3947 {
3948 /* If in non-stop mode, only the current thread stopped. */
3949 func (inferior_thread ());
3950 }
3951 else
3952 {
3953 /* In all-stop mode, all threads have stopped. */
3954 for (thread_info *tp : all_non_exited_threads ())
3955 func (tp);
3956 }
3957}
3958
/* Delete the step resume and longjmp/exception resume breakpoints of
   the threads that just stopped (the current thread in non-stop, all
   non-exited threads in all-stop — see for_each_just_stopped_thread). */

static void
delete_just_stopped_threads_infrun_breakpoints (void)
{
  for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
}
3967
/* Delete the single-step breakpoints of the threads that just
   stopped (the current thread in non-stop, all non-exited threads in
   all-stop — see for_each_just_stopped_thread). */

static void
delete_just_stopped_threads_single_step_breakpoints (void)
{
  for_each_just_stopped_thread (delete_single_step_breakpoints);
}
3976
/* See infrun.h.

   Log a target_wait call and its outcome to the infrun debug stream:
   the ptid waited on (WAITON_PTID), the ptid the event came from
   (RESULT_PTID), the wait status WS, and, when known, the connection
   the event came from (PROC_TARGET, may be null). */

void
print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
			   const struct target_waitstatus &ws,
			   process_stratum_target *proc_target)
{
  infrun_debug_printf ("target_wait (%s [%s], status) =",
		       waiton_ptid.to_string ().c_str (),
		       target_pid_to_str (waiton_ptid).c_str ());
  infrun_debug_printf (" %s [%s],",
		       result_ptid.to_string ().c_str (),
		       target_pid_to_str (result_ptid).c_str ());
  infrun_debug_printf (" %s", ws.to_string ().c_str ());

  if (proc_target != nullptr)
    infrun_debug_printf (" from target %d (%s)",
			 proc_target->connection_number,
			 proc_target->shortname ());
}
3997
/* Wrapper for print_target_wait_results above for convenience:
   extracts the result ptid, wait status and target from ECS. */

static void
print_target_wait_results (ptid_t waiton_ptid,
			   const execution_control_state &ecs)
{
  print_target_wait_results (waiton_ptid, ecs.ptid, ecs.ws, ecs.target);
}
4006
4007/* Select a thread at random, out of those which are resumed and have
4008 had events. */
4009
4010static struct thread_info *
4011random_pending_event_thread (inferior *inf, ptid_t waiton_ptid)
4012{
4013 process_stratum_target *proc_target = inf->process_target ();
4014 thread_info *thread
4015 = proc_target->random_resumed_with_pending_wait_status (inf, waiton_ptid);
4016
4017 if (thread == nullptr)
4018 {
4019 infrun_debug_printf ("None found.");
4020 return nullptr;
4021 }
4022
4023 infrun_debug_printf ("Found %s.", thread->ptid.to_string ().c_str ());
4024 gdb_assert (thread->resumed ());
4025 gdb_assert (thread->has_pending_waitstatus ());
4026
4027 return thread;
4028}
4029
/* Wrapper for target_wait that first checks whether threads have
   pending statuses to report before actually asking the target for
   more events.  INF is the inferior we're using to call target_wait
   on.  Returns the ptid the reported event is for, filling *STATUS. */

static ptid_t
do_target_wait_1 (inferior *inf, ptid_t ptid,
		  target_waitstatus *status, target_wait_flags options)
{
  struct thread_info *tp;

  /* We know that we are looking for an event in the target of inferior
     INF, but we don't know which thread the event might come from.  As
     such we want to make sure that INFERIOR_PTID is reset so that none of
     the wait code relies on it - doing so is always a mistake. */
  switch_to_inferior_no_thread (inf);

  /* First check if there is a resumed thread with a wait status
     pending. */
  if (ptid == minus_one_ptid || ptid.is_pid ())
    {
      /* Waiting on "any thread" (or any thread of a process): pick a
	 pending-event thread at random for fairness.  */
      tp = random_pending_event_thread (inf, ptid);
    }
  else
    {
      infrun_debug_printf ("Waiting for specific thread %s.",
			   ptid.to_string ().c_str ());

      /* We have a specific thread to check. */
      tp = inf->find_thread (ptid);
      gdb_assert (tp != nullptr);
      if (!tp->has_pending_waitstatus ())
	tp = nullptr;
    }

  /* A pending breakpoint stop may have become stale: the thread's PC
     may have changed since the event was recorded, or the breakpoint
     it hit may have been removed.  In either case, replace the stale
     event with a spurious stop instead of reporting a phantom
     breakpoint hit.  */
  if (tp != nullptr
      && (tp->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || tp->stop_reason () == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct regcache *regcache = get_thread_regcache (tp);
      struct gdbarch *gdbarch = regcache->arch ();
      CORE_ADDR pc;
      int discard = 0;

      pc = regcache_read_pc (regcache);

      if (pc != tp->stop_pc ())
	{
	  infrun_debug_printf ("PC of %s changed. was=%s, now=%s",
			       tp->ptid.to_string ().c_str (),
			       paddress (gdbarch, tp->stop_pc ()),
			       paddress (gdbarch, pc));
	  discard = 1;
	}
      else if (!breakpoint_inserted_here_p (tp->inf->aspace.get (), pc))
	{
	  infrun_debug_printf ("previous breakpoint of %s, at %s gone",
			       tp->ptid.to_string ().c_str (),
			       paddress (gdbarch, pc));

	  discard = 1;
	}

      if (discard)
	{
	  infrun_debug_printf ("pending event of %s cancelled.",
			       tp->ptid.to_string ().c_str ());

	  tp->clear_pending_waitstatus ();
	  target_waitstatus ws;
	  ws.set_spurious ();
	  tp->set_pending_waitstatus (ws);
	  tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
	}
    }

  if (tp != nullptr)
    {
      infrun_debug_printf ("Using pending wait status %s for %s.",
			   tp->pending_waitstatus ().to_string ().c_str (),
			   tp->ptid.to_string ().c_str ());

      /* Now that we've selected our final event LWP, un-adjust its PC
	 if it was a software breakpoint (and the target doesn't
	 always adjust the PC itself). */
      if (tp->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
	  && !target_supports_stopped_by_sw_breakpoint ())
	{
	  struct regcache *regcache;
	  struct gdbarch *gdbarch;
	  int decr_pc;

	  regcache = get_thread_regcache (tp);
	  gdbarch = regcache->arch ();

	  decr_pc = gdbarch_decr_pc_after_break (gdbarch);
	  if (decr_pc != 0)
	    {
	      CORE_ADDR pc;

	      pc = regcache_read_pc (regcache);
	      regcache_write_pc (regcache, pc + decr_pc);
	    }
	}

      tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
      *status = tp->pending_waitstatus ();
      tp->clear_pending_waitstatus ();

      /* Wake up the event loop again, until all pending events are
	 processed. */
      if (target_is_async_p ())
	mark_async_event_handler (infrun_async_inferior_event_token);
      return tp->ptid;
    }

  /* But if we don't find one, we'll have to wait. */

  /* We can't ask a non-async target to do a non-blocking wait, so this will be
     a blocking wait. */
  if (!target_can_async_p ())
    options &= ~TARGET_WNOHANG;

  return target_wait (ptid, status, options);
}
4155
/* Wrapper for target_wait that first checks whether threads have
   pending statuses to report before actually asking the target for
   more events.  Polls for events from all inferiors/targets.
   Returns true and fills *ECS when an event was found; returns false
   (with ECS->ws set to ignore) otherwise. */

static bool
do_target_wait (ptid_t wait_ptid, execution_control_state *ecs,
		target_wait_flags options)
{
  int num_inferiors = 0;
  int random_selector;

  /* For fairness, we pick the first inferior/target to poll at random
     out of all inferiors that may report events, and then continue
     polling the rest of the inferior list starting from that one in a
     circular fashion until the whole list is polled once. */

  /* Match on the process part only, so a thread-specific WAIT_PTID
     still selects that thread's inferior.  */
  ptid_t wait_ptid_pid {wait_ptid.pid ()};
  auto inferior_matches = [&wait_ptid_pid] (inferior *inf)
    {
      return (inf->process_target () != nullptr
	      && ptid_t (inf->pid).matches (wait_ptid_pid));
    };

  /* First see how many matching inferiors we have. */
  for (inferior *inf : all_inferiors ())
    if (inferior_matches (inf))
      num_inferiors++;

  if (num_inferiors == 0)
    {
      ecs->ws.set_ignore ();
      return false;
    }

  /* Now randomly pick an inferior out of those that matched. */
  random_selector = (int)
    ((num_inferiors * (double) rand ()) / (RAND_MAX + 1.0));

  if (num_inferiors > 1)
    infrun_debug_printf ("Found %d inferiors, starting at #%d",
			 num_inferiors, random_selector);

  /* Select the Nth inferior that matched. */

  inferior *selected = nullptr;

  for (inferior *inf : all_inferiors ())
    if (inferior_matches (inf))
      if (random_selector-- == 0)
	{
	  selected = inf;
	  break;
	}

  /* Now poll for events out of each of the matching inferior's
     targets, starting from the selected one. */

  auto do_wait = [&] (inferior *inf)
    {
      ecs->ptid = do_target_wait_1 (inf, wait_ptid, &ecs->ws, options);
      ecs->target = inf->process_target ();
      return (ecs->ws.kind () != TARGET_WAITKIND_IGNORE);
    };

  /* Needed in 'all-stop + target-non-stop' mode, because we end up
     here spuriously after the target is all stopped and we've already
     reported the stop to the user, polling for events. */
  scoped_restore_current_thread restore_thread;

  intrusive_list_iterator<inferior> start
    = inferior_list.iterator_to (*selected);

  /* Walk from the randomly selected inferior to the end of the
     list... */
  for (intrusive_list_iterator<inferior> it = start;
       it != inferior_list.end ();
       ++it)
    {
      inferior *inf = &*it;

      if (inferior_matches (inf) && do_wait (inf))
	return true;
    }

  /* ... then wrap around from the beginning back to the starting
     point, so every matching inferior is polled exactly once. */
  for (intrusive_list_iterator<inferior> it = inferior_list.begin ();
       it != start;
       ++it)
    {
      inferior *inf = &*it;

      if (inferior_matches (inf) && do_wait (inf))
	return true;
    }

  ecs->ws.set_ignore ();
  return false;
}
4251
/* An event reported by wait_one.  Bundles the event's originating
   target connection, the thread it was for, and the wait status, so
   events can be stored and handled later (see handle_one). */

struct wait_one_event
{
  /* The target the event came out of. */
  process_stratum_target *target;

  /* The PTID the event was for. */
  ptid_t ptid;

  /* The waitstatus. */
  target_waitstatus ws;
};
4265
4266static bool handle_one (const wait_one_event &event);
4267static int finish_step_over (struct execution_control_state *ecs);
4268
/* Prepare and stabilize the inferior for detaching it.  E.g.,
   detaching while a thread is displaced stepping is a recipe for
   crashing it, as nothing would readjust the PC out of the scratch
   pad.  Operates on the current inferior; blocks until any in-flight
   displaced steps of that inferior have completed. */

void
prepare_for_detach (void)
{
  struct inferior *inf = current_inferior ();
  ptid_t pid_ptid = ptid_t (inf->pid);
  scoped_restore_current_thread restore_thread;

  /* Flag the inferior as detaching for the duration of this
     function.  */
  scoped_restore restore_detaching = make_scoped_restore (&inf->detaching, true);

  /* Remove all threads of INF from the global step-over chain.  We
     want to stop any ongoing step-over, not start any new one. */
  thread_step_over_list_safe_range range
    = make_thread_step_over_list_safe_range (global_thread_step_over_list);

  for (thread_info *tp : range)
    if (tp->inf == inf)
      {
	infrun_debug_printf ("removing thread %s from global step over chain",
			     tp->ptid.to_string ().c_str ());
	global_thread_step_over_chain_remove (tp);
      }

  /* If we were already in the middle of an inline step-over, and the
     thread stepping belongs to the inferior we're detaching, we need
     to restart the threads of other inferiors. */
  if (step_over_info.thread != -1)
    {
      infrun_debug_printf ("inline step-over in-process while detaching");

      /* NOTE(review): assumes the stepping thread recorded in
	 step_over_info still exists — confirm it cannot have been
	 deleted while step_over_info.thread != -1.  */
      thread_info *thr = find_thread_global_id (step_over_info.thread);
      if (thr->inf == inf)
	{
	  /* Since we removed threads of INF from the step-over chain,
	     we know this won't start a step-over for INF. */
	  clear_step_over_info ();

	  if (target_is_non_stop_p ())
	    {
	      /* Start a new step-over in another thread if there's
		 one that needs it. */
	      start_step_over ();

	      /* Restart all other threads (except the
		 previously-stepping thread, since that one is still
		 running). */
	      if (!step_over_info_valid_p ())
		restart_threads (thr);
	    }
	}
    }

  if (displaced_step_in_progress (inf))
    {
      infrun_debug_printf ("displaced-stepping in-process while detaching");

      /* Stop threads currently displaced stepping, aborting it. */

      for (thread_info *thr : inf->non_exited_threads ())
	{
	  if (thr->displaced_step_state.in_progress ())
	    {
	      if (thr->executing ())
		{
		  /* Ask the target to stop the thread, once.  */
		  if (!thr->stop_requested)
		    {
		      target_stop (thr->ptid);
		      thr->stop_requested = true;
		    }
		}
	      else
		thr->set_resumed (false);
	    }
	}

      /* Consume events until no thread of INF is displaced stepping
	 anymore; handle_one finishes each aborted step.  */
      while (displaced_step_in_progress (inf))
	{
	  wait_one_event event;

	  event.target = inf->process_target ();
	  event.ptid = do_target_wait_1 (inf, pid_ptid, &event.ws, 0);

	  if (debug_infrun)
	    print_target_wait_results (pid_ptid, event.ptid, event.ws,
				       event.target);

	  handle_one (event);
	}

      /* It's OK to leave some of the threads of INF stopped, since
	 they'll be detached shortly. */
    }
}
4366
/* If all-stop, but there exists a non-stop target, stop all threads
   now that we're presenting the stop to the user.  In user-facing
   all-stop mode on top of a non-stop target, other threads may still
   be running when one thread reports a stop. */

static void
stop_all_threads_if_all_stop_mode ()
{
  if (!non_stop && exists_non_stop_target ())
    stop_all_threads ("presenting stop to user in all-stop");
}
4376
/* Wait for control to return from inferior to debugger.

   If inferior gets a signal, we may decide to start it up again
   instead of returning.  That is why there is a loop in this function.
   When this function actually returns it means the inferior
   should be left stopped and GDB should read more commands. */

static void
wait_for_inferior (inferior *inf)
{
  infrun_debug_printf ("wait_for_inferior ()");

  /* Tear down infrun-internal breakpoints on exit, even on error.  */
  SCOPE_EXIT { delete_just_stopped_threads_infrun_breakpoints (); };

  /* If an error happens while handling the event, propagate GDB's
     knowledge of the executing state to the frontend/user running
     state. */
  scoped_finish_thread_state finish_state
    (inf->process_target (), minus_one_ptid);

  while (1)
    {
      execution_control_state ecs;

      overlay_cache_invalid = 1;

      /* Flush target cache before starting to handle each event.
	 Target was running and cache could be stale.  This is just a
	 heuristic.  Running threads may modify target memory, but we
	 don't get any event. */
      target_dcache_invalidate (current_program_space->aspace);

      ecs.ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs.ws, 0);
      ecs.target = inf->process_target ();

      if (debug_infrun)
	print_target_wait_results (minus_one_ptid, ecs);

      /* Now figure out what to do with the result.  */
      handle_inferior_event (&ecs);

      if (!ecs.wait_some_more)
	break;
    }

  stop_all_threads_if_all_stop_mode ();

  /* No error, don't finish the state yet. */
  finish_state.release ();
}
4427
4428/* Cleanup that reinstalls the readline callback handler, if the
4429 target is running in the background. If while handling the target
4430 event something triggered a secondary prompt, like e.g., a
4431 pagination prompt, we'll have removed the callback handler (see
4432 gdb_readline_wrapper_line). Need to do this as we go back to the
4433 event loop, ready to process further input. Note this has no
4434 effect if the handler hasn't actually been removed, because calling
4435 rl_callback_handler_install resets the line buffer, thus losing
4436 input. */
4437
4438static void
4439reinstall_readline_callback_handler_cleanup ()
4440{
4441 struct ui *ui = current_ui;
4442
4443 if (!ui->async)
4444 {
4445 /* We're not going back to the top level event loop yet. Don't
4446 install the readline callback, as it'd prep the terminal,
4447 readline-style (raw, noecho) (e.g., --batch). We'll install
4448 it the next time the prompt is displayed, when we're ready
4449 for input. */
4450 return;
4451 }
4452
4453 if (ui->command_editing && ui->prompt_state != PROMPT_BLOCKED)
4454 gdb_rl_callback_handler_reinstall ();
4455}
4456
/* Clean up the FSMs of threads that are now stopped.  In non-stop,
   that's just the event thread.  In all-stop, that's all threads.  In
   all-stop, threads that had a pending exit no longer have a reason
   to be around, as their FSMs/commands are canceled, so we delete
   them.  This avoids "info threads" listing such threads as if they
   were alive (and failing to read their registers), the user being
   able to select and resume them (and that failing), etc. */

static void
clean_up_just_stopped_threads_fsms (struct execution_control_state *ecs)
{
  /* The first clean_up call below assumes the event thread is the current
     one. */
  if (ecs->event_thread != nullptr)
    gdb_assert (ecs->event_thread == inferior_thread ());

  if (ecs->event_thread != nullptr
      && ecs->event_thread->thread_fsm () != nullptr)
    ecs->event_thread->thread_fsm ()->clean_up (ecs->event_thread);

  if (!non_stop)
    {
      /* All-stop: clean up every other live thread's FSM too,
	 switching to each thread before its clean_up call.  */
      scoped_restore_current_thread restore_thread;

      for (thread_info *thr : all_threads_safe ())
	{
	  if (thr->state == THREAD_EXITED)
	    continue;

	  /* The event thread was handled above.  */
	  if (thr == ecs->event_thread)
	    continue;

	  if (thr->thread_fsm () != nullptr)
	    {
	      switch_to_thread (thr);
	      thr->thread_fsm ()->clean_up (thr);
	    }

	  /* As we are cancelling the command/FSM of this thread,
	     whatever was the reason we needed to report a thread
	     exited event to the user, that reason is gone.  Delete
	     the thread, so that the user doesn't see it in the thread
	     list, the next proceed doesn't try to resume it, etc. */
	  if (thr->has_pending_waitstatus ()
	      && (thr->pending_waitstatus ().kind ()
		  == TARGET_WAITKIND_THREAD_EXITED))
	    delete_thread (thr);
	}
    }
}
4507
4508/* Helper for all_uis_check_sync_execution_done that works on the
4509 current UI. */
4510
4511static void
4512check_curr_ui_sync_execution_done (void)
4513{
4514 struct ui *ui = current_ui;
4515
4516 if (ui->prompt_state == PROMPT_NEEDED
4517 && ui->async
4518 && !gdb_in_secondary_prompt_p (ui))
4519 {
4520 target_terminal::ours ();
4521 top_level_interpreter ()->on_sync_execution_done ();
4522 ui->register_file_handler ();
4523 }
4524}
4525
/* See infrun.h.

   Runs the per-UI sync-execution-done check with each UI in turn
   made the current UI. */

void
all_uis_check_sync_execution_done (void)
{
  SWITCH_THRU_ALL_UIS ()
    {
      check_curr_ui_sync_execution_done ();
    }
}
4536
/* See infrun.h.

   For each UI that is waiting to display its prompt, disable stdin
   input while synchronous execution is in progress. */

void
all_uis_on_sync_execution_starting (void)
{
  SWITCH_THRU_ALL_UIS ()
    {
      if (current_ui->prompt_state == PROMPT_NEEDED)
	async_disable_stdin ();
    }
}
4548
4549/* A quit_handler callback installed while we're handling inferior
4550 events. */
4551
4552static void
4553infrun_quit_handler ()
4554{
4555 if (target_terminal::is_ours ())
4556 {
4557 /* Do nothing.
4558
4559 default_quit_handler would throw a quit in this case, but if
4560 we're handling an event while we have the terminal, it means
4561 the target is running a background execution command, and
4562 thus when users press Ctrl-C, they're wanting to interrupt
4563 whatever command they were executing in the command line.
4564 E.g.:
4565
4566 (gdb) c&
4567 (gdb) foo bar whatever<ctrl-c>
4568
4569 That Ctrl-C should clear the input line, not interrupt event
4570 handling if it happens that the user types Ctrl-C at just the
4571 "wrong" time!
4572
4573 It's as-if background event handling was handled by a
4574 separate background thread.
4575
4576 To be clear, the Ctrl-C is not lost -- it will be processed
4577 by the next QUIT call once we're out of fetch_inferior_event
4578 again. */
4579 }
4580 else
4581 {
4582 if (check_quit_flag ())
4583 target_pass_ctrlc ();
4584 }
4585}
4586
4587/* Asynchronous version of wait_for_inferior. It is called by the
4588 event loop whenever a change of state is detected on the file
4589 descriptor corresponding to the target. It can be called more than
4590 once to complete a single execution command. In such cases we need
4591 to keep the state in a global variable ECSS. If it is the last time
4592 that this function is called for a single execution command, then
4593 report to the user that the inferior has stopped, and do the
4594 necessary cleanups. */
4595
void
fetch_inferior_event ()
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  execution_control_state ecs;
  /* Set when an execution command has fully completed during this
     call; checked at the end to print "completed." and to decide
     whether to keep the event thread selected.  */
  int cmd_done = 0;

  /* Events are always processed with the main UI as current UI.  This
     way, warnings, debug output, etc. are always consistently sent to
     the main console.  */
  scoped_restore save_ui = make_scoped_restore (&current_ui, main_ui);

  /* Temporarily disable pagination.  Otherwise, the user would be
     given an option to press 'q' to quit, which would cause an early
     exit and could leave GDB in a half-baked state.  */
  scoped_restore save_pagination
    = make_scoped_restore (&pagination_enabled, false);

  /* Install a quit handler that does nothing if we have the terminal
     (meaning the target is running a background execution command),
     so that Ctrl-C never interrupts GDB before the event is fully
     handled.  */
  scoped_restore restore_quit_handler
    = make_scoped_restore (&quit_handler, infrun_quit_handler);

  /* Make sure a SIGINT does not interrupt an extension language while
     we're handling an event.  That could interrupt a Python unwinder
     or a Python observer or some such.  A Ctrl-C should either be
     forwarded to the inferior if the inferior has the terminal, or,
     if GDB has the terminal, should interrupt the command the user is
     typing in the CLI.  */
  scoped_disable_cooperative_sigint_handling restore_coop_sigint;

  /* End up with readline processing input, if necessary.  */
  {
    SCOPE_EXIT { reinstall_readline_callback_handler_cleanup (); };

    /* We're handling a live event, so make sure we're doing live
       debugging.  If we're looking at traceframes while the target is
       running, we're going to need to get back to that mode after
       handling the event.  */
    std::optional<scoped_restore_current_traceframe> maybe_restore_traceframe;
    if (non_stop)
      {
	maybe_restore_traceframe.emplace ();
	set_current_traceframe (-1);
      }

    /* The user/frontend should not notice a thread switch due to
       internal events.  Make sure we revert to the user selected
       thread and frame after handling the event and running any
       breakpoint commands.  */
    scoped_restore_current_thread restore_thread;

    overlay_cache_invalid = 1;
    /* Flush target cache before starting to handle each event.  Target
       was running and cache could be stale.  This is just a heuristic.
       Running threads may modify target memory, but we don't get any
       event.  */
    target_dcache_invalidate (current_program_space->aspace);

    scoped_restore save_exec_dir
      = make_scoped_restore (&execution_direction,
			     target_execution_direction ());

    /* Allow targets to pause their resumed threads while we handle
       the event.  */
    scoped_disable_commit_resumed disable_commit_resumed ("handling event");

    /* Is the current thread performing an inferior function call as part
       of a breakpoint condition evaluation?  */
    bool in_cond_eval = (inferior_ptid != null_ptid
			 && inferior_thread ()->control.in_cond_eval);

    /* If the thread is in the middle of the condition evaluation, wait for
       an event from the current thread.  Otherwise, wait for an event from
       any thread.  */
    ptid_t waiton_ptid = in_cond_eval ? inferior_ptid : minus_one_ptid;

    if (!do_target_wait (waiton_ptid, &ecs, TARGET_WNOHANG))
      {
	infrun_debug_printf ("do_target_wait returned no event");
	disable_commit_resumed.reset_and_commit ();
	return;
      }

    gdb_assert (ecs.ws.kind () != TARGET_WAITKIND_IGNORE);

    /* Switch to the inferior that generated the event, so we can do
       target calls.  If the event was not associated to a ptid, just
       switch to the target that reported it.  */
    if (ecs.ptid != null_ptid
	&& ecs.ptid != minus_one_ptid)
      switch_to_inferior_no_thread (find_inferior_ptid (ecs.target, ecs.ptid));
    else
      switch_to_target_no_thread (ecs.target);

    if (debug_infrun)
      print_target_wait_results (minus_one_ptid, ecs);

    /* If an error happens while handling the event, propagate GDB's
       knowledge of the executing state to the frontend/user running
       state.  */
    ptid_t finish_ptid = !target_is_non_stop_p () ? minus_one_ptid : ecs.ptid;
    scoped_finish_thread_state finish_state (ecs.target, finish_ptid);

    /* Get executed before scoped_restore_current_thread above to apply
       still for the thread which has thrown the exception.  */
    auto defer_bpstat_clear
      = make_scope_exit (bpstat_clear_actions);
    auto defer_delete_threads
      = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints);

    /* Remember the stop ID so we can tell, below, whether
       handle_inferior_event already presented a stop to the user.  */
    int stop_id = get_stop_id ();

    /* Now figure out what to do with the result.  */
    handle_inferior_event (&ecs);

    if (!ecs.wait_some_more)
      {
	struct inferior *inf = find_inferior_ptid (ecs.target, ecs.ptid);
	bool should_stop = true;
	struct thread_info *thr = ecs.event_thread;

	delete_just_stopped_threads_infrun_breakpoints ();

	/* Let the thread's finite-state machine (e.g. for "finish" or
	   "until") decide whether this stop is final.  */
	if (thr != nullptr && thr->thread_fsm () != nullptr)
	  should_stop = thr->thread_fsm ()->should_stop (thr);

	if (!should_stop)
	  {
	    keep_going (&ecs);
	  }
	else
	  {
	    bool should_notify_stop = true;
	    bool proceeded = false;

	    /* If the thread that stopped just completed an inferior
	       function call as part of a condition evaluation, then we
	       don't want to stop all the other threads.  */
	    if (ecs.event_thread == nullptr
		|| !ecs.event_thread->control.in_cond_eval)
	      stop_all_threads_if_all_stop_mode ();

	    clean_up_just_stopped_threads_fsms (&ecs);

	    if (stop_id != get_stop_id ())
	      {
		/* If the stop-id has changed then a stop has already been
		   presented to the user in handle_inferior_event, this is
		   likely a failed inferior call.  As the stop has already
		   been announced then we should not notify again.

		   Also, if the prompt state is not PROMPT_NEEDED then GDB
		   will not be ready for user input after this function.  */
		should_notify_stop = false;
		gdb_assert (current_ui->prompt_state == PROMPT_NEEDED);
	      }
	    else if (thr != nullptr && thr->thread_fsm () != nullptr)
	      should_notify_stop
		= thr->thread_fsm ()->should_notify_stop ();

	    if (should_notify_stop)
	      {
		/* We may not find an inferior if this was a process exit.  */
		if (inf == nullptr || inf->control.stop_soon == NO_STOP_QUIETLY)
		  proceeded = normal_stop ();
	      }

	    if (!proceeded && !in_cond_eval)
	      {
		inferior_event_handler (INF_EXEC_COMPLETE);
		cmd_done = 1;
	      }

	    /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
	       previously selected thread is gone.  We have two
	       choices - switch to no thread selected, or restore the
	       previously selected thread (now exited).  We chose the
	       later, just because that's what GDB used to do.  After
	       this, "info threads" says "The current thread <Thread
	       ID 2> has terminated." instead of "No thread
	       selected.".  */
	    if (!non_stop
		&& cmd_done
		&& ecs.ws.kind () != TARGET_WAITKIND_NO_RESUMED)
	      restore_thread.dont_restore ();
	  }
      }

    defer_delete_threads.release ();
    defer_bpstat_clear.release ();

    /* No error, don't finish the thread states yet.  */
    finish_state.release ();

    disable_commit_resumed.reset_and_commit ();

    /* This scope is used to ensure that readline callbacks are
       reinstalled here.  */
  }

  /* Handling this event might have caused some inferiors to become prunable.
     For example, the exit of an inferior that was automatically added.  Try
     to get rid of them.  Keeping those around slows down things linearly.

     Note that this never removes the current inferior.  Therefore, call this
     after RESTORE_THREAD went out of scope, in case the event inferior (which was
     temporarily made the current inferior) is meant to be deleted.

     Call this before all_uis_check_sync_execution_done, so that notifications about
     removed inferiors appear before the prompt.  */
  prune_inferiors ();

  /* If a UI was in sync execution mode, and now isn't, restore its
     prompt (a synchronous execution command has finished, and we're
     ready for input).  */
  all_uis_check_sync_execution_done ();

  if (cmd_done
      && exec_done_display_p
      && (inferior_ptid == null_ptid
	  || inferior_thread ()->state != THREAD_RUNNING))
    gdb_printf (_("completed.\n"));
}
4822
4823/* See infrun.h. */
4824
void
set_step_info (thread_info *tp, const frame_info_ptr &frame,
	       struct symtab_and_line sal)
{
  /* This can be removed once this function no longer implicitly relies on the
     inferior_ptid value.  */
  gdb_assert (inferior_ptid == tp->ptid);

  /* Record the frame where the step was started, and the id of the
     enclosing non-inline (stack) frame, so that later events can tell
     whether we stepped into/out of a function.  */
  tp->control.step_frame_id = get_frame_id (frame);
  tp->control.step_stack_frame_id = get_stack_frame_id (frame);

  /* Remember the source location stepping started from, so that
     "step"/"next" know when a different line has been reached.  */
  tp->current_symtab = sal.symtab;
  tp->current_line = sal.line;

  infrun_debug_printf
    ("symtab = %s, line = %d, step_frame_id = %s, step_stack_frame_id = %s",
     tp->current_symtab != nullptr ? tp->current_symtab->filename : "<null>",
     tp->current_line,
     tp->control.step_frame_id.to_string ().c_str (),
     tp->control.step_stack_frame_id.to_string ().c_str ());
}
4846
4847/* Clear context switchable stepping state. */
4848
4849void
4850init_thread_stepping_state (struct thread_info *tss)
4851{
4852 tss->stepped_breakpoint = 0;
4853 tss->stepping_over_breakpoint = 0;
4854 tss->stepping_over_watchpoint = 0;
4855 tss->step_after_step_resume_breakpoint = 0;
4856}
4857
4858/* See infrun.h. */
4859
4860void
4861set_last_target_status (process_stratum_target *target, ptid_t ptid,
4862 const target_waitstatus &status)
4863{
4864 target_last_proc_target = target;
4865 target_last_wait_ptid = ptid;
4866 target_last_waitstatus = status;
4867}
4868
4869/* See infrun.h. */
4870
4871void
4872get_last_target_status (process_stratum_target **target, ptid_t *ptid,
4873 target_waitstatus *status)
4874{
4875 if (target != nullptr)
4876 *target = target_last_proc_target;
4877 if (ptid != nullptr)
4878 *ptid = target_last_wait_ptid;
4879 if (status != nullptr)
4880 *status = target_last_waitstatus;
4881}
4882
4883/* See infrun.h. */
4884
4885void
4886nullify_last_target_wait_ptid (void)
4887{
4888 target_last_proc_target = nullptr;
4889 target_last_wait_ptid = minus_one_ptid;
4890 target_last_waitstatus = {};
4891}
4892
4893/* Switch thread contexts. */
4894
4895static void
4896context_switch (execution_control_state *ecs)
4897{
4898 if (ecs->ptid != inferior_ptid
4899 && (inferior_ptid == null_ptid
4900 || ecs->event_thread != inferior_thread ()))
4901 {
4902 infrun_debug_printf ("Switching context from %s to %s",
4903 inferior_ptid.to_string ().c_str (),
4904 ecs->ptid.to_string ().c_str ());
4905 }
4906
4907 switch_to_thread (ecs->event_thread);
4908}
4909
4910/* If the target can't tell whether we've hit breakpoints
4911 (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
4912 check whether that could have been caused by a breakpoint. If so,
4913 adjust the PC, per gdbarch_decr_pc_after_break. */
4914
static void
adjust_pc_after_break (struct thread_info *thread,
		       const target_waitstatus &ws)
{
  struct regcache *regcache;
  struct gdbarch *gdbarch;
  CORE_ADDR breakpoint_pc, decr_pc;

  /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP.  If
     we aren't, just return.

     We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
     affected by gdbarch_decr_pc_after_break.  Other waitkinds which are
     implemented by software breakpoints should be handled through the normal
     breakpoint layer.

     NOTE drow/2004-01-31: On some targets, breakpoints may generate
     different signals (SIGILL or SIGEMT for instance), but it is less
     clear where the PC is pointing afterwards.  It may not match
     gdbarch_decr_pc_after_break.  I don't know any specific target that
     generates these signals at breakpoints (the code has been in GDB since at
     least 1992) so I can not guess how to handle them here.

     In earlier versions of GDB, a target with
     gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
     watchpoint affected by gdbarch_decr_pc_after_break.  I haven't found any
     target with both of these set in GDB history, and it seems unlikely to be
     correct, so gdbarch_have_nonsteppable_watchpoint is not checked here.  */

  if (ws.kind () != TARGET_WAITKIND_STOPPED)
    return;

  if (ws.sig () != GDB_SIGNAL_TRAP)
    return;

  /* In reverse execution, when a breakpoint is hit, the instruction
     under it has already been de-executed.  The reported PC always
     points at the breakpoint address, so adjusting it further would
     be wrong.  E.g., consider this case on a decr_pc_after_break == 1
     architecture:

       B1         0x08000000 :   INSN1
       B2         0x08000001 :   INSN2
		  0x08000002 :   INSN3
	    PC -> 0x08000003 :   INSN4

     Say you're stopped at 0x08000003 as above.  Reverse continuing
     from that point should hit B2 as below.  Reading the PC when the
     SIGTRAP is reported should read 0x08000001 and INSN2 should have
     been de-executed already.

       B1         0x08000000 :   INSN1
       B2   PC -> 0x08000001 :   INSN2
		  0x08000002 :   INSN3
		  0x08000003 :   INSN4

     We can't apply the same logic as for forward execution, because
     we would wrongly adjust the PC to 0x08000000, since there's a
     breakpoint at PC - 1.  We'd then report a hit on B1, although
     INSN1 hadn't been de-executed yet.  Doing nothing is the correct
     behavior.  */
  if (execution_direction == EXEC_REVERSE)
    return;

  /* If the target can tell whether the thread hit a SW breakpoint,
     trust it.  Targets that can tell also adjust the PC
     themselves.  */
  if (target_supports_stopped_by_sw_breakpoint ())
    return;

  /* Note that relying on whether a breakpoint is planted in memory to
     determine this can fail.  E.g., the breakpoint could have been
     removed since.  Or the thread could have been told to step an
     instruction the size of a breakpoint instruction, and only
     _after_ was a breakpoint inserted at its address.  */

  /* If this target does not decrement the PC after breakpoints, then
     we have nothing to do.  */
  regcache = get_thread_regcache (thread);
  gdbarch = regcache->arch ();

  decr_pc = gdbarch_decr_pc_after_break (gdbarch);
  if (decr_pc == 0)
    return;

  const address_space *aspace = thread->inf->aspace.get ();

  /* Find the location where (if we've hit a breakpoint) the
     breakpoint would be.  */
  breakpoint_pc = regcache_read_pc (regcache) - decr_pc;

  /* If the target can't tell whether a software breakpoint triggered,
     fallback to figuring it out based on breakpoints we think were
     inserted in the target, and on whether the thread was stepped or
     continued.  */

  /* Check whether there actually is a software breakpoint inserted at
     that location.

     If in non-stop mode, a race condition is possible where we've
     removed a breakpoint, but stop events for that breakpoint were
     already queued and arrive later.  To suppress those spurious
     SIGTRAPs, we keep a list of such breakpoint locations for a bit,
     and retire them after a number of stop events are reported.  Note
     this is an heuristic and can thus get confused.  The real fix is
     to get the "stopped by SW BP and needs adjustment" info out of
     the target/kernel (and thus never reach here; see above).  */
  if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
      || (target_is_non_stop_p ()
	  && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
    {
      std::optional<scoped_restore_tmpl<int>> restore_operation_disable;

      /* When process record is in use, don't record the PC write as a
	 reversible operation; it is an internal adjustment, not
	 inferior activity.  */
      if (record_full_is_used ())
	restore_operation_disable.emplace
	  (record_full_gdb_operation_disable_set ());

      /* When using hardware single-step, a SIGTRAP is reported for both
	 a completed single-step and a software breakpoint.  Need to
	 differentiate between the two, as the latter needs adjusting
	 but the former does not.

	 The SIGTRAP can be due to a completed hardware single-step only if
	  - we didn't insert software single-step breakpoints
	  - this thread is currently being stepped

	 If any of these events did not occur, we must have stopped due
	 to hitting a software breakpoint, and have to back up to the
	 breakpoint address.

	 As a special case, we could have hardware single-stepped a
	 software breakpoint.  In this case (prev_pc == breakpoint_pc),
	 we also need to back up to the breakpoint address.  */

      if (thread_has_single_step_breakpoints_set (thread)
	  || !currently_stepping (thread)
	  || (thread->stepped_breakpoint
	      && thread->prev_pc == breakpoint_pc))
	regcache_write_pc (regcache, breakpoint_pc);
    }
}
5056
5057static bool
5058stepped_in_from (const frame_info_ptr &initial_frame, frame_id step_frame_id)
5059{
5060 frame_info_ptr frame = initial_frame;
5061
5062 for (frame = get_prev_frame (frame);
5063 frame != nullptr;
5064 frame = get_prev_frame (frame))
5065 {
5066 if (get_frame_id (frame) == step_frame_id)
5067 return true;
5068
5069 if (get_frame_type (frame) != INLINE_FRAME)
5070 break;
5071 }
5072
5073 return false;
5074}
5075
5076/* Look for an inline frame that is marked for skip.
5077 If PREV_FRAME is TRUE start at the previous frame,
5078 otherwise start at the current frame. Stop at the
5079 first non-inline frame, or at the frame where the
5080 step started. */
5081
static bool
inline_frame_is_marked_for_skip (bool prev_frame, struct thread_info *tp)
{
  frame_info_ptr frame = get_current_frame ();

  if (prev_frame)
    frame = get_prev_frame (frame);

  /* Walk outward through inline frames only, stopping once we reach
     the frame the step started in or a real (non-inline) frame.  */
  for (; frame != nullptr; frame = get_prev_frame (frame))
    {
      const char *fn = nullptr;
      symtab_and_line sal;
      struct symbol *sym;

      if (get_frame_id (frame) == tp->control.step_frame_id)
	break;
      if (get_frame_type (frame) != INLINE_FRAME)
	break;

      sal = find_frame_sal (frame);
      sym = get_frame_function (frame);

      /* FN stays nullptr when the frame has no function symbol; the
	 skip machinery can still match on file/line alone.  */
      if (sym != nullptr)
	fn = sym->print_name ();

      if (sal.line != 0
	  && function_name_is_marked_for_skip (fn, sal))
	return true;
    }

  return false;
}
5114
5115/* If the event thread has the stop requested flag set, pretend it
5116 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
5117 target_stop). */
5118
5119static bool
5120handle_stop_requested (struct execution_control_state *ecs)
5121{
5122 if (ecs->event_thread->stop_requested)
5123 {
5124 ecs->ws.set_stopped (GDB_SIGNAL_0);
5125 handle_signal_stop (ecs);
5126 return true;
5127 }
5128 return false;
5129}
5130
5131/* Auxiliary function that handles syscall entry/return events.
5132 It returns true if the inferior should keep going (and GDB
5133 should ignore the event), or false if the event deserves to be
5134 processed. */
5135
static bool
handle_syscall_event (struct execution_control_state *ecs)
{
  struct regcache *regcache;
  int syscall_number;

  context_switch (ecs);

  regcache = get_thread_regcache (ecs->event_thread);
  syscall_number = ecs->ws.syscall_number ();
  ecs->event_thread->set_stop_pc (regcache_read_pc (regcache));

  /* Only build a bpstat if some syscall catchpoint could match this
     syscall number; otherwise fall through to the generic path.  */
  if (catch_syscall_enabled ()
      && catching_syscall_number (syscall_number))
    {
      infrun_debug_printf ("syscall number=%d", syscall_number);

      ecs->event_thread->control.stop_bpstat
	= bpstat_stop_status_nowatch (ecs->event_thread->inf->aspace.get (),
				      ecs->event_thread->stop_pc (),
				      ecs->event_thread, ecs->ws);

      if (handle_stop_requested (ecs))
	return false;

      if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
	{
	  /* Catchpoint hit.  */
	  return false;
	}
    }

  /* Also honor a pending stop request on the path where no
     catchpoint applied to this syscall.  */
  if (handle_stop_requested (ecs))
    return false;

  /* If no catchpoint triggered for this, then keep going.  */
  keep_going (ecs);

  return true;
}
5176
5177/* Lazily fill in the execution_control_state's stop_func_* fields. */
5178
static void
fill_in_stop_func (struct gdbarch *gdbarch,
		   struct execution_control_state *ecs)
{
  if (!ecs->stop_func_filled_in)
    {
      const block *block;
      const general_symbol_info *gsi;

      /* Don't care about return value; stop_func_start and stop_func_name
	 will both be 0 if it doesn't work.  */
      find_pc_partial_function_sym (ecs->event_thread->stop_pc (),
				    &gsi,
				    &ecs->stop_func_start,
				    &ecs->stop_func_end,
				    &block);
      ecs->stop_func_name = gsi == nullptr ? nullptr : gsi->print_name ();

      /* The call to find_pc_partial_function_sym, above, will set
	 stop_func_start and stop_func_end to the start and end
	 of the range containing the stop pc.  If this range
	 contains the entry pc for the block (which is always the
	 case for contiguous blocks), advance stop_func_start past
	 the function's start offset and entrypoint.  Note that
	 stop_func_start is NOT advanced when in a range of a
	 non-contiguous block that does not contain the entry pc.  */
      if (block != nullptr
	  && ecs->stop_func_start <= block->entry_pc ()
	  && block->entry_pc () < ecs->stop_func_end)
	{
	  ecs->stop_func_start
	    += gdbarch_deprecated_function_start_offset (gdbarch);

	  /* PowerPC functions have a Local Entry Point (LEP) and a Global
	     Entry Point (GEP).  There is only one Entry Point (GEP = LEP) for
	     other architectures.  */
	  ecs->stop_func_alt_start = ecs->stop_func_start;

	  if (gdbarch_skip_entrypoint_p (gdbarch))
	    ecs->stop_func_start
	      = gdbarch_skip_entrypoint (gdbarch, ecs->stop_func_start);
	}

      /* Mark the fields valid so subsequent calls are no-ops.  */
      ecs->stop_func_filled_in = 1;
    }
}
5225
5226
5227/* Return the STOP_SOON field of the inferior pointed at by ECS. */
5228
5229static enum stop_kind
5230get_inferior_stop_soon (execution_control_state *ecs)
5231{
5232 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
5233
5234 gdb_assert (inf != nullptr);
5235 return inf->control.stop_soon;
5236}
5237
5238/* Poll for one event out of the current target. Store the resulting
5239 waitstatus in WS, and return the event ptid. Does not block. */
5240
5241static ptid_t
5242poll_one_curr_target (struct target_waitstatus *ws)
5243{
5244 ptid_t event_ptid;
5245
5246 overlay_cache_invalid = 1;
5247
5248 /* Flush target cache before starting to handle each event.
5249 Target was running and cache could be stale. This is just a
5250 heuristic. Running threads may modify target memory, but we
5251 don't get any event. */
5252 target_dcache_invalidate (current_program_space->aspace);
5253
5254 event_ptid = target_wait (minus_one_ptid, ws, TARGET_WNOHANG);
5255
5256 if (debug_infrun)
5257 print_target_wait_results (minus_one_ptid, event_ptid, *ws,
5258 current_inferior ()->process_target ());
5259
5260 return event_ptid;
5261}
5262
5263/* Wait for one event out of any target. */
5264
static wait_one_event
wait_one ()
{
  while (1)
    {
      /* First, poll each waitable target once without blocking.  */
      for (inferior *inf : all_inferiors ())
	{
	  process_stratum_target *target = inf->process_target ();
	  if (target == nullptr
	      || !target->is_async_p ()
	      || !target->threads_executing)
	    continue;

	  switch_to_inferior_no_thread (inf);

	  wait_one_event event;
	  event.target = target;
	  event.ptid = poll_one_curr_target (&event.ws);

	  if (event.ws.kind () == TARGET_WAITKIND_NO_RESUMED)
	    {
	      /* If nothing is resumed, remove the target from the
		 event loop.  */
	      target_async (false);
	    }
	  else if (event.ws.kind () != TARGET_WAITKIND_IGNORE)
	    return event;
	}

      /* Block waiting for some event.  */

      fd_set readfds;
      int nfds = 0;

      FD_ZERO (&readfds);

      /* Build the select set from each waitable target's async event
	 file descriptor.  */
      for (inferior *inf : all_inferiors ())
	{
	  process_stratum_target *target = inf->process_target ();
	  if (target == nullptr
	      || !target->is_async_p ()
	      || !target->threads_executing)
	    continue;

	  int fd = target->async_wait_fd ();
	  FD_SET (fd, &readfds);
	  if (nfds <= fd)
	    nfds = fd + 1;
	}

      if (nfds == 0)
	{
	  /* No waitable targets left.  All must be stopped.  */
	  infrun_debug_printf ("no waitable targets left");

	  target_waitstatus ws;
	  ws.set_no_resumed ();
	  return {nullptr, minus_one_ptid, std::move (ws)};
	}

      QUIT;

      int numfds = interruptible_select (nfds, &readfds, 0, nullptr, 0);
      if (numfds < 0)
	{
	  /* Retry if the select was merely interrupted by a signal.  */
	  if (errno == EINTR)
	    continue;
	  else
	    perror_with_name ("interruptible_select");
	}
    }
}
5337
5338/* Save the thread's event and stop reason to process it later. */
5339
static void
save_waitstatus (struct thread_info *tp, const target_waitstatus &ws)
{
  infrun_debug_printf ("saving status %s for %s",
		       ws.to_string ().c_str (),
		       tp->ptid.to_string ().c_str ());

  /* Record for later.  */
  tp->set_pending_waitstatus (ws);

  /* For SIGTRAP stops, also record why the thread stopped, since the
     target's "stopped by" state is only valid while the event is the
     most recent one for that target.  */
  if (ws.kind () == TARGET_WAITKIND_STOPPED
      && ws.sig () == GDB_SIGNAL_TRAP)
    {
      struct regcache *regcache = get_thread_regcache (tp);
      const address_space *aspace = tp->inf->aspace.get ();
      CORE_ADDR pc = regcache_read_pc (regcache);

      adjust_pc_after_break (tp, tp->pending_waitstatus ());

      scoped_restore_current_thread restore_thread;
      switch_to_thread (tp);

      /* Ask the target first; fall back to checking GDB's own record
	 of inserted breakpoints when the target can't tell, and
	 finally infer a completed single-step.  The order of these
	 checks matters.  */
      if (target_stopped_by_watchpoint ())
	tp->set_stop_reason (TARGET_STOPPED_BY_WATCHPOINT);
      else if (target_supports_stopped_by_sw_breakpoint ()
	       && target_stopped_by_sw_breakpoint ())
	tp->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT);
      else if (target_supports_stopped_by_hw_breakpoint ()
	       && target_stopped_by_hw_breakpoint ())
	tp->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT);
      else if (!target_supports_stopped_by_hw_breakpoint ()
	       && hardware_breakpoint_inserted_here_p (aspace, pc))
	tp->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT);
      else if (!target_supports_stopped_by_sw_breakpoint ()
	       && software_breakpoint_inserted_here_p (aspace, pc))
	tp->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT);
      else if (!thread_has_single_step_breakpoints_set (tp)
	       && currently_stepping (tp))
	tp->set_stop_reason (TARGET_STOPPED_BY_SINGLE_STEP);
    }
}
5381
5382/* Mark the non-executing threads accordingly. In all-stop, all
5383 threads of all processes are stopped when we get any event
5384 reported. In non-stop mode, only the event thread stops. */
5385
static void
mark_non_executing_threads (process_stratum_target *target,
			    ptid_t event_ptid,
			    const target_waitstatus &ws)
{
  /* MARK_PTID selects which threads to mark: everything (all-stop),
     the whole process (non-stop process exit), or just the event
     thread (non-stop).  */
  ptid_t mark_ptid;

  if (!target_is_non_stop_p ())
    mark_ptid = minus_one_ptid;
  else if (ws.kind () == TARGET_WAITKIND_SIGNALLED
	   || ws.kind () == TARGET_WAITKIND_EXITED)
    {
      /* If we're handling a process exit in non-stop mode, even
	 though threads haven't been deleted yet, one would think
	 that there is nothing to do, as threads of the dead process
	 will be soon deleted, and threads of any other process were
	 left running.  However, on some targets, threads survive a
	 process exit event.  E.g., for the "checkpoint" command,
	 when the current checkpoint/fork exits, linux-fork.c
	 automatically switches to another fork from within
	 target_mourn_inferior, by associating the same
	 inferior/thread to another fork.  We haven't mourned yet at
	 this point, but we must mark any threads left in the
	 process as not-executing so that finish_thread_state marks
	 them stopped (in the user's perspective) if/when we present
	 the stop to the user.  */
      mark_ptid = ptid_t (event_ptid.pid ());
    }
  else
    mark_ptid = event_ptid;

  set_executing (target, mark_ptid, false);

  /* Likewise the resumed flag.  */
  set_resumed (target, mark_ptid, false);
}
5422
5423/* Handle one event after stopping threads. If the eventing thread
5424 reports back any interesting event, we leave it pending. If the
5425 eventing thread was in the middle of a displaced step, we
5426 cancel/finish it, and unless the thread's inferior is being
5427 detached, put the thread back in the step-over chain. Returns true
5428 if there are no resumed threads left in the target (thus there's no
5429 point in waiting further), false otherwise. */
5430
static bool
handle_one (const wait_one_event &event)
{
  infrun_debug_printf
    ("%s %s", event.ws.to_string ().c_str (),
     event.ptid.to_string ().c_str ());

  if (event.ws.kind () == TARGET_WAITKIND_NO_RESUMED)
    {
      /* All resumed threads exited.  */
      return true;
    }
  else if (event.ws.kind () == TARGET_WAITKIND_THREAD_EXITED
	   || event.ws.kind () == TARGET_WAITKIND_EXITED
	   || event.ws.kind () == TARGET_WAITKIND_SIGNALLED)
    {
      /* One thread/process exited/signalled.  */

      thread_info *t = nullptr;

      /* The target may have reported just a pid.  If so, try
	 the first non-exited thread.  */
      if (event.ptid.is_pid ())
	{
	  int pid = event.ptid.pid ();
	  inferior *inf = find_inferior_pid (event.target, pid);
	  for (thread_info *tp : inf->non_exited_threads ())
	    {
	      t = tp;
	      break;
	    }

	  /* If there is no available thread, the event would
	     have to be appended to a per-inferior event list,
	     which does not exist (and if it did, we'd have
	     to adjust run control command to be able to
	     resume such an inferior).  We assert here instead
	     of going into an infinite loop.  */
	  gdb_assert (t != nullptr);

	  infrun_debug_printf
	    ("using %s", t->ptid.to_string ().c_str ());
	}
      else
	{
	  t = event.target->find_thread (event.ptid);
	  /* Check if this is the first time we see this thread.
	     Don't bother adding if it individually exited.  */
	  if (t == nullptr
	      && event.ws.kind () != TARGET_WAITKIND_THREAD_EXITED)
	    t = add_thread (event.target, event.ptid);
	}

      if (t != nullptr)
	{
	  /* Set the threads as non-executing to avoid
	     another stop attempt on them.  */
	  switch_to_thread_no_regs (t);
	  mark_non_executing_threads (event.target, event.ptid,
				      event.ws);
	  save_waitstatus (t, event.ws);
	  t->stop_requested = false;

	  if (event.ws.kind () == TARGET_WAITKIND_THREAD_EXITED)
	    {
	      /* An exited thread can't have a pending displaced step
		 to re-queue; finishing it must always succeed.  */
	      if (displaced_step_finish (t, event.ws)
		  != DISPLACED_STEP_FINISH_STATUS_OK)
		{
		  gdb_assert_not_reached ("displaced_step_finish on "
					  "exited thread failed");
		}
	    }
	}
    }
  else
    {
      thread_info *t = event.target->find_thread (event.ptid);
      if (t == nullptr)
	t = add_thread (event.target, event.ptid);

      t->stop_requested = false;
      t->set_executing (false);
      t->set_resumed (false);
      t->control.may_range_step = 0;

      /* This may be the first time we see the inferior report
	 a stop.  */
      if (t->inf->needs_setup)
	{
	  switch_to_thread_no_regs (t);
	  setup_inferior (0);
	}

      if (event.ws.kind () == TARGET_WAITKIND_STOPPED
	  && event.ws.sig () == GDB_SIGNAL_0)
	{
	  /* We caught the event that we intended to catch, so
	     there's no event to save as pending.  */

	  if (displaced_step_finish (t, event.ws)
	      == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
	    {
	      /* Add it back to the step-over queue.  */
	      infrun_debug_printf
		("displaced-step of %s canceled",
		 t->ptid.to_string ().c_str ());

	      t->control.trap_expected = 0;
	      if (!t->inf->detaching)
		global_thread_step_over_chain_enqueue (t);
	    }
	}
      else
	{
	  struct regcache *regcache;

	  infrun_debug_printf
	    ("target_wait %s, saving status for %s",
	     event.ws.to_string ().c_str (),
	     t->ptid.to_string ().c_str ());

	  /* Record for later.  */
	  save_waitstatus (t, event.ws);

	  if (displaced_step_finish (t, event.ws)
	      == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
	    {
	      /* Add it back to the step-over queue.  */
	      t->control.trap_expected = 0;
	      if (!t->inf->detaching)
		global_thread_step_over_chain_enqueue (t);
	    }

	  regcache = get_thread_regcache (t);
	  t->set_stop_pc (regcache_read_pc (regcache));

	  infrun_debug_printf ("saved stop_pc=%s for %s "
			       "(currently_stepping=%d)",
			       paddress (current_inferior ()->arch (),
					 t->stop_pc ()),
			       t->ptid.to_string ().c_str (),
			       currently_stepping (t));
	}
    }

  return false;
}
5578
5579/* Helper for stop_all_threads. wait_one waits for events until it
5580 sees a TARGET_WAITKIND_NO_RESUMED event. When it sees one, it
5581 disables target_async for the target to stop waiting for events
5582 from it. TARGET_WAITKIND_NO_RESUMED can be delayed though,
5583 consider, debugging against gdbserver:
5584
   #1 - Threads 1-5 are running, and thread 1 hits a breakpoint.

   #2 - gdb processes the breakpoint hit for thread 1, stops all
        threads, and steps thread 1 over the breakpoint.  While
        stopping threads, some other threads reported interesting
        events, which were left pending in the thread's objects
        (infrun's queue).

   #3 - Thread 1 exits (it stepped an exit syscall), and gdbserver
        reports the thread exit for thread 1.  The event ends up in
        remote's stop reply queue.

   #4 - That was the last resumed thread, so gdbserver reports
        no-resumed, and that event also ends up in remote's stop
        reply queue, queued after the thread exit from #3.

   #5 - gdb processes the thread exit event, which finishes the
        step-over, and so gdb restarts all threads (threads with
        pending events are left marked resumed, but aren't set
        executing).  The no-resumed event is still left pending in
        the remote stop reply queue.

   #6 - Since there are now resumed threads with pending breakpoint
        hits, gdb picks one at random to process next.

   #7 - gdb picks the breakpoint hit for thread 2 this time, and that
        breakpoint also needs to be stepped over, so gdb stops all
        threads again.

   #8 - stop_all_threads counts number of expected stops and calls
        wait_one once for each.

   #9 - The first wait_one call collects the no-resumed event from #4
        above.

   #10 - Seeing the no-resumed event, wait_one disables target async
         for the remote target, to stop waiting for events from it.
         wait_one from here on always return no-resumed directly
         without reaching the target.

   #11 - stop_all_threads still hasn't seen all the stops it expects,
         so it does another pass.

   #12 - Since the remote target is not async (disabled in #10),
         wait_one doesn't wait on it, so it won't see the expected
         stops, and instead returns no-resumed directly.

   #13 - stop_all_threads still hasn't seen all the stops, so it
         does another pass.  Goto #12, looping forever.
5634
5635 To handle this, we explicitly (re-)enable target async on all
5636 targets that can async every time stop_all_threads goes wait for
5637 the expected stops. */
5638
5639static void
5640reenable_target_async ()
5641{
5642 for (inferior *inf : all_inferiors ())
5643 {
5644 process_stratum_target *target = inf->process_target ();
5645 if (target != nullptr
5646 && target->threads_executing
5647 && target->can_async_p ()
5648 && !target->is_async_p ())
5649 {
5650 switch_to_inferior_no_thread (inf);
5651 target_async (1);
5652 }
5653 }
5654}
5655
/* See infrun.h.

   Stop all threads — of all non-exited process targets, or only of
   INF when INF is non-null.  REASON is used for debug output only.  */

void
stop_all_threads (const char *reason, inferior *inf)
{
  /* We may need multiple passes to discover all threads.  */
  int pass;
  int iterations = 0;

  /* On an all-stop-only setup everything is already stopped whenever
     we get an event, so this function only makes sense when at least
     one non-stop target exists.  */
  gdb_assert (exists_non_stop_target ());

  INFRUN_SCOPED_DEBUG_START_END ("reason=%s, inf=%d", reason,
				 inf != nullptr ? inf->num : -1);

  infrun_debug_show_threads ("non-exited threads",
			     all_non_exited_threads ());

  /* We switch threads and targets freely below; restore the
     user-selected thread when we're done.  */
  scoped_restore_current_thread restore_thread;

  /* Enable thread events on relevant targets.  */
  for (auto *target : all_non_exited_process_targets ())
    {
      if (inf != nullptr && inf->process_target () != target)
	continue;

      switch_to_target_no_thread (target);
      target_thread_events (true);
    }

  SCOPE_EXIT
    {
      /* Disable thread events on relevant targets.  */
      for (auto *target : all_non_exited_process_targets ())
	{
	  if (inf != nullptr && inf->process_target () != target)
	    continue;

	  switch_to_target_no_thread (target);
	  target_thread_events (false);
	}

      /* Use debug_prefixed_printf directly to get a meaningful function
	 name.  */
      if (debug_infrun)
	debug_prefixed_printf ("infrun", "stop_all_threads", "done");
    };

  /* Request threads to stop, and then wait for the stops.  Because
     threads we already know about can spawn more threads while we're
     trying to stop them, and we only learn about new threads when we
     update the thread list, do this in a loop, and keep iterating
     until two passes find no threads that need to be stopped.  */
  for (pass = 0; pass < 2; pass++, iterations++)
    {
      infrun_debug_printf ("pass=%d, iterations=%d", pass, iterations);
      while (1)
	{
	  int waits_needed = 0;

	  /* Refresh our view of each relevant target's threads; this
	     is how we learn about newly spawned threads.  */
	  for (auto *target : all_non_exited_process_targets ())
	    {
	      if (inf != nullptr && inf->process_target () != target)
		continue;

	      switch_to_target_no_thread (target);
	      update_thread_list ();
	    }

	  /* Go through all threads looking for threads that we need
	     to tell the target to stop.  */
	  for (thread_info *t : all_non_exited_threads ())
	    {
	      if (inf != nullptr && t->inf != inf)
		continue;

	      /* For a single-target setting with an all-stop target,
		 we would not even arrive here.  For a multi-target
		 setting, until GDB is able to handle a mixture of
		 all-stop and non-stop targets, simply skip all-stop
		 targets' threads.  This should be fine due to the
		 protection of 'check_multi_target_resumption'.  */

	      switch_to_thread_no_regs (t);
	      if (!target_is_non_stop_p ())
		continue;

	      if (t->executing ())
		{
		  /* If already stopping, don't request a stop again.
		     We just haven't seen the notification yet.  */
		  if (!t->stop_requested)
		    {
		      infrun_debug_printf ("  %s executing, need stop",
					   t->ptid.to_string ().c_str ());
		      target_stop (t->ptid);
		      t->stop_requested = true;
		    }
		  else
		    {
		      infrun_debug_printf ("  %s executing, already stopping",
					   t->ptid.to_string ().c_str ());
		    }

		  if (t->stop_requested)
		    waits_needed++;
		}
	      else
		{
		  infrun_debug_printf ("  %s not executing",
				       t->ptid.to_string ().c_str ());

		  /* The thread may be not executing, but still be
		     resumed with a pending status to process.  */
		  t->set_resumed (false);
		}
	    }

	  if (waits_needed == 0)
	    break;

	  /* If we find new threads on the second iteration, restart
	     over.  We want to see two iterations in a row with all
	     threads stopped.  (Resetting PASS to -1 makes the outer
	     loop run two more full passes.)  */
	  if (pass > 0)
	    pass = -1;

	  /* Make sure async-capable targets are actually watched for
	     events before waiting; see the comment above
	     reenable_target_async for the hang this avoids.  */
	  reenable_target_async ();

	  for (int i = 0; i < waits_needed; i++)
	    {
	      wait_one_event event = wait_one ();
	      if (handle_one (event))
		break;
	    }
	}
    }
}
5793
/* Handle a TARGET_WAITKIND_NO_RESUMED event.  Return true if we
   handled the event and should continue waiting.  Return false if we
   should stop and report the event to the user.  */

static bool
handle_no_resumed (struct execution_control_state *ecs)
{
  if (target_can_async_p ())
    {
      /* Check whether any UI is synchronously waiting for an
	 execution command to finish (prompt blocked).  */
      bool any_sync = false;

      for (ui *ui : all_uis ())
	{
	  if (ui->prompt_state == PROMPT_BLOCKED)
	    {
	      any_sync = true;
	      break;
	    }
	}
      if (!any_sync)
	{
	  /* There were no unwaited-for children left in the target, but,
	     we're not synchronously waiting for events either.  Just
	     ignore.  */

	  infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED (ignoring: bg)");
	  prepare_to_wait (ecs);
	  return true;
	}
    }

  /* Otherwise, if we were running a synchronous execution command, we
     may need to cancel it and give the user back the terminal.

     In non-stop mode, the target can't tell whether we've already
     consumed previous stop events, so it can end up sending us a
     no-resumed event like so:

     #0 - thread 1 is left stopped

     #1 - thread 2 is resumed and hits breakpoint
	  -> TARGET_WAITKIND_STOPPED

     #2 - thread 3 is resumed and exits
	  this is the last resumed thread, so
	  -> TARGET_WAITKIND_NO_RESUMED

     #3 - gdb processes stop for thread 2 and decides to re-resume
	  it.

     #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
	  thread 2 is now resumed, so the event should be ignored.

     IOW, if the stop for thread 2 doesn't end a foreground command,
     then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
     event.  But it could be that the event meant that thread 2 itself
     (or whatever other thread was the last resumed thread) exited.

     To address this we refresh the thread list and check whether we
     have resumed threads _now_.  In the example above, this removes
     thread 3 from the thread list.  If thread 2 was re-resumed, we
     ignore this event.  If we find no thread resumed, then we cancel
     the synchronous command and show "no unwaited-for children left"
     to the user.  */

  inferior *curr_inf = current_inferior ();

  scoped_restore_current_thread restore_thread;
  update_thread_list ();

  /* If:

     - the current target has no thread executing, and
     - the current inferior is native, and
     - the current inferior is the one which has the terminal, and
     - we did nothing,

     then a Ctrl-C from this point on would remain stuck in the
     kernel, until a thread resumes and dequeues it.  That would
     result in the GDB CLI not reacting to Ctrl-C, not able to
     interrupt the program.  To address this, if the current inferior
     no longer has any thread executing, we give the terminal to some
     other inferior that has at least one thread executing.  */
  bool swap_terminal = true;

  /* Whether to ignore this TARGET_WAITKIND_NO_RESUMED event, or
     whether to report it to the user.  */
  bool ignore_event = false;

  for (thread_info *thread : all_non_exited_threads ())
    {
      /* Hand the terminal to the first executing thread we find, if
	 it belongs to some other inferior.  Only done once.  */
      if (swap_terminal && thread->executing ())
	{
	  if (thread->inf != curr_inf)
	    {
	      target_terminal::ours ();

	      switch_to_thread (thread);
	      target_terminal::inferior ();
	    }
	  swap_terminal = false;
	}

      if (!ignore_event && thread->resumed ())
	{
	  /* Either there were no unwaited-for children left in the
	     target at some point, but there are now, or some target
	     other than the eventing one has unwaited-for children
	     left.  Just ignore.  */
	  infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED "
			       "(ignoring: found resumed)");

	  ignore_event = true;
	}

      /* Stop scanning once both decisions have been made.  */
      if (ignore_event && !swap_terminal)
	break;
    }

  if (ignore_event)
    {
      switch_to_inferior_no_thread (curr_inf);
      prepare_to_wait (ecs);
      return true;
    }

  /* Go ahead and report the event.  */
  return false;
}
5923
/* Handle a TARGET_WAITKIND_THREAD_EXITED event.  Return true if we
   handled the event and should continue waiting.  Return false if we
   should stop and report the event to the user.  */

static bool
handle_thread_exited (execution_control_state *ecs)
{
  context_switch (ecs);

  /* Clear these so we don't re-start the thread stepping over a
     breakpoint/watchpoint.  */
  ecs->event_thread->stepping_over_breakpoint = 0;
  ecs->event_thread->stepping_over_watchpoint = 0;

  /* If the thread had an FSM, then abort the command.  But only after
     finishing the step over, as in non-stop mode, aborting this
     thread's command should not interfere with other threads.  We
     must check this before finish_step_over, however, which may
     update the thread list and delete the event thread.  */
  bool abort_cmd = (ecs->event_thread->thread_fsm () != nullptr);

  /* Mark the thread exited right now, because finish_step_over may
     update the thread list and that may delete the thread silently
     (depending on target), while we always want to emit the "[Thread
     ... exited]" notification.  Don't actually delete the thread yet,
     because we need to pass its pointer down to finish_step_over.  */
  set_thread_exited (ecs->event_thread);

  /* Maybe the thread was doing a step-over, if so release
     resources and start any further pending step-overs.

     If we are on a non-stop target and the thread was doing an
     in-line step, this also restarts the other threads.  */
  int ret = finish_step_over (ecs);

  /* finish_step_over returns true if it moves ecs' wait status
     back into the thread, so that we go handle another pending
     event before this one.  But we know it never does that if
     the event thread has exited.  */
  gdb_assert (ret == 0);

  if (abort_cmd)
    {
      /* We're stopping for the thread exit event.  Switch to the
	 event thread again, as finish_step_over may have switched
	 threads.  */
      switch_to_thread (ecs->event_thread);
      ecs->event_thread = nullptr;
      return false;
    }

  /* If finish_step_over started a new in-line step-over, don't
     try to restart anything else.  */
  if (step_over_info_valid_p ())
    {
      delete_thread (ecs->event_thread);
      return true;
    }

  /* Maybe we are on an all-stop target and we got this event
     while doing a step-like command on another thread.  If so,
     go back to doing that.  If this thread was stepping,
     switch_back_to_stepped_thread will consider that the thread
     was interrupted mid-step and will try keep stepping it.  We
     don't want that, the thread is gone.  So clear the proceed
     status so it doesn't do that.  */
  clear_proceed_status_thread (ecs->event_thread);
  if (switch_back_to_stepped_thread (ecs))
    {
      delete_thread (ecs->event_thread);
      return true;
    }

  /* Capture what we need from the event thread before deleting it;
     INF and SLOCK_APPLIES are used below.  */
  inferior *inf = ecs->event_thread->inf;
  bool slock_applies = schedlock_applies (ecs->event_thread);

  delete_thread (ecs->event_thread);
  ecs->event_thread = nullptr;

  /* Continue handling the event as if we had gotten a
     TARGET_WAITKIND_NO_RESUMED.  */
  auto handle_as_no_resumed = [ecs] ()
  {
    /* handle_no_resumed doesn't really look at the event kind, but
       normal_stop does.  */
    ecs->ws.set_no_resumed ();
    ecs->event_thread = nullptr;
    ecs->ptid = minus_one_ptid;

    /* Re-record the last target status.  */
    set_last_target_status (ecs->target, ecs->ptid, ecs->ws);

    return handle_no_resumed (ecs);
  };

  /* If we are on an all-stop target, the target has stopped all
     threads to report the event.  We don't actually want to
     stop, so restart the threads.  */
  if (!target_is_non_stop_p ())
    {
      if (slock_applies)
	{
	  /* Since the target is !non-stop, then everything is stopped
	     at this point, and we can't assume we'll get further
	     events until we resume the target again.  Handle this
	     event like if it were a TARGET_WAITKIND_NO_RESUMED.  Note
	     this refreshes the thread list and checks whether there
	     are other resumed threads before deciding whether to
	     print "no-unwaited-for left".  This is important because
	     the user could have done:

	     (gdb) set scheduler-locking on
	     (gdb) thread 1
	     (gdb) c&
	     (gdb) thread 2
	     (gdb) c

	     ... and only one of the threads exited.  */
	  return handle_as_no_resumed ();
	}
      else
	{
	  /* Switch to the first non-exited thread we can find, and
	     resume.  */
	  auto range = inf->non_exited_threads ();
	  if (range.begin () == range.end ())
	    {
	      /* Looks like the target reported a
		 TARGET_WAITKIND_THREAD_EXITED for its last known
		 thread.  */
	      return handle_as_no_resumed ();
	    }
	  thread_info *non_exited_thread = *range.begin ();
	  switch_to_thread (non_exited_thread);
	  insert_breakpoints ();
	  resume (GDB_SIGNAL_0);
	}
    }

  prepare_to_wait (ecs);
  return true;
}
6066
6067/* Given an execution control state that has been freshly filled in by
6068 an event from the inferior, figure out what it means and take
6069 appropriate action.
6070
6071 The alternatives are:
6072
6073 1) stop_waiting and return; to really stop and return to the
6074 debugger.
6075
6076 2) keep_going and return; to wait for the next event (set
6077 ecs->event_thread->stepping_over_breakpoint to 1 to single step
6078 once). */
6079
6080static void
6081handle_inferior_event (struct execution_control_state *ecs)
6082{
6083 /* Make sure that all temporary struct value objects that were
6084 created during the handling of the event get deleted at the
6085 end. */
6086 scoped_value_mark free_values;
6087
6088 infrun_debug_printf ("%s", ecs->ws.to_string ().c_str ());
6089
6090 if (ecs->ws.kind () == TARGET_WAITKIND_IGNORE)
6091 {
6092 /* We had an event in the inferior, but we are not interested in
6093 handling it at this level. The lower layers have already
6094 done what needs to be done, if anything.
6095
6096 One of the possible circumstances for this is when the
6097 inferior produces output for the console. The inferior has
6098 not stopped, and we are ignoring the event. Another possible
6099 circumstance is any event which the lower level knows will be
6100 reported multiple times without an intervening resume. */
6101 prepare_to_wait (ecs);
6102 return;
6103 }
6104
6105 if (ecs->ws.kind () == TARGET_WAITKIND_NO_RESUMED
6106 && handle_no_resumed (ecs))
6107 return;
6108
6109 /* Cache the last target/ptid/waitstatus. */
6110 set_last_target_status (ecs->target, ecs->ptid, ecs->ws);
6111
6112 /* Always clear state belonging to the previous time we stopped. */
6113 stop_stack_dummy = STOP_NONE;
6114
6115 if (ecs->ws.kind () == TARGET_WAITKIND_NO_RESUMED)
6116 {
6117 /* No unwaited-for children left. IOW, all resumed children
6118 have exited. */
6119 stop_waiting (ecs);
6120 return;
6121 }
6122
6123 if (ecs->ws.kind () != TARGET_WAITKIND_EXITED
6124 && ecs->ws.kind () != TARGET_WAITKIND_SIGNALLED)
6125 {
6126 ecs->event_thread = ecs->target->find_thread (ecs->ptid);
6127 /* If it's a new thread, add it to the thread database. */
6128 if (ecs->event_thread == nullptr)
6129 ecs->event_thread = add_thread (ecs->target, ecs->ptid);
6130
6131 /* Disable range stepping. If the next step request could use a
6132 range, this will be end up re-enabled then. */
6133 ecs->event_thread->control.may_range_step = 0;
6134 }
6135
6136 /* Dependent on valid ECS->EVENT_THREAD. */
6137 adjust_pc_after_break (ecs->event_thread, ecs->ws);
6138
6139 /* Dependent on the current PC value modified by adjust_pc_after_break. */
6140 reinit_frame_cache ();
6141
6142 breakpoint_retire_moribund ();
6143
6144 /* First, distinguish signals caused by the debugger from signals
6145 that have to do with the program's own actions. Note that
6146 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
6147 on the operating system version. Here we detect when a SIGILL or
6148 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
6149 something similar for SIGSEGV, since a SIGSEGV will be generated
6150 when we're trying to execute a breakpoint instruction on a
6151 non-executable stack. This happens for call dummy breakpoints
6152 for architectures like SPARC that place call dummies on the
6153 stack. */
6154 if (ecs->ws.kind () == TARGET_WAITKIND_STOPPED
6155 && (ecs->ws.sig () == GDB_SIGNAL_ILL
6156 || ecs->ws.sig () == GDB_SIGNAL_SEGV
6157 || ecs->ws.sig () == GDB_SIGNAL_EMT))
6158 {
6159 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
6160
6161 if (breakpoint_inserted_here_p (ecs->event_thread->inf->aspace.get (),
6162 regcache_read_pc (regcache)))
6163 {
6164 infrun_debug_printf ("Treating signal as SIGTRAP");
6165 ecs->ws.set_stopped (GDB_SIGNAL_TRAP);
6166 }
6167 }
6168
6169 mark_non_executing_threads (ecs->target, ecs->ptid, ecs->ws);
6170
6171 switch (ecs->ws.kind ())
6172 {
6173 case TARGET_WAITKIND_LOADED:
6174 {
6175 context_switch (ecs);
6176 /* Ignore gracefully during startup of the inferior, as it might
6177 be the shell which has just loaded some objects, otherwise
6178 add the symbols for the newly loaded objects. Also ignore at
6179 the beginning of an attach or remote session; we will query
6180 the full list of libraries once the connection is
6181 established. */
6182
6183 stop_kind stop_soon = get_inferior_stop_soon (ecs);
6184 if (stop_soon == NO_STOP_QUIETLY)
6185 {
6186 struct regcache *regcache;
6187
6188 regcache = get_thread_regcache (ecs->event_thread);
6189
6190 handle_solib_event ();
6191
6192 ecs->event_thread->set_stop_pc (regcache_read_pc (regcache));
6193 address_space *aspace = ecs->event_thread->inf->aspace.get ();
6194 ecs->event_thread->control.stop_bpstat
6195 = bpstat_stop_status_nowatch (aspace,
6196 ecs->event_thread->stop_pc (),
6197 ecs->event_thread, ecs->ws);
6198
6199 if (handle_stop_requested (ecs))
6200 return;
6201
6202 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
6203 {
6204 /* A catchpoint triggered. */
6205 process_event_stop_test (ecs);
6206 return;
6207 }
6208
6209 /* If requested, stop when the dynamic linker notifies
6210 gdb of events. This allows the user to get control
6211 and place breakpoints in initializer routines for
6212 dynamically loaded objects (among other things). */
6213 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6214 if (stop_on_solib_events)
6215 {
6216 /* Make sure we print "Stopped due to solib-event" in
6217 normal_stop. */
6218 stop_print_frame = true;
6219
6220 stop_waiting (ecs);
6221 return;
6222 }
6223 }
6224
6225 /* If we are skipping through a shell, or through shared library
6226 loading that we aren't interested in, resume the program. If
6227 we're running the program normally, also resume. */
6228 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
6229 {
6230 /* Loading of shared libraries might have changed breakpoint
6231 addresses. Make sure new breakpoints are inserted. */
6232 if (stop_soon == NO_STOP_QUIETLY)
6233 insert_breakpoints ();
6234 resume (GDB_SIGNAL_0);
6235 prepare_to_wait (ecs);
6236 return;
6237 }
6238
6239 /* But stop if we're attaching or setting up a remote
6240 connection. */
6241 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
6242 || stop_soon == STOP_QUIETLY_REMOTE)
6243 {
6244 infrun_debug_printf ("quietly stopped");
6245 stop_waiting (ecs);
6246 return;
6247 }
6248
6249 internal_error (_("unhandled stop_soon: %d"), (int) stop_soon);
6250 }
6251
6252 case TARGET_WAITKIND_SPURIOUS:
6253 if (handle_stop_requested (ecs))
6254 return;
6255 context_switch (ecs);
6256 resume (GDB_SIGNAL_0);
6257 prepare_to_wait (ecs);
6258 return;
6259
6260 case TARGET_WAITKIND_THREAD_CREATED:
6261 if (handle_stop_requested (ecs))
6262 return;
6263 context_switch (ecs);
6264 if (!switch_back_to_stepped_thread (ecs))
6265 keep_going (ecs);
6266 return;
6267
6268 case TARGET_WAITKIND_THREAD_EXITED:
6269 if (handle_thread_exited (ecs))
6270 return;
6271 stop_waiting (ecs);
6272 break;
6273
6274 case TARGET_WAITKIND_EXITED:
6275 case TARGET_WAITKIND_SIGNALLED:
6276 {
6277 /* Depending on the system, ecs->ptid may point to a thread or
6278 to a process. On some targets, target_mourn_inferior may
6279 need to have access to the just-exited thread. That is the
6280 case of GNU/Linux's "checkpoint" support, for example.
6281 Call the switch_to_xxx routine as appropriate. */
6282 thread_info *thr = ecs->target->find_thread (ecs->ptid);
6283 if (thr != nullptr)
6284 switch_to_thread (thr);
6285 else
6286 {
6287 inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
6288 switch_to_inferior_no_thread (inf);
6289 }
6290 }
6291 handle_vfork_child_exec_or_exit (0);
6292 target_terminal::ours (); /* Must do this before mourn anyway. */
6293
6294 /* Clearing any previous state of convenience variables. */
6295 clear_exit_convenience_vars ();
6296
6297 if (ecs->ws.kind () == TARGET_WAITKIND_EXITED)
6298 {
6299 /* Record the exit code in the convenience variable $_exitcode, so
6300 that the user can inspect this again later. */
6301 set_internalvar_integer (lookup_internalvar ("_exitcode"),
6302 (LONGEST) ecs->ws.exit_status ());
6303
6304 /* Also record this in the inferior itself. */
6305 current_inferior ()->has_exit_code = true;
6306 current_inferior ()->exit_code = (LONGEST) ecs->ws.exit_status ();
6307
6308 /* Support the --return-child-result option. */
6309 return_child_result_value = ecs->ws.exit_status ();
6310
6311 interps_notify_exited (ecs->ws.exit_status ());
6312 }
6313 else
6314 {
6315 struct gdbarch *gdbarch = current_inferior ()->arch ();
6316
6317 if (gdbarch_gdb_signal_to_target_p (gdbarch))
6318 {
6319 /* Set the value of the internal variable $_exitsignal,
6320 which holds the signal uncaught by the inferior. */
6321 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
6322 gdbarch_gdb_signal_to_target (gdbarch,
6323 ecs->ws.sig ()));
6324 }
6325 else
6326 {
6327 /* We don't have access to the target's method used for
6328 converting between signal numbers (GDB's internal
6329 representation <-> target's representation).
6330 Therefore, we cannot do a good job at displaying this
6331 information to the user. It's better to just warn
6332 her about it (if infrun debugging is enabled), and
6333 give up. */
6334 infrun_debug_printf ("Cannot fill $_exitsignal with the correct "
6335 "signal number.");
6336 }
6337
6338 interps_notify_signal_exited (ecs->ws.sig ());
6339 }
6340
6341 gdb_flush (gdb_stdout);
6342 target_mourn_inferior (inferior_ptid);
6343 stop_print_frame = false;
6344 stop_waiting (ecs);
6345 return;
6346
6347 case TARGET_WAITKIND_FORKED:
6348 case TARGET_WAITKIND_VFORKED:
6349 case TARGET_WAITKIND_THREAD_CLONED:
6350
6351 displaced_step_finish (ecs->event_thread, ecs->ws);
6352
6353 /* Start a new step-over in another thread if there's one that
6354 needs it. */
6355 start_step_over ();
6356
6357 context_switch (ecs);
6358
6359 /* Immediately detach breakpoints from the child before there's
6360 any chance of letting the user delete breakpoints from the
6361 breakpoint lists. If we don't do this early, it's easy to
6362 leave left over traps in the child, vis: "break foo; catch
6363 fork; c; <fork>; del; c; <child calls foo>". We only follow
6364 the fork on the last `continue', and by that time the
6365 breakpoint at "foo" is long gone from the breakpoint table.
6366 If we vforked, then we don't need to unpatch here, since both
6367 parent and child are sharing the same memory pages; we'll
6368 need to unpatch at follow/detach time instead to be certain
6369 that new breakpoints added between catchpoint hit time and
6370 vfork follow are detached. */
6371 if (ecs->ws.kind () == TARGET_WAITKIND_FORKED)
6372 {
6373 /* This won't actually modify the breakpoint list, but will
6374 physically remove the breakpoints from the child. */
6375 detach_breakpoints (ecs->ws.child_ptid ());
6376 }
6377
6378 delete_just_stopped_threads_single_step_breakpoints ();
6379
6380 /* In case the event is caught by a catchpoint, remember that
6381 the event is to be followed at the next resume of the thread,
6382 and not immediately. */
6383 ecs->event_thread->pending_follow = ecs->ws;
6384
6385 ecs->event_thread->set_stop_pc
6386 (regcache_read_pc (get_thread_regcache (ecs->event_thread)));
6387
6388 ecs->event_thread->control.stop_bpstat
6389 = bpstat_stop_status_nowatch (ecs->event_thread->inf->aspace.get (),
6390 ecs->event_thread->stop_pc (),
6391 ecs->event_thread, ecs->ws);
6392
6393 if (handle_stop_requested (ecs))
6394 return;
6395
6396 /* If no catchpoint triggered for this, then keep going. Note
6397 that we're interested in knowing the bpstat actually causes a
6398 stop, not just if it may explain the signal. Software
6399 watchpoints, for example, always appear in the bpstat. */
6400 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
6401 {
6402 bool follow_child
6403 = (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED
6404 && follow_fork_mode_string == follow_fork_mode_child);
6405
6406 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6407
6408 process_stratum_target *targ
6409 = ecs->event_thread->inf->process_target ();
6410
6411 bool should_resume;
6412 if (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED)
6413 should_resume = follow_fork ();
6414 else
6415 {
6416 should_resume = true;
6417 inferior *inf = ecs->event_thread->inf;
6418 inf->top_target ()->follow_clone (ecs->ws.child_ptid ());
6419 ecs->event_thread->pending_follow.set_spurious ();
6420 }
6421
6422 /* Note that one of these may be an invalid pointer,
6423 depending on detach_fork. */
6424 thread_info *parent = ecs->event_thread;
6425 thread_info *child = targ->find_thread (ecs->ws.child_ptid ());
6426
6427 /* At this point, the parent is marked running, and the
6428 child is marked stopped. */
6429
6430 /* If not resuming the parent, mark it stopped. */
6431 if (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED
6432 && follow_child && !detach_fork && !non_stop && !sched_multi)
6433 parent->set_running (false);
6434
6435 /* If resuming the child, mark it running. */
6436 if ((ecs->ws.kind () == TARGET_WAITKIND_THREAD_CLONED
6437 && !schedlock_applies (ecs->event_thread))
6438 || (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED
6439 && (follow_child
6440 || (!detach_fork && (non_stop || sched_multi)))))
6441 child->set_running (true);
6442
6443 /* In non-stop mode, also resume the other branch. */
6444 if ((ecs->ws.kind () == TARGET_WAITKIND_THREAD_CLONED
6445 && target_is_non_stop_p ()
6446 && !schedlock_applies (ecs->event_thread))
6447 || (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED
6448 && (!detach_fork && (non_stop
6449 || (sched_multi
6450 && target_is_non_stop_p ())))))
6451 {
6452 if (follow_child)
6453 switch_to_thread (parent);
6454 else
6455 switch_to_thread (child);
6456
6457 ecs->event_thread = inferior_thread ();
6458 ecs->ptid = inferior_ptid;
6459 keep_going (ecs);
6460 }
6461
6462 if (follow_child)
6463 switch_to_thread (child);
6464 else
6465 switch_to_thread (parent);
6466
6467 ecs->event_thread = inferior_thread ();
6468 ecs->ptid = inferior_ptid;
6469
6470 if (should_resume)
6471 {
6472 /* Never call switch_back_to_stepped_thread if we are waiting for
6473 vfork-done (waiting for an external vfork child to exec or
6474 exit). We will resume only the vforking thread for the purpose
6475 of collecting the vfork-done event, and we will restart any
6476 step once the critical shared address space window is done. */
6477 if ((!follow_child
6478 && detach_fork
6479 && parent->inf->thread_waiting_for_vfork_done != nullptr)
6480 || !switch_back_to_stepped_thread (ecs))
6481 keep_going (ecs);
6482 }
6483 else
6484 stop_waiting (ecs);
6485 return;
6486 }
6487 process_event_stop_test (ecs);
6488 return;
6489
6490 case TARGET_WAITKIND_VFORK_DONE:
6491 /* Done with the shared memory region. Re-insert breakpoints in
6492 the parent, and keep going. */
6493
6494 context_switch (ecs);
6495
6496 handle_vfork_done (ecs->event_thread);
6497 gdb_assert (inferior_thread () == ecs->event_thread);
6498
6499 if (handle_stop_requested (ecs))
6500 return;
6501
6502 if (!switch_back_to_stepped_thread (ecs))
6503 {
6504 gdb_assert (inferior_thread () == ecs->event_thread);
6505 /* This also takes care of reinserting breakpoints in the
6506 previously locked inferior. */
6507 keep_going (ecs);
6508 }
6509 return;
6510
6511 case TARGET_WAITKIND_EXECD:
6512
6513 /* Note we can't read registers yet (the stop_pc), because we
6514 don't yet know the inferior's post-exec architecture.
6515 'stop_pc' is explicitly read below instead. */
6516 switch_to_thread_no_regs (ecs->event_thread);
6517
6518 /* Do whatever is necessary to the parent branch of the vfork. */
6519 handle_vfork_child_exec_or_exit (1);
6520
6521 /* This causes the eventpoints and symbol table to be reset.
6522 Must do this now, before trying to determine whether to
6523 stop. */
6524 follow_exec (inferior_ptid, ecs->ws.execd_pathname ());
6525
6526 /* In follow_exec we may have deleted the original thread and
6527 created a new one. Make sure that the event thread is the
6528 execd thread for that case (this is a nop otherwise). */
6529 ecs->event_thread = inferior_thread ();
6530
6531 ecs->event_thread->set_stop_pc
6532 (regcache_read_pc (get_thread_regcache (ecs->event_thread)));
6533
6534 ecs->event_thread->control.stop_bpstat
6535 = bpstat_stop_status_nowatch (ecs->event_thread->inf->aspace.get (),
6536 ecs->event_thread->stop_pc (),
6537 ecs->event_thread, ecs->ws);
6538
6539 if (handle_stop_requested (ecs))
6540 return;
6541
6542 /* If no catchpoint triggered for this, then keep going. */
6543 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
6544 {
6545 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6546 keep_going (ecs);
6547 return;
6548 }
6549 process_event_stop_test (ecs);
6550 return;
6551
6552 /* Be careful not to try to gather much state about a thread
6553 that's in a syscall. It's frequently a losing proposition. */
6554 case TARGET_WAITKIND_SYSCALL_ENTRY:
6555 /* Getting the current syscall number. */
6556 if (handle_syscall_event (ecs) == 0)
6557 process_event_stop_test (ecs);
6558 return;
6559
6560 /* Before examining the threads further, step this thread to
6561 get it entirely out of the syscall. (We get notice of the
6562 event when the thread is just on the verge of exiting a
6563 syscall. Stepping one instruction seems to get it back
6564 into user code.) */
6565 case TARGET_WAITKIND_SYSCALL_RETURN:
6566 if (handle_syscall_event (ecs) == 0)
6567 process_event_stop_test (ecs);
6568 return;
6569
6570 case TARGET_WAITKIND_STOPPED:
6571 handle_signal_stop (ecs);
6572 return;
6573
6574 case TARGET_WAITKIND_NO_HISTORY:
6575 /* Reverse execution: target ran out of history info. */
6576
6577 /* Switch to the stopped thread. */
6578 context_switch (ecs);
6579 infrun_debug_printf ("stopped");
6580
6581 delete_just_stopped_threads_single_step_breakpoints ();
6582 ecs->event_thread->set_stop_pc
6583 (regcache_read_pc (get_thread_regcache (inferior_thread ())));
6584
6585 if (handle_stop_requested (ecs))
6586 return;
6587
6588 interps_notify_no_history ();
6589 stop_waiting (ecs);
6590 return;
6591 }
6592}
6593
/* Restart threads back to what they were trying to do back when we
   paused them (because of an in-line step-over or vfork, for example).
   The EVENT_THREAD thread is ignored (not restarted).

   If INF is non-nullptr, only resume threads from INF.  */

static void
restart_threads (struct thread_info *event_thread, inferior *inf)
{
  INFRUN_SCOPED_DEBUG_START_END ("event_thread=%s, inf=%d",
				 event_thread->ptid.to_string ().c_str (),
				 inf != nullptr ? inf->num : -1);

  /* We only get here once any in-line step-over is done.  */
  gdb_assert (!step_over_info_valid_p ());

  /* In case the instruction just stepped spawned a new thread.  */
  update_thread_list ();

  for (thread_info *tp : all_non_exited_threads ())
    {
      /* When filtering by inferior, skip threads belonging to other
	 inferiors.  */
      if (inf != nullptr && tp->inf != inf)
	continue;

      if (tp->inf->detaching)
	{
	  infrun_debug_printf ("restart threads: [%s] inferior detaching",
			       tp->ptid.to_string ().c_str ());
	  continue;
	}

      /* Make TP the current thread; its registers are not needed
	 here.  */
      switch_to_thread_no_regs (tp);

      if (tp == event_thread)
	{
	  infrun_debug_printf ("restart threads: [%s] is event thread",
			       tp->ptid.to_string ().c_str ());
	  continue;
	}

      /* Only restart threads the user meant to be running (or that
	 are in the middle of an inferior function call).  */
      if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
	{
	  infrun_debug_printf ("restart threads: [%s] not meant to be running",
			       tp->ptid.to_string ().c_str ());
	  continue;
	}

      if (tp->resumed ())
	{
	  infrun_debug_printf ("restart threads: [%s] resumed",
			      tp->ptid.to_string ().c_str ());
	  /* A resumed thread is either still executing, or stopped
	     with an event we haven't consumed yet.  */
	  gdb_assert (tp->executing () || tp->has_pending_waitstatus ());
	  continue;
	}

      if (thread_is_in_step_over_chain (tp))
	{
	  infrun_debug_printf ("restart threads: [%s] needs step-over",
			       tp->ptid.to_string ().c_str ());
	  gdb_assert (!tp->resumed ());
	  continue;
	}


      if (tp->has_pending_waitstatus ())
	{
	  infrun_debug_printf ("restart threads: [%s] has pending status",
			       tp->ptid.to_string ().c_str ());
	  /* Don't actually resume; marking the thread resumed makes
	     its pending event eligible for consumption later.  */
	  tp->set_resumed (true);
	  continue;
	}

      gdb_assert (!tp->stop_requested);

      /* If some thread needs to start a step-over at this point, it
	 should still be in the step-over queue, and thus skipped
	 above.  */
      if (thread_still_needs_step_over (tp))
	{
	  internal_error ("thread [%s] needs a step-over, but not in "
			  "step-over queue\n",
			  tp->ptid.to_string ().c_str ());
	}

      if (currently_stepping (tp))
	{
	  infrun_debug_printf ("restart threads: [%s] was stepping",
			       tp->ptid.to_string ().c_str ());
	  keep_going_stepped_thread (tp);
	}
      else
	{
	  infrun_debug_printf ("restart threads: [%s] continuing",
			       tp->ptid.to_string ().c_str ());
	  /* Resume the thread, passing along whatever signal it had
	     pending.  */
	  execution_control_state ecs (tp);
	  switch_to_thread (tp);
	  keep_going_pass_signal (&ecs);
	}
    }
}
6693
6694/* Callback for iterate_over_threads. Find a resumed thread that has
6695 a pending waitstatus. */
6696
6697static bool
6698resumed_thread_with_pending_status (struct thread_info *tp)
6699{
6700 return tp->resumed () && tp->has_pending_waitstatus ();
6701}
6702
/* Called when we get an event that may finish an in-line or
   out-of-line (displaced stepping) step-over started previously.
   Return true if the event is processed and we should go back to the
   event loop; false if the caller should continue processing the
   event.  */

static int
finish_step_over (struct execution_control_state *ecs)
{
  /* Finish any displaced (out-of-line) step of the event thread
     first; this may adjust the thread's state/PC.  */
  displaced_step_finish (ecs->event_thread, ecs->ws);

  /* Remember whether an in-line step-over was in progress before we
     clear the global step-over info below.  */
  bool had_step_over_info = step_over_info_valid_p ();

  if (had_step_over_info)
    {
      /* If we're stepping over a breakpoint with all threads locked,
	 then only the thread that was stepped should be reporting
	 back an event.  */
      gdb_assert (ecs->event_thread->control.trap_expected);

      update_thread_events_after_step_over (ecs->event_thread, ecs->ws);

      clear_step_over_info ();
    }

  /* The restart/queueing logic below only applies to non-stop
     targets.  */
  if (!target_is_non_stop_p ())
    return 0;

  /* Start a new step-over in another thread if there's one that
     needs it.  */
  start_step_over ();

  /* If we were stepping over a breakpoint before, and haven't started
     a new in-line step-over sequence, then restart all other threads
     (except the event thread).  We can't do this in all-stop, as then
     e.g., we wouldn't be able to issue any other remote packet until
     these other threads stop.  */
  if (had_step_over_info && !step_over_info_valid_p ())
    {
      struct thread_info *pending;

      /* If we only have threads with pending statuses, the restart
	 below won't restart any thread and so nothing re-inserts the
	 breakpoint we just stepped over.  But we need it inserted
	 when we later process the pending events, otherwise if
	 another thread has a pending event for this breakpoint too,
	 we'd discard its event (because the breakpoint that
	 originally caused the event was no longer inserted).  */
      context_switch (ecs);
      insert_breakpoints ();

      restart_threads (ecs->event_thread);

      /* If we have events pending, go through handle_inferior_event
	 again, picking up a pending event at random.  This avoids
	 thread starvation.  */

      /* But not if we just stepped over a watchpoint in order to let
	 the instruction execute so we can evaluate its expression.
	 The set of watchpoints that triggered is recorded in the
	 breakpoint objects themselves (see bp->watchpoint_triggered).
	 If we processed another event first, that other event could
	 clobber this info.  */
      if (ecs->event_thread->stepping_over_watchpoint)
	return 0;

      /* The code below is meant to avoid one thread hogging the event
	 loop by doing constant in-line step overs.  If the stepping
	 thread exited, there's no risk for this to happen, so we can
	 safely let our caller process the event immediately.  */
      if (ecs->ws.kind () == TARGET_WAITKIND_THREAD_EXITED)
	return 0;

      pending = iterate_over_threads (resumed_thread_with_pending_status);
      if (pending != nullptr)
	{
	  struct thread_info *tp = ecs->event_thread;
	  struct regcache *regcache;

	  infrun_debug_printf ("found resumed threads with "
			       "pending events, saving status");

	  gdb_assert (pending != tp);

	  /* Record the event thread's event for later.  */
	  save_waitstatus (tp, ecs->ws);
	  /* This was cleared early, by handle_inferior_event.  Set it
	     so this pending event is considered by
	     do_target_wait.  */
	  tp->set_resumed (true);

	  gdb_assert (!tp->executing ());

	  regcache = get_thread_regcache (tp);
	  tp->set_stop_pc (regcache_read_pc (regcache));

	  infrun_debug_printf ("saved stop_pc=%s for %s "
			       "(currently_stepping=%d)",
			       paddress (current_inferior ()->arch (),
					 tp->stop_pc ()),
			       tp->ptid.to_string ().c_str (),
			       currently_stepping (tp));

	  /* This in-line step-over finished; clear this so we won't
	     start a new one.  This is what handle_signal_stop would
	     do, if we returned false.  */
	  tp->stepping_over_breakpoint = 0;

	  /* Wake up the event loop again.  */
	  mark_async_event_handler (infrun_async_inferior_event_token);

	  prepare_to_wait (ecs);
	  return 1;
	}
    }

  return 0;
}
6821
/* See infrun.h.  */

void
notify_signal_received (gdb_signal sig)
{
  /* Tell the active interpreters about SIG first, then fire the
     signal_received observers.  */
  interps_notify_signal_received (sig);
  gdb::observers::signal_received.notify (sig);
}
6830
/* See infrun.h.  */

void
notify_normal_stop (bpstat *bs, int print_frame)
{
  /* Tell the active interpreters about the stop first, then fire the
     normal_stop observers.  */
  interps_notify_normal_stop (bs, print_frame);
  gdb::observers::normal_stop.notify (bs, print_frame);
}
6839
6840/* See infrun.h. */
6841
6842void notify_user_selected_context_changed (user_selected_what selection)
6843{
6844 interps_notify_user_selected_context_changed (selection);
6845 gdb::observers::user_selected_context_changed.notify (selection);
6846}
6847
/* Come here when the program has stopped with a signal.  Decide
   whether the breakpoints/stepping machinery explains the stop, or
   whether it is a "random" signal to be handled per the signal
   tables, and either stop or keep the inferior going accordingly.  */

static void
handle_signal_stop (struct execution_control_state *ecs)
{
  frame_info_ptr frame;
  struct gdbarch *gdbarch;
  int stopped_by_watchpoint;
  enum stop_kind stop_soon;
  int random_signal;

  gdb_assert (ecs->ws.kind () == TARGET_WAITKIND_STOPPED);

  /* Record the signal that made the thread stop.  */
  ecs->event_thread->set_stop_signal (ecs->ws.sig ());

  /* Do we need to clean up the state of a thread that has
     completed a displaced single-step?  (Doing so usually affects
     the PC, so do it here, before we set stop_pc.)  */
  if (finish_step_over (ecs))
    return;

  /* If we either finished a single-step or hit a breakpoint, but
     the user wanted this thread to be stopped, pretend we got a
     SIG0 (generic unsignaled stop).  */
  if (ecs->event_thread->stop_requested
      && ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
    ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);

  ecs->event_thread->set_stop_pc
    (regcache_read_pc (get_thread_regcache (ecs->event_thread)));

  /* Switch to the stopped thread.  */
  context_switch (ecs);

  if (deprecated_context_hook)
    deprecated_context_hook (ecs->event_thread->global_num);

  if (debug_infrun)
    {
      struct regcache *regcache = get_thread_regcache (ecs->event_thread);
      struct gdbarch *reg_gdbarch = regcache->arch ();

      infrun_debug_printf
	("stop_pc=%s", paddress (reg_gdbarch, ecs->event_thread->stop_pc ()));
      if (target_stopped_by_watchpoint ())
	{
	  CORE_ADDR addr;

	  infrun_debug_printf ("stopped by watchpoint");

	  if (target_stopped_data_address (current_inferior ()->top_target (),
					   &addr))
	    infrun_debug_printf ("stopped data address=%s",
				 paddress (reg_gdbarch, addr));
	  else
	    infrun_debug_printf ("(no data address available)");
	}
    }

  /* This is originated from start_remote(), start_inferior() and
     shared libraries hook functions.  */
  stop_soon = get_inferior_stop_soon (ecs);
  if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
    {
      infrun_debug_printf ("quietly stopped");
      stop_print_frame = true;
      stop_waiting (ecs);
      return;
    }

  /* This originates from attach_command().  We need to overwrite
     the stop_signal here, because some kernels don't ignore a
     SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
     See more comments in inferior.h.  On the other hand, if we
     get a non-SIGSTOP, report it to the user - assume the backend
     will handle the SIGSTOP if it should show up later.

     Also consider that the attach is complete when we see a
     SIGTRAP.  Some systems (e.g. Windows), and stubs supporting
     target extended-remote report it instead of a SIGSTOP
     (e.g. gdbserver).  We already rely on SIGTRAP being our
     signal, so this is no exception.

     Also consider that the attach is complete when we see a
     GDB_SIGNAL_0.  In non-stop mode, GDB will explicitly tell
     the target to stop all threads of the inferior, in case the
     low level attach operation doesn't stop them implicitly.  If
     they weren't stopped implicitly, then the stub will report a
     GDB_SIGNAL_0, meaning: stopped for no particular reason
     other than GDB's request.  */
  if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
      && (ecs->event_thread->stop_signal () == GDB_SIGNAL_STOP
	  || ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
	  || ecs->event_thread->stop_signal () == GDB_SIGNAL_0))
    {
      stop_print_frame = true;
      stop_waiting (ecs);
      ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
      return;
    }

  /* At this point, get hold of the now-current thread's frame.  */
  frame = get_current_frame ();
  gdbarch = get_frame_arch (frame);

  /* Pull the single step breakpoints out of the target.  */
  if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
    {
      struct regcache *regcache;
      CORE_ADDR pc;

      regcache = get_thread_regcache (ecs->event_thread);
      const address_space *aspace = ecs->event_thread->inf->aspace.get ();

      pc = regcache_read_pc (regcache);

      /* However, before doing so, if this single-step breakpoint was
	 actually for another thread, set this thread up for moving
	 past it.  */
      if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
						   aspace, pc))
	{
	  if (single_step_breakpoint_inserted_here_p (aspace, pc))
	    {
	      infrun_debug_printf ("[%s] hit another thread's single-step "
				   "breakpoint",
				   ecs->ptid.to_string ().c_str ());
	      ecs->hit_singlestep_breakpoint = 1;
	    }
	}
      else
	{
	  infrun_debug_printf ("[%s] hit its single-step breakpoint",
			       ecs->ptid.to_string ().c_str ());
	}
    }
  delete_just_stopped_threads_single_step_breakpoints ();

  /* If this SIGTRAP is the expected trap from stepping over a
     watchpoint, don't treat it as a watchpoint hit.  */
  if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
      && ecs->event_thread->control.trap_expected
      && ecs->event_thread->stepping_over_watchpoint)
    stopped_by_watchpoint = 0;
  else
    stopped_by_watchpoint = watchpoints_triggered (ecs->ws);

  /* If necessary, step over this watchpoint.  We'll be back to display
     it in a moment.  */
  if (stopped_by_watchpoint
      && (target_have_steppable_watchpoint ()
	  || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
    {
      /* At this point, we are stopped at an instruction which has
	 attempted to write to a piece of memory under control of
	 a watchpoint.  The instruction hasn't actually executed
	 yet.  If we were to evaluate the watchpoint expression
	 now, we would get the old value, and therefore no change
	 would seem to have occurred.

	 In order to make watchpoints work `right', we really need
	 to complete the memory write, and then evaluate the
	 watchpoint expression.  We do this by single-stepping the
	 target.

	 It may not be necessary to disable the watchpoint to step over
	 it.  For example, the PA can (with some kernel cooperation)
	 single step over a watchpoint without disabling the watchpoint.

	 It is far more common to need to disable a watchpoint to step
	 the inferior over it.  If we have non-steppable watchpoints,
	 we must disable the current watchpoint; it's simplest to
	 disable all watchpoints.

	 Any breakpoint at PC must also be stepped over -- if there's
	 one, it will have already triggered before the watchpoint
	 triggered, and we either already reported it to the user, or
	 it didn't cause a stop and we called keep_going.  In either
	 case, if there was a breakpoint at PC, we must be trying to
	 step past it.  */
      ecs->event_thread->stepping_over_watchpoint = 1;
      keep_going (ecs);
      return;
    }

  /* Reset per-event stepping state before computing the bpstat
     below.  */
  ecs->event_thread->stepping_over_breakpoint = 0;
  ecs->event_thread->stepping_over_watchpoint = 0;
  bpstat_clear (&ecs->event_thread->control.stop_bpstat);
  ecs->event_thread->control.stop_step = 0;
  stop_print_frame = true;
  stopped_by_random_signal = 0;
  bpstat *stop_chain = nullptr;

  /* Hide inlined functions starting here, unless we just performed stepi or
     nexti.  After stepi and nexti, always show the innermost frame (not any
     inline function call sites).  */
  if (ecs->event_thread->control.step_range_end != 1)
    {
      const address_space *aspace = ecs->event_thread->inf->aspace.get ();

      /* skip_inline_frames is expensive, so we avoid it if we can
	 determine that the address is one where functions cannot have
	 been inlined.  This improves performance with inferiors that
	 load a lot of shared libraries, because the solib event
	 breakpoint is defined as the address of a function (i.e. not
	 inline).  Note that we have to check the previous PC as well
	 as the current one to catch cases when we have just
	 single-stepped off a breakpoint prior to reinstating it.
	 Note that we're assuming that the code we single-step to is
	 not inline, but that's not definitive: there's nothing
	 preventing the event breakpoint function from containing
	 inlined code, and the single-step ending up there.  If the
	 user had set a breakpoint on that inlined code, the missing
	 skip_inline_frames call would break things.  Fortunately
	 that's an extremely unlikely scenario.  */
      if (!pc_at_non_inline_function (aspace,
				      ecs->event_thread->stop_pc (),
				      ecs->ws)
	  && !(ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
	       && ecs->event_thread->control.trap_expected
	       && pc_at_non_inline_function (aspace,
					     ecs->event_thread->prev_pc,
					     ecs->ws)))
	{
	  stop_chain = build_bpstat_chain (aspace,
					   ecs->event_thread->stop_pc (),
					   ecs->ws);
	  skip_inline_frames (ecs->event_thread, stop_chain);
	}
    }

  if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
      && ecs->event_thread->control.trap_expected
      && gdbarch_single_step_through_delay_p (gdbarch)
      && currently_stepping (ecs->event_thread))
    {
      /* We're trying to step off a breakpoint.  Turns out that we're
	 also on an instruction that needs to be stepped multiple
	 times before it's been fully executing.  E.g., architectures
	 with a delay slot.  It needs to be stepped twice, once for
	 the instruction and once for the delay slot.  */
      int step_through_delay
	= gdbarch_single_step_through_delay (gdbarch, frame);

      if (step_through_delay)
	infrun_debug_printf ("step through delay");

      if (ecs->event_thread->control.step_range_end == 0
	  && step_through_delay)
	{
	  /* The user issued a continue when stopped at a breakpoint.
	     Set up for another trap and get out of here.  */
	  ecs->event_thread->stepping_over_breakpoint = 1;
	  keep_going (ecs);
	  return;
	}
      else if (step_through_delay)
	{
	  /* The user issued a step when stopped at a breakpoint.
	     Maybe we should stop, maybe we should not - the delay
	     slot *might* correspond to a line of source.  In any
	     case, don't decide that here, just set
	     ecs->stepping_over_breakpoint, making sure we
	     single-step again before breakpoints are re-inserted.  */
	  ecs->event_thread->stepping_over_breakpoint = 1;
	}
    }

  /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
     handles this event.  */
  ecs->event_thread->control.stop_bpstat
    = bpstat_stop_status (ecs->event_thread->inf->aspace.get (),
			  ecs->event_thread->stop_pc (),
			  ecs->event_thread, ecs->ws, stop_chain);

  /* Following in case break condition called a
     function.  */
  stop_print_frame = true;

  /* This is where we handle "moribund" watchpoints.  Unlike
     software breakpoints traps, hardware watchpoint traps are
     always distinguishable from random traps.  If no high-level
     watchpoint is associated with the reported stop data address
     anymore, then the bpstat does not explain the signal ---
     simply make sure to ignore it if `stopped_by_watchpoint' is
     set.  */

  if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
      && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
				  GDB_SIGNAL_TRAP)
      && stopped_by_watchpoint)
    {
      infrun_debug_printf ("no user watchpoint explains watchpoint SIGTRAP, "
			   "ignoring");
    }

  /* NOTE: cagney/2003-03-29: These checks for a random signal
     at one stage in the past included checks for an inferior
     function call's call dummy's return breakpoint.  The original
     comment, that went with the test, read:

     ``End of a stack dummy.  Some systems (e.g. Sony news) give
     another signal besides SIGTRAP, so check here as well as
     above.''

     If someone ever tries to get call dummys on a
     non-executable stack to work (where the target would stop
     with something like a SIGSEGV), then those tests might need
     to be re-instated.  Given, however, that the tests were only
     enabled when momentary breakpoints were not being used, I
     suspect that it won't be the case.

     NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
     be necessary for call dummies on a non-executable stack on
     SPARC.  */

  /* See if the breakpoints module can explain the signal.  */
  random_signal
    = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
			       ecs->event_thread->stop_signal ());

  /* Maybe this was a trap for a software breakpoint that has since
     been removed.  */
  if (random_signal && target_stopped_by_sw_breakpoint ())
    {
      if (gdbarch_program_breakpoint_here_p (gdbarch,
					     ecs->event_thread->stop_pc ()))
	{
	  struct regcache *regcache;
	  int decr_pc;

	  /* Re-adjust PC to what the program would see if GDB was not
	     debugging it.  */
	  regcache = get_thread_regcache (ecs->event_thread);
	  decr_pc = gdbarch_decr_pc_after_break (gdbarch);
	  if (decr_pc != 0)
	    {
	      std::optional<scoped_restore_tmpl<int>>
		restore_operation_disable;

	      if (record_full_is_used ())
		restore_operation_disable.emplace
		  (record_full_gdb_operation_disable_set ());

	      regcache_write_pc (regcache,
				 ecs->event_thread->stop_pc () + decr_pc);
	    }
	}
      else
	{
	  /* A delayed software breakpoint event.  Ignore the trap.  */
	  infrun_debug_printf ("delayed software breakpoint trap, ignoring");
	  random_signal = 0;
	}
    }

  /* Maybe this was a trap for a hardware breakpoint/watchpoint that
     has since been removed.  */
  if (random_signal && target_stopped_by_hw_breakpoint ())
    {
      /* A delayed hardware breakpoint event.  Ignore the trap.  */
      infrun_debug_printf ("delayed hardware breakpoint/watchpoint "
			   "trap, ignoring");
      random_signal = 0;
    }

  /* If not, perhaps stepping/nexting can.  */
  if (random_signal)
    random_signal = !(ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
		      && currently_stepping (ecs->event_thread));

  /* Perhaps the thread hit a single-step breakpoint of _another_
     thread.  Single-step breakpoints are transparent to the
     breakpoints module.  */
  if (random_signal)
    random_signal = !ecs->hit_singlestep_breakpoint;

  /* No?  Perhaps we got a moribund watchpoint.  */
  if (random_signal)
    random_signal = !stopped_by_watchpoint;

  /* Always stop if the user explicitly requested this thread to
     remain stopped.  */
  if (ecs->event_thread->stop_requested)
    {
      random_signal = 1;
      infrun_debug_printf ("user-requested stop");
    }

  /* For the program's own signals, act according to
     the signal handling tables.  */

  if (random_signal)
    {
      /* Signal not for debugging purposes.  */
      enum gdb_signal stop_signal = ecs->event_thread->stop_signal ();

      infrun_debug_printf ("random signal (%s)",
			   gdb_signal_to_symbol_string (stop_signal));

      stopped_by_random_signal = 1;

      /* Always stop on signals if we're either just gaining control
	 of the program, or the user explicitly requested this thread
	 to remain stopped.  */
      if (stop_soon != NO_STOP_QUIETLY
	  || ecs->event_thread->stop_requested
	  || signal_stop_state (ecs->event_thread->stop_signal ()))
	{
	  stop_waiting (ecs);
	  return;
	}

      /* Notify observers the signal has "handle print" set.  Note we
	 returned early above if stopping; normal_stop handles the
	 printing in that case.  */
      if (signal_print[ecs->event_thread->stop_signal ()])
	{
	  /* The signal table tells us to print about this signal.  */
	  target_terminal::ours_for_output ();
	  notify_signal_received (ecs->event_thread->stop_signal ());
	  target_terminal::inferior ();
	}

      /* Clear the signal if it should not be passed.  */
      if (signal_program[ecs->event_thread->stop_signal ()] == 0)
	ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);

      if (ecs->event_thread->prev_pc == ecs->event_thread->stop_pc ()
	  && ecs->event_thread->control.trap_expected
	  && ecs->event_thread->control.step_resume_breakpoint == nullptr)
	{
	  /* We were just starting a new sequence, attempting to
	     single-step off of a breakpoint and expecting a SIGTRAP.
	     Instead this signal arrives.  This signal will take us out
	     of the stepping range so GDB needs to remember to, when
	     the signal handler returns, resume stepping off that
	     breakpoint.  */
	  /* To simplify things, "continue" is forced to use the same
	     code paths as single-step - set a breakpoint at the
	     signal return address and then, once hit, step off that
	     breakpoint.  */
	  infrun_debug_printf ("signal arrived while stepping over breakpoint");

	  insert_hp_step_resume_breakpoint_at_frame (frame);
	  ecs->event_thread->step_after_step_resume_breakpoint = 1;
	  /* Reset trap_expected to ensure breakpoints are re-inserted.  */
	  ecs->event_thread->control.trap_expected = 0;

	  /* If we were nexting/stepping some other thread, switch to
	     it, so that we don't continue it, losing control.  */
	  if (!switch_back_to_stepped_thread (ecs))
	    keep_going (ecs);
	  return;
	}

      if (ecs->event_thread->stop_signal () != GDB_SIGNAL_0
	  && (pc_in_thread_step_range (ecs->event_thread->stop_pc (),
				       ecs->event_thread)
	      || ecs->event_thread->control.step_range_end == 1)
	  && (get_stack_frame_id (frame)
	      == ecs->event_thread->control.step_stack_frame_id)
	  && ecs->event_thread->control.step_resume_breakpoint == nullptr)
	{
	  /* The inferior is about to take a signal that will take it
	     out of the single step range.  Set a breakpoint at the
	     current PC (which is presumably where the signal handler
	     will eventually return) and then allow the inferior to
	     run free.

	     Note that this is only needed for a signal delivered
	     while in the single-step range.  Nested signals aren't a
	     problem as they eventually all return.  */
	  infrun_debug_printf ("signal may take us out of single-step range");

	  clear_step_over_info ();
	  insert_hp_step_resume_breakpoint_at_frame (frame);
	  ecs->event_thread->step_after_step_resume_breakpoint = 1;
	  /* Reset trap_expected to ensure breakpoints are re-inserted.  */
	  ecs->event_thread->control.trap_expected = 0;
	  keep_going (ecs);
	  return;
	}

      /* Note: step_resume_breakpoint may be non-NULL.  This occurs
	 when either there's a nested signal, or when there's a
	 pending signal enabled just as the signal handler returns
	 (leaving the inferior at the step-resume-breakpoint without
	 actually executing it).  Either way continue until the
	 breakpoint is really hit.  */

      if (!switch_back_to_stepped_thread (ecs))
	{
	  infrun_debug_printf ("random signal, keep going");

	  keep_going (ecs);
	}
      return;
    }

  /* The stop was explained; decide whether to actually stop
     (e.g. breakpoint condition, stepping range checks).  */
  process_event_stop_test (ecs);
}
7347
7348/* Return the address for the beginning of the line. */
7349
7350CORE_ADDR
7351update_line_range_start (CORE_ADDR pc, struct execution_control_state *ecs)
7352{
7353 /* The line table may have multiple entries for the same source code line.
7354 Given the PC, check the line table and return the PC that corresponds
7355 to the line table entry for the source line that PC is in. */
7356 CORE_ADDR start_line_pc = ecs->event_thread->control.step_range_start;
7357 std::optional<CORE_ADDR> real_range_start;
7358
7359 /* Call find_line_range_start to get the smallest address in the
7360 linetable for multiple Line X entries in the line table. */
7361 real_range_start = find_line_range_start (pc);
7362
7363 if (real_range_start.has_value ())
7364 start_line_pc = *real_range_start;
7365
7366 return start_line_pc;
7367}
7368
namespace {

/* Helper class for process_event_stop_test implementing lazy evaluation.
   The callable supplied at construction runs at most once, on the first
   dereference; later dereferences return the cached result.  */
template<typename T>
class lazy_loader
{
  using fetcher_t = std::function<T ()>;

public:
  explicit lazy_loader (fetcher_t &&f)
    : m_fetch (std::move (f))
  { }

  /* Return the value, computing and caching it on first use.  */
  T &operator* ()
  {
    if (m_cache.has_value ())
      return *m_cache;

    m_cache.emplace (m_fetch ());
    return *m_cache;
  }

  /* Member access through the lazily computed value.  */
  T *operator-> ()
  {
    return &operator* ();
  }

private:
  /* Holds the result once the fetcher has run.  */
  std::optional<T> m_cache;

  /* Callable producing the value; invoked at most once.  */
  fetcher_t m_fetch;
};

}
7399
7400/* Come here when we've got some debug event / signal we can explain
7401 (IOW, not a random signal), and test whether it should cause a
7402 stop, or whether we should resume the inferior (transparently).
7403 E.g., could be a breakpoint whose condition evaluates false; we
7404 could be still stepping within the line; etc. */
7405
7406static void
7407process_event_stop_test (struct execution_control_state *ecs)
7408{
7409 struct symtab_and_line stop_pc_sal;
7410 frame_info_ptr frame;
7411 struct gdbarch *gdbarch;
7412 CORE_ADDR jmp_buf_pc;
7413 struct bpstat_what what;
7414
7415 /* Handle cases caused by hitting a breakpoint. */
7416
7417 frame = get_current_frame ();
7418 gdbarch = get_frame_arch (frame);
7419
7420 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
7421
7422 if (what.call_dummy)
7423 {
7424 stop_stack_dummy = what.call_dummy;
7425 }
7426
7427 /* A few breakpoint types have callbacks associated (e.g.,
7428 bp_jit_event). Run them now. */
7429 bpstat_run_callbacks (ecs->event_thread->control.stop_bpstat);
7430
7431 /* Shorthand to make if statements smaller. */
7432 struct frame_id original_frame_id
7433 = ecs->event_thread->control.step_frame_id;
7434 lazy_loader<frame_id> curr_frame_id
7435 ([] () { return get_frame_id (get_current_frame ()); });
7436
7437 switch (what.main_action)
7438 {
7439 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
7440 /* If we hit the breakpoint at longjmp while stepping, we
7441 install a momentary breakpoint at the target of the
7442 jmp_buf. */
7443
7444 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME");
7445
7446 ecs->event_thread->stepping_over_breakpoint = 1;
7447
7448 if (what.is_longjmp)
7449 {
7450 struct value *arg_value;
7451
7452 /* If we set the longjmp breakpoint via a SystemTap probe,
7453 then use it to extract the arguments. The destination PC
7454 is the third argument to the probe. */
7455 arg_value = probe_safe_evaluate_at_pc (frame, 2);
7456 if (arg_value)
7457 {
7458 jmp_buf_pc = value_as_address (arg_value);
7459 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
7460 }
7461 else if (!gdbarch_get_longjmp_target_p (gdbarch)
7462 || !gdbarch_get_longjmp_target (gdbarch,
7463 frame, &jmp_buf_pc))
7464 {
7465 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME "
7466 "(!gdbarch_get_longjmp_target)");
7467 keep_going (ecs);
7468 return;
7469 }
7470
7471 /* Insert a breakpoint at resume address. */
7472 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
7473 }
7474 else
7475 check_exception_resume (ecs, frame);
7476 keep_going (ecs);
7477 return;
7478
7479 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
7480 {
7481 frame_info_ptr init_frame;
7482
7483 /* There are several cases to consider.
7484
7485 1. The initiating frame no longer exists. In this case we
7486 must stop, because the exception or longjmp has gone too
7487 far.
7488
7489 2. The initiating frame exists, and is the same as the
7490 current frame. We stop, because the exception or longjmp
7491 has been caught.
7492
7493 3. The initiating frame exists and is different from the
7494 current frame. This means the exception or longjmp has
7495 been caught beneath the initiating frame, so keep going.
7496
7497 4. longjmp breakpoint has been placed just to protect
7498 against stale dummy frames and user is not interested in
7499 stopping around longjmps. */
7500
7501 infrun_debug_printf ("BPSTAT_WHAT_CLEAR_LONGJMP_RESUME");
7502
7503 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
7504 != nullptr);
7505 delete_exception_resume_breakpoint (ecs->event_thread);
7506
7507 if (what.is_longjmp)
7508 {
7509 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
7510
7511 if (!frame_id_p (ecs->event_thread->initiating_frame))
7512 {
7513 /* Case 4. */
7514 keep_going (ecs);
7515 return;
7516 }
7517 }
7518
7519 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
7520
7521 if (init_frame)
7522 {
7523 if (*curr_frame_id == ecs->event_thread->initiating_frame)
7524 {
7525 /* Case 2. Fall through. */
7526 }
7527 else
7528 {
7529 /* Case 3. */
7530 keep_going (ecs);
7531 return;
7532 }
7533 }
7534
7535 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
7536 exists. */
7537 delete_step_resume_breakpoint (ecs->event_thread);
7538
7539 end_stepping_range (ecs);
7540 }
7541 return;
7542
7543 case BPSTAT_WHAT_SINGLE:
7544 infrun_debug_printf ("BPSTAT_WHAT_SINGLE");
7545 ecs->event_thread->stepping_over_breakpoint = 1;
7546 /* Still need to check other stuff, at least the case where we
7547 are stepping and step out of the right range. */
7548 break;
7549
7550 case BPSTAT_WHAT_STEP_RESUME:
7551 infrun_debug_printf ("BPSTAT_WHAT_STEP_RESUME");
7552
7553 delete_step_resume_breakpoint (ecs->event_thread);
7554 if (ecs->event_thread->control.proceed_to_finish
7555 && execution_direction == EXEC_REVERSE)
7556 {
7557 struct thread_info *tp = ecs->event_thread;
7558
7559 /* We are finishing a function in reverse, and just hit the
7560 step-resume breakpoint at the start address of the
7561 function, and we're almost there -- just need to back up
7562 by one more single-step, which should take us back to the
7563 function call. */
7564 tp->control.step_range_start = tp->control.step_range_end = 1;
7565 keep_going (ecs);
7566 return;
7567 }
7568 fill_in_stop_func (gdbarch, ecs);
7569 if (ecs->event_thread->stop_pc () == ecs->stop_func_start
7570 && execution_direction == EXEC_REVERSE)
7571 {
7572 /* We are stepping over a function call in reverse, and just
7573 hit the step-resume breakpoint at the start address of
7574 the function. Go back to single-stepping, which should
7575 take us back to the function call. */
7576 ecs->event_thread->stepping_over_breakpoint = 1;
7577 keep_going (ecs);
7578 return;
7579 }
7580 break;
7581
7582 case BPSTAT_WHAT_STOP_NOISY:
7583 infrun_debug_printf ("BPSTAT_WHAT_STOP_NOISY");
7584 stop_print_frame = true;
7585
7586 /* Assume the thread stopped for a breakpoint. We'll still check
7587 whether a/the breakpoint is there when the thread is next
7588 resumed. */
7589 ecs->event_thread->stepping_over_breakpoint = 1;
7590
7591 stop_waiting (ecs);
7592 return;
7593
7594 case BPSTAT_WHAT_STOP_SILENT:
7595 infrun_debug_printf ("BPSTAT_WHAT_STOP_SILENT");
7596 stop_print_frame = false;
7597
7598 /* Assume the thread stopped for a breakpoint. We'll still check
7599 whether a/the breakpoint is there when the thread is next
7600 resumed. */
7601 ecs->event_thread->stepping_over_breakpoint = 1;
7602 stop_waiting (ecs);
7603 return;
7604
7605 case BPSTAT_WHAT_HP_STEP_RESUME:
7606 infrun_debug_printf ("BPSTAT_WHAT_HP_STEP_RESUME");
7607
7608 delete_step_resume_breakpoint (ecs->event_thread);
7609 if (ecs->event_thread->step_after_step_resume_breakpoint)
7610 {
7611 /* Back when the step-resume breakpoint was inserted, we
7612 were trying to single-step off a breakpoint. Go back to
7613 doing that. */
7614 ecs->event_thread->step_after_step_resume_breakpoint = 0;
7615 ecs->event_thread->stepping_over_breakpoint = 1;
7616 keep_going (ecs);
7617 return;
7618 }
7619 break;
7620
7621 case BPSTAT_WHAT_KEEP_CHECKING:
7622 break;
7623 }
7624
7625 /* If we stepped a permanent breakpoint and we had a high priority
7626 step-resume breakpoint for the address we stepped, but we didn't
7627 hit it, then we must have stepped into the signal handler. The
7628 step-resume was only necessary to catch the case of _not_
7629 stepping into the handler, so delete it, and fall through to
7630 checking whether the step finished. */
7631 if (ecs->event_thread->stepped_breakpoint)
7632 {
7633 struct breakpoint *sr_bp
7634 = ecs->event_thread->control.step_resume_breakpoint;
7635
7636 if (sr_bp != nullptr
7637 && sr_bp->first_loc ().permanent
7638 && sr_bp->type == bp_hp_step_resume
7639 && sr_bp->first_loc ().address == ecs->event_thread->prev_pc)
7640 {
7641 infrun_debug_printf ("stepped permanent breakpoint, stopped in handler");
7642 delete_step_resume_breakpoint (ecs->event_thread);
7643 ecs->event_thread->step_after_step_resume_breakpoint = 0;
7644 }
7645 }
7646
7647 /* We come here if we hit a breakpoint but should not stop for it.
7648 Possibly we also were stepping and should stop for that. So fall
7649 through and test for stepping. But, if not stepping, do not
7650 stop. */
7651
7652 /* In all-stop mode, if we're currently stepping but have stopped in
7653 some other thread, we need to switch back to the stepped thread. */
7654 if (switch_back_to_stepped_thread (ecs))
7655 return;
7656
7657 if (ecs->event_thread->control.step_resume_breakpoint)
7658 {
7659 infrun_debug_printf ("step-resume breakpoint is inserted");
7660
7661 /* Having a step-resume breakpoint overrides anything
7662 else having to do with stepping commands until
7663 that breakpoint is reached. */
7664 keep_going (ecs);
7665 return;
7666 }
7667
7668 if (ecs->event_thread->control.step_range_end == 0)
7669 {
7670 infrun_debug_printf ("no stepping, continue");
7671 /* Likewise if we aren't even stepping. */
7672 keep_going (ecs);
7673 return;
7674 }
7675
7676 fill_in_stop_func (gdbarch, ecs);
7677
7678 /* If stepping through a line, keep going if still within it.
7679
7680 Note that step_range_end is the address of the first instruction
7681 beyond the step range, and NOT the address of the last instruction
7682 within it!
7683
7684 Note also that during reverse execution, we may be stepping
7685 through a function epilogue and therefore must detect when
7686 the current-frame changes in the middle of a line. */
7687
7688 if (pc_in_thread_step_range (ecs->event_thread->stop_pc (),
7689 ecs->event_thread)
7690 && (execution_direction != EXEC_REVERSE
7691 || *curr_frame_id == original_frame_id))
7692 {
7693 infrun_debug_printf
7694 ("stepping inside range [%s-%s]",
7695 paddress (gdbarch, ecs->event_thread->control.step_range_start),
7696 paddress (gdbarch, ecs->event_thread->control.step_range_end));
7697
7698 /* Tentatively re-enable range stepping; `resume' disables it if
7699 necessary (e.g., if we're stepping over a breakpoint or we
7700 have software watchpoints). */
7701 ecs->event_thread->control.may_range_step = 1;
7702
7703 /* When stepping backward, stop at beginning of line range
7704 (unless it's the function entry point, in which case
7705 keep going back to the call point). */
7706 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
7707 if (stop_pc == ecs->event_thread->control.step_range_start
7708 && stop_pc != ecs->stop_func_start
7709 && execution_direction == EXEC_REVERSE)
7710 end_stepping_range (ecs);
7711 else
7712 keep_going (ecs);
7713
7714 return;
7715 }
7716
7717 /* We stepped out of the stepping range. */
7718
7719 /* If we are stepping at the source level and entered the runtime
7720 loader dynamic symbol resolution code...
7721
7722 EXEC_FORWARD: we keep on single stepping until we exit the run
7723 time loader code and reach the callee's address.
7724
7725 EXEC_REVERSE: we've already executed the callee (backward), and
7726 the runtime loader code is handled just like any other
7727 undebuggable function call. Now we need only keep stepping
7728 backward through the trampoline code, and that's handled further
7729 down, so there is nothing for us to do here. */
7730
7731 if (execution_direction != EXEC_REVERSE
7732 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7733 && in_solib_dynsym_resolve_code (ecs->event_thread->stop_pc ())
7734 && (ecs->event_thread->control.step_start_function == nullptr
7735 || !in_solib_dynsym_resolve_code (
7736 ecs->event_thread->control.step_start_function->value_block ()
7737 ->entry_pc ())))
7738 {
7739 CORE_ADDR pc_after_resolver =
7740 gdbarch_skip_solib_resolver (gdbarch, ecs->event_thread->stop_pc ());
7741
7742 infrun_debug_printf ("stepped into dynsym resolve code");
7743
7744 if (pc_after_resolver)
7745 {
7746 /* Set up a step-resume breakpoint at the address
7747 indicated by SKIP_SOLIB_RESOLVER. */
7748 symtab_and_line sr_sal;
7749 sr_sal.pc = pc_after_resolver;
7750 sr_sal.pspace = get_frame_program_space (frame);
7751
7752 insert_step_resume_breakpoint_at_sal (gdbarch,
7753 sr_sal, null_frame_id);
7754 }
7755
7756 keep_going (ecs);
7757 return;
7758 }
7759
7760 /* Step through an indirect branch thunk. */
7761 if (ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
7762 && gdbarch_in_indirect_branch_thunk (gdbarch,
7763 ecs->event_thread->stop_pc ()))
7764 {
7765 infrun_debug_printf ("stepped into indirect branch thunk");
7766 keep_going (ecs);
7767 return;
7768 }
7769
7770 if (ecs->event_thread->control.step_range_end != 1
7771 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7772 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
7773 && get_frame_type (frame) == SIGTRAMP_FRAME)
7774 {
7775 infrun_debug_printf ("stepped into signal trampoline");
7776 /* The inferior, while doing a "step" or "next", has ended up in
7777 a signal trampoline (either by a signal being delivered or by
7778 the signal handler returning). Just single-step until the
7779 inferior leaves the trampoline (either by calling the handler
7780 or returning). */
7781 keep_going (ecs);
7782 return;
7783 }
7784
7785 /* If we're in the return path from a shared library trampoline,
7786 we want to proceed through the trampoline when stepping. */
7787 /* macro/2012-04-25: This needs to come before the subroutine
7788 call check below as on some targets return trampolines look
7789 like subroutine calls (MIPS16 return thunks). */
7790 if (gdbarch_in_solib_return_trampoline (gdbarch,
7791 ecs->event_thread->stop_pc (),
7792 ecs->stop_func_name)
7793 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
7794 {
7795 /* Determine where this trampoline returns. */
7796 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
7797 CORE_ADDR real_stop_pc
7798 = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
7799
7800 infrun_debug_printf ("stepped into solib return tramp");
7801
7802 /* Only proceed through if we know where it's going. */
7803 if (real_stop_pc)
7804 {
7805 /* And put the step-breakpoint there and go until there. */
7806 symtab_and_line sr_sal;
7807 sr_sal.pc = real_stop_pc;
7808 sr_sal.section = find_pc_overlay (sr_sal.pc);
7809 sr_sal.pspace = get_frame_program_space (frame);
7810
7811 /* Do not specify what the fp should be when we stop since
7812 on some machines the prologue is where the new fp value
7813 is established. */
7814 insert_step_resume_breakpoint_at_sal (gdbarch,
7815 sr_sal, null_frame_id);
7816
7817 /* Restart without fiddling with the step ranges or
7818 other state. */
7819 keep_going (ecs);
7820 return;
7821 }
7822 }
7823
7824 /* Check for subroutine calls. The check for the current frame
7825 equalling the step ID is not necessary - the check of the
7826 previous frame's ID is sufficient - but it is a common case and
7827 cheaper than checking the previous frame's ID.
7828
7829 NOTE: frame_id::operator== will never report two invalid frame IDs as
7830 being equal, so to get into this block, both the current and
7831 previous frame must have valid frame IDs. */
7832 /* The outer_frame_id check is a heuristic to detect stepping
7833 through startup code. If we step over an instruction which
7834 sets the stack pointer from an invalid value to a valid value,
7835 we may detect that as a subroutine call from the mythical
7836 "outermost" function. This could be fixed by marking
7837 outermost frames as !stack_p,code_p,special_p. Then the
7838 initial outermost frame, before sp was valid, would
7839 have code_addr == &_start. See the comment in frame_id::operator==
7840 for more. */
7841
7842 /* We want "nexti" to step into, not over, signal handlers invoked
7843 by the kernel, therefore this subroutine check should not trigger
7844 for a signal handler invocation. On most platforms, this is already
7845 not the case, as the kernel puts a signal trampoline frame onto the
7846 stack to handle proper return after the handler, and therefore at this
7847 point, the current frame is a grandchild of the step frame, not a
7848 child. However, on some platforms, the kernel actually uses a
7849 trampoline to handle *invocation* of the handler. In that case,
7850 when executing the first instruction of the trampoline, this check
7851 would erroneously detect the trampoline invocation as a subroutine
7852 call. Fix this by checking for SIGTRAMP_FRAME. */
7853 if ((get_stack_frame_id (frame)
7854 != ecs->event_thread->control.step_stack_frame_id)
7855 && get_frame_type (frame) != SIGTRAMP_FRAME
7856 && ((frame_unwind_caller_id (frame)
7857 == ecs->event_thread->control.step_stack_frame_id)
7858 && ((ecs->event_thread->control.step_stack_frame_id
7859 != outer_frame_id)
7860 || (ecs->event_thread->control.step_start_function
7861 != find_pc_function (ecs->event_thread->stop_pc ())))))
7862 {
7863 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
7864 CORE_ADDR real_stop_pc;
7865
7866 infrun_debug_printf ("stepped into subroutine");
7867
7868 if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
7869 {
7870 /* I presume that step_over_calls is only 0 when we're
7871 supposed to be stepping at the assembly language level
7872 ("stepi"). Just stop. */
7873 /* And this works the same backward as frontward. MVS */
7874 end_stepping_range (ecs);
7875 return;
7876 }
7877
7878 /* Reverse stepping through solib trampolines. */
7879
7880 if (execution_direction == EXEC_REVERSE
7881 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
7882 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
7883 || (ecs->stop_func_start == 0
7884 && in_solib_dynsym_resolve_code (stop_pc))))
7885 {
7886 /* Any solib trampoline code can be handled in reverse
7887 by simply continuing to single-step. We have already
7888 executed the solib function (backwards), and a few
7889 steps will take us back through the trampoline to the
7890 caller. */
7891 keep_going (ecs);
7892 return;
7893 }
7894
7895 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
7896 {
7897 /* We're doing a "next".
7898
7899 Normal (forward) execution: set a breakpoint at the
7900 callee's return address (the address at which the caller
7901 will resume).
7902
7903 Reverse (backward) execution. set the step-resume
7904 breakpoint at the start of the function that we just
7905 stepped into (backwards), and continue to there. When we
7906 get there, we'll need to single-step back to the caller. */
7907
7908 if (execution_direction == EXEC_REVERSE)
7909 {
7910 /* If we're already at the start of the function, we've either
7911 just stepped backward into a single instruction function,
7912 or stepped back out of a signal handler to the first instruction
7913 of the function. Just keep going, which will single-step back
7914 to the caller. */
7915 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
7916 {
7917 /* Normal function call return (static or dynamic). */
7918 symtab_and_line sr_sal;
7919 sr_sal.pc = ecs->stop_func_start;
7920 sr_sal.pspace = get_frame_program_space (frame);
7921 insert_step_resume_breakpoint_at_sal (gdbarch,
7922 sr_sal, get_stack_frame_id (frame));
7923 }
7924 }
7925 else
7926 insert_step_resume_breakpoint_at_caller (frame);
7927
7928 keep_going (ecs);
7929 return;
7930 }
7931
7932 /* If we are in a function call trampoline (a stub between the
7933 calling routine and the real function), locate the real
7934 function. That's what tells us (a) whether we want to step
7935 into it at all, and (b) what prologue we want to run to the
7936 end of, if we do step into it. */
7937 real_stop_pc = skip_language_trampoline (frame, stop_pc);
7938 if (real_stop_pc == 0)
7939 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
7940 if (real_stop_pc != 0)
7941 ecs->stop_func_start = real_stop_pc;
7942
7943 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
7944 {
7945 symtab_and_line sr_sal;
7946 sr_sal.pc = ecs->stop_func_start;
7947 sr_sal.pspace = get_frame_program_space (frame);
7948
7949 insert_step_resume_breakpoint_at_sal (gdbarch,
7950 sr_sal, null_frame_id);
7951 keep_going (ecs);
7952 return;
7953 }
7954
7955 /* If we have line number information for the function we are
7956 thinking of stepping into and the function isn't on the skip
7957 list, step into it.
7958
7959 If there are several symtabs at that PC (e.g. with include
7960 files), just want to know whether *any* of them have line
7961 numbers. find_pc_line handles this. */
7962 {
7963 struct symtab_and_line tmp_sal;
7964
7965 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
7966 if (tmp_sal.line != 0
7967 && !function_name_is_marked_for_skip (ecs->stop_func_name,
7968 tmp_sal)
7969 && !inline_frame_is_marked_for_skip (true, ecs->event_thread))
7970 {
7971 if (execution_direction == EXEC_REVERSE)
7972 handle_step_into_function_backward (gdbarch, ecs);
7973 else
7974 handle_step_into_function (gdbarch, ecs);
7975 return;
7976 }
7977 }
7978
7979 /* If we have no line number and the step-stop-if-no-debug is
7980 set, we stop the step so that the user has a chance to switch
7981 in assembly mode. */
7982 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7983 && step_stop_if_no_debug)
7984 {
7985 end_stepping_range (ecs);
7986 return;
7987 }
7988
7989 if (execution_direction == EXEC_REVERSE)
7990 {
7991 /* If we're already at the start of the function, we've either just
7992 stepped backward into a single instruction function without line
7993 number info, or stepped back out of a signal handler to the first
7994 instruction of the function without line number info. Just keep
7995 going, which will single-step back to the caller. */
7996 if (ecs->stop_func_start != stop_pc)
7997 {
7998 /* Set a breakpoint at callee's start address.
7999 From there we can step once and be back in the caller. */
8000 symtab_and_line sr_sal;
8001 sr_sal.pc = ecs->stop_func_start;
8002 sr_sal.pspace = get_frame_program_space (frame);
8003 insert_step_resume_breakpoint_at_sal (gdbarch,
8004 sr_sal, null_frame_id);
8005 }
8006 }
8007 else
8008 /* Set a breakpoint at callee's return address (the address
8009 at which the caller will resume). */
8010 insert_step_resume_breakpoint_at_caller (frame);
8011
8012 keep_going (ecs);
8013 return;
8014 }
8015
8016 /* Reverse stepping through solib trampolines. */
8017
8018 if (execution_direction == EXEC_REVERSE
8019 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
8020 {
8021 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
8022
8023 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
8024 || (ecs->stop_func_start == 0
8025 && in_solib_dynsym_resolve_code (stop_pc)))
8026 {
8027 /* Any solib trampoline code can be handled in reverse
8028 by simply continuing to single-step. We have already
8029 executed the solib function (backwards), and a few
8030 steps will take us back through the trampoline to the
8031 caller. */
8032 keep_going (ecs);
8033 return;
8034 }
8035 else if (in_solib_dynsym_resolve_code (stop_pc))
8036 {
8037 /* Stepped backward into the solib dynsym resolver.
8038 Set a breakpoint at its start and continue, then
8039 one more step will take us out. */
8040 symtab_and_line sr_sal;
8041 sr_sal.pc = ecs->stop_func_start;
8042 sr_sal.pspace = get_frame_program_space (frame);
8043 insert_step_resume_breakpoint_at_sal (gdbarch,
8044 sr_sal, null_frame_id);
8045 keep_going (ecs);
8046 return;
8047 }
8048 }
8049
8050 /* This always returns the sal for the inner-most frame when we are in a
8051 stack of inlined frames, even if GDB actually believes that it is in a
8052 more outer frame. This is checked for below by calls to
8053 inline_skipped_frames. */
8054 stop_pc_sal = find_pc_line (ecs->event_thread->stop_pc (), 0);
8055
8056 /* NOTE: tausq/2004-05-24: This if block used to be done before all
8057 the trampoline processing logic, however, there are some trampolines
8058 that have no names, so we should do trampoline handling first. */
8059 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
8060 && ecs->stop_func_name == nullptr
8061 && stop_pc_sal.line == 0)
8062 {
8063 infrun_debug_printf ("stepped into undebuggable function");
8064
8065 /* The inferior just stepped into, or returned to, an
8066 undebuggable function (where there is no debugging information
8067 and no line number corresponding to the address where the
8068 inferior stopped). Since we want to skip this kind of code,
8069 we keep going until the inferior returns from this
8070 function - unless the user has asked us not to (via
8071 set step-mode) or we no longer know how to get back
8072 to the call site. */
8073 if (step_stop_if_no_debug
8074 || !frame_id_p (frame_unwind_caller_id (frame)))
8075 {
8076 /* If we have no line number and the step-stop-if-no-debug
8077 is set, we stop the step so that the user has a chance to
8078 switch in assembly mode. */
8079 end_stepping_range (ecs);
8080 return;
8081 }
8082 else
8083 {
8084 /* Set a breakpoint at callee's return address (the address
8085 at which the caller will resume). */
8086 insert_step_resume_breakpoint_at_caller (frame);
8087 keep_going (ecs);
8088 return;
8089 }
8090 }
8091
8092 if (execution_direction == EXEC_REVERSE
8093 && ecs->event_thread->control.proceed_to_finish
8094 && ecs->event_thread->stop_pc () >= ecs->stop_func_alt_start
8095 && ecs->event_thread->stop_pc () < ecs->stop_func_start)
8096 {
8097 /* We are executing the reverse-finish command.
8098 If the system supports multiple entry points and we are finishing a
8099 function in reverse. If we are between the entry points single-step
8100 back to the alternate entry point. If we are at the alternate entry
8101 point -- just need to back up by one more single-step, which
8102 should take us back to the function call. */
8103 ecs->event_thread->control.step_range_start
8104 = ecs->event_thread->control.step_range_end = 1;
8105 keep_going (ecs);
8106 return;
8107
8108 }
8109
8110 if (ecs->event_thread->control.step_range_end == 1)
8111 {
8112 /* It is stepi or nexti. We always want to stop stepping after
8113 one instruction. */
8114 infrun_debug_printf ("stepi/nexti");
8115 end_stepping_range (ecs);
8116 return;
8117 }
8118
8119 if (stop_pc_sal.line == 0)
8120 {
8121 /* We have no line number information. That means to stop
8122 stepping (does this always happen right after one instruction,
8123 when we do "s" in a function with no line numbers,
8124 or can this happen as a result of a return or longjmp?). */
8125 infrun_debug_printf ("line number info");
8126 end_stepping_range (ecs);
8127 return;
8128 }
8129
8130 /* Handle the case when subroutines have multiple ranges. When we step
8131 from one part to the next part of the same subroutine, all subroutine
8132 levels are skipped again which begin here. Compensate for this by
8133 removing all skipped subroutines, which were already executing from
8134 the user's perspective. */
8135
8136 if (get_stack_frame_id (frame)
8137 == ecs->event_thread->control.step_stack_frame_id
8138 && inline_skipped_frames (ecs->event_thread) > 0
8139 && ecs->event_thread->control.step_frame_id.artificial_depth > 0
8140 && ecs->event_thread->control.step_frame_id.code_addr_p)
8141 {
8142 int depth = 0;
8143 const struct block *prev
8144 = block_for_pc (ecs->event_thread->control.step_frame_id.code_addr);
8145 const struct block *curr = block_for_pc (ecs->event_thread->stop_pc ());
8146 while (curr != nullptr && !curr->contains (prev))
8147 {
8148 if (curr->inlined_p ())
8149 depth++;
8150 else if (curr->function () != nullptr)
8151 break;
8152 curr = curr->superblock ();
8153 }
8154 while (inline_skipped_frames (ecs->event_thread) > depth)
8155 step_into_inline_frame (ecs->event_thread);
8156 }
8157
8158 /* Look for "calls" to inlined functions, part one. If the inline
8159 frame machinery detected some skipped call sites, we have entered
8160 a new inline function. */
8161
8162 if ((*curr_frame_id == original_frame_id)
8163 && inline_skipped_frames (ecs->event_thread))
8164 {
8165 infrun_debug_printf ("stepped into inlined function");
8166
8167 symtab_and_line call_sal = find_frame_sal (frame);
8168
8169 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
8170 {
8171 /* For "step", we're going to stop. But if the call site
8172 for this inlined function is on the same source line as
8173 we were previously stepping, go down into the function
8174 first. Otherwise stop at the call site. */
8175
8176 if (call_sal.line == ecs->event_thread->current_line
8177 && call_sal.symtab == ecs->event_thread->current_symtab)
8178 {
8179 step_into_inline_frame (ecs->event_thread);
8180 if (inline_frame_is_marked_for_skip (false, ecs->event_thread))
8181 {
8182 keep_going (ecs);
8183 return;
8184 }
8185 }
8186
8187 end_stepping_range (ecs);
8188 return;
8189 }
8190 else
8191 {
8192 /* For "next", we should stop at the call site if it is on a
8193 different source line. Otherwise continue through the
8194 inlined function. */
8195 if (call_sal.line == ecs->event_thread->current_line
8196 && call_sal.symtab == ecs->event_thread->current_symtab)
8197 keep_going (ecs);
8198 else
8199 end_stepping_range (ecs);
8200 return;
8201 }
8202 }
8203
8204 /* Look for "calls" to inlined functions, part two. If we are still
8205 in the same real function we were stepping through, but we have
8206 to go further up to find the exact frame ID, we are stepping
8207 through a more inlined call beyond its call site. */
8208
8209 if (get_frame_type (frame) == INLINE_FRAME
8210 && (*curr_frame_id != original_frame_id)
8211 && stepped_in_from (frame, original_frame_id))
8212 {
8213 infrun_debug_printf ("stepping through inlined function");
8214
8215 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL
8216 || inline_frame_is_marked_for_skip (false, ecs->event_thread))
8217 keep_going (ecs);
8218 else
8219 end_stepping_range (ecs);
8220 return;
8221 }
8222
8223 bool refresh_step_info = true;
8224 if ((ecs->event_thread->stop_pc () == stop_pc_sal.pc)
8225 && (ecs->event_thread->current_line != stop_pc_sal.line
8226 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
8227 {
8228 /* We are at a different line. */
8229
8230 if (stop_pc_sal.is_stmt)
8231 {
8232 if (execution_direction == EXEC_REVERSE)
8233 {
8234 /* We are stepping backwards make sure we have reached the
8235 beginning of the line. */
8236 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
8237 CORE_ADDR start_line_pc
8238 = update_line_range_start (stop_pc, ecs);
8239
8240 if (stop_pc != start_line_pc)
8241 {
8242 /* Have not reached the beginning of the source code line.
8243 Set a step range. Execution should stop in any function
8244 calls we execute back into before reaching the beginning
8245 of the line. */
8246 ecs->event_thread->control.step_range_start
8247 = start_line_pc;
8248 ecs->event_thread->control.step_range_end = stop_pc;
8249 set_step_info (ecs->event_thread, frame, stop_pc_sal);
8250 keep_going (ecs);
8251 return;
8252 }
8253 }
8254
8255 /* We are at the start of a statement.
8256
8257 So stop. Note that we don't stop if we step into the middle of a
8258 statement. That is said to make things like for (;;) statements
8259 work better. */
8260 infrun_debug_printf ("stepped to a different line");
8261 end_stepping_range (ecs);
8262 return;
8263 }
8264 else if (*curr_frame_id == original_frame_id)
8265 {
8266 /* We are not at the start of a statement, and we have not changed
8267 frame.
8268
8269 We ignore this line table entry, and continue stepping forward,
8270 looking for a better place to stop. */
8271 refresh_step_info = false;
8272 infrun_debug_printf ("stepped to a different line, but "
8273 "it's not the start of a statement");
8274 }
8275 else
8276 {
8277 /* We are not the start of a statement, and we have changed frame.
8278
8279 We ignore this line table entry, and continue stepping forward,
8280 looking for a better place to stop. Keep refresh_step_info at
8281 true to note that the frame has changed, but ignore the line
8282 number to make sure we don't ignore a subsequent entry with the
8283 same line number. */
8284 stop_pc_sal.line = 0;
8285 infrun_debug_printf ("stepped to a different frame, but "
8286 "it's not the start of a statement");
8287 }
8288 }
8289
8290 if (execution_direction == EXEC_REVERSE
8291 && *curr_frame_id != original_frame_id
8292 && original_frame_id.code_addr_p && curr_frame_id->code_addr_p
8293 && original_frame_id.code_addr == curr_frame_id->code_addr)
8294 {
8295 /* If we enter here, we're leaving a recursive function call. In this
8296 situation, we shouldn't refresh the step information, because if we
8297 do, we'll lose the frame_id of when we started stepping, and this
8298 will make GDB not know we need to print frame information. */
8299 refresh_step_info = false;
8300 infrun_debug_printf ("reverse stepping, left a recursive call, don't "
8301 "update step info so we remember we left a frame");
8302 }
8303
8304 /* We aren't done stepping.
8305
8306 Optimize by setting the stepping range to the line.
8307 (We might not be in the original line, but if we entered a
8308 new line in mid-statement, we continue stepping. This makes
8309 things like for(;;) statements work better.)
8310
8311 If we entered a SAL that indicates a non-statement line table entry,
8312 then we update the stepping range, but we don't update the step info,
8313 which includes things like the line number we are stepping away from.
8314 This means we will stop when we find a line table entry that is marked
8315 as is-statement, even if it matches the non-statement one we just
8316 stepped into. */
8317
8318 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
8319 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
8320 ecs->event_thread->control.may_range_step = 1;
8321 infrun_debug_printf
8322 ("updated step range, start = %s, end = %s, may_range_step = %d",
8323 paddress (gdbarch, ecs->event_thread->control.step_range_start),
8324 paddress (gdbarch, ecs->event_thread->control.step_range_end),
8325 ecs->event_thread->control.may_range_step);
8326 if (refresh_step_info)
8327 set_step_info (ecs->event_thread, frame, stop_pc_sal);
8328
8329 infrun_debug_printf ("keep going");
8330
8331 if (execution_direction == EXEC_REVERSE)
8332 {
8333 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
8334
8335 /* Make sure the stop_pc is set to the beginning of the line. */
8336 if (stop_pc != ecs->event_thread->control.step_range_start)
8337 ecs->event_thread->control.step_range_start
8338 = update_line_range_start (stop_pc, ecs);
8339 }
8340
8341 keep_going (ecs);
8342}
8343
8344static bool restart_stepped_thread (process_stratum_target *resume_target,
8345 ptid_t resume_ptid);
8346
/* In all-stop mode, if we're currently stepping but have stopped in
   some other thread, we may need to switch back to the stepped
   thread.  Returns true if we set the inferior running, false if we
   left it stopped (and the event needs further processing).  */

static bool
switch_back_to_stepped_thread (struct execution_control_state *ecs)
{
  if (!target_is_non_stop_p ())
    {
      /* If any thread is blocked on some internal breakpoint, and we
	 simply need to step over that breakpoint to get it going
	 again, do that first.  */

      /* However, if we see an event for the stepping thread, then we
	 know all other threads have been moved past their breakpoints
	 already.  Let the caller check whether the step is finished,
	 etc., before deciding to move it past a breakpoint.  */
      if (ecs->event_thread->control.step_range_end != 0)
	return false;

      /* Check if the current thread is blocked on an incomplete
	 step-over, interrupted by a random signal.  */
      if (ecs->event_thread->control.trap_expected
	  && ecs->event_thread->stop_signal () != GDB_SIGNAL_TRAP)
	{
	  infrun_debug_printf
	    ("need to finish step-over of [%s]",
	     ecs->event_thread->ptid.to_string ().c_str ());
	  keep_going (ecs);
	  return true;
	}

      /* Check if the current thread is blocked by a single-step
	 breakpoint of another thread.  */
      if (ecs->hit_singlestep_breakpoint)
	{
	  infrun_debug_printf ("need to step [%s] over single-step breakpoint",
			       ecs->ptid.to_string ().c_str ());
	  keep_going (ecs);
	  return true;
	}

      /* If this thread needs yet another step-over (e.g., stepping
	 through a delay slot), do it first before moving on to
	 another thread.  */
      if (thread_still_needs_step_over (ecs->event_thread))
	{
	  infrun_debug_printf
	    ("thread [%s] still needs step-over",
	     ecs->event_thread->ptid.to_string ().c_str ());
	  keep_going (ecs);
	  return true;
	}

      /* If scheduler locking applies even if not stepping, there's no
	 need to walk over threads.  Above we've checked whether the
	 current thread is stepping.  If some other thread not the
	 event thread is stepping, then it must be that scheduler
	 locking is not in effect.  */
      if (schedlock_applies (ecs->event_thread))
	return false;

      /* Otherwise, we no longer expect a trap in the current thread.
	 Clear the trap_expected flag before switching back -- this is
	 what keep_going does as well, if we call it.  */
      ecs->event_thread->control.trap_expected = 0;

      /* Likewise, clear the signal if it should not be passed.  */
      if (!signal_program[ecs->event_thread->stop_signal ()])
	ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);

      /* Try to restart the thread that was stepping; if one was
	 found and resumed, just go back to waiting for events.  */
      if (restart_stepped_thread (ecs->target, ecs->ptid))
	{
	  prepare_to_wait (ecs);
	  return true;
	}

      /* No stepped thread was restarted; keep the event thread
	 selected for further processing by the caller.  */
      switch_to_thread (ecs->event_thread);
    }

  return false;
}
8430
/* Look for the thread that was stepping, and resume it.
   RESUME_TARGET / RESUME_PTID indicate the set of threads the caller
   is resuming.  Return true if a thread was started, false
   otherwise.  */

static bool
restart_stepped_thread (process_stratum_target *resume_target,
			ptid_t resume_ptid)
{
  /* Do all pending step-overs before actually proceeding with
     step/next/etc.  */
  if (start_step_over ())
    return true;

  /* First pass: give priority to threads that were interrupted in
     the middle of a step-over (trap_expected set).  */
  for (thread_info *tp : all_threads_safe ())
    {
      if (tp->state == THREAD_EXITED)
	continue;

      /* A thread with an event already waiting doesn't need to be
	 resumed; the event will be processed instead.  */
      if (tp->has_pending_waitstatus ())
	continue;

      /* Ignore threads of processes the caller is not
	 resuming.  */
      if (!sched_multi
	  && (tp->inf->process_target () != resume_target
	      || tp->inf->pid != resume_ptid.pid ()))
	continue;

      if (tp->control.trap_expected)
	{
	  infrun_debug_printf ("switching back to stepped thread (step-over)");

	  if (keep_going_stepped_thread (tp))
	    return true;
	}
    }

  /* Second pass: look for a thread that was range-stepping
     (step/next) and restart it.  */
  for (thread_info *tp : all_threads_safe ())
    {
      if (tp->state == THREAD_EXITED)
	continue;

      if (tp->has_pending_waitstatus ())
	continue;

      /* Ignore threads of processes the caller is not
	 resuming.  */
      if (!sched_multi
	  && (tp->inf->process_target () != resume_target
	      || tp->inf->pid != resume_ptid.pid ()))
	continue;

      /* Did we find the stepping thread?  */
      if (tp->control.step_range_end)
	{
	  infrun_debug_printf ("switching back to stepped thread (stepping)");

	  if (keep_going_stepped_thread (tp))
	    return true;
	}
    }

  return false;
}
8496
/* See infrun.h.  */

void
restart_after_all_stop_detach (process_stratum_target *proc_target)
{
  /* Note we don't check target_is_non_stop_p() here, because the
     current inferior may no longer have a process_stratum target
     pushed, as we just detached.  */

  /* See if we have a THREAD_RUNNING thread that needs to be
     re-resumed.  If we have any thread that is already executing,
     then we don't need to resume the target -- it has already been
     resumed.  With the remote target (in all-stop), it's even
     impossible to issue another resumption if the target is already
     resumed, until the target reports a stop.  */
  for (thread_info *thr : all_threads (proc_target))
    {
      if (thr->state != THREAD_RUNNING)
	continue;

      /* If we have any thread that is already executing, then we
	 don't need to resume the target -- it has already been
	 resumed.  */
      if (thr->executing ())
	return;

      /* If we have a pending event to process, skip resuming the
	 target and go straight to processing it.  */
      if (thr->resumed () && thr->has_pending_waitstatus ())
	return;
    }

  /* Alright, we need to re-resume the target.  If a thread was
     stepping, we need to restart it stepping.  */
  if (restart_stepped_thread (proc_target, minus_one_ptid))
    return;

  /* Otherwise, find the first THREAD_RUNNING thread and resume
     it.  */
  for (thread_info *thr : all_threads (proc_target))
    {
      if (thr->state != THREAD_RUNNING)
	continue;

      execution_control_state ecs (thr);
      switch_to_thread (thr);
      keep_going (&ecs);
      return;
    }
}
8547
/* Set a previously stepped thread back to stepping.  Returns true on
   success, false if the resume is not possible (e.g., the thread
   vanished).  */

static bool
keep_going_stepped_thread (struct thread_info *tp)
{
  frame_info_ptr frame;

  /* If the stepping thread exited, then don't try to switch back and
     resume it, which could fail in several different ways depending
     on the target.  Instead, just keep going.

     We can find a stepping dead thread in the thread list in two
     cases:

     - The target supports thread exit events, and when the target
       tries to delete the thread from the thread list, inferior_ptid
       pointed at the exiting thread.  In such case, calling
       delete_thread does not really remove the thread from the list;
       instead, the thread is left listed, with 'exited' state.

     - The target's debug interface does not support thread exit
       events, and so we have no idea whatsoever if the previously
       stepping thread is still alive.  For that reason, we need to
       synchronously query the target now.  */

  if (tp->state == THREAD_EXITED || !target_thread_alive (tp->ptid))
    {
      infrun_debug_printf ("not resuming previously stepped thread, it has "
			   "vanished");

      delete_thread (tp);
      return false;
    }

  infrun_debug_printf ("resuming previously stepped thread");

  execution_control_state ecs (tp);
  switch_to_thread (tp);

  /* Re-read the thread's current PC; it may have moved since we last
     looked, if an unreported event is pending.  */
  tp->set_stop_pc (regcache_read_pc (get_thread_regcache (tp)));
  frame = get_current_frame ();

  /* If the PC of the thread we were trying to single-step has
     changed, then that thread has trapped or been signaled, but the
     event has not been reported to GDB yet.  Re-poll the target
     looking for this particular thread's event (i.e. temporarily
     enable schedlock) by:

     - setting a break at the current PC
     - resuming that particular thread, only (by setting trap
     expected)

     This prevents us continuously moving the single-step breakpoint
     forward, one instruction at a time, overstepping.  */

  if (tp->stop_pc () != tp->prev_pc)
    {
      ptid_t resume_ptid;

      infrun_debug_printf ("expected thread advanced also (%s -> %s)",
			   paddress (current_inferior ()->arch (), tp->prev_pc),
			   paddress (current_inferior ()->arch (),
				     tp->stop_pc ()));

      /* Clear the info of the previous step-over, as it's no longer
	 valid (if the thread was trying to step over a breakpoint, it
	 has already succeeded).  It's what keep_going would do too,
	 if we called it.  Do this before trying to insert the sss
	 breakpoint, otherwise if we were previously trying to step
	 over this exact address in another thread, the breakpoint is
	 skipped.  */
      clear_step_over_info ();
      tp->control.trap_expected = 0;

      insert_single_step_breakpoint (get_frame_arch (frame),
				     get_frame_address_space (frame),
				     tp->stop_pc ());

      tp->set_resumed (true);
      resume_ptid = internal_resume_ptid (tp->control.stepping_command);
      /* NOTE(review): second argument is presumably the 'step' flag --
	 false here because the sss breakpoint just inserted does the
	 trapping; confirm against do_target_resume's definition.  */
      do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
    }
  else
    {
      infrun_debug_printf ("expected thread still hasn't advanced");

      keep_going_pass_signal (&ecs);
    }

  return true;
}
8641
8642/* Is thread TP in the middle of (software or hardware)
8643 single-stepping? (Note the result of this function must never be
8644 passed directly as target_resume's STEP parameter.) */
8645
8646static bool
8647currently_stepping (struct thread_info *tp)
8648{
8649 return ((tp->control.step_range_end
8650 && tp->control.step_resume_breakpoint == nullptr)
8651 || tp->control.trap_expected
8652 || tp->stepped_breakpoint
8653 || bpstat_should_step ());
8654}
8655
/* Inferior has stepped into a subroutine call with source code that
   we should not step over.  Do step to the first line of code in
   it.  */

static void
handle_step_into_function (struct gdbarch *gdbarch,
			   struct execution_control_state *ecs)
{
  fill_in_stop_func (gdbarch, ecs);

  /* Skip the prologue, unless the function is written in assembly
     (in which case the line info maps the prologue too).  */
  compunit_symtab *cust
    = find_pc_compunit_symtab (ecs->event_thread->stop_pc ());
  if (cust != nullptr && cust->language () != language_asm)
    ecs->stop_func_start
      = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);

  symtab_and_line stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
  /* Use the step_resume_break to step until the end of the prologue,
     even if that involves jumps (as it seems to on the vax under
     4.2).  */
  /* If the prologue ends in the middle of a source line, continue to
     the end of that source line (if it is still within the function).
     Otherwise, just go to end of prologue.  */
  if (stop_func_sal.end
      && stop_func_sal.pc != ecs->stop_func_start
      && stop_func_sal.end < ecs->stop_func_end)
    ecs->stop_func_start = stop_func_sal.end;

  /* Architectures which require breakpoint adjustment might not be able
     to place a breakpoint at the computed address.  If so, the test
     ``ecs->stop_func_start == stop_pc'' will never succeed.  Adjust
     ecs->stop_func_start to an address at which a breakpoint may be
     legitimately placed.

     Note:  kevinb/2004-01-19:  On FR-V, if this adjustment is not
     made, GDB will enter an infinite loop when stepping through
     optimized code consisting of VLIW instructions which contain
     subinstructions corresponding to different source lines.  On
     FR-V, it's not permitted to place a breakpoint on any but the
     first subinstruction of a VLIW instruction.  When a breakpoint is
     set, GDB will adjust the breakpoint address to the beginning of
     the VLIW instruction.  Thus, we need to make the corresponding
     adjustment here when computing the stop address.  */

  if (gdbarch_adjust_breakpoint_address_p (gdbarch))
    {
      ecs->stop_func_start
	= gdbarch_adjust_breakpoint_address (gdbarch,
					     ecs->stop_func_start);
    }

  if (ecs->stop_func_start == ecs->event_thread->stop_pc ())
    {
      /* We are already there: stop now.  */
      end_stepping_range (ecs);
      return;
    }
  else
    {
      /* Put the step-breakpoint there and go until there.  */
      symtab_and_line sr_sal;
      sr_sal.pc = ecs->stop_func_start;
      sr_sal.section = find_pc_overlay (ecs->stop_func_start);
      sr_sal.pspace = get_frame_program_space (get_current_frame ());

      /* Do not specify what the fp should be when we stop since on
	 some machines the prologue is where the new fp value is
	 established.  */
      insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);

      /* And make sure stepping stops right away then.  */
      ecs->event_thread->control.step_range_end
	= ecs->event_thread->control.step_range_start;
    }
  keep_going (ecs);
}
8732
8733/* Inferior has stepped backward into a subroutine call with source
8734 code that we should not step over. Do step to the beginning of the
8735 last line of code in it. */
8736
8737static void
8738handle_step_into_function_backward (struct gdbarch *gdbarch,
8739 struct execution_control_state *ecs)
8740{
8741 struct compunit_symtab *cust;
8742 struct symtab_and_line stop_func_sal;
8743
8744 fill_in_stop_func (gdbarch, ecs);
8745
8746 cust = find_pc_compunit_symtab (ecs->event_thread->stop_pc ());
8747 if (cust != nullptr && cust->language () != language_asm)
8748 ecs->stop_func_start
8749 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
8750
8751 stop_func_sal = find_pc_line (ecs->event_thread->stop_pc (), 0);
8752
8753 /* OK, we're just going to keep stepping here. */
8754 if (stop_func_sal.pc == ecs->event_thread->stop_pc ())
8755 {
8756 /* We're there already. Just stop stepping now. */
8757 end_stepping_range (ecs);
8758 }
8759 else
8760 {
8761 /* Else just reset the step range and keep going.
8762 No step-resume breakpoint, they don't work for
8763 epilogues, which can have multiple entry paths. */
8764 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
8765 ecs->event_thread->control.step_range_end = stop_func_sal.end;
8766 keep_going (ecs);
8767 }
8768 return;
8769}
8770
8771/* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
8772 This is used to both functions and to skip over code. */
8773
8774static void
8775insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
8776 struct symtab_and_line sr_sal,
8777 struct frame_id sr_id,
8778 enum bptype sr_type)
8779{
8780 /* There should never be more than one step-resume or longjmp-resume
8781 breakpoint per thread, so we should never be setting a new
8782 step_resume_breakpoint when one is already active. */
8783 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == nullptr);
8784 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
8785
8786 infrun_debug_printf ("inserting step-resume breakpoint at %s",
8787 paddress (gdbarch, sr_sal.pc));
8788
8789 inferior_thread ()->control.step_resume_breakpoint
8790 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type).release ();
8791}
8792
/* Insert a normal-priority "step-resume breakpoint" at SR_SAL with
   frame ID SR_ID.  Convenience wrapper around
   insert_step_resume_breakpoint_at_sal_1.  */

void
insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
				      struct symtab_and_line sr_sal,
				      struct frame_id sr_id)
{
  insert_step_resume_breakpoint_at_sal_1 (gdbarch,
					  sr_sal, sr_id,
					  bp_step_resume);
}
8802
8803/* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
8804 This is used to skip a potential signal handler.
8805
8806 This is called with the interrupted function's frame. The signal
8807 handler, when it returns, will resume the interrupted function at
8808 RETURN_FRAME.pc. */
8809
8810static void
8811insert_hp_step_resume_breakpoint_at_frame (const frame_info_ptr &return_frame)
8812{
8813 gdb_assert (return_frame != nullptr);
8814
8815 struct gdbarch *gdbarch = get_frame_arch (return_frame);
8816
8817 symtab_and_line sr_sal;
8818 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
8819 sr_sal.section = find_pc_overlay (sr_sal.pc);
8820 sr_sal.pspace = get_frame_program_space (return_frame);
8821
8822 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
8823 get_stack_frame_id (return_frame),
8824 bp_hp_step_resume);
8825}
8826
8827/* Insert a "step-resume breakpoint" at the previous frame's PC. This
8828 is used to skip a function after stepping into it (for "next" or if
8829 the called function has no debugging information).
8830
8831 The current function has almost always been reached by single
8832 stepping a call or return instruction. NEXT_FRAME belongs to the
8833 current function, and the breakpoint will be set at the caller's
8834 resume address.
8835
8836 This is a separate function rather than reusing
8837 insert_hp_step_resume_breakpoint_at_frame in order to avoid
8838 get_prev_frame, which may stop prematurely (see the implementation
8839 of frame_unwind_caller_id for an example). */
8840
8841static void
8842insert_step_resume_breakpoint_at_caller (const frame_info_ptr &next_frame)
8843{
8844 /* We shouldn't have gotten here if we don't know where the call site
8845 is. */
8846 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
8847
8848 struct gdbarch *gdbarch = frame_unwind_caller_arch (next_frame);
8849
8850 symtab_and_line sr_sal;
8851 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
8852 frame_unwind_caller_pc (next_frame));
8853 sr_sal.section = find_pc_overlay (sr_sal.pc);
8854 sr_sal.pspace = frame_unwind_program_space (next_frame);
8855
8856 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
8857 frame_unwind_caller_id (next_frame));
8858}
8859
8860/* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
8861 new breakpoint at the target of a jmp_buf. The handling of
8862 longjmp-resume uses the same mechanisms used for handling
8863 "step-resume" breakpoints. */
8864
8865static void
8866insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
8867{
8868 /* There should never be more than one longjmp-resume breakpoint per
8869 thread, so we should never be setting a new
8870 longjmp_resume_breakpoint when one is already active. */
8871 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == nullptr);
8872
8873 infrun_debug_printf ("inserting longjmp-resume breakpoint at %s",
8874 paddress (gdbarch, pc));
8875
8876 inferior_thread ()->control.exception_resume_breakpoint =
8877 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume).release ();
8878}
8879
8880/* Insert an exception resume breakpoint. TP is the thread throwing
8881 the exception. The block B is the block of the unwinder debug hook
8882 function. FRAME is the frame corresponding to the call to this
8883 function. SYM is the symbol of the function argument holding the
8884 target PC of the exception. */
8885
8886static void
8887insert_exception_resume_breakpoint (struct thread_info *tp,
8888 const struct block *b,
8889 const frame_info_ptr &frame,
8890 struct symbol *sym)
8891{
8892 try
8893 {
8894 struct block_symbol vsym;
8895 struct value *value;
8896 CORE_ADDR handler;
8897 struct breakpoint *bp;
8898
8899 vsym = lookup_symbol_search_name (sym->search_name (),
8900 b, SEARCH_VAR_DOMAIN);
8901 value = read_var_value (vsym.symbol, vsym.block, frame);
8902 /* If the value was optimized out, revert to the old behavior. */
8903 if (! value->optimized_out ())
8904 {
8905 handler = value_as_address (value);
8906
8907 infrun_debug_printf ("exception resume at %lx",
8908 (unsigned long) handler);
8909
8910 /* set_momentary_breakpoint_at_pc creates a thread-specific
8911 breakpoint for the current inferior thread. */
8912 gdb_assert (tp == inferior_thread ());
8913 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
8914 handler,
8915 bp_exception_resume).release ();
8916
8917 tp->control.exception_resume_breakpoint = bp;
8918 }
8919 }
8920 catch (const gdb_exception_error &e)
8921 {
8922 /* We want to ignore errors here. */
8923 }
8924}
8925
/* A helper for check_exception_resume that sets an
   exception-breakpoint based on a SystemTap probe.  */

static void
insert_exception_resume_from_probe (struct thread_info *tp,
				    const struct bound_probe *probe,
				    const frame_info_ptr &frame)
{
  struct value *arg_value;
  CORE_ADDR handler;
  struct breakpoint *bp;

  /* Probe argument 1 is the HANDLER address (argument 0 is the CFA,
     which we ignore).  Bail out if it can't be evaluated.  */
  arg_value = probe_safe_evaluate_at_pc (frame, 1);
  if (!arg_value)
    return;

  handler = value_as_address (arg_value);

  infrun_debug_printf ("exception resume at %s",
		       paddress (probe->objfile->arch (), handler));

  /* set_momentary_breakpoint_at_pc creates a thread-specific breakpoint
     for the current inferior thread.  */
  gdb_assert (tp == inferior_thread ());
  bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
				       handler, bp_exception_resume).release ();
  tp->control.exception_resume_breakpoint = bp;
}
8954
/* This is called when an exception has been intercepted.  Check to
   see whether the exception's destination is of interest, and if so,
   set an exception resume breakpoint there.  */

static void
check_exception_resume (struct execution_control_state *ecs,
			const frame_info_ptr &frame)
{
  struct bound_probe probe;
  struct symbol *func;

  /* First see if this exception unwinding breakpoint was set via a
     SystemTap probe point.  If so, the probe has two arguments: the
     CFA and the HANDLER.  We ignore the CFA, extract the handler, and
     set a breakpoint there.  */
  probe = find_probe_by_pc (get_frame_pc (frame));
  if (probe.prob)
    {
      insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
      return;
    }

  /* No probe; fall back to inspecting the debug hook function's
     arguments.  Without a function symbol there is nothing to do.  */
  func = get_frame_function (frame);
  if (!func)
    return;

  try
    {
      const struct block *b;
      int argno = 0;

      /* The exception breakpoint is a thread-specific breakpoint on
	 the unwinder's debug hook, declared as:

	 void _Unwind_DebugHook (void *cfa, void *handler);

	 The CFA argument indicates the frame to which control is
	 about to be transferred.  HANDLER is the destination PC.

	 We ignore the CFA and set a temporary breakpoint at HANDLER.
	 This is not extremely efficient but it avoids issues in gdb
	 with computing the DWARF CFA, and it also works even in weird
	 cases such as throwing an exception from inside a signal
	 handler.  */

      b = func->value_block ();
      for (struct symbol *sym : block_iterator_range (b))
	{
	  if (!sym->is_argument ())
	    continue;

	  /* Skip the first argument (the CFA); the second argument is
	     the handler we want.  */
	  if (argno == 0)
	    ++argno;
	  else
	    {
	      insert_exception_resume_breakpoint (ecs->event_thread,
						  b, frame, sym);
	      break;
	    }
	}
    }
  catch (const gdb_exception_error &e)
    {
      /* Lookup or read errors simply mean no breakpoint is set.  */
    }
}
9020
/* Mark the current event as fully handled: tell callers of the
   event-processing machinery not to wait for further inferior events
   for this round.  */

static void
stop_waiting (struct execution_control_state *ecs)
{
  infrun_debug_printf ("stop_waiting");

  /* Let callers know we don't want to wait for the inferior anymore.  */
  ecs->wait_some_more = 0;
}
9029
/* Like keep_going, but passes the signal to the inferior, even if the
   signal is set to nopass.  */

static void
keep_going_pass_signal (struct execution_control_state *ecs)
{
  /* The event thread must be the selected thread, and must not have
     been resumed yet.  */
  gdb_assert (ecs->event_thread->ptid == inferior_ptid);
  gdb_assert (!ecs->event_thread->resumed ());

  /* Save the pc before execution, to compare with pc after stop.  */
  ecs->event_thread->prev_pc
    = regcache_read_pc_protected (get_thread_regcache (ecs->event_thread));

  if (ecs->event_thread->control.trap_expected)
    {
      struct thread_info *tp = ecs->event_thread;

      infrun_debug_printf ("%s has trap_expected set, "
			   "resuming to collect trap",
			   tp->ptid.to_string ().c_str ());

      /* We haven't yet gotten our trap, and either: intercepted a
	 non-signal event (e.g., a fork); or took a signal which we
	 are supposed to pass through to the inferior.  Simply
	 continue.  */
      resume (ecs->event_thread->stop_signal ());
    }
  else if (step_over_info_valid_p ())
    {
      /* Another thread is stepping over a breakpoint in-line.  If
	 this thread needs a step-over too, queue the request.  In
	 either case, this resume must be deferred for later.  */
      struct thread_info *tp = ecs->event_thread;

      if (ecs->hit_singlestep_breakpoint
	  || thread_still_needs_step_over (tp))
	{
	  infrun_debug_printf ("step-over already in progress: "
			       "step-over for %s deferred",
			       tp->ptid.to_string ().c_str ());
	  global_thread_step_over_chain_enqueue (tp);
	}
      else
	infrun_debug_printf ("step-over in progress: resume of %s deferred",
			     tp->ptid.to_string ().c_str ());
    }
  else
    {
      regcache *regcache = get_thread_regcache (ecs->event_thread);
      int remove_bp;
      int remove_wps;
      step_over_what step_what;

      /* Either the trap was not expected, but we are continuing
	 anyway (if we got a signal, the user asked it be passed to
	 the child)
	 -- or --
	 We got our expected trap, but decided we should resume from
	 it.

	 We're going to run this baby now!

	 Note that insert_breakpoints won't try to re-insert
	 already inserted breakpoints.  Therefore, we don't
	 care if breakpoints were already inserted, or not.  */

      /* If we need to step over a breakpoint, and we're not using
	 displaced stepping to do so, insert all breakpoints
	 (watchpoints, etc.) but the one we're stepping over, step one
	 instruction, and then re-insert the breakpoint when that step
	 is finished.  */

      step_what = thread_still_needs_step_over (ecs->event_thread);

      remove_bp = (ecs->hit_singlestep_breakpoint
		   || (step_what & STEP_OVER_BREAKPOINT));
      remove_wps = (step_what & STEP_OVER_WATCHPOINT);

      /* We can't use displaced stepping if we need to step past a
	 watchpoint.  The instruction copied to the scratch pad would
	 still trigger the watchpoint.  */
      if (remove_bp
	  && (remove_wps || !use_displaced_stepping (ecs->event_thread)))
	{
	  set_step_over_info (ecs->event_thread->inf->aspace.get (),
			      regcache_read_pc (regcache), remove_wps,
			      ecs->event_thread->global_num);
	}
      else if (remove_wps)
	set_step_over_info (nullptr, 0, remove_wps, -1);

      /* If we now need to do an in-line step-over, we need to stop
	 all other threads.  Note this must be done before
	 insert_breakpoints below, because that removes the breakpoint
	 we're about to step over, otherwise other threads could miss
	 it.  */
      if (step_over_info_valid_p () && target_is_non_stop_p ())
	stop_all_threads ("starting in-line step-over");

      /* Stop stepping if inserting breakpoints fails.  */
      try
	{
	  insert_breakpoints ();
	}
      catch (const gdb_exception_error &e)
	{
	  exception_print (gdb_stderr, e);
	  stop_waiting (ecs);
	  clear_step_over_info ();
	  return;
	}

      ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);

      resume (ecs->event_thread->stop_signal ());
    }

  prepare_to_wait (ecs);
}
9149
9150/* Called when we should continue running the inferior, because the
9151 current event doesn't cause a user visible stop. This does the
9152 resuming part; waiting for the next event is done elsewhere. */
9153
9154static void
9155keep_going (struct execution_control_state *ecs)
9156{
9157 if (ecs->event_thread->control.trap_expected
9158 && ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
9159 ecs->event_thread->control.trap_expected = 0;
9160
9161 if (!signal_program[ecs->event_thread->stop_signal ()])
9162 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
9163 keep_going_pass_signal (ecs);
9164}
9165
9166/* This function normally comes after a resume, before
9167 handle_inferior_event exits. It takes care of any last bits of
9168 housekeeping, and sets the all-important wait_some_more flag. */
9169
static void
prepare_to_wait (struct execution_control_state *ecs)
{
  infrun_debug_printf ("prepare_to_wait");

  /* Tell the caller that more target events are expected before the
     user is shown a stop.  */
  ecs->wait_some_more = 1;

  /* If the target can't async, emulate it by marking the infrun event
     handler such that as soon as we get back to the event-loop, we
     immediately end up in fetch_inferior_event again calling
     target_wait.  */
  if (!target_can_async_p ())
    mark_infrun_async_event_handler ();
}
9184
9185/* We are done with the step range of a step/next/si/ni command.
9186 Called once for each n of a "step n" operation. */
9187
static void
end_stepping_range (struct execution_control_state *ecs)
{
  /* Record that the step/next command completed normally, so the stop
     is presented as "end of stepping range" rather than, say, a
     breakpoint hit.  */
  ecs->event_thread->control.stop_step = 1;
  stop_waiting (ecs);
}
9194
9195/* Several print_*_reason functions to print why the inferior has stopped.
9196 We always print something when the inferior exits, or receives a signal.
9197 The rest of the cases are dealt with later on in normal_stop and
9198 print_it_typical. Ideally there should be a call to one of these
   print_*_reason functions from handle_inferior_event each time
9200 stop_waiting is called.
9201
9202 Note that we don't call these directly, instead we delegate that to
9203 the interpreters, through observers. Interpreters then call these
9204 with whatever uiout is right. */
9205
void
print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
{
  annotate_signalled ();
  /* MI consumers additionally get a machine-readable stop reason.  */
  if (uiout->is_mi_like_p ())
    uiout->field_string
      ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
  uiout->text ("\nProgram terminated with signal ");
  annotate_signal_name ();
  uiout->field_string ("signal-name",
		       gdb_signal_to_name (siggnal));
  annotate_signal_name_end ();
  uiout->text (", ");
  annotate_signal_string ();
  uiout->field_string ("signal-meaning",
		       gdb_signal_to_string (siggnal));
  annotate_signal_string_end ();
  uiout->text (".\n");
  uiout->text ("The program no longer exists.\n");
}
9226
void
print_exited_reason (struct ui_out *uiout, int exitstatus)
{
  struct inferior *inf = current_inferior ();
  std::string pidstr = target_pid_to_str (ptid_t (inf->pid));

  annotate_exited (exitstatus);
  if (exitstatus)
    {
      if (uiout->is_mi_like_p ())
	uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED));
      /* The exit code is formatted in octal.  */
      std::string exit_code_str
	= string_printf ("0%o", (unsigned int) exitstatus);
      uiout->message ("[Inferior %s (%s) exited with code %pF]\n",
		      plongest (inf->num), pidstr.c_str (),
		      string_field ("exit-code", exit_code_str.c_str ()));
    }
  else
    {
      if (uiout->is_mi_like_p ())
	uiout->field_string
	  ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
      uiout->message ("[Inferior %s (%s) exited normally]\n",
		      plongest (inf->num), pidstr.c_str ());
    }
}
9253
void
print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
{
  struct thread_info *thr = inferior_thread ();

  infrun_debug_printf ("signal = %s", gdb_signal_to_string (siggnal));

  annotate_signal ();

  /* MI output gets no leading "Thread N"/"Program" text; only the
     fields emitted below.  */
  if (uiout->is_mi_like_p ())
    ;
  else if (show_thread_that_caused_stop ())
    {
      uiout->text ("\nThread ");
      uiout->field_string ("thread-id", print_thread_id (thr));

      const char *name = thread_name (thr);
      if (name != nullptr)
	{
	  uiout->text (" \"");
	  uiout->field_string ("name", name);
	  uiout->text ("\"");
	}
    }
  else
    uiout->text ("\nProgram");

  if (siggnal == GDB_SIGNAL_0 && !uiout->is_mi_like_p ())
    uiout->text (" stopped");
  else
    {
      uiout->text (" received signal ");
      annotate_signal_name ();
      if (uiout->is_mi_like_p ())
	uiout->field_string
	  ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
      uiout->field_string ("signal-name", gdb_signal_to_name (siggnal));
      annotate_signal_name_end ();
      uiout->text (", ");
      annotate_signal_string ();
      uiout->field_string ("signal-meaning", gdb_signal_to_string (siggnal));

      /* Give the architecture a chance to append extra, target-specific
	 information about this signal, if it provides any.  */
      regcache *regcache = get_thread_regcache (thr);
      struct gdbarch *gdbarch = regcache->arch ();
      if (gdbarch_report_signal_info_p (gdbarch))
	gdbarch_report_signal_info (gdbarch, uiout, siggnal);

      annotate_signal_string_end ();
    }
  uiout->text (".\n");
}
9305
9306void
9307print_no_history_reason (struct ui_out *uiout)
9308{
9309 if (uiout->is_mi_like_p ())
9310 uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_NO_HISTORY));
9311 else if (execution_direction == EXEC_FORWARD)
9312 uiout->text ("\nReached end of recorded history; stopping.\nFollowing "
9313 "forward execution will be added to history.\n");
9314 else
9315 {
9316 gdb_assert (execution_direction == EXEC_REVERSE);
9317 uiout->text ("\nReached end of recorded history; stopping.\nBackward "
9318 "execution from here not possible.\n");
9319 }
9320}
9321
9322/* Print current location without a level number, if we have changed
9323 functions or hit a breakpoint. Print source line if we have one.
9324 bpstat_print contains the logic deciding in detail what to print,
9325 based on the event(s) that just occurred. */
9326
static void
print_stop_location (const target_waitstatus &ws)
{
  int bpstat_ret;
  enum print_what source_flag;
  int do_frame_printing = 1;
  struct thread_info *tp = inferior_thread ();

  /* Let the breakpoint machinery decide how much to print for the
     event(s) that caused the stop.  */
  bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws.kind ());
  switch (bpstat_ret)
    {
    case PRINT_UNKNOWN:
      /* The breakpoint machinery had nothing to say; decide from the
	 stepping state instead.

	 FIXME: cagney/2002-12-01: Given that a frame ID does (or
	 should) carry around the function and does (or should) use
	 that when doing a frame comparison.  */
      if (tp->control.stop_step
	  && (tp->control.step_frame_id
	      == get_frame_id (get_current_frame ()))
	  && (tp->control.step_start_function
	      == find_pc_function (tp->stop_pc ())))
	{
	  symtab_and_line sal = find_frame_sal (get_selected_frame (nullptr));
	  if (sal.symtab != tp->current_symtab)
	    {
	      /* Finished step in same frame but into different file, print
		 location and source line.  */
	      source_flag = SRC_AND_LOC;
	    }
	  else
	    {
	      /* Finished step in same frame and same file, just print source
		 line.  */
	      source_flag = SRC_LINE;
	    }
	}
      else
	{
	  /* Finished step into different frame, print location and source
	     line.  */
	  source_flag = SRC_AND_LOC;
	}
      break;
    case PRINT_SRC_AND_LOC:
      /* Print location and source line.  */
      source_flag = SRC_AND_LOC;
      break;
    case PRINT_SRC_ONLY:
      source_flag = SRC_LINE;
      break;
    case PRINT_NOTHING:
      /* Something bogus.  */
      source_flag = SRC_LINE;
      do_frame_printing = 0;
      break;
    default:
      internal_error (_("Unknown value."));
    }

  /* The behavior of this routine with respect to the source
     flag is:
     SRC_LINE: Print only source line
     LOCATION: Print only location
     SRC_AND_LOC: Print location and source line.  */
  if (do_frame_printing)
    print_stack_frame (get_selected_frame (nullptr), 0, source_flag, 1);
}
9393
9394/* See `print_stop_event` in infrun.h. */
9395
static void
do_print_stop_event (struct ui_out *uiout, bool displays)
{
  struct target_waitstatus last;
  struct thread_info *tp;

  get_last_target_status (nullptr, nullptr, &last);

  {
    /* Redirect current_uiout to UIOUT while printing the stop location
       and the auto-display expressions.  */
    scoped_restore save_uiout = make_scoped_restore (&current_uiout, uiout);

    print_stop_location (last);

    /* Display the auto-display expressions.  */
    if (displays)
      do_displays ();
  }

  tp = inferior_thread ();
  if (tp->thread_fsm () != nullptr
      && tp->thread_fsm ()->finished_p ())
    {
      struct return_value_info *rv;

      /* The thread's command state machine finished; print the return
	 value it captured, if any.  */
      rv = tp->thread_fsm ()->return_value ();
      if (rv != nullptr)
	print_return_value (uiout, rv);
    }
}
9425
9426/* See infrun.h. This function itself sets up buffered output for the
9427 duration of do_print_stop_event, which performs the actual event
9428 printing. */
9429
void
print_stop_event (struct ui_out *uiout, bool displays)
{
  /* Buffer the output; do_print_stop_event performs the actual
     printing.  */
  do_with_buffered_output (do_print_stop_event, uiout, displays);
}
9435
9436/* See infrun.h. */
9437
9438void
9439maybe_remove_breakpoints (void)
9440{
9441 if (!breakpoints_should_be_inserted_now () && target_has_execution ())
9442 {
9443 if (remove_breakpoints ())
9444 {
9445 target_terminal::ours_for_output ();
9446 gdb_printf (_("Cannot remove breakpoints because "
9447 "program is no longer writable.\nFurther "
9448 "execution is probably impossible.\n"));
9449 }
9450 }
9451}
9452
9453/* The execution context that just caused a normal stop. */
9454
struct stop_context
{
  stop_context ();

  DISABLE_COPY_AND_ASSIGN (stop_context);

  /* Return true if the current execution context no longer matches
     this saved context.  */
  bool changed () const;

  /* The stop ID at the time of the stop (see get_stop_id).  */
  ULONGEST stop_id;

  /* The event PTID.  */

  ptid_t ptid;

  /* If stopped for a thread event, this is the thread that caused the
     stop.  */
  thread_info_ref thread;

  /* The inferior that caused the stop.  */
  int inf_num;
};
9477
9478/* Initializes a new stop context. If stopped for a thread event, this
9479 takes a strong reference to the thread. */
9480
stop_context::stop_context ()
{
  /* Snapshot the identifiers of the current stop.  */
  stop_id = get_stop_id ();
  ptid = inferior_ptid;
  inf_num = current_inferior ()->num;

  if (inferior_ptid != null_ptid)
    {
      /* Take a strong reference so that the thread can't be deleted
	 yet.  */
      thread = thread_info_ref::new_reference (inferior_thread ());
    }
}
9494
9495/* Return true if the current context no longer matches the saved stop
9496 context. */
9497
9498bool
9499stop_context::changed () const
9500{
9501 if (ptid != inferior_ptid)
9502 return true;
9503 if (inf_num != current_inferior ()->num)
9504 return true;
9505 if (thread != nullptr && thread->state != THREAD_STOPPED)
9506 return true;
9507 if (get_stop_id () != stop_id)
9508 return true;
9509 return false;
9510}
9511
9512/* See infrun.h. */
9513
bool
normal_stop ()
{
  struct target_waitstatus last;

  get_last_target_status (nullptr, nullptr, &last);

  /* Start a new stop epoch; stop_context compares this ID to detect
     re-resumption (see stop_context::changed).  */
  new_stop_id ();

  /* If an exception is thrown from this point on, make sure to
     propagate GDB's knowledge of the executing state to the
     frontend/user running state.  A QUIT is an easy exception to see
     here, so do this before any filtered output.  */

  ptid_t finish_ptid = null_ptid;

  if (!non_stop)
    finish_ptid = minus_one_ptid;
  else if (last.kind () == TARGET_WAITKIND_SIGNALLED
	   || last.kind () == TARGET_WAITKIND_EXITED)
    {
      /* On some targets, we may still have live threads in the
	 inferior when we get a process exit event.  E.g., for
	 "checkpoint", when the current checkpoint/fork exits,
	 linux-fork.c automatically switches to another fork from
	 within target_mourn_inferior.  */
      if (inferior_ptid != null_ptid)
	finish_ptid = ptid_t (inferior_ptid.pid ());
    }
  else if (last.kind () != TARGET_WAITKIND_NO_RESUMED
	   && last.kind () != TARGET_WAITKIND_THREAD_EXITED)
    finish_ptid = inferior_ptid;

  std::optional<scoped_finish_thread_state> maybe_finish_thread_state;
  if (finish_ptid != null_ptid)
    {
      maybe_finish_thread_state.emplace
	(user_visible_resume_target (finish_ptid), finish_ptid);
    }

  /* As we're presenting a stop, and potentially removing breakpoints,
     update the thread list so we can tell whether there are threads
     running on the target.  With target remote, for example, we can
     only learn about new threads when we explicitly update the thread
     list.  Do this before notifying the interpreters about signal
     stops, end of stepping ranges, etc., so that the "new thread"
     output is emitted before e.g., "Program received signal FOO",
     instead of after.  */
  update_thread_list ();

  if (last.kind () == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
    notify_signal_received (inferior_thread ()->stop_signal ());

  /* As with the notification of thread events, we want to delay
     notifying the user that we've switched thread context until
     the inferior actually stops.

     There's no point in saying anything if the inferior has exited.
     Note that SIGNALLED here means "exited with a signal", not
     "received a signal".

     Also skip saying anything in non-stop mode.  In that mode, as we
     don't want GDB to switch threads behind the user's back, to avoid
     races where the user is typing a command to apply to thread x,
     but GDB switches to thread y before the user finishes entering
     the command, fetch_inferior_event installs a cleanup to restore
     the current thread back to the thread the user had selected right
     after this event is handled, so we're not really switching, only
     informing of a stop.  */
  if (!non_stop)
    {
      if ((last.kind () != TARGET_WAITKIND_SIGNALLED
	   && last.kind () != TARGET_WAITKIND_EXITED
	   && last.kind () != TARGET_WAITKIND_NO_RESUMED
	   && last.kind () != TARGET_WAITKIND_THREAD_EXITED)
	  && target_has_execution ()
	  && previous_thread != inferior_thread ())
	{
	  SWITCH_THRU_ALL_UIS ()
	    {
	      target_terminal::ours_for_output ();
	      gdb_printf (_("[Switching to %s]\n"),
			  target_pid_to_str (inferior_ptid).c_str ());
	      annotate_thread_changed ();
	    }
	}

      /* Record the current thread for the next switch check above.  */
      update_previous_thread ();
    }

  if (last.kind () == TARGET_WAITKIND_NO_RESUMED
      || last.kind () == TARGET_WAITKIND_THREAD_EXITED)
    {
      /* There is no frame to present for these event kinds.  */
      stop_print_frame = false;

      SWITCH_THRU_ALL_UIS ()
	if (current_ui->prompt_state == PROMPT_BLOCKED)
	  {
	    target_terminal::ours_for_output ();
	    if (last.kind () == TARGET_WAITKIND_NO_RESUMED)
	      gdb_printf (_("No unwaited-for children left.\n"));
	    else if (last.kind () == TARGET_WAITKIND_THREAD_EXITED)
	      gdb_printf (_("Command aborted, thread exited.\n"));
	    else
	      gdb_assert_not_reached ("unhandled");
	  }
    }

  /* Note: this depends on the update_thread_list call above.  */
  maybe_remove_breakpoints ();

  /* If an auto-display called a function and that got a signal,
     delete that auto-display to avoid an infinite recursion.  */

  if (stopped_by_random_signal)
    disable_current_display ();

  SWITCH_THRU_ALL_UIS ()
    {
      async_enable_stdin ();
    }

  /* Let the user/frontend see the threads as stopped.  */
  maybe_finish_thread_state.reset ();

  /* Select innermost stack frame - i.e., current frame is frame 0,
     and current location is based on that.  Handle the case where the
     dummy call is returning after being stopped.  E.g. the dummy call
     previously hit a breakpoint.  (If the dummy call returns
     normally, we won't reach here.)  Do this before the stop hook is
     run, so that it doesn't get to see the temporary dummy frame,
     which is not where we'll present the stop.  */
  if (has_stack_frames ())
    {
      if (stop_stack_dummy == STOP_STACK_DUMMY)
	{
	  /* Pop the empty frame that contains the stack dummy.  This
	     also restores inferior state prior to the call (struct
	     infcall_suspend_state).  */
	  frame_info_ptr frame = get_current_frame ();

	  gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
	  frame_pop (frame);
	  /* frame_pop calls reinit_frame_cache as the last thing it
	     does which means there's now no selected frame.  */
	}

      select_frame (get_current_frame ());

      /* Set the current source location.  */
      set_current_sal_from_frame (get_current_frame ());
    }

  /* Look up the hook_stop and run it (CLI internally handles problem
     of stop_command's pre-hook not existing).  */
  stop_context saved_context;

  try
    {
      execute_cmd_pre_hook (stop_command);
    }
  catch (const gdb_exception_error &ex)
    {
      exception_fprintf (gdb_stderr, ex,
			 "Error while running hook_stop:\n");
    }

  /* If the stop hook resumes the target, then there's no point in
     trying to notify about the previous stop; its context is
     gone.  Likewise if the command switches thread or inferior --
     the observers would print a stop for the wrong
     thread/inferior.  Returning true tells the caller the stop was
     not announced.  */
  if (saved_context.changed ())
    return true;

  /* Notify observers about the stop.  This is where the interpreters
     print the stop event.  */
  notify_normal_stop ((inferior_ptid != null_ptid
		       ? inferior_thread ()->control.stop_bpstat
		       : nullptr),
		      stop_print_frame);
  annotate_stopped ();

  if (target_has_execution ())
    {
      if (last.kind () != TARGET_WAITKIND_SIGNALLED
	  && last.kind () != TARGET_WAITKIND_EXITED
	  && last.kind () != TARGET_WAITKIND_NO_RESUMED
	  && last.kind () != TARGET_WAITKIND_THREAD_EXITED)
	/* Delete the breakpoint we stopped at, if it wants to be deleted.
	   Delete any breakpoint that is to be deleted at the next stop.  */
	breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
    }

  return false;
}
9710\f
/* Return non-zero if GDB stops when the inferior receives signal
   SIGNO (the "handle" command's stop/nostop setting).  */

int
signal_stop_state (int signo)
{
  return signal_stop[signo];
}
9716
/* Return non-zero if GDB prints a message when the inferior receives
   signal SIGNO (the "handle" command's print/noprint setting).  */

int
signal_print_state (int signo)
{
  return signal_print[signo];
}
9722
/* Return non-zero if GDB passes signal SIGNO on to the inferior (the
   "handle" command's pass/nopass setting).  */

int
signal_pass_state (int signo)
{
  return signal_program[signo];
}
9728
9729static void
9730signal_cache_update (int signo)
9731{
9732 if (signo == -1)
9733 {
9734 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
9735 signal_cache_update (signo);
9736
9737 return;
9738 }
9739
9740 signal_pass[signo] = (signal_stop[signo] == 0
9741 && signal_print[signo] == 0
9742 && signal_program[signo] == 1
9743 && signal_catch[signo] == 0);
9744}
9745
9746int
9747signal_stop_update (int signo, int state)
9748{
9749 int ret = signal_stop[signo];
9750
9751 signal_stop[signo] = state;
9752 signal_cache_update (signo);
9753 return ret;
9754}
9755
9756int
9757signal_print_update (int signo, int state)
9758{
9759 int ret = signal_print[signo];
9760
9761 signal_print[signo] = state;
9762 signal_cache_update (signo);
9763 return ret;
9764}
9765
9766int
9767signal_pass_update (int signo, int state)
9768{
9769 int ret = signal_program[signo];
9770
9771 signal_program[signo] = state;
9772 signal_cache_update (signo);
9773 return ret;
9774}
9775
9776/* Update the global 'signal_catch' from INFO and notify the
9777 target. */
9778
9779void
9780signal_catch_update (const unsigned int *info)
9781{
9782 int i;
9783
9784 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
9785 signal_catch[i] = info[i] > 0;
9786 signal_cache_update (-1);
9787 target_pass_signals (signal_pass);
9788}
9789
/* Print the column headers for the signal table shown by "info
   signals" and by "handle" feedback.  */

static void
sig_print_header (void)
{
  gdb_printf (_("Signal Stop\tPrint\tPass "
		"to program\tDescription\n"));
}
9796
9797static void
9798sig_print_info (enum gdb_signal oursig)
9799{
9800 const char *name = gdb_signal_to_name (oursig);
9801 int name_padding = 13 - strlen (name);
9802
9803 if (name_padding <= 0)
9804 name_padding = 0;
9805
9806 gdb_printf ("%s", name);
9807 gdb_printf ("%*.*s ", name_padding, name_padding, " ");
9808 gdb_printf ("%s\t", signal_stop[oursig] ? "Yes" : "No");
9809 gdb_printf ("%s\t", signal_print[oursig] ? "Yes" : "No");
9810 gdb_printf ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
9811 gdb_printf ("%s\n", gdb_signal_to_string (oursig));
9812}
9813
9814/* Specify how various signals in the inferior should be handled. */
9815
9816static void
9817handle_command (const char *args, int from_tty)
9818{
9819 int digits, wordlen;
9820 int sigfirst, siglast;
9821 enum gdb_signal oursig;
9822 int allsigs;
9823
9824 if (args == nullptr)
9825 {
9826 error_no_arg (_("signal to handle"));
9827 }
9828
9829 /* Allocate and zero an array of flags for which signals to handle. */
9830
9831 const size_t nsigs = GDB_SIGNAL_LAST;
9832 unsigned char sigs[nsigs] {};
9833
9834 /* Break the command line up into args. */
9835
9836 gdb_argv built_argv (args);
9837
9838 /* Walk through the args, looking for signal oursigs, signal names, and
9839 actions. Signal numbers and signal names may be interspersed with
9840 actions, with the actions being performed for all signals cumulatively
9841 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
9842
9843 for (char *arg : built_argv)
9844 {
9845 wordlen = strlen (arg);
9846 for (digits = 0; isdigit (arg[digits]); digits++)
9847 {;
9848 }
9849 allsigs = 0;
9850 sigfirst = siglast = -1;
9851
9852 if (wordlen >= 1 && !strncmp (arg, "all", wordlen))
9853 {
9854 /* Apply action to all signals except those used by the
9855 debugger. Silently skip those. */
9856 allsigs = 1;
9857 sigfirst = 0;
9858 siglast = nsigs - 1;
9859 }
9860 else if (wordlen >= 1 && !strncmp (arg, "stop", wordlen))
9861 {
9862 SET_SIGS (nsigs, sigs, signal_stop);
9863 SET_SIGS (nsigs, sigs, signal_print);
9864 }
9865 else if (wordlen >= 1 && !strncmp (arg, "ignore", wordlen))
9866 {
9867 UNSET_SIGS (nsigs, sigs, signal_program);
9868 }
9869 else if (wordlen >= 2 && !strncmp (arg, "print", wordlen))
9870 {
9871 SET_SIGS (nsigs, sigs, signal_print);
9872 }
9873 else if (wordlen >= 2 && !strncmp (arg, "pass", wordlen))
9874 {
9875 SET_SIGS (nsigs, sigs, signal_program);
9876 }
9877 else if (wordlen >= 3 && !strncmp (arg, "nostop", wordlen))
9878 {
9879 UNSET_SIGS (nsigs, sigs, signal_stop);
9880 }
9881 else if (wordlen >= 3 && !strncmp (arg, "noignore", wordlen))
9882 {
9883 SET_SIGS (nsigs, sigs, signal_program);
9884 }
9885 else if (wordlen >= 4 && !strncmp (arg, "noprint", wordlen))
9886 {
9887 UNSET_SIGS (nsigs, sigs, signal_print);
9888 UNSET_SIGS (nsigs, sigs, signal_stop);
9889 }
9890 else if (wordlen >= 4 && !strncmp (arg, "nopass", wordlen))
9891 {
9892 UNSET_SIGS (nsigs, sigs, signal_program);
9893 }
9894 else if (digits > 0)
9895 {
9896 /* It is numeric. The numeric signal refers to our own
9897 internal signal numbering from target.h, not to host/target
9898 signal number. This is a feature; users really should be
9899 using symbolic names anyway, and the common ones like
9900 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
9901
9902 sigfirst = siglast = (int)
9903 gdb_signal_from_command (atoi (arg));
9904 if (arg[digits] == '-')
9905 {
9906 siglast = (int)
9907 gdb_signal_from_command (atoi (arg + digits + 1));
9908 }
9909 if (sigfirst > siglast)
9910 {
9911 /* Bet he didn't figure we'd think of this case... */
9912 std::swap (sigfirst, siglast);
9913 }
9914 }
9915 else
9916 {
9917 oursig = gdb_signal_from_name (arg);
9918 if (oursig != GDB_SIGNAL_UNKNOWN)
9919 {
9920 sigfirst = siglast = (int) oursig;
9921 }
9922 else
9923 {
9924 /* Not a number and not a recognized flag word => complain. */
9925 error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg);
9926 }
9927 }
9928
9929 /* If any signal numbers or symbol names were found, set flags for
9930 which signals to apply actions to. */
9931
9932 for (int signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
9933 {
9934 switch ((enum gdb_signal) signum)
9935 {
9936 case GDB_SIGNAL_TRAP:
9937 case GDB_SIGNAL_INT:
9938 if (!allsigs && !sigs[signum])
9939 {
9940 if (query (_("%s is used by the debugger.\n\
9941Are you sure you want to change it? "),
9942 gdb_signal_to_name ((enum gdb_signal) signum)))
9943 {
9944 sigs[signum] = 1;
9945 }
9946 else
9947 gdb_printf (_("Not confirmed, unchanged.\n"));
9948 }
9949 break;
9950 case GDB_SIGNAL_0:
9951 case GDB_SIGNAL_DEFAULT:
9952 case GDB_SIGNAL_UNKNOWN:
9953 /* Make sure that "all" doesn't print these. */
9954 break;
9955 default:
9956 sigs[signum] = 1;
9957 break;
9958 }
9959 }
9960 }
9961
9962 for (int signum = 0; signum < nsigs; signum++)
9963 if (sigs[signum])
9964 {
9965 signal_cache_update (-1);
9966 target_pass_signals (signal_pass);
9967 target_program_signals (signal_program);
9968
9969 if (from_tty)
9970 {
9971 /* Show the results. */
9972 sig_print_header ();
9973 for (; signum < nsigs; signum++)
9974 if (sigs[signum])
9975 sig_print_info ((enum gdb_signal) signum);
9976 }
9977
9978 break;
9979 }
9980}
9981
9982/* Complete the "handle" command. */
9983
static void
handle_completer (struct cmd_list_element *ignore,
		  completion_tracker &tracker,
		  const char *text, const char *word)
{
  /* The action keywords accepted by the "handle" command; must match
     the keywords recognized in handle_command.  */
  static const char * const keywords[] =
    {
      "all",
      "stop",
      "ignore",
      "print",
      "pass",
      "nostop",
      "noignore",
      "noprint",
      "nopass",
      nullptr,
    };

  /* Offer both signal names and the action keywords.  */
  signal_completer (ignore, tracker, text, word);
  complete_on_enum (tracker, keywords, word, word);
}
10006
10007enum gdb_signal
10008gdb_signal_from_command (int num)
10009{
10010 if (num >= 1 && num <= 15)
10011 return (enum gdb_signal) num;
10012 error (_("Only signals 1-15 are valid as numeric signals.\n\
10013Use \"info signals\" for a list of symbolic signals."));
10014}
10015
10016/* Print current contents of the tables set by the handle command.
10017 It is possible we should just be printing signals actually used
10018 by the current target (but for things to work right when switching
10019 targets, all signals should be in the signal tables). */
10020
static void
info_signals_command (const char *signum_exp, int from_tty)
{
  enum gdb_signal oursig;

  sig_print_header ();

  if (signum_exp)
    {
      /* A single signal was requested.  First see if this is a symbol
	 name.  */
      oursig = gdb_signal_from_name (signum_exp);
      if (oursig == GDB_SIGNAL_UNKNOWN)
	{
	  /* No, try numeric.  */
	  oursig =
	    gdb_signal_from_command (parse_and_eval_long (signum_exp));
	}
      sig_print_info (oursig);
      return;
    }

  gdb_printf ("\n");
  /* These ugly casts brought to you by the native VAX compiler.  */
  for (oursig = GDB_SIGNAL_FIRST;
       (int) oursig < (int) GDB_SIGNAL_LAST;
       oursig = (enum gdb_signal) ((int) oursig + 1))
    {
      /* Let the user interrupt the (long) listing.  */
      QUIT;

      /* Skip the internal sentinel entries.  */
      if (oursig != GDB_SIGNAL_UNKNOWN
	  && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
	sig_print_info (oursig);
    }

  gdb_printf (_("\nUse the \"%ps\" command to change these tables.\n"),
	      styled_string (command_style.style (), "handle"));
}
10058
10059/* The $_siginfo convenience variable is a bit special. We don't know
10060 for sure the type of the value until we actually have a chance to
10061 fetch the data. The type can change depending on gdbarch, so it is
   also dependent on which thread you have selected.  Two possible
   implementations were considered:

   1. making $_siginfo be an internalvar that creates a new value on
   access.

   2. making the value of $_siginfo be an lval_computed value.

   The second approach is used: siginfo_make_value below returns an
   lval_computed value backed by siginfo_value_funcs.  */
10068
10069/* This function implements the lval_computed support for reading a
10070 $_siginfo value. */
10071
10072static void
10073siginfo_value_read (struct value *v)
10074{
10075 LONGEST transferred;
10076
10077 /* If we can access registers, so can we access $_siginfo. Likewise
10078 vice versa. */
10079 validate_registers_access ();
10080
10081 transferred =
10082 target_read (current_inferior ()->top_target (),
10083 TARGET_OBJECT_SIGNAL_INFO,
10084 nullptr,
10085 v->contents_all_raw ().data (),
10086 v->offset (),
10087 v->type ()->length ());
10088
10089 if (transferred != v->type ()->length ())
10090 error (_("Unable to read siginfo"));
10091}
10092
10093/* This function implements the lval_computed support for writing a
10094 $_siginfo value. */
10095
10096static void
10097siginfo_value_write (struct value *v, struct value *fromval)
10098{
10099 LONGEST transferred;
10100
10101 /* If we can access registers, so can we access $_siginfo. Likewise
10102 vice versa. */
10103 validate_registers_access ();
10104
10105 transferred = target_write (current_inferior ()->top_target (),
10106 TARGET_OBJECT_SIGNAL_INFO,
10107 nullptr,
10108 fromval->contents_all_raw ().data (),
10109 v->offset (),
10110 fromval->type ()->length ());
10111
10112 if (transferred != fromval->type ()->length ())
10113 error (_("Unable to write siginfo"));
10114}
10115
/* Read/write callbacks backing the computed $_siginfo value.  */

static const struct lval_funcs siginfo_value_funcs =
  {
    siginfo_value_read,
    siginfo_value_write
  };
10121
10122/* Return a new value with the correct type for the siginfo object of
10123 the current thread using architecture GDBARCH. Return a void value
10124 if there's no object available. */
10125
10126static struct value *
10127siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
10128 void *ignore)
10129{
10130 if (target_has_stack ()
10131 && inferior_ptid != null_ptid
10132 && gdbarch_get_siginfo_type_p (gdbarch))
10133 {
10134 struct type *type = gdbarch_get_siginfo_type (gdbarch);
10135
10136 return value::allocate_computed (type, &siginfo_value_funcs, nullptr);
10137 }
10138
10139 return value::allocate (builtin_type (gdbarch)->builtin_void);
10140}
10141
10142\f
10143/* infcall_suspend_state contains state about the program itself like its
10144 registers and any signal it received when it last stopped.
10145 This state must be restored regardless of how the inferior function call
10146 ends (either successfully, or after it hits a breakpoint or signal)
10147 if the program is to properly continue where it left off. */
10148
class infcall_suspend_state
{
public:
  /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
     once the inferior function call has finished.  */
  infcall_suspend_state (struct gdbarch *gdbarch,
			 const struct thread_info *tp,
			 struct regcache *regcache)
    : m_registers (new readonly_detached_regcache (*regcache))
  {
    tp->save_suspend_to (m_thread_suspend);

    gdb::unique_xmalloc_ptr<gdb_byte> siginfo_data;

    /* Also snapshot the siginfo object, when the architecture
       describes one.  */
    if (gdbarch_get_siginfo_type_p (gdbarch))
      {
	struct type *type = gdbarch_get_siginfo_type (gdbarch);
	size_t len = type->length ();

	siginfo_data.reset ((gdb_byte *) xmalloc (len));

	if (target_read (current_inferior ()->top_target (),
			 TARGET_OBJECT_SIGNAL_INFO, nullptr,
			 siginfo_data.get (), 0, len) != len)
	  {
	    /* Errors ignored.  */
	    siginfo_data.reset (nullptr);
	  }
      }

    if (siginfo_data)
      {
	m_siginfo_gdbarch = gdbarch;
	m_siginfo_data = std::move (siginfo_data);
      }
  }

  /* Return a pointer to the stored register state.  */

  readonly_detached_regcache *registers () const
  {
    return m_registers.get ();
  }

  /* Restores the stored state into GDBARCH, TP, and REGCACHE.  */

  void restore (struct gdbarch *gdbarch,
		struct thread_info *tp,
		struct regcache *regcache) const
  {
    tp->restore_suspend_from (m_thread_suspend);

    /* Only restore siginfo when it was captured for this same
       architecture -- the byte layout is gdbarch-specific.  */
    if (m_siginfo_gdbarch == gdbarch)
      {
	struct type *type = gdbarch_get_siginfo_type (gdbarch);

	/* Errors ignored.  */
	target_write (current_inferior ()->top_target (),
		      TARGET_OBJECT_SIGNAL_INFO, nullptr,
		      m_siginfo_data.get (), 0, type->length ());
      }

    /* The inferior can be gone if the user types "print exit(0)"
       (and perhaps other times).  */
    if (target_has_execution ())
      /* NB: The register write goes through to the target.  */
      regcache->restore (registers ());
  }

private:
  /* How the current thread stopped before the inferior function call was
     executed.  */
  struct thread_suspend_state m_thread_suspend;

  /* The registers before the inferior function call was executed.  */
  std::unique_ptr<readonly_detached_regcache> m_registers;

  /* Format of SIGINFO_DATA or NULL if it is not present.  */
  struct gdbarch *m_siginfo_gdbarch = nullptr;

  /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
     gdbarch_get_siginfo_type ()->length ().  For different gdbarch the
     content would be invalid.  */
  gdb::unique_xmalloc_ptr<gdb_byte> m_siginfo_data;
};
10234
10235infcall_suspend_state_up
10236save_infcall_suspend_state ()
10237{
10238 struct thread_info *tp = inferior_thread ();
10239 regcache *regcache = get_thread_regcache (tp);
10240 struct gdbarch *gdbarch = regcache->arch ();
10241
10242 infcall_suspend_state_up inf_state
10243 (new struct infcall_suspend_state (gdbarch, tp, regcache));
10244
10245 /* Having saved the current state, adjust the thread state, discarding
10246 any stop signal information. The stop signal is not useful when
10247 starting an inferior function call, and run_inferior_call will not use
10248 the signal due to its `proceed' call with GDB_SIGNAL_0. */
10249 tp->set_stop_signal (GDB_SIGNAL_0);
10250
10251 return inf_state;
10252}
10253
10254/* Restore inferior session state to INF_STATE. */
10255
10256void
10257restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
10258{
10259 struct thread_info *tp = inferior_thread ();
10260 regcache *regcache = get_thread_regcache (inferior_thread ());
10261 struct gdbarch *gdbarch = regcache->arch ();
10262
10263 inf_state->restore (gdbarch, tp, regcache);
10264 discard_infcall_suspend_state (inf_state);
10265}
10266
/* Free INF_STATE without restoring anything from it.  */

void
discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  delete inf_state;
}
10272
/* Return the register state captured in INF_STATE.  The returned
   regcache remains owned by INF_STATE.  */

readonly_detached_regcache *
get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
{
  return inf_state->registers ();
}
10278
/* infcall_control_state contains state regarding gdb's control of the
   inferior itself like stepping control.  It also contains session state like
   the user's currently selected frame.  */

struct infcall_control_state
{
  /* Per-thread stepping/breakpoint control state saved from
     thread_info::control.  */
  struct thread_control_state thread_control;
  /* Per-inferior control state saved from inferior::control.  */
  struct inferior_control_state inferior_control;

  /* Other fields:  */
  enum stop_stack_kind stop_stack_dummy = STOP_NONE;
  int stopped_by_random_signal = 0;

  /* ID and level of the selected frame when the inferior function
     call was made.  */
  struct frame_id selected_frame_id {};
  int selected_frame_level = -1;
};
10297
10298/* Save all of the information associated with the inferior<==>gdb
10299 connection. */
10300
10301infcall_control_state_up
10302save_infcall_control_state ()
10303{
10304 infcall_control_state_up inf_status (new struct infcall_control_state);
10305 struct thread_info *tp = inferior_thread ();
10306 struct inferior *inf = current_inferior ();
10307
10308 inf_status->thread_control = tp->control;
10309 inf_status->inferior_control = inf->control;
10310
10311 tp->control.step_resume_breakpoint = nullptr;
10312 tp->control.exception_resume_breakpoint = nullptr;
10313
10314 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
10315 chain. If caller's caller is walking the chain, they'll be happier if we
10316 hand them back the original chain when restore_infcall_control_state is
10317 called. */
10318 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
10319
10320 /* Other fields: */
10321 inf_status->stop_stack_dummy = stop_stack_dummy;
10322 inf_status->stopped_by_random_signal = stopped_by_random_signal;
10323
10324 save_selected_frame (&inf_status->selected_frame_id,
10325 &inf_status->selected_frame_level);
10326
10327 return inf_status;
10328}
10329
/* Restore inferior session state to INF_STATUS, then free INF_STATUS
   (it must not be used afterwards).  */

void
restore_infcall_control_state (struct infcall_control_state *inf_status)
{
  struct thread_info *tp = inferior_thread ();
  struct inferior *inf = current_inferior ();

  /* Before the saved control state overwrites TP->control below, mark
     the breakpoints currently referenced there for deletion at the
     next stop, so they are not leaked.  */
  if (tp->control.step_resume_breakpoint)
    tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;

  if (tp->control.exception_resume_breakpoint)
    tp->control.exception_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  /* Handle the bpstat_copy of the chain.  */
  bpstat_clear (&tp->control.stop_bpstat);

  tp->control = inf_status->thread_control;
  inf->control = inf_status->inferior_control;

  /* Other fields:  */
  stop_stack_dummy = inf_status->stop_stack_dummy;
  stopped_by_random_signal = inf_status->stopped_by_random_signal;

  if (target_has_stack ())
    {
      restore_selected_frame (inf_status->selected_frame_id,
			      inf_status->selected_frame_level);
    }

  delete inf_status;
}
10363
10364void
10365discard_infcall_control_state (struct infcall_control_state *inf_status)
10366{
10367 if (inf_status->thread_control.step_resume_breakpoint)
10368 inf_status->thread_control.step_resume_breakpoint->disposition
10369 = disp_del_at_next_stop;
10370
10371 if (inf_status->thread_control.exception_resume_breakpoint)
10372 inf_status->thread_control.exception_resume_breakpoint->disposition
10373 = disp_del_at_next_stop;
10374
10375 /* See save_infcall_control_state for info on stop_bpstat. */
10376 bpstat_clear (&inf_status->thread_control.stop_bpstat);
10377
10378 delete inf_status;
10379}
10380\f
10381/* See infrun.h. */
10382
10383void
10384clear_exit_convenience_vars (void)
10385{
10386 clear_internalvar (lookup_internalvar ("_exitsignal"));
10387 clear_internalvar (lookup_internalvar ("_exitcode"));
10388}
10389\f
10390
/* User interface for reverse debugging:
   Set exec-direction / show exec-direction commands
   (returns error unless target implements to_set_exec_direction method). */

enum exec_direction_kind execution_direction = EXEC_FORWARD;
static const char exec_forward[] = "forward";
static const char exec_reverse[] = "reverse";
/* The user-visible "set exec-direction" string; translated into
   EXECUTION_DIRECTION by set_exec_direction_func below.  */
static const char *exec_direction = exec_forward;
/* Valid values for "set exec-direction", nullptr-terminated as
   required by add_setshow_enum_cmd.  */
static const char *const exec_direction_names[] = {
  exec_forward,
  exec_reverse,
  nullptr
};
10404
10405static void
10406set_exec_direction_func (const char *args, int from_tty,
10407 struct cmd_list_element *cmd)
10408{
10409 if (target_can_execute_reverse ())
10410 {
10411 if (!strcmp (exec_direction, exec_forward))
10412 execution_direction = EXEC_FORWARD;
10413 else if (!strcmp (exec_direction, exec_reverse))
10414 execution_direction = EXEC_REVERSE;
10415 }
10416 else
10417 {
10418 exec_direction = exec_forward;
10419 error (_("Target does not support this operation."));
10420 }
10421}
10422
10423static void
10424show_exec_direction_func (struct ui_file *out, int from_tty,
10425 struct cmd_list_element *cmd, const char *value)
10426{
10427 switch (execution_direction) {
10428 case EXEC_FORWARD:
10429 gdb_printf (out, _("Forward.\n"));
10430 break;
10431 case EXEC_REVERSE:
10432 gdb_printf (out, _("Reverse.\n"));
10433 break;
10434 default:
10435 internal_error (_("bogus execution_direction value: %d"),
10436 (int) execution_direction);
10437 }
10438}
10439
/* "show schedule-multiple" callback.  */

static void
show_schedule_multiple (struct ui_file *file, int from_tty,
			struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Resuming the execution of threads "
		      "of all processes is %s.\n"), value);
}
10447
/* Implementation of `siginfo' variable. */

static const struct internalvar_funcs siginfo_funcs =
{
  siginfo_make_value,	/* value-creation callback */
  nullptr,		/* remaining callback unused */
};
10455
/* Callback for infrun's target events source.  This is marked when a
   thread has a pending status to process. */

static void
infrun_async_inferior_event_handler (gdb_client_data data)
{
  /* Acknowledge the event token before processing the event.  */
  clear_async_event_handler (infrun_async_inferior_event_token);
  inferior_event_handler (INF_REG_EVENT);
}
10465
10466#if GDB_SELF_TEST
10467namespace selftests
10468{
10469
10470/* Verify that when two threads with the same ptid exist (from two different
10471 targets) and one of them changes ptid, we only update inferior_ptid if
10472 it is appropriate. */
10473
10474static void
10475infrun_thread_ptid_changed ()
10476{
10477 gdbarch *arch = current_inferior ()->arch ();
10478
10479 /* The thread which inferior_ptid represents changes ptid. */
10480 {
10481 scoped_restore_current_pspace_and_thread restore;
10482
10483 scoped_mock_context<test_target_ops> target1 (arch);
10484 scoped_mock_context<test_target_ops> target2 (arch);
10485
10486 ptid_t old_ptid (111, 222);
10487 ptid_t new_ptid (111, 333);
10488
10489 target1.mock_inferior.pid = old_ptid.pid ();
10490 target1.mock_thread.ptid = old_ptid;
10491 target1.mock_inferior.ptid_thread_map.clear ();
10492 target1.mock_inferior.ptid_thread_map[old_ptid] = &target1.mock_thread;
10493
10494 target2.mock_inferior.pid = old_ptid.pid ();
10495 target2.mock_thread.ptid = old_ptid;
10496 target2.mock_inferior.ptid_thread_map.clear ();
10497 target2.mock_inferior.ptid_thread_map[old_ptid] = &target2.mock_thread;
10498
10499 auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
10500 set_current_inferior (&target1.mock_inferior);
10501
10502 thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);
10503
10504 gdb_assert (inferior_ptid == new_ptid);
10505 }
10506
10507 /* A thread with the same ptid as inferior_ptid, but from another target,
10508 changes ptid. */
10509 {
10510 scoped_restore_current_pspace_and_thread restore;
10511
10512 scoped_mock_context<test_target_ops> target1 (arch);
10513 scoped_mock_context<test_target_ops> target2 (arch);
10514
10515 ptid_t old_ptid (111, 222);
10516 ptid_t new_ptid (111, 333);
10517
10518 target1.mock_inferior.pid = old_ptid.pid ();
10519 target1.mock_thread.ptid = old_ptid;
10520 target1.mock_inferior.ptid_thread_map.clear ();
10521 target1.mock_inferior.ptid_thread_map[old_ptid] = &target1.mock_thread;
10522
10523 target2.mock_inferior.pid = old_ptid.pid ();
10524 target2.mock_thread.ptid = old_ptid;
10525 target2.mock_inferior.ptid_thread_map.clear ();
10526 target2.mock_inferior.ptid_thread_map[old_ptid] = &target2.mock_thread;
10527
10528 auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
10529 set_current_inferior (&target2.mock_inferior);
10530
10531 thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);
10532
10533 gdb_assert (inferior_ptid == old_ptid);
10534 }
10535}
10536
10537} /* namespace selftests */
10538
10539#endif /* GDB_SELF_TEST */
10540
INIT_GDB_FILE (infrun)
{
  struct cmd_list_element *c;

  /* Register extra event sources in the event loop. */
  infrun_async_inferior_event_token
    = create_async_event_handler (infrun_async_inferior_event_handler, nullptr,
				  "infrun");

  /* "info signals" and its alias "info handle".  */
  cmd_list_element *info_signals_cmd
    = add_info ("signals", info_signals_command, _("\
What debugger does when program gets various signals.\n\
Specify a signal as argument to print info on that signal only."));
  add_info_alias ("handle", info_signals_cmd, 0);

  c = add_com ("handle", class_run, handle_command, _("\
Specify how to handle signals.\n\
Usage: handle SIGNAL [ACTIONS]\n\
Args are signals and actions to apply to those signals.\n\
If no actions are specified, the current settings for the specified signals\n\
will be displayed instead.\n\
\n\
Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
from 1-15 are allowed for compatibility with old versions of GDB.\n\
Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
The special arg \"all\" is recognized to mean all signals except those\n\
used by the debugger, typically SIGTRAP and SIGINT.\n\
\n\
Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
Stop means reenter debugger if this signal happens (implies print).\n\
Print means print a message if this signal happens.\n\
Pass means let program see this signal; otherwise program doesn't know.\n\
Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
Pass and Stop may be combined.\n\
\n\
Multiple signals may be specified.  Signal numbers and signal names\n\
may be interspersed with actions, with the actions being performed for\n\
all signals cumulatively specified."));
  set_cmd_completer (c, handle_completer);

  stop_command = add_cmd ("stop", class_obscure,
			  not_just_help_class_command, _("\
There is no `stop' command, but you can set a hook on `stop'.\n\
This allows you to set a list of commands to be run each time execution\n\
of the program stops."), &cmdlist);

  /* Debug output knob for this module ("set debug infrun").  */
  add_setshow_boolean_cmd
    ("infrun", class_maintenance, &debug_infrun,
     _("Set inferior debugging."),
     _("Show inferior debugging."),
     _("When non-zero, inferior specific debugging is enabled."),
     nullptr, show_debug_infrun, &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("non-stop", no_class,
			   &non_stop_1, _("\
Set whether gdb controls the inferior in non-stop mode."), _("\
Show whether gdb controls the inferior in non-stop mode."), _("\
When debugging a multi-threaded program and this setting is\n\
off (the default, also called all-stop mode), when one thread stops\n\
(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
all other threads in the program while you interact with the thread of\n\
interest.  When you continue or step a thread, you can allow the other\n\
threads to run, or have them remain stopped, but while you inspect any\n\
thread's state, all threads stop.\n\
\n\
In non-stop mode, when one thread stops, other threads can continue\n\
to run freely.  You'll be able to step each thread independently,\n\
leave it stopped or free to run as needed."),
			   set_non_stop,
			   show_non_stop,
			   &setlist,
			   &showlist);

  /* By default every signal stops the program, is printed, and is
     passed to the program; none are caught.  */
  for (size_t i = 0; i < GDB_SIGNAL_LAST; i++)
    {
      signal_stop[i] = 1;
      signal_print[i] = 1;
      signal_program[i] = 1;
      signal_catch[i] = 0;
    }

  /* Signals caused by debugger's own actions should not be given to
     the program afterwards.

     Do not deliver GDB_SIGNAL_TRAP by default, except when the user
     explicitly specifies that it should be delivered to the target
     program.  Typically, that would occur when a user is debugging a
     target monitor on a simulator: the target monitor sets a
     breakpoint; the simulator encounters this breakpoint and halts
     the simulation handing control to GDB; GDB, noting that the stop
     address doesn't map to any known breakpoint, returns control back
     to the simulator; the simulator then delivers the hardware
     equivalent of a GDB_SIGNAL_TRAP to the program being
     debugged. */
  signal_program[GDB_SIGNAL_TRAP] = 0;
  signal_program[GDB_SIGNAL_INT] = 0;

  /* Signals that are not errors should not normally enter the debugger. */
  signal_stop[GDB_SIGNAL_ALRM] = 0;
  signal_print[GDB_SIGNAL_ALRM] = 0;
  signal_stop[GDB_SIGNAL_VTALRM] = 0;
  signal_print[GDB_SIGNAL_VTALRM] = 0;
  signal_stop[GDB_SIGNAL_PROF] = 0;
  signal_print[GDB_SIGNAL_PROF] = 0;
  signal_stop[GDB_SIGNAL_CHLD] = 0;
  signal_print[GDB_SIGNAL_CHLD] = 0;
  signal_stop[GDB_SIGNAL_IO] = 0;
  signal_print[GDB_SIGNAL_IO] = 0;
  signal_stop[GDB_SIGNAL_POLL] = 0;
  signal_print[GDB_SIGNAL_POLL] = 0;
  signal_stop[GDB_SIGNAL_URG] = 0;
  signal_print[GDB_SIGNAL_URG] = 0;
  signal_stop[GDB_SIGNAL_WINCH] = 0;
  signal_print[GDB_SIGNAL_WINCH] = 0;
  signal_stop[GDB_SIGNAL_PRIO] = 0;
  signal_print[GDB_SIGNAL_PRIO] = 0;

  /* These signals are used internally by user-level thread
     implementations.  (See signal(5) on Solaris.)  Like the above
     signals, a healthy program receives and handles them as part of
     its normal operation. */
  signal_stop[GDB_SIGNAL_LWP] = 0;
  signal_print[GDB_SIGNAL_LWP] = 0;
  signal_stop[GDB_SIGNAL_WAITING] = 0;
  signal_print[GDB_SIGNAL_WAITING] = 0;
  signal_stop[GDB_SIGNAL_CANCEL] = 0;
  signal_print[GDB_SIGNAL_CANCEL] = 0;
  signal_stop[GDB_SIGNAL_LIBRT] = 0;
  signal_print[GDB_SIGNAL_LIBRT] = 0;

  /* Update cached state. */
  signal_cache_update (-1);

  add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
			    &stop_on_solib_events, _("\
Set stopping for shared library events."), _("\
Show stopping for shared library events."), _("\
If nonzero, gdb will give control to the user when the dynamic linker\n\
notifies gdb of shared library events.  The most common event of interest\n\
to the user would be loading/unloading of a new library."),
			    set_stop_on_solib_events,
			    show_stop_on_solib_events,
			    &setlist, &showlist);

  add_setshow_enum_cmd ("follow-fork-mode", class_run,
			follow_fork_mode_kind_names,
			&follow_fork_mode_string, _("\
Set debugger response to a program call of fork or vfork."), _("\
Show debugger response to a program call of fork or vfork."), _("\
A fork or vfork creates a new process.  follow-fork-mode can be:\n\
  parent  - the original process is debugged after a fork\n\
  child   - the new process is debugged after a fork\n\
The unfollowed process will continue to run.\n\
By default, the debugger will follow the parent process."),
			nullptr,
			show_follow_fork_mode_string,
			&setlist, &showlist);

  add_setshow_enum_cmd ("follow-exec-mode", class_run,
			follow_exec_mode_names,
			&follow_exec_mode_string, _("\
Set debugger response to a program call of exec."), _("\
Show debugger response to a program call of exec."), _("\
An exec call replaces the program image of a process.\n\
\n\
follow-exec-mode can be:\n\
\n\
  new - the debugger creates a new inferior and rebinds the process\n\
to this new inferior.  The program the process was running before\n\
the exec call can be restarted afterwards by restarting the original\n\
inferior.\n\
\n\
  same - the debugger keeps the process bound to the same inferior.\n\
The new executable image replaces the previous executable loaded in\n\
the inferior.  Restarting the inferior after the exec call restarts\n\
the executable the process was running after the exec call.\n\
\n\
By default, the debugger will use the same inferior."),
			nullptr,
			show_follow_exec_mode_string,
			&setlist, &showlist);

  add_setshow_enum_cmd ("scheduler-locking", class_run,
			scheduler_enums, &scheduler_mode, _("\
Set mode for locking scheduler during execution."), _("\
Show mode for locking scheduler during execution."), _("\
off    == no locking (threads may preempt at any time)\n\
on     == full locking (no thread except the current thread may run)\n\
          This applies to both normal execution and replay mode.\n\
step   == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
          In this mode, other threads may run during other commands.\n\
          This applies to both normal execution and replay mode.\n\
replay == scheduler locked in replay mode and unlocked during normal execution."),
			set_schedlock_func,	/* traps on target vector */
			show_scheduler_mode,
			&setlist, &showlist);

  add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
Set mode for resuming threads of all processes."), _("\
Show mode for resuming threads of all processes."), _("\
When on, execution commands (such as 'continue' or 'next') resume all\n\
threads of all processes.  When off (which is the default), execution\n\
commands only resume the threads of the current process.  The set of\n\
threads that are resumed is further refined by the scheduler-locking\n\
mode (see help set scheduler-locking)."),
			   nullptr,
			   show_schedule_multiple,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
Set mode of the step operation."), _("\
Show mode of the step operation."), _("\
When set, doing a step over a function without debug line information\n\
will stop at the first instruction of that function. Otherwise, the\n\
function is skipped and the step command stops at a different source line."),
			   nullptr,
			   show_step_stop_if_no_debug,
			   &setlist, &showlist);

  add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
				&can_use_displaced_stepping, _("\
Set debugger's willingness to use displaced stepping."), _("\
Show debugger's willingness to use displaced stepping."), _("\
If on, gdb will use displaced stepping to step over breakpoints if it is\n\
supported by the target architecture.  If off, gdb will not use displaced\n\
stepping to step over breakpoints, even if such is supported by the target\n\
architecture.  If auto (which is the default), gdb will use displaced stepping\n\
if the target architecture supports it and non-stop mode is active, but will not\n\
use it in all-stop mode (see help set non-stop)."),
				nullptr,
				show_can_use_displaced_stepping,
				&setlist, &showlist);

  add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
			&exec_direction, _("Set direction of execution.\n\
Options are 'forward' or 'reverse'."),
			_("Show direction of execution (forward/reverse)."),
			_("Tells gdb whether to execute forward or backward."),
			set_exec_direction_func, show_exec_direction_func,
			&setlist, &showlist);

  /* Set/show detach-on-fork: user-settable mode.  */

  add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
Set whether gdb will detach the child of a fork."), _("\
Show whether gdb will detach the child of a fork."), _("\
Tells gdb whether to detach the child of a fork."),
			   nullptr, nullptr, &setlist, &showlist);

  /* Set/show disable address space randomization mode.  */

  add_setshow_boolean_cmd ("disable-randomization", class_support,
			   &disable_randomization, _("\
Set disabling of debuggee's virtual address space randomization."), _("\
Show disabling of debuggee's virtual address space randomization."), _("\
When this mode is on (which is the default), randomization of the virtual\n\
address space is disabled.  Standalone programs run with the randomization\n\
enabled by default on some platforms."),
			   &set_disable_randomization,
			   &show_disable_randomization,
			   &setlist, &showlist);

  /* ptid initializations */
  inferior_ptid = null_ptid;
  target_last_wait_ptid = minus_one_ptid;

  /* Attach this module's observers.  */
  gdb::observers::thread_ptid_changed.attach (infrun_thread_ptid_changed,
					      "infrun");
  gdb::observers::thread_stop_requested.attach (infrun_thread_stop_requested,
						"infrun");
  gdb::observers::inferior_exit.attach (infrun_inferior_exit, "infrun");
  gdb::observers::inferior_execd.attach (infrun_inferior_execd, "infrun");

  /* Explicitly create without lookup, since that tries to create a
     value with a void typed value, and when we get here, gdbarch
     isn't initialized yet.  At this point, we're quite sure there
     isn't another convenience variable of the same name.  */
  create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, nullptr);

  add_setshow_boolean_cmd ("observer", no_class,
			   &observer_mode_1, _("\
Set whether gdb controls the inferior in observer mode."), _("\
Show whether gdb controls the inferior in observer mode."), _("\
In observer mode, GDB can get data from the inferior, but not\n\
affect its execution.  Registers and memory may not be changed,\n\
breakpoints may not be set, and the program cannot be interrupted\n\
or signalled."),
			   set_observer_mode,
			   show_observer_mode,
			   &setlist,
			   &showlist);

#if GDB_SELF_TEST
  selftests::register_test ("infrun_thread_ptid_changed",
			    selftests::infrun_thread_ptid_changed);
#endif
}