]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gdb/infrun.c
bbb98f6dcdb3e56a740ff5561826cb8ae56f7f97
[thirdparty/binutils-gdb.git] / gdb / infrun.c
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2024 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "displaced-stepping.h"
23 #include "infrun.h"
24 #include <ctype.h>
25 #include "symtab.h"
26 #include "frame.h"
27 #include "inferior.h"
28 #include "breakpoint.h"
29 #include "gdbcore.h"
30 #include "gdbcmd.h"
31 #include "target.h"
32 #include "target-connection.h"
33 #include "gdbthread.h"
34 #include "annotate.h"
35 #include "symfile.h"
36 #include "top.h"
37 #include "ui.h"
38 #include "inf-loop.h"
39 #include "regcache.h"
40 #include "value.h"
41 #include "observable.h"
42 #include "language.h"
43 #include "solib.h"
44 #include "main.h"
45 #include "block.h"
46 #include "mi/mi-common.h"
47 #include "event-top.h"
48 #include "record.h"
49 #include "record-full.h"
50 #include "inline-frame.h"
51 #include "jit.h"
52 #include "tracepoint.h"
53 #include "skip.h"
54 #include "probe.h"
55 #include "objfiles.h"
56 #include "completer.h"
57 #include "target-descriptions.h"
58 #include "target-dcache.h"
59 #include "terminal.h"
60 #include "solist.h"
61 #include "gdbsupport/event-loop.h"
62 #include "thread-fsm.h"
63 #include "gdbsupport/enum-flags.h"
64 #include "progspace-and-thread.h"
65 #include <optional>
66 #include "arch-utils.h"
67 #include "gdbsupport/scope-exit.h"
68 #include "gdbsupport/forward-scope-exit.h"
69 #include "gdbsupport/gdb_select.h"
70 #include <unordered_map>
71 #include "async-event.h"
72 #include "gdbsupport/selftest.h"
73 #include "scoped-mock-context.h"
74 #include "test-target.h"
75 #include "gdbsupport/common-debug.h"
76 #include "gdbsupport/buildargv.h"
77 #include "extension.h"
78 #include "disasm.h"
79 #include "interps.h"
80
81 /* Prototypes for local functions */
82
83 static void sig_print_info (enum gdb_signal);
84
85 static void sig_print_header (void);
86
87 static void follow_inferior_reset_breakpoints (void);
88
89 static bool currently_stepping (struct thread_info *tp);
90
91 static void insert_hp_step_resume_breakpoint_at_frame (const frame_info_ptr &);
92
93 static void insert_step_resume_breakpoint_at_caller (const frame_info_ptr &);
94
95 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
96
97 static bool maybe_software_singlestep (struct gdbarch *gdbarch);
98
99 static void resume (gdb_signal sig);
100
101 static void wait_for_inferior (inferior *inf);
102
103 static void restart_threads (struct thread_info *event_thread,
104 inferior *inf = nullptr);
105
106 static bool start_step_over (void);
107
108 static bool step_over_info_valid_p (void);
109
110 static bool schedlock_applies (struct thread_info *tp);
111
112 /* Asynchronous signal handler registered as event loop source for
113 when we have pending events ready to be passed to the core. */
114 static struct async_event_handler *infrun_async_inferior_event_token;
115
116 /* Stores whether infrun_async was previously enabled or disabled.
117 Starts off as -1, indicating "never enabled/disabled". */
118 static int infrun_is_async = -1;
119 static CORE_ADDR update_line_range_start (CORE_ADDR pc,
120 struct execution_control_state *ecs);
121
122 /* See infrun.h. */
123
124 void
125 infrun_async (int enable)
126 {
127 if (infrun_is_async != enable)
128 {
129 infrun_is_async = enable;
130
131 infrun_debug_printf ("enable=%d", enable);
132
133 if (enable)
134 mark_async_event_handler (infrun_async_inferior_event_token);
135 else
136 clear_async_event_handler (infrun_async_inferior_event_token);
137 }
138 }
139
140 /* See infrun.h. */
141
void
mark_infrun_async_event_handler (void)
{
  /* Force the infrun async event source to fire, so pending inferior
     events are processed on the next pass of the event loop.  */
  mark_async_event_handler (infrun_async_inferior_event_token);
}
147
148 /* When set, stop the 'step' command if we enter a function which has
149 no line number information. The normal behavior is that we step
150 over such function. */
151 bool step_stop_if_no_debug = false;
/* "show" callback for the step_stop_if_no_debug setting; VALUE is the
   user-visible string form of the current setting.  */
static void
show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Mode of the step operation is %s.\n"), value);
}
158
159 /* proceed and normal_stop use this to notify the user when the
160 inferior stopped in a different thread than it had been running in.
161 It can also be used to find for which thread normal_stop last
162 reported a stop. */
163 static thread_info_ref previous_thread;
164
165 /* See infrun.h. */
166
167 void
168 update_previous_thread ()
169 {
170 if (inferior_ptid == null_ptid)
171 previous_thread = nullptr;
172 else
173 previous_thread = thread_info_ref::new_reference (inferior_thread ());
174 }
175
176 /* See infrun.h. */
177
thread_info *
get_previous_thread ()
{
  /* May be nullptr if no stop has been reported yet, or if the record
     was cleared (see update_previous_thread).  */
  return previous_thread.get ();
}
183
184 /* If set (default for legacy reasons), when following a fork, GDB
185 will detach from one of the fork branches, child or parent.
186 Exactly which branch is detached depends on 'set follow-fork-mode'
187 setting. */
188
189 static bool detach_fork = true;
190
191 bool debug_infrun = false;
/* "show" callback for the debug_infrun flag; VALUE is the user-visible
   string form of the current setting.  */
static void
show_debug_infrun (struct ui_file *file, int from_tty,
		   struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Inferior debugging is %s.\n"), value);
}
198
199 /* Support for disabling address space randomization. */
200
201 bool disable_randomization = true;
202
203 static void
204 show_disable_randomization (struct ui_file *file, int from_tty,
205 struct cmd_list_element *c, const char *value)
206 {
207 if (target_supports_disable_randomization ())
208 gdb_printf (file,
209 _("Disabling randomization of debuggee's "
210 "virtual address space is %s.\n"),
211 value);
212 else
213 gdb_puts (_("Disabling randomization of debuggee's "
214 "virtual address space is unsupported on\n"
215 "this platform.\n"), file);
216 }
217
/* "set" callback for the disable_randomization setting.  Rejects the
   change when the target cannot honor it.  */
static void
set_disable_randomization (const char *args, int from_tty,
			   struct cmd_list_element *c)
{
  if (!target_supports_disable_randomization ())
    error (_("Disabling randomization of debuggee's "
	     "virtual address space is unsupported on\n"
	     "this platform."));
}
227
228 /* User interface for non-stop mode. */
229
230 bool non_stop = false;
231 static bool non_stop_1 = false;
232
/* "set non-stop" callback.  The command machinery has already stored
   the requested value in NON_STOP_1; commit it to NON_STOP only when
   the inferior is not running.  */
static void
set_non_stop (const char *args, int from_tty,
	      struct cmd_list_element *c)
{
  if (target_has_execution ())
    {
      /* Revert the staged value before erroring out, so the setting
	 machinery doesn't keep a value that was never applied.  */
      non_stop_1 = non_stop;
      error (_("Cannot change this setting while the inferior is running."));
    }

  non_stop = non_stop_1;
}
245
/* "show" callback for the non-stop setting; VALUE is the user-visible
   string form of the current setting.  */
static void
show_non_stop (struct ui_file *file, int from_tty,
	       struct cmd_list_element *c, const char *value)
{
  gdb_printf (file,
	      _("Controlling the inferior in non-stop mode is %s.\n"),
	      value);
}
254
255 /* "Observer mode" is somewhat like a more extreme version of
256 non-stop, in which all GDB operations that might affect the
257 target's execution have been disabled. */
258
259 static bool observer_mode = false;
260 static bool observer_mode_1 = false;
261
/* "set observer" callback.  The command machinery has already stored
   the requested value in OBSERVER_MODE_1; commit it to OBSERVER_MODE
   only when the inferior is not running, then propagate the mode into
   the individual may-* permission flags and push them to the target.  */
static void
set_observer_mode (const char *args, int from_tty,
		   struct cmd_list_element *c)
{
  if (target_has_execution ())
    {
      /* Revert the staged value before erroring out.  */
      observer_mode_1 = observer_mode;
      error (_("Cannot change this setting while the inferior is running."));
    }

  observer_mode = observer_mode_1;

  /* Observer mode turns off everything that could perturb the
     target's execution.  */
  may_write_registers = !observer_mode;
  may_write_memory = !observer_mode;
  may_insert_breakpoints = !observer_mode;
  may_insert_tracepoints = !observer_mode;
  /* We can insert fast tracepoints in or out of observer mode,
     but enable them if we're going into this mode.  */
  if (observer_mode)
    may_insert_fast_tracepoints = true;
  may_stop = !observer_mode;
  update_target_permissions ();

  /* Going *into* observer mode we must force non-stop, then
     going out we leave it that way.  */
  if (observer_mode)
    {
      pagination_enabled = false;
      non_stop = non_stop_1 = true;
    }

  if (from_tty)
    gdb_printf (_("Observer mode is now %s.\n"),
		(observer_mode ? "on" : "off"));
}
297
/* "show" callback for the observer setting; VALUE is the user-visible
   string form of the current setting.  */
static void
show_observer_mode (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Observer mode is %s.\n"), value);
}
304
305 /* This updates the value of observer mode based on changes in
306 permissions. Note that we are deliberately ignoring the values of
307 may-write-registers and may-write-memory, since the user may have
308 reason to enable these during a session, for instance to turn on a
309 debugging-related global. */
310
311 void
312 update_observer_mode (void)
313 {
314 bool newval = (!may_insert_breakpoints
315 && !may_insert_tracepoints
316 && may_insert_fast_tracepoints
317 && !may_stop
318 && non_stop);
319
320 /* Let the user know if things change. */
321 if (newval != observer_mode)
322 gdb_printf (_("Observer mode is now %s.\n"),
323 (newval ? "on" : "off"));
324
325 observer_mode = observer_mode_1 = newval;
326 }
327
328 /* Tables of how to react to signals; the user sets them. */
329
330 static unsigned char signal_stop[GDB_SIGNAL_LAST];
331 static unsigned char signal_print[GDB_SIGNAL_LAST];
332 static unsigned char signal_program[GDB_SIGNAL_LAST];
333
334 /* Table of signals that are registered with "catch signal". A
335 non-zero entry indicates that the signal is caught by some "catch
336 signal" command. */
337 static unsigned char signal_catch[GDB_SIGNAL_LAST];
338
339 /* Table of signals that the target may silently handle.
340 This is automatically determined from the flags above,
341 and simply cached here. */
342 static unsigned char signal_pass[GDB_SIGNAL_LAST];
343
/* For each signal number below NSIGS whose entry in SIGS is non-zero,
   set the corresponding entry in FLAGS to 1.  */
#define SET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
	(flags)[signum] = 1; \
  } while (0)

/* For each signal number below NSIGS whose entry in SIGS is non-zero,
   clear the corresponding entry in FLAGS.  */
#define UNSET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
	(flags)[signum] = 0; \
  } while (0)
359
360 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
361 this function is to avoid exporting `signal_program'. */
362
void
update_signals_program_target (void)
{
  /* Push the cached SIGNAL_PROGRAM table down to the target.  */
  target_program_signals (signal_program);
}
368
369 /* Value to pass to target_resume() to cause all threads to resume. */
370
371 #define RESUME_ALL minus_one_ptid
372
373 /* Command list pointer for the "stop" placeholder. */
374
375 static struct cmd_list_element *stop_command;
376
377 /* Nonzero if we want to give control to the user when we're notified
378 of shared library events by the dynamic linker. */
379 int stop_on_solib_events;
380
381 /* Enable or disable optional shared library event breakpoints
382 as appropriate when the above flag is changed. */
383
static void
set_stop_on_solib_events (const char *args,
			  int from_tty, struct cmd_list_element *c)
{
  /* The flag itself was already updated by the command machinery;
     bring the solib event breakpoints in line with the new value.  */
  update_solib_breakpoints ();
}
390
/* "show" callback for the stop-on-solib-events setting; VALUE is the
   user-visible string form of the current setting.  */
static void
show_stop_on_solib_events (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Stopping for shared library events is %s.\n"),
	      value);
}
398
399 /* True after stop if current stack frame should be printed. */
400
401 static bool stop_print_frame;
402
403 /* This is a cached copy of the target/ptid/waitstatus of the last
404 event returned by target_wait().
405 This information is returned by get_last_target_status(). */
406 static process_stratum_target *target_last_proc_target;
407 static ptid_t target_last_wait_ptid;
408 static struct target_waitstatus target_last_waitstatus;
409
410 void init_thread_stepping_state (struct thread_info *tss);
411
412 static const char follow_fork_mode_child[] = "child";
413 static const char follow_fork_mode_parent[] = "parent";
414
415 static const char *const follow_fork_mode_kind_names[] = {
416 follow_fork_mode_child,
417 follow_fork_mode_parent,
418 nullptr
419 };
420
421 static const char *follow_fork_mode_string = follow_fork_mode_parent;
/* "show" callback for the follow-fork-mode setting; VALUE is either
   "parent" or "child" (see follow_fork_mode_kind_names).  */
static void
show_follow_fork_mode_string (struct ui_file *file, int from_tty,
			      struct cmd_list_element *c, const char *value)
{
  gdb_printf (file,
	      _("Debugger response to a program "
		"call of fork or vfork is \"%s\".\n"),
	      value);
}
431 \f
432
/* Handle changes to the inferior list based on the type of fork,
   which process is being followed, and whether the other process
   should be detached.  On entry inferior_ptid must be the ptid of
   the fork parent.  At return inferior_ptid is the ptid of the
   followed inferior.

   Returns true if the caller should NOT resume (currently only when
   resuming the parent over a vfork in the foreground would hang the
   session); false otherwise.  */

static bool
follow_fork_inferior (bool follow_child, bool detach_fork)
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  infrun_debug_printf ("follow_child = %d, detach_fork = %d",
		       follow_child, detach_fork);

  /* The pending-follow event on the current thread tells us which
     kind of fork this is and who the child is.  */
  target_waitkind fork_kind = inferior_thread ()->pending_follow.kind ();
  gdb_assert (fork_kind == TARGET_WAITKIND_FORKED
	      || fork_kind == TARGET_WAITKIND_VFORKED);
  bool has_vforked = fork_kind == TARGET_WAITKIND_VFORKED;
  ptid_t parent_ptid = inferior_ptid;
  ptid_t child_ptid = inferior_thread ()->pending_follow.child_ptid ();

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && current_ui->prompt_state == PROMPT_BLOCKED
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
	 child execs or exits.  If we don't let the child run, then
	 the parent stays blocked.  If we're telling the parent to run
	 in the foreground, the user will not be able to ctrl-c to get
	 back the terminal, effectively hanging the debug session.  */
      gdb_printf (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      return true;
    }

  inferior *parent_inf = current_inferior ();
  inferior *child_inf = nullptr;

  gdb_assert (parent_inf->thread_waiting_for_vfork_done == nullptr);

  if (!follow_child)
    {
      /* Detach new forked process?  */
      if (detach_fork)
	{
	  /* Before detaching from the child, remove all breakpoints
	     from it.  If we forked, then this has already been taken
	     care of by infrun.c.  If we vforked however, any
	     breakpoint inserted in the parent is visible in the
	     child, even those added while stopped in a vfork
	     catchpoint.  This will remove the breakpoints from the
	     parent also, but they'll be reinserted below.  */
	  if (has_vforked)
	    {
	      /* Keep breakpoints list in sync.  */
	      remove_breakpoints_inf (current_inferior ());
	    }

	  if (print_inferior_events)
	    {
	      /* Ensure that we have a process ptid.  */
	      ptid_t process_ptid = ptid_t (child_ptid.pid ());

	      target_terminal::ours_for_output ();
	      gdb_printf (_("[Detaching after %s from child %s]\n"),
			  has_vforked ? "vfork" : "fork",
			  target_pid_to_str (process_ptid).c_str ());
	    }
	}
      else
	{
	  /* Staying attached to both: add the child to GDB's
	     tables, inheriting connection-level state from the
	     parent.  */
	  child_inf = add_inferior (child_ptid.pid ());

	  child_inf->attach_flag = parent_inf->attach_flag;
	  copy_terminal_info (child_inf, parent_inf);
	  child_inf->set_arch (parent_inf->arch ());
	  child_inf->tdesc_info = parent_inf->tdesc_info;

	  child_inf->symfile_flags = SYMFILE_NO_READ;

	  /* If this is a vfork child, then the address-space is
	     shared with the parent.  */
	  if (has_vforked)
	    {
	      child_inf->pspace = parent_inf->pspace;
	      child_inf->aspace = parent_inf->aspace;

	      exec_on_vfork (child_inf);

	      /* The parent will be frozen until the child is done
		 with the shared region.  Keep track of the
		 parent.  */
	      child_inf->vfork_parent = parent_inf;
	      child_inf->pending_detach = false;
	      parent_inf->vfork_child = child_inf;
	      parent_inf->pending_detach = false;
	    }
	  else
	    {
	      /* Plain fork: the child gets its own copy of the
		 parent's program space.  */
	      child_inf->pspace = new program_space (new_address_space ());
	      child_inf->aspace = child_inf->pspace->aspace;
	      child_inf->removable = true;
	      clone_program_space (child_inf->pspace, parent_inf->pspace);
	    }
	}

      if (has_vforked)
	{
	  /* If we detached from the child, then we have to be careful
	     to not insert breakpoints in the parent until the child
	     is done with the shared memory region.  However, if we're
	     staying attached to the child, then we can and should
	     insert breakpoints, so that we can debug it.  A
	     subsequent child exec or exit is enough to know when the
	     child stops using the parent's address space.  */
	  parent_inf->thread_waiting_for_vfork_done
	    = detach_fork ? inferior_thread () : nullptr;
	  parent_inf->pspace->breakpoints_not_allowed = detach_fork;

	  infrun_debug_printf
	    ("parent_inf->thread_waiting_for_vfork_done == %s",
	     (parent_inf->thread_waiting_for_vfork_done == nullptr
	      ? "nullptr"
	      : (parent_inf->thread_waiting_for_vfork_done
		 ->ptid.to_string ().c_str ())));
	}
    }
  else
    {
      /* Follow the child.  */

      if (print_inferior_events)
	{
	  std::string parent_pid = target_pid_to_str (parent_ptid);
	  std::string child_pid = target_pid_to_str (child_ptid);

	  target_terminal::ours_for_output ();
	  gdb_printf (_("[Attaching after %s %s to child %s]\n"),
		      parent_pid.c_str (),
		      has_vforked ? "vfork" : "fork",
		      child_pid.c_str ());
	}

      /* Add the new inferior first, so that the target_detach below
	 doesn't unpush the target.  */

      child_inf = add_inferior (child_ptid.pid ());

      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);
      child_inf->set_arch (parent_inf->arch ());
      child_inf->tdesc_info = parent_inf->tdesc_info;

      if (has_vforked)
	{
	  /* If this is a vfork child, then the address-space is shared
	     with the parent.  */
	  child_inf->aspace = parent_inf->aspace;
	  child_inf->pspace = parent_inf->pspace;

	  exec_on_vfork (child_inf);
	}
      else if (detach_fork)
	{
	  /* We follow the child and detach from the parent: move the parent's
	     program space to the child.  This simplifies some things, like
	     doing "next" over fork() and landing on the expected line in the
	     child (note, that is broken with "set detach-on-fork off").

	     Before assigning brand new spaces for the parent, remove
	     breakpoints from it: because the new pspace won't match
	     currently inserted locations, the normal detach procedure
	     wouldn't remove them, and we would leave them inserted when
	     detaching.  */
	  remove_breakpoints_inf (parent_inf);

	  child_inf->aspace = parent_inf->aspace;
	  child_inf->pspace = parent_inf->pspace;
	  parent_inf->pspace = new program_space (new_address_space ());
	  parent_inf->aspace = parent_inf->pspace->aspace;
	  clone_program_space (parent_inf->pspace, child_inf->pspace);

	  /* The parent inferior is still the current one, so keep things
	     in sync.  */
	  set_current_program_space (parent_inf->pspace);
	}
      else
	{
	  /* Staying attached to the parent too: the child gets a
	     fresh copy of the parent's program space.  */
	  child_inf->pspace = new program_space (new_address_space ());
	  child_inf->aspace = child_inf->pspace->aspace;
	  child_inf->removable = true;
	  child_inf->symfile_flags = SYMFILE_NO_READ;
	  clone_program_space (child_inf->pspace, parent_inf->pspace);
	}
    }

  gdb_assert (current_inferior () == parent_inf);

  /* If we are setting up an inferior for the child, target_follow_fork is
     responsible for pushing the appropriate targets on the new inferior's
     target stack and adding the initial thread (with ptid CHILD_PTID).

     If we are not setting up an inferior for the child (because following
     the parent and detach_fork is true), it is responsible for detaching
     from CHILD_PTID.  */
  target_follow_fork (child_inf, child_ptid, fork_kind, follow_child,
		      detach_fork);

  gdb::observers::inferior_forked.notify (parent_inf, child_inf, fork_kind);

  /* target_follow_fork must leave the parent as the current inferior.  If we
     want to follow the child, we make it the current one below.  */
  gdb_assert (current_inferior () == parent_inf);

  /* If there is a child inferior, target_follow_fork must have created a thread
     for it.  */
  if (child_inf != nullptr)
    gdb_assert (!child_inf->thread_list.empty ());

  /* Clear the parent thread's pending follow field.  Do this before calling
     target_detach, so that the target can differentiate the two following
     cases:

     - We continue past a fork with "follow-fork-mode == child" &&
       "detach-on-fork on", and therefore detach the parent.  In that
       case the target should not detach the fork child.
     - We run to a fork catchpoint and the user types "detach".  In that
       case, the target should detach the fork child in addition to the
       parent.

     The former case will have pending_follow cleared, the latter will have
     pending_follow set.  */
  thread_info *parent_thread = parent_inf->find_thread (parent_ptid);
  gdb_assert (parent_thread != nullptr);
  parent_thread->pending_follow.set_spurious ();

  /* Detach the parent if needed.  */
  if (follow_child)
    {
      /* If we're vforking, we want to hold on to the parent until
	 the child exits or execs.  At child exec or exit time we
	 can remove the old breakpoints from the parent and detach
	 or resume debugging it.  Otherwise, detach the parent now;
	 we'll want to reuse its program/address spaces, but we
	 can't set them to the child before removing breakpoints
	 from the parent, otherwise, the breakpoints module could
	 decide to remove breakpoints from the wrong process (since
	 they'd be assigned to the same address space).  */

      if (has_vforked)
	{
	  gdb_assert (child_inf->vfork_parent == nullptr);
	  gdb_assert (parent_inf->vfork_child == nullptr);
	  child_inf->vfork_parent = parent_inf;
	  child_inf->pending_detach = false;
	  parent_inf->vfork_child = child_inf;
	  parent_inf->pending_detach = detach_fork;
	}
      else if (detach_fork)
	{
	  if (print_inferior_events)
	    {
	      /* Ensure that we have a process ptid.  */
	      ptid_t process_ptid = ptid_t (parent_ptid.pid ());

	      target_terminal::ours_for_output ();
	      gdb_printf (_("[Detaching after fork from "
			    "parent %s]\n"),
			  target_pid_to_str (process_ptid).c_str ());
	    }

	  target_detach (parent_inf, 0);
	}
    }

  /* If we ended up creating a new inferior, call post_create_inferior to inform
     the various subcomponents.  */
  if (child_inf != nullptr)
    {
      /* If FOLLOW_CHILD, we leave CHILD_INF as the current inferior
	 (do not restore the parent as the current inferior).  */
      std::optional<scoped_restore_current_thread> maybe_restore;

      if (!follow_child && !sched_multi)
	maybe_restore.emplace ();

      switch_to_thread (*child_inf->threads ().begin ());
      post_create_inferior (0);
    }

  return false;
}
729
/* Set the last target status as TP having stopped with GDB_SIGNAL_0,
   i.e. a "clean" stop with no signal to report.  */

static void
set_last_target_status_stopped (thread_info *tp)
{
  set_last_target_status (tp->inf->process_target (), tp->ptid,
			  target_waitstatus {}.set_stopped (GDB_SIGNAL_0));
}
738
/* Tell the target to follow the fork we're stopped at.  Returns true
   if the inferior should be resumed; false, if the target for some
   reason decided it's best not to resume.  */

static bool
follow_fork ()
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  bool follow_child = (follow_fork_mode_string == follow_fork_mode_child);
  bool should_resume = true;

  /* Copy user stepping state to the new inferior thread.  FIXME: the
     followed fork child thread should have a copy of most of the
     parent thread structure's run control related fields, not just these.
     Initialized to avoid "may be used uninitialized" warnings from gcc.  */
  struct breakpoint *step_resume_breakpoint = nullptr;
  struct breakpoint *exception_resume_breakpoint = nullptr;
  CORE_ADDR step_range_start = 0;
  CORE_ADDR step_range_end = 0;
  int current_line = 0;
  symtab *current_symtab = nullptr;
  struct frame_id step_frame_id = { 0 };

  if (!non_stop)
    {
      thread_info *cur_thr = inferior_thread ();

      ptid_t resume_ptid
	= user_visible_resume_ptid (cur_thr->control.stepping_command);
      process_stratum_target *resume_target
	= user_visible_resume_target (resume_ptid);

      /* Check if there's a thread that we're about to resume, other
	 than the current, with an unfollowed fork/vfork.  If so,
	 switch back to it, to tell the target to follow it (in either
	 direction).  We'll afterwards refuse to resume, and inform
	 the user what happened.  */
      for (thread_info *tp : all_non_exited_threads (resume_target,
						     resume_ptid))
	{
	  if (tp == cur_thr)
	    continue;

	  /* follow_fork_inferior clears tp->pending_follow, and below
	     we'll need the value after the follow_fork_inferior
	     call.  */
	  target_waitkind kind = tp->pending_follow.kind ();

	  if (kind != TARGET_WAITKIND_SPURIOUS)
	    {
	      infrun_debug_printf ("need to follow-fork [%s] first",
				   tp->ptid.to_string ().c_str ());

	      switch_to_thread (tp);

	      /* Set up inferior(s) as specified by the caller, and
		 tell the target to do whatever is necessary to follow
		 either parent or child.  */
	      if (follow_child)
		{
		  /* The thread that started the execution command
		     won't exist in the child.  Abort the command and
		     immediately stop in this thread, in the child,
		     inside fork.  */
		  should_resume = false;
		}
	      else
		{
		  /* Following the parent, so let the thread fork its
		     child freely, it won't influence the current
		     execution command.  */
		  if (follow_fork_inferior (follow_child, detach_fork))
		    {
		      /* Target refused to follow, or there's some
			 other reason we shouldn't resume.  */
		      switch_to_thread (cur_thr);
		      set_last_target_status_stopped (cur_thr);
		      return false;
		    }

		  /* If we're following a vfork, then we need to leave
		     the just-forked thread as selected, as we need to
		     solo-resume it to collect the VFORK_DONE event.
		     If we're following a fork, however, switch back
		     to the original thread so that we continue stepping
		     it, etc.  */
		  if (kind != TARGET_WAITKIND_VFORKED)
		    {
		      gdb_assert (kind == TARGET_WAITKIND_FORKED);
		      switch_to_thread (cur_thr);
		    }
		}

	      break;
	    }
	}
    }

  thread_info *tp = inferior_thread ();

  /* If there were any forks/vforks that were caught and are now to be
     followed, then do so now.  */
  switch (tp->pending_follow.kind ())
    {
    case TARGET_WAITKIND_FORKED:
    case TARGET_WAITKIND_VFORKED:
      {
	ptid_t parent, child;
	std::unique_ptr<struct thread_fsm> thread_fsm;

	/* If the user did a next/step, etc, over a fork call,
	   preserve the stepping state in the fork child.  */
	if (follow_child && should_resume)
	  {
	    step_resume_breakpoint = clone_momentary_breakpoint
	      (tp->control.step_resume_breakpoint);
	    step_range_start = tp->control.step_range_start;
	    step_range_end = tp->control.step_range_end;
	    current_line = tp->current_line;
	    current_symtab = tp->current_symtab;
	    step_frame_id = tp->control.step_frame_id;
	    exception_resume_breakpoint
	      = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
	    thread_fsm = tp->release_thread_fsm ();

	    /* For now, delete the parent's sr breakpoint, otherwise,
	       parent/child sr breakpoints are considered duplicates,
	       and the child version will not be installed.  Remove
	       this when the breakpoints module becomes aware of
	       inferiors and address spaces.  */
	    delete_step_resume_breakpoint (tp);
	    tp->control.step_range_start = 0;
	    tp->control.step_range_end = 0;
	    tp->control.step_frame_id = null_frame_id;
	    delete_exception_resume_breakpoint (tp);
	  }

	parent = inferior_ptid;
	child = tp->pending_follow.child_ptid ();

	/* If handling a vfork, stop all the inferior's threads, they will be
	   restarted when the vfork shared region is complete.  */
	if (tp->pending_follow.kind () == TARGET_WAITKIND_VFORKED
	    && target_is_non_stop_p ())
	  stop_all_threads ("handling vfork", tp->inf);

	process_stratum_target *parent_targ = tp->inf->process_target ();
	/* Set up inferior(s) as specified by the caller, and tell the
	   target to do whatever is necessary to follow either parent
	   or child.  */
	if (follow_fork_inferior (follow_child, detach_fork))
	  {
	    /* Target refused to follow, or there's some other reason
	       we shouldn't resume.  */
	    should_resume = 0;
	  }
	else
	  {
	    /* If we followed the child, switch to it...  */
	    if (follow_child)
	      {
		tp = parent_targ->find_thread (child);
		switch_to_thread (tp);

		/* ... and preserve the stepping state, in case the
		   user was stepping over the fork call.  */
		if (should_resume)
		  {
		    tp->control.step_resume_breakpoint
		      = step_resume_breakpoint;
		    tp->control.step_range_start = step_range_start;
		    tp->control.step_range_end = step_range_end;
		    tp->current_line = current_line;
		    tp->current_symtab = current_symtab;
		    tp->control.step_frame_id = step_frame_id;
		    tp->control.exception_resume_breakpoint
		      = exception_resume_breakpoint;
		    tp->set_thread_fsm (std::move (thread_fsm));
		  }
		else
		  {
		    /* If we get here, it was because we're trying to
		       resume from a fork catchpoint, but, the user
		       has switched threads away from the thread that
		       forked.  In that case, the resume command
		       issued is most likely not applicable to the
		       child, so just warn, and refuse to resume.  */
		    warning (_("Not resuming: switched threads "
			       "before following fork child."));
		  }

		/* Reset breakpoints in the child as appropriate.  */
		follow_inferior_reset_breakpoints ();
	      }
	  }
      }
      break;
    case TARGET_WAITKIND_SPURIOUS:
      /* Nothing to follow.  */
      break;
    default:
      internal_error ("Unexpected pending_follow.kind %d\n",
		      tp->pending_follow.kind ());
      break;
    }

  if (!should_resume)
    set_last_target_status_stopped (tp);
  return should_resume;
}
950
/* After following a fork/vfork child, fix up the breakpoint state of
   the newly-selected thread: rebind per-thread momentary breakpoints
   to it and reinsert the user's breakpoints in the child.  */
static void
follow_inferior_reset_breakpoints (void)
{
  struct thread_info *tp = inferior_thread ();

  /* Was there a step_resume breakpoint?  (There was if the user
     did a "next" at the fork() call.)  If so, explicitly reset its
     thread number.  Cloned step_resume breakpoints are disabled on
     creation, so enable it here now that it is associated with the
     correct thread.

     step_resumes are a form of bp that are made to be per-thread.
     Since we created the step_resume bp when the parent process
     was being debugged, and now are switching to the child process,
     from the breakpoint package's viewpoint, that's a switch of
     "threads".  We must update the bp's notion of which thread
     it is for, or it'll be ignored when it triggers.  */

  if (tp->control.step_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
      tp->control.step_resume_breakpoint->first_loc ().enabled = 1;
    }

  /* Treat exception_resume breakpoints like step_resume breakpoints.  */
  if (tp->control.exception_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
      tp->control.exception_resume_breakpoint->first_loc ().enabled = 1;
    }

  /* Reinsert all breakpoints in the child.  The user may have set
     breakpoints after catching the fork, in which case those
     were never set in the child, but only in the parent.  This makes
     sure the inserted breakpoints match the breakpoint list.  */

  breakpoint_re_set ();
  insert_breakpoints ();
}
990
991 /* The child has exited or execed: resume THREAD, a thread of the parent,
992 if it was meant to be executing. */
993
994 static void
995 proceed_after_vfork_done (thread_info *thread)
996 {
997 if (thread->state == THREAD_RUNNING
998 && !thread->executing ()
999 && !thread->stop_requested
1000 && thread->stop_signal () == GDB_SIGNAL_0)
1001 {
1002 infrun_debug_printf ("resuming vfork parent thread %s",
1003 thread->ptid.to_string ().c_str ());
1004
1005 switch_to_thread (thread);
1006 clear_proceed_status (0);
1007 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
1008 }
1009 }
1010
/* Called whenever we notice an exec or exit event, to handle
   detaching or resuming a vfork parent.  EXEC is non-zero if the
   vfork child exec'ed, zero if it exited.  */

static void
handle_vfork_child_exec_or_exit (int exec)
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  struct inferior *inf = current_inferior ();

  /* Nothing to do unless the current inferior is a vfork child whose
     parent we are still attached to.  */
  if (inf->vfork_parent)
    {
      inferior *resume_parent = nullptr;

      /* This exec or exit marks the end of the shared memory region
	 between the parent and the child.  Break the bonds.  */
      inferior *vfork_parent = inf->vfork_parent;
      inf->vfork_parent->vfork_child = nullptr;
      inf->vfork_parent = nullptr;

      /* If the user wanted to detach from the parent, now is the
	 time.  */
      if (vfork_parent->pending_detach)
	{
	  struct program_space *pspace;

	  /* follow-fork child, detach-on-fork on.  */

	  vfork_parent->pending_detach = false;

	  scoped_restore_current_pspace_and_thread restore_thread;

	  /* We're letting loose of the parent.  */
	  thread_info *tp = any_live_thread_of_inferior (vfork_parent);
	  switch_to_thread (tp);

	  /* We're about to detach from the parent, which implicitly
	     removes breakpoints from its address space.  There's a
	     catch here: we want to reuse the spaces for the child,
	     but, parent/child are still sharing the pspace at this
	     point, although the exec in reality makes the kernel give
	     the child a fresh set of new pages.  The problem here is
	     that the breakpoints module being unaware of this, would
	     likely chose the child process to write to the parent
	     address space.  Swapping the child temporarily away from
	     the spaces has the desired effect.  Yes, this is "sort
	     of" a hack.  */

	  pspace = inf->pspace;
	  inf->pspace = nullptr;
	  address_space_ref_ptr aspace = std::move (inf->aspace);

	  if (print_inferior_events)
	    {
	      std::string pidstr
		= target_pid_to_str (ptid_t (vfork_parent->pid));

	      target_terminal::ours_for_output ();

	      if (exec)
		{
		  gdb_printf (_("[Detaching vfork parent %s "
				"after child exec]\n"), pidstr.c_str ());
		}
	      else
		{
		  gdb_printf (_("[Detaching vfork parent %s "
				"after child exit]\n"), pidstr.c_str ());
		}
	    }

	  target_detach (vfork_parent, 0);

	  /* Put it back.  */
	  inf->pspace = pspace;
	  inf->aspace = aspace;
	}
      else if (exec)
	{
	  /* We're staying attached to the parent, so, really give the
	     child a new address space.  */
	  inf->pspace = new program_space (maybe_new_address_space ());
	  inf->aspace = inf->pspace->aspace;
	  inf->removable = true;
	  set_current_program_space (inf->pspace);

	  resume_parent = vfork_parent;
	}
      else
	{
	  /* If this is a vfork child exiting, then the pspace and
	     aspaces were shared with the parent.  Since we're
	     reporting the process exit, we'll be mourning all that is
	     found in the address space, and switching to null_ptid,
	     preparing to start a new inferior.  But, since we don't
	     want to clobber the parent's address/program spaces, we
	     go ahead and create a new one for this exiting
	     inferior.  */

	  scoped_restore_current_thread restore_thread;

	  /* Temporarily switch to the vfork parent, to facilitate ptrace
	     calls done during maybe_new_address_space.  */
	  switch_to_thread (any_live_thread_of_inferior (vfork_parent));
	  address_space_ref_ptr aspace = maybe_new_address_space ();

	  /* Switch back to the vfork child inferior.  Switch to no-thread
	     while running clone_program_space, so that clone_program_space
	     doesn't want to read the selected frame of a dead process.  */
	  switch_to_inferior_no_thread (inf);

	  inf->pspace = new program_space (std::move (aspace));
	  inf->aspace = inf->pspace->aspace;
	  set_current_program_space (inf->pspace);
	  inf->removable = true;
	  inf->symfile_flags = SYMFILE_NO_READ;
	  clone_program_space (inf->pspace, vfork_parent->pspace);

	  resume_parent = vfork_parent;
	}

      gdb_assert (current_program_space == inf->pspace);

      /* In non-stop mode, the parent's threads were left stopped while
	 the shared address space window was open; resume them now if
	 the user meant them to be running.  */
      if (non_stop && resume_parent != nullptr)
	{
	  /* If the user wanted the parent to be running, let it go
	     free now.  */
	  scoped_restore_current_thread restore_thread;

	  infrun_debug_printf ("resuming vfork parent process %d",
			       resume_parent->pid);

	  for (thread_info *thread : resume_parent->threads ())
	    proceed_after_vfork_done (thread);
	}
    }
}
1148
/* Handle TARGET_WAITKIND_VFORK_DONE, reported for EVENT_THREAD.  */

static void
handle_vfork_done (thread_info *event_thread)
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  /* We only care about this event if inferior::thread_waiting_for_vfork_done is
     set, that is if we are waiting for a vfork child not under our control
     (because we detached it) to exec or exit.

     If an inferior has vforked and we are debugging the child, we don't use
     the vfork-done event to get notified about the end of the shared address
     space window.  We rely instead on the child's exec or exit event, and the
     inferior::vfork_{parent,child} fields are used instead.  See
     handle_vfork_child_exec_or_exit for that.  */
  if (event_thread->inf->thread_waiting_for_vfork_done == nullptr)
    {
      infrun_debug_printf ("not waiting for a vfork-done event");
      return;
    }

  /* We stopped all threads (other than the vforking thread) of the inferior in
     follow_fork and kept them stopped until now.  It should therefore not be
     possible for another thread to have reported a vfork during that window.
     If THREAD_WAITING_FOR_VFORK_DONE is set, it has to be the same thread whose
     vfork-done we are handling right now.  */
  gdb_assert (event_thread->inf->thread_waiting_for_vfork_done == event_thread);

  /* The shared address space window is closed; breakpoints may be
     inserted in this inferior's program space again.  */
  event_thread->inf->thread_waiting_for_vfork_done = nullptr;
  event_thread->inf->pspace->breakpoints_not_allowed = 0;

  /* On non-stop targets, we stopped all the inferior's threads in follow_fork,
     resume them now.  On all-stop targets, everything that needs to be resumed
     will be when we resume the event thread.  */
  if (target_is_non_stop_p ())
    {
      /* restart_threads and start_step_over may change the current thread, make
	 sure we leave the event thread as the current thread.  */
      scoped_restore_current_thread restore_thread;

      insert_breakpoints ();
      start_step_over ();

      /* Only restart the other threads if no step-over is in progress;
	 a pending step-over requires them to stay stopped.  */
      if (!step_over_info_valid_p ())
	restart_threads (event_thread, event_thread->inf);
    }
}
1197
/* Enum strings for "set|show follow-exec-mode".  */

static const char follow_exec_mode_new[] = "new";
static const char follow_exec_mode_same[] = "same";
static const char *const follow_exec_mode_names[] =
{
  follow_exec_mode_new,
  follow_exec_mode_same,
  nullptr,
};

/* The current "follow-exec-mode" setting; always points at one of the
   strings above.  Defaults to "same": keep debugging the same
   inferior across an exec.  */
static const char *follow_exec_mode_string = follow_exec_mode_same;
/* Implement the "show follow-exec-mode" command: print VALUE, the
   current setting, to FILE.  */
static void
show_follow_exec_mode_string (struct ui_file *file, int from_tty,
			      struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Follow exec mode is \"%s\".\n"), value);
}
1216
/* Follow an exec event reported for PTID.  EXEC_FILE_TARGET is the
   new executable's path as reported by the target and is assumed to
   be non-NULL.  Depending on "follow-exec-mode", execution continues
   either in the same inferior or in a fresh one.  */

static void
follow_exec (ptid_t ptid, const char *exec_file_target)
{
  int pid = ptid.pid ();
  ptid_t process_ptid;

  /* Switch terminal for any messages produced e.g. by
     breakpoint_re_set.  */
  target_terminal::ours_for_output ();

  /* This is an exec event that we actually wish to pay attention to.
     Refresh our symbol table to the newly exec'd program, remove any
     momentary bp's, etc.

     If there are breakpoints, they aren't really inserted now,
     since the exec() transformed our inferior into a fresh set
     of instructions.

     We want to preserve symbolic breakpoints on the list, since
     we have hopes that they can be reset after the new a.out's
     symbol table is read.

     However, any "raw" breakpoints must be removed from the list
     (e.g., the solib bp's), since their address is probably invalid
     now.

     And, we DON'T want to call delete_breakpoints() here, since
     that may write the bp's "shadow contents" (the instruction
     value that was overwritten with a TRAP instruction).  Since
     we now have a new a.out, those shadow contents aren't valid.  */

  mark_breakpoints_out (current_program_space);

  /* The target reports the exec event to the main thread, even if
     some other thread does the exec, and even if the main thread was
     stopped or already gone.  We may still have non-leader threads of
     the process on our list.  E.g., on targets that don't have thread
     exit events (like remote) and nothing forces an update of the
     thread list up to here.  When debugging remotely, it's best to
     avoid extra traffic, when possible, so avoid syncing the thread
     list with the target, and instead go ahead and delete all threads
     of the process but the one that reported the event.  Note this must
     be done before calling update_breakpoints_after_exec, as
     otherwise clearing the threads' resources would reference stale
     thread breakpoints -- it may have been one of these threads that
     stepped across the exec.  We could just clear their stepping
     states, but as long as we're iterating, might as well delete
     them.  Deleting them now rather than at the next user-visible
     stop provides a nicer sequence of events for user and MI
     notifications.  */
  for (thread_info *th : all_threads_safe ())
    if (th->ptid.pid () == pid && th->ptid != ptid)
      delete_thread (th);

  /* We also need to clear any left over stale state for the
     leader/event thread.  E.g., if there was any step-resume
     breakpoint or similar, it's gone now.  We cannot truly
     step-to-next statement through an exec().  */
  thread_info *th = inferior_thread ();
  th->control.step_resume_breakpoint = nullptr;
  th->control.exception_resume_breakpoint = nullptr;
  th->control.single_step_breakpoints = nullptr;
  th->control.step_range_start = 0;
  th->control.step_range_end = 0;

  /* The user may have had the main thread held stopped in the
     previous image (e.g., schedlock on, or non-stop).  Release
     it now.  */
  th->stop_requested = 0;

  update_breakpoints_after_exec ();

  /* What is this a.out's name?  */
  process_ptid = ptid_t (pid);
  gdb_printf (_("%s is executing new program: %s\n"),
	      target_pid_to_str (process_ptid).c_str (),
	      exec_file_target);

  /* We've followed the inferior through an exec.  Therefore, the
     inferior has essentially been killed & reborn.  */

  breakpoint_init_inferior (current_inferior (), inf_execd);

  /* Map the target-reported path to a path on the GDB host.  */
  gdb::unique_xmalloc_ptr<char> exec_file_host
    = exec_file_find (exec_file_target, nullptr);

  /* If we were unable to map the executable target pathname onto a host
     pathname, tell the user that.  Otherwise GDB's subsequent behavior
     is confusing.  Maybe it would even be better to stop at this point
     so that the user can specify a file manually before continuing.  */
  if (exec_file_host == nullptr)
    warning (_("Could not load symbols for executable %s.\n"
	       "Do you need \"set sysroot\"?"),
	     exec_file_target);

  /* Reset the shared library package.  This ensures that we get a
     shlib event when the child reaches "_start", at which point the
     dld will have had a chance to initialize the child.  */
  /* Also, loading a symbol file below may trigger symbol lookups, and
     we don't want those to be satisfied by the libraries of the
     previous incarnation of this process.  */
  no_shared_libraries (nullptr, 0);

  inferior *execing_inferior = current_inferior ();
  inferior *following_inferior;

  if (follow_exec_mode_string == follow_exec_mode_new)
    {
      /* The user wants to keep the old inferior and program spaces
	 around.  Create a new fresh one, and switch to it.  */

      /* Do exit processing for the original inferior before setting the new
	 inferior's pid.  Having two inferiors with the same pid would confuse
	 find_inferior_p(t)id.  Transfer the terminal state and info from the
	 old to the new inferior.  */
      following_inferior = add_inferior_with_spaces ();

      swap_terminal_info (following_inferior, execing_inferior);
      exit_inferior (execing_inferior);

      following_inferior->pid = pid;
    }
  else
    {
      /* follow-exec-mode is "same", we continue execution in the execing
	 inferior.  */
      following_inferior = execing_inferior;

      /* The old description may no longer be fit for the new image.
	 E.g, a 64-bit process exec'ed a 32-bit process.  Clear the
	 old description; we'll read a new one below.  No need to do
	 this on "follow-exec-mode new", as the old inferior stays
	 around (its description is later cleared/refetched on
	 restart).  */
      target_clear_description ();
    }

  target_follow_exec (following_inferior, ptid, exec_file_target);

  gdb_assert (current_inferior () == following_inferior);
  gdb_assert (current_program_space == following_inferior->pspace);

  /* Attempt to open the exec file.  SYMFILE_DEFER_BP_RESET is used
     because the proper displacement for a PIE (Position Independent
     Executable) main symbol file will only be computed by
     solib_create_inferior_hook below.  breakpoint_re_set would fail
     to insert the breakpoints with the zero displacement.  */
  try_open_exec_file (exec_file_host.get (), following_inferior,
		      SYMFILE_DEFER_BP_RESET);

  /* If the target can specify a description, read it.  Must do this
     after flipping to the new executable (because the target supplied
     description must be compatible with the executable's
     architecture, and the old executable may e.g., be 32-bit, while
     the new one 64-bit), and before anything involving memory or
     registers.  */
  target_find_description ();

  gdb::observers::inferior_execd.notify (execing_inferior, following_inferior);

  breakpoint_re_set ();

  /* Reinsert all breakpoints.  (Those which were symbolic have
     been reset to the proper address in the new a.out, thanks
     to symbol_file_command...).  */
  insert_breakpoints ();

  /* The next resume of this inferior should bring it to the shlib
     startup breakpoints.  (If the user had also set bp's on
     "main" from the old (parent) process, then they'll auto-
     matically get reset there in the new process.).  */
}
1391
/* The chain of threads that need to do a step-over operation to get
   past e.g., a breakpoint.  What technique is used to step over the
   breakpoint/watchpoint does not matter -- all threads end up in the
   same queue, to maintain rough temporal order of execution, in order
   to avoid starvation, otherwise, we could e.g., find ourselves
   constantly stepping the same couple threads past their breakpoints
   over and over, if the single-step finishes fast enough.  */
thread_step_over_list global_thread_step_over_list;
1400
/* Bit flags indicating what the thread needs to step over.  */

enum step_over_what_flag
{
  /* Step over a breakpoint.  */
  STEP_OVER_BREAKPOINT = 1,

  /* Step past a non-continuable watchpoint, in order to let the
     instruction execute so we can evaluate the watchpoint
     expression.  */
  STEP_OVER_WATCHPOINT = 2
};
/* step_over_what is a flags type combining the bits above.  */
DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag, step_over_what);
1414
/* Info about an instruction that is being stepped over.  */

struct step_over_info
{
  /* If we're stepping past a breakpoint, this is the address space
     and address of the instruction the breakpoint is set at.  We'll
     skip inserting all breakpoints here.  Valid iff ASPACE is
     non-NULL.  */
  const address_space *aspace = nullptr;
  CORE_ADDR address = 0;

  /* The instruction being stepped over triggers a nonsteppable
     watchpoint.  If true, we'll skip inserting watchpoints.
     (An int used as a boolean.)  */
  int nonsteppable_watchpoint_p = 0;

  /* The thread's global number.  -1 means no thread is stepping
     over a breakpoint.  */
  int thread = -1;
};
1433
/* The step-over info of the location that is being stepped over.

   Note that with async/breakpoint always-inserted mode, a user might
   set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
   being stepped over.  As setting a new breakpoint inserts all
   breakpoints, we need to make sure the breakpoint being stepped over
   isn't inserted then.  We do that by only clearing the step-over
   info when the step-over is actually finished (or aborted).

   Presently GDB can only step over one breakpoint at any given time.
   Given threads that can't run code in the same address space as the
   breakpoint's can't really miss the breakpoint, GDB could be taught
   to step-over at most one breakpoint per address space (so this info
   could move to the address space object if/when GDB is extended).
   The set of breakpoints being stepped over will normally be much
   smaller than the set of all breakpoints, so a flag in the
   breakpoint location structure would be wasteful.  A separate list
   also saves complexity and run-time, as otherwise we'd have to go
   through all breakpoint locations clearing their flag whenever we
   start a new sequence.  Similar considerations weigh against storing
   this info in the thread object.  Plus, not all step overs actually
   have breakpoint locations -- e.g., stepping past a single-step
   breakpoint, or stepping to complete a non-continuable
   watchpoint.  */
static struct step_over_info step_over_info;
1459
1460 /* Record the address of the breakpoint/instruction we're currently
1461 stepping over.
1462 N.B. We record the aspace and address now, instead of say just the thread,
1463 because when we need the info later the thread may be running. */
1464
1465 static void
1466 set_step_over_info (const address_space *aspace, CORE_ADDR address,
1467 int nonsteppable_watchpoint_p,
1468 int thread)
1469 {
1470 step_over_info.aspace = aspace;
1471 step_over_info.address = address;
1472 step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
1473 step_over_info.thread = thread;
1474 }
1475
1476 /* Called when we're not longer stepping over a breakpoint / an
1477 instruction, so all breakpoints are free to be (re)inserted. */
1478
1479 static void
1480 clear_step_over_info (void)
1481 {
1482 infrun_debug_printf ("clearing step over info");
1483 step_over_info.aspace = nullptr;
1484 step_over_info.address = 0;
1485 step_over_info.nonsteppable_watchpoint_p = 0;
1486 step_over_info.thread = -1;
1487 }
1488
1489 /* See infrun.h. */
1490
1491 int
1492 stepping_past_instruction_at (struct address_space *aspace,
1493 CORE_ADDR address)
1494 {
1495 return (step_over_info.aspace != nullptr
1496 && breakpoint_address_match (aspace, address,
1497 step_over_info.aspace,
1498 step_over_info.address));
1499 }
1500
1501 /* See infrun.h. */
1502
1503 int
1504 thread_is_stepping_over_breakpoint (int thread)
1505 {
1506 return (step_over_info.thread != -1
1507 && thread == step_over_info.thread);
1508 }
1509
/* See infrun.h.  Returns non-zero iff the instruction currently being
   stepped over triggers a non-steppable watchpoint.  */

int
stepping_past_nonsteppable_watchpoint (void)
{
  return step_over_info.nonsteppable_watchpoint_p;
}
1517
1518 /* Returns true if step-over info is valid. */
1519
1520 static bool
1521 step_over_info_valid_p (void)
1522 {
1523 return (step_over_info.aspace != nullptr
1524 || stepping_past_nonsteppable_watchpoint ());
1525 }
1526
1527 \f
1528 /* Displaced stepping. */
1529
1530 /* In non-stop debugging mode, we must take special care to manage
1531 breakpoints properly; in particular, the traditional strategy for
1532 stepping a thread past a breakpoint it has hit is unsuitable.
1533 'Displaced stepping' is a tactic for stepping one thread past a
1534 breakpoint it has hit while ensuring that other threads running
1535 concurrently will hit the breakpoint as they should.
1536
1537 The traditional way to step a thread T off a breakpoint in a
1538 multi-threaded program in all-stop mode is as follows:
1539
1540 a0) Initially, all threads are stopped, and breakpoints are not
1541 inserted.
1542 a1) We single-step T, leaving breakpoints uninserted.
1543 a2) We insert breakpoints, and resume all threads.
1544
1545 In non-stop debugging, however, this strategy is unsuitable: we
1546 don't want to have to stop all threads in the system in order to
1547 continue or step T past a breakpoint. Instead, we use displaced
1548 stepping:
1549
1550 n0) Initially, T is stopped, other threads are running, and
1551 breakpoints are inserted.
1552 n1) We copy the instruction "under" the breakpoint to a separate
1553 location, outside the main code stream, making any adjustments
1554 to the instruction, register, and memory state as directed by
1555 T's architecture.
1556 n2) We single-step T over the instruction at its new location.
1557 n3) We adjust the resulting register and memory state as directed
1558 by T's architecture. This includes resetting T's PC to point
1559 back into the main instruction stream.
1560 n4) We resume T.
1561
1562 This approach depends on the following gdbarch methods:
1563
1564 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1565 indicate where to copy the instruction, and how much space must
1566 be reserved there. We use these in step n1.
1567
   - gdbarch_displaced_step_copy_insn copies an instruction to a new
1569 address, and makes any necessary adjustments to the instruction,
1570 register contents, and memory. We use this in step n1.
1571
1572 - gdbarch_displaced_step_fixup adjusts registers and memory after
1573 we have successfully single-stepped the instruction, to yield the
1574 same effect the instruction would have had if we had executed it
1575 at its original address. We use this in step n3.
1576
1577 The gdbarch_displaced_step_copy_insn and
1578 gdbarch_displaced_step_fixup functions must be written so that
1579 copying an instruction with gdbarch_displaced_step_copy_insn,
1580 single-stepping across the copied instruction, and then applying
   gdbarch_displaced_step_fixup should have the same effects on the
1582 thread's memory and registers as stepping the instruction in place
1583 would have. Exactly which responsibilities fall to the copy and
1584 which fall to the fixup is up to the author of those functions.
1585
1586 See the comments in gdbarch.sh for details.
1587
1588 Note that displaced stepping and software single-step cannot
1589 currently be used in combination, although with some care I think
1590 they could be made to. Software single-step works by placing
1591 breakpoints on all possible subsequent instructions; if the
1592 displaced instruction is a PC-relative jump, those breakpoints
1593 could fall in very strange places --- on pages that aren't
1594 executable, or at addresses that are not proper instruction
1595 boundaries. (We do generally let other threads run while we wait
1596 to hit the software single-step breakpoint, and they might
1597 encounter such a corrupted instruction.) One way to work around
1598 this would be to have gdbarch_displaced_step_copy_insn fully
1599 simulate the effect of PC-relative instructions (and return NULL)
1600 on architectures that use software single-stepping.
1601
1602 In non-stop mode, we can have independent and simultaneous step
1603 requests, so more than one thread may need to simultaneously step
1604 over a breakpoint. The current implementation assumes there is
1605 only one scratch space per process. In this case, we have to
1606 serialize access to the scratch space. If thread A wants to step
1607 over a breakpoint, but we are currently waiting for some other
1608 thread to complete a displaced step, we leave thread A stopped and
1609 place it in the displaced_step_request_queue. Whenever a displaced
1610 step finishes, we pick the next thread in the queue and start a new
1611 displaced step operation on it. See displaced_step_prepare and
1612 displaced_step_finish for details. */
1613
1614 /* Return true if THREAD is doing a displaced step. */
1615
1616 static bool
1617 displaced_step_in_progress_thread (thread_info *thread)
1618 {
1619 gdb_assert (thread != nullptr);
1620
1621 return thread->displaced_step_state.in_progress ();
1622 }
1623
1624 /* Return true if INF has a thread doing a displaced step. */
1625
1626 static bool
1627 displaced_step_in_progress (inferior *inf)
1628 {
1629 return inf->displaced_step_state.in_progress_count > 0;
1630 }
1631
1632 /* Return true if any thread is doing a displaced step. */
1633
1634 static bool
1635 displaced_step_in_progress_any_thread ()
1636 {
1637 for (inferior *inf : all_non_exited_inferiors ())
1638 {
1639 if (displaced_step_in_progress (inf))
1640 return true;
1641 }
1642
1643 return false;
1644 }
1645
1646 static void
1647 infrun_inferior_exit (struct inferior *inf)
1648 {
1649 inf->displaced_step_state.reset ();
1650 inf->thread_waiting_for_vfork_done = nullptr;
1651 }
1652
/* The "inferior_execd" observer: EXEC_INF exec'ed and execution
   continues in FOLLOW_INF (the same inferior, or a new one, per
   "follow-exec-mode").  Clear infrun state that the exec made
   stale.  */

static void
infrun_inferior_execd (inferior *exec_inf, inferior *follow_inf)
{
  /* If any threads were doing a displaced step in this inferior at the
     moment of the exec, they no longer exist.  Even if the exec'ing
     thread itself was doing a displaced step, we don't want to do any
     fixup nor restore displaced stepping buffer bytes.  */
  follow_inf->displaced_step_state.reset ();

  for (thread_info *thread : follow_inf->threads ())
    thread->displaced_step_state.reset ();

  /* Since an in-line step is done with everything else stopped, if there was
     one in progress at the time of the exec, it must have been the exec'ing
     thread.  */
  clear_step_over_info ();

  follow_inf->thread_waiting_for_vfork_done = nullptr;
}
1672
/* If ON, and the architecture supports it, GDB will use displaced
   stepping to step over breakpoints.  If OFF, or if the architecture
   doesn't support it, GDB will instead use the traditional
   hold-and-step approach.  If AUTO (which is the default), GDB will
   decide which technique to use to step over breakpoints depending on
   whether the target works in a non-stop way (see use_displaced_stepping).  */

static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1681
/* Implement "show displaced-stepping".  When the setting is "auto",
   also report the effective value, which depends on whether the
   target is non-stop.  */

static void
show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
				 struct cmd_list_element *c,
				 const char *value)
{
  if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
    gdb_printf (file,
		_("Debugger's willingness to use displaced stepping "
		  "to step over breakpoints is %s (currently %s).\n"),
		value, target_is_non_stop_p () ? "on" : "off");
  else
    gdb_printf (file,
		_("Debugger's willingness to use displaced stepping "
		  "to step over breakpoints is %s.\n"), value);
}
1697
/* Return true if the gdbarch implements the required methods to use
   displaced stepping.  */

static bool
gdbarch_supports_displaced_stepping (gdbarch *arch)
{
  /* Only check for the presence of `prepare`.  The gdbarch verification ensures
     that if `prepare` is provided, so is `finish`.  */
  return gdbarch_displaced_step_prepare_p (arch);
}
1708
1709 /* Return non-zero if displaced stepping can/should be used to step
1710 over breakpoints of thread TP. */
1711
1712 static bool
1713 use_displaced_stepping (thread_info *tp)
1714 {
1715 /* If the user disabled it explicitly, don't use displaced stepping. */
1716 if (can_use_displaced_stepping == AUTO_BOOLEAN_FALSE)
1717 return false;
1718
1719 /* If "auto", only use displaced stepping if the target operates in a non-stop
1720 way. */
1721 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
1722 && !target_is_non_stop_p ())
1723 return false;
1724
1725 gdbarch *gdbarch = get_thread_regcache (tp)->arch ();
1726
1727 /* If the architecture doesn't implement displaced stepping, don't use
1728 it. */
1729 if (!gdbarch_supports_displaced_stepping (gdbarch))
1730 return false;
1731
1732 /* If recording, don't use displaced stepping. */
1733 if (find_record_target () != nullptr)
1734 return false;
1735
1736 /* If displaced stepping failed before for this inferior, don't bother trying
1737 again. */
1738 if (tp->inf->displaced_step_state.failed_before)
1739 return false;
1740
1741 return true;
1742 }
1743
/* Simple function wrapper around displaced_step_thread_state::reset,
   so it can be used as a cleanup function.  DISPLACED is the
   per-thread displaced-stepping state to clear.  */

static void
displaced_step_reset (displaced_step_thread_state *displaced)
{
  displaced->reset ();
}

/* A cleanup that wraps displaced_step_reset.  We use this instead of, say,
   SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()".  */

using displaced_step_reset_cleanup = FORWARD_SCOPE_EXIT (displaced_step_reset);
1756
1757 /* Prepare to single-step, using displaced stepping.
1758
1759 Note that we cannot use displaced stepping when we have a signal to
1760 deliver. If we have a signal to deliver and an instruction to step
1761 over, then after the step, there will be no indication from the
1762 target whether the thread entered a signal handler or ignored the
1763 signal and stepped over the instruction successfully --- both cases
1764 result in a simple SIGTRAP. In the first case we mustn't do a
1765 fixup, and in the second case we must --- but we can't tell which.
1766 Comments in the code for 'random signals' in handle_inferior_event
1767 explain how we handle this case instead.
1768
1769 Returns DISPLACED_STEP_PREPARE_STATUS_OK if preparing was successful -- this
1770 thread is going to be stepped now; DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
1771 if displaced stepping this thread got queued; or
1772 DISPLACED_STEP_PREPARE_STATUS_CANT if this instruction can't be displaced
1773 stepped. */
1774
static displaced_step_prepare_status
displaced_step_prepare_throw (thread_info *tp)
{
  regcache *regcache = get_thread_regcache (tp);
  struct gdbarch *gdbarch = regcache->arch ();
  displaced_step_thread_state &disp_step_thread_state
    = tp->displaced_step_state;

  /* We should never reach this function if the architecture does not
     support displaced stepping.  */
  gdb_assert (gdbarch_supports_displaced_stepping (gdbarch));

  /* Nor if the thread isn't meant to step over a breakpoint.  */
  gdb_assert (tp->control.trap_expected);

  /* Disable range stepping while executing in the scratch pad.  We
     want a single-step even if executing the displaced instruction in
     the scratch buffer lands within the stepping range (e.g., a
     jump/branch).  */
  tp->control.may_range_step = 0;

  /* We are about to start a displaced step for this thread.  If one is already
     in progress, something's wrong.  */
  gdb_assert (!disp_step_thread_state.in_progress ());

  if (tp->inf->displaced_step_state.unavailable)
    {
      /* The gdbarch tells us it's not worth asking to try a prepare because
	 it is likely that it will return unavailable, so don't bother
	 asking.  Queue the thread so the step is retried later, when
	 resources free up.  */

      displaced_debug_printf ("deferring step of %s",
			      tp->ptid.to_string ().c_str ());

      global_thread_step_over_chain_enqueue (tp);
      return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
    }

  displaced_debug_printf ("displaced-stepping %s now",
			  tp->ptid.to_string ().c_str ());

  /* The prepare and debug-dump code below reads memory/registers in
     the context of TP, so temporarily make it the current thread.
     The previous thread is restored on scope exit.  */
  scoped_restore_current_thread restore_thread;

  switch_to_thread (tp);

  CORE_ADDR original_pc = regcache_read_pc (regcache);
  /* Filled in by gdbarch_displaced_step_prepare with the address of
     the instruction copy in the scratch buffer.  */
  CORE_ADDR displaced_pc;

  /* Display the instruction we are going to displaced step.  */
  if (debug_displaced)
    {
      string_file tmp_stream;
      int dislen = gdb_print_insn (gdbarch, original_pc, &tmp_stream,
				   nullptr);

      if (dislen > 0)
	{
	  gdb::byte_vector insn_buf (dislen);
	  read_memory (original_pc, insn_buf.data (), insn_buf.size ());

	  std::string insn_bytes = bytes_to_string (insn_buf);

	  displaced_debug_printf ("original insn %s: %s \t %s",
				  paddress (gdbarch, original_pc),
				  insn_bytes.c_str (),
				  tmp_stream.string ().c_str ());
	}
      else
	displaced_debug_printf ("original insn %s: invalid length: %d",
				paddress (gdbarch, original_pc), dislen);
    }

  /* Ask the architecture to copy the instruction into a scratch
     buffer and set the thread's PC there.  */
  displaced_step_prepare_status status
    = gdbarch_displaced_step_prepare (gdbarch, tp, displaced_pc);

  if (status == DISPLACED_STEP_PREPARE_STATUS_CANT)
    {
      displaced_debug_printf ("failed to prepare (%s)",
			      tp->ptid.to_string ().c_str ());

      return DISPLACED_STEP_PREPARE_STATUS_CANT;
    }
  else if (status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
    {
      /* Not enough displaced stepping resources available, defer this
	 request by placing it in the queue.  */

      displaced_debug_printf ("not enough resources available, "
			      "deferring step of %s",
			      tp->ptid.to_string ().c_str ());

      global_thread_step_over_chain_enqueue (tp);

      return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
    }

  gdb_assert (status == DISPLACED_STEP_PREPARE_STATUS_OK);

  /* Save the information we need to fix things up if the step
     succeeds.  */
  disp_step_thread_state.set (gdbarch);

  tp->inf->displaced_step_state.in_progress_count++;

  displaced_debug_printf ("prepared successfully thread=%s, "
			  "original_pc=%s, displaced_pc=%s",
			  tp->ptid.to_string ().c_str (),
			  paddress (gdbarch, original_pc),
			  paddress (gdbarch, displaced_pc));

  /* Display the new displaced instruction(s).  */
  if (debug_displaced)
    {
      string_file tmp_stream;
      CORE_ADDR addr = displaced_pc;

      /* If displaced stepping is going to use h/w single step then we know
	 that the replacement instruction can only be a single instruction,
	 in that case set the end address at the next byte.

	 Otherwise the displaced stepping copy instruction routine could
	 have generated multiple instructions, and all we know is that they
	 must fit within the LEN bytes of the buffer.  */
      CORE_ADDR end
	= addr + (gdbarch_displaced_step_hw_singlestep (gdbarch)
		  ? 1 : gdbarch_displaced_step_buffer_length (gdbarch));

      while (addr < end)
	{
	  int dislen = gdb_print_insn (gdbarch, addr, &tmp_stream, nullptr);
	  if (dislen <= 0)
	    {
	      displaced_debug_printf
		("replacement insn %s: invalid length: %d",
		 paddress (gdbarch, addr), dislen);
	      break;
	    }

	  gdb::byte_vector insn_buf (dislen);
	  read_memory (addr, insn_buf.data (), insn_buf.size ());

	  std::string insn_bytes = bytes_to_string (insn_buf);
	  std::string insn_str = tmp_stream.release ();
	  displaced_debug_printf ("replacement insn %s: %s \t %s",
				  paddress (gdbarch, addr),
				  insn_bytes.c_str (),
				  insn_str.c_str ());
	  addr += dislen;
	}
    }

  return DISPLACED_STEP_PREPARE_STATUS_OK;
}
1927
/* Wrapper for displaced_step_prepare_throw that disables further
   attempts at displaced stepping for the thread's inferior if we get
   a memory error or a not-supported error.  Any other exception is
   propagated to the caller.  */

static displaced_step_prepare_status
displaced_step_prepare (thread_info *thread)
{
  displaced_step_prepare_status status
    = DISPLACED_STEP_PREPARE_STATUS_CANT;

  try
    {
      status = displaced_step_prepare_throw (thread);
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != MEMORY_ERROR
	  && ex.error != NOT_SUPPORTED_ERROR)
	throw;

      infrun_debug_printf ("caught exception, disabling displaced stepping: %s",
			   ex.what ());

      /* Be verbose if "set displaced-stepping" is "on", silent if
	 "auto".  */
      if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
	{
	  warning (_("disabling displaced stepping: %s"),
		   ex.what ());
	}

      /* Disable further displaced stepping attempts.  Note this is
	 sticky for the whole inferior (see use_displaced_stepping).  */
      thread->inf->displaced_step_state.failed_before = 1;
    }

  return status;
}
1964
1965 /* True if any thread of TARGET that matches RESUME_PTID requires
1966 target_thread_events enabled. This assumes TARGET does not support
1967 target thread options. */
1968
1969 static bool
1970 any_thread_needs_target_thread_events (process_stratum_target *target,
1971 ptid_t resume_ptid)
1972 {
1973 for (thread_info *tp : all_non_exited_threads (target, resume_ptid))
1974 if (displaced_step_in_progress_thread (tp)
1975 || schedlock_applies (tp)
1976 || tp->thread_fsm () != nullptr)
1977 return true;
1978 return false;
1979 }
1980
1981 /* Maybe disable thread-{cloned,created,exited} event reporting after
1982 a step-over (either in-line or displaced) finishes. */
1983
1984 static void
1985 update_thread_events_after_step_over (thread_info *event_thread,
1986 const target_waitstatus &event_status)
1987 {
1988 if (schedlock_applies (event_thread))
1989 {
1990 /* If scheduler-locking applies, continue reporting
1991 thread-created/thread-cloned events. */
1992 return;
1993 }
1994 else if (target_supports_set_thread_options (0))
1995 {
1996 /* We can control per-thread options. Disable events for the
1997 event thread, unless the thread is gone. */
1998 if (event_status.kind () != TARGET_WAITKIND_THREAD_EXITED)
1999 event_thread->set_thread_options (0);
2000 }
2001 else
2002 {
2003 /* We can only control the target-wide target_thread_events
2004 setting. Disable it, but only if other threads in the target
2005 don't need it enabled. */
2006 process_stratum_target *target = event_thread->inf->process_target ();
2007 if (!any_thread_needs_target_thread_events (target, minus_one_ptid))
2008 target_thread_events (false);
2009 }
2010 }
2011
/* If we displaced stepped an instruction successfully, adjust registers and
   memory to yield the same effect the instruction would have had if we had
   executed it at its original address, and return
   DISPLACED_STEP_FINISH_STATUS_OK.  If the instruction didn't complete,
   relocate the PC and return DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED.

   If the thread wasn't displaced stepping, return
   DISPLACED_STEP_FINISH_STATUS_OK as well.  */

static displaced_step_finish_status
displaced_step_finish (thread_info *event_thread,
		       const target_waitstatus &event_status)
{
  /* Check whether the parent is displaced stepping.  */
  inferior *parent_inf = event_thread->inf;

  /* If this was a fork/vfork/clone, this event indicates that the
     displaced stepping of the syscall instruction has been done, so
     we perform cleanup for parent here.  Also note that this
     operation also cleans up the child for vfork, because their pages
     are shared.  */

  /* If this is a fork (child gets its own address space copy) and
     some displaced step buffers were in use at the time of the fork,
     restore the displaced step buffer bytes in the child process.

     Architectures which support displaced stepping and fork events
     must supply an implementation of
     gdbarch_displaced_step_restore_all_in_ptid.  This is not enforced
     during gdbarch validation to support architectures which support
     displaced stepping but not forks.  */
  if (event_status.kind () == TARGET_WAITKIND_FORKED)
    {
      struct regcache *parent_regcache = get_thread_regcache (event_thread);
      struct gdbarch *gdbarch = parent_regcache->arch ();

      if (gdbarch_supports_displaced_stepping (gdbarch))
	gdbarch_displaced_step_restore_all_in_ptid
	  (gdbarch, parent_inf, event_status.child_ptid ());
    }

  displaced_step_thread_state *displaced = &event_thread->displaced_step_state;

  /* Was this thread performing a displaced step?  */
  if (!displaced->in_progress ())
    return DISPLACED_STEP_FINISH_STATUS_OK;

  update_thread_events_after_step_over (event_thread, event_status);

  /* The per-inferior count of in-flight displaced steps must account
     for this thread; decrement it now that this step is finishing.  */
  gdb_assert (event_thread->inf->displaced_step_state.in_progress_count > 0);
  event_thread->inf->displaced_step_state.in_progress_count--;

  /* Fixup may need to read memory/registers.  Switch to the thread
     that we're fixing up.  Also, target_stopped_by_watchpoint checks
     the current thread, and displaced_step_restore performs ptid-dependent
     memory accesses using current_inferior().  */
  switch_to_thread (event_thread);

  /* Reset the thread's displaced stepping state on scope exit, even
     if gdbarch_displaced_step_finish throws.  */
  displaced_step_reset_cleanup cleanup (displaced);

  /* Do the fixup, and release the resources acquired to do the displaced
     step.  */
  displaced_step_finish_status status
    = gdbarch_displaced_step_finish (displaced->get_original_gdbarch (),
				     event_thread, event_status);

  if (event_status.kind () == TARGET_WAITKIND_FORKED
      || event_status.kind () == TARGET_WAITKIND_VFORKED
      || event_status.kind () == TARGET_WAITKIND_THREAD_CLONED)
    {
      /* Since the vfork/fork/clone syscall instruction was executed
	 in the scratchpad, the child's PC is also within the
	 scratchpad.  Set the child's PC to the parent's PC value,
	 which has already been fixed up.  Note: we use the parent's
	 aspace here, although we're touching the child, because the
	 child hasn't been added to the inferior list yet at this
	 point.  */

      struct regcache *parent_regcache = get_thread_regcache (event_thread);
      struct gdbarch *gdbarch = parent_regcache->arch ();
      struct regcache *child_regcache
	= get_thread_arch_regcache (parent_inf, event_status.child_ptid (),
				    gdbarch);
      /* Read PC value of parent.  */
      CORE_ADDR parent_pc = regcache_read_pc (parent_regcache);

      displaced_debug_printf ("write child pc from %s to %s",
			      paddress (gdbarch,
					regcache_read_pc (child_regcache)),
			      paddress (gdbarch, parent_pc));

      regcache_write_pc (child_regcache, parent_pc);
    }

  return status;
}
2108
/* Data to be passed around while handling an event.  This data is
   discarded between events.  */
struct execution_control_state
{
  explicit execution_control_state (thread_info *thr = nullptr)
    : ptid (thr == nullptr ? null_ptid : thr->ptid),
      event_thread (thr)
  {
  }

  /* The target that reported the event, if any.  */
  process_stratum_target *target = nullptr;
  /* The ptid the event refers to (null_ptid when there is no event
     thread).  */
  ptid_t ptid;
  /* The thread that got the event, if this was a thread event; NULL
     otherwise.  */
  struct thread_info *event_thread;

  /* The wait status reported by the target.  */
  struct target_waitstatus ws;
  /* Non-zero once the stop_func_* fields below have been filled in
     for the current event.  */
  int stop_func_filled_in = 0;
  /* Bounds and name of the function the inferior stopped in.  */
  CORE_ADDR stop_func_alt_start = 0;
  CORE_ADDR stop_func_start = 0;
  CORE_ADDR stop_func_end = 0;
  const char *stop_func_name = nullptr;
  /* Non-zero if event handling decided we should resume and wait for
     more events rather than stop.  */
  int wait_some_more = 0;

  /* True if the event thread hit the single-step breakpoint of
     another thread.  Thus the event doesn't cause a stop, the thread
     needs to be single-stepped past the single-step breakpoint before
     we can switch back to the original stepping thread.  */
  int hit_singlestep_breakpoint = 0;
};
2139
2140 static void keep_going_pass_signal (struct execution_control_state *ecs);
2141 static void prepare_to_wait (struct execution_control_state *ecs);
2142 static bool keep_going_stepped_thread (struct thread_info *tp);
2143 static step_over_what thread_still_needs_step_over (struct thread_info *tp);
2144
/* Are there any pending step-over requests?  If so, run all we can
   now and return true.  Otherwise, return false.  */

static bool
start_step_over (void)
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  /* Don't start a new step-over if we already have an in-line
     step-over operation ongoing.  */
  if (step_over_info_valid_p ())
    return false;

  /* Steal the global thread step over chain.  As we try to initiate displaced
     steps, threads will be enqueued in the global chain if no buffers are
     available.  If we iterated on the global chain directly, we might iterate
     indefinitely.  */
  thread_step_over_list threads_to_step
    = std::move (global_thread_step_over_list);

  infrun_debug_printf ("stealing global queue of threads to step, length = %d",
		       thread_step_over_chain_length (threads_to_step));

  bool started = false;

  /* On scope exit (whatever the reason, return or exception), if there are
     threads left in the THREADS_TO_STEP chain, put back these threads in the
     global list.  */
  SCOPE_EXIT
    {
      if (threads_to_step.empty ())
	infrun_debug_printf ("step-over queue now empty");
      else
	{
	  infrun_debug_printf ("putting back %d threads to step in global queue",
			       thread_step_over_chain_length (threads_to_step));

	  global_thread_step_over_chain_enqueue_chain
	    (std::move (threads_to_step));
	}
    };

  /* Use a "safe" range so elements can be removed while iterating.  */
  thread_step_over_list_safe_range range
    = make_thread_step_over_list_safe_range (threads_to_step);

  for (thread_info *tp : range)
    {
      step_over_what step_what;
      int must_be_in_line;

      gdb_assert (!tp->stop_requested);

      if (tp->inf->displaced_step_state.unavailable)
	{
	  /* The arch told us to not even try preparing another displaced step
	     for this inferior.  Just leave the thread in THREADS_TO_STEP, it
	     will get moved to the global chain on scope exit.  */
	  continue;
	}

      if (tp->inf->thread_waiting_for_vfork_done != nullptr)
	{
	  /* When we stop all threads, handling a vfork, any thread in the step
	     over chain remains there.  A user could also try to continue a
	     thread stopped at a breakpoint while another thread is waiting for
	     a vfork-done event.  In any case, we don't want to start a step
	     over right now.  */
	  continue;
	}

      /* Remove thread from the THREADS_TO_STEP chain.  If anything goes wrong
	 while we try to prepare the displaced step, we don't add it back to
	 the global step over chain.  This is to avoid a thread staying in the
	 step over chain indefinitely if something goes wrong when resuming it.
	 If the error is intermittent and it still needs a step over, it will
	 get enqueued again when we try to resume it normally.  */
      threads_to_step.erase (threads_to_step.iterator_to (*tp));

      step_what = thread_still_needs_step_over (tp);
      must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT)
			 || ((step_what & STEP_OVER_BREAKPOINT)
			     && !use_displaced_stepping (tp)));

      /* We currently stop all threads of all processes to step-over
	 in-line.  If we need to start a new in-line step-over, let
	 any pending displaced steps finish first.  */
      if (must_be_in_line && displaced_step_in_progress_any_thread ())
	{
	  global_thread_step_over_chain_enqueue (tp);
	  continue;
	}

      if (tp->control.trap_expected
	  || tp->resumed ()
	  || tp->executing ())
	{
	  internal_error ("[%s] has inconsistent state: "
			  "trap_expected=%d, resumed=%d, executing=%d\n",
			  tp->ptid.to_string ().c_str (),
			  tp->control.trap_expected,
			  tp->resumed (),
			  tp->executing ());
	}

      infrun_debug_printf ("resuming [%s] for step-over",
			   tp->ptid.to_string ().c_str ());

      /* keep_going_pass_signal skips the step-over if the breakpoint
	 is no longer inserted.  In all-stop, we want to keep looking
	 for a thread that needs a step-over instead of resuming TP,
	 because we wouldn't be able to resume anything else until the
	 target stops again.  In non-stop, the resume always resumes
	 only TP, so it's OK to let the thread resume freely.  */
      if (!target_is_non_stop_p () && !step_what)
	continue;

      switch_to_thread (tp);
      execution_control_state ecs (tp);
      keep_going_pass_signal (&ecs);

      if (!ecs.wait_some_more)
	error (_("Command aborted."));

      /* If the thread's step over could not be initiated because no buffers
	 were available, it was re-added to the global step over chain.  */
      if (tp->resumed ())
	{
	  infrun_debug_printf ("[%s] was resumed.",
			       tp->ptid.to_string ().c_str ());
	  gdb_assert (!thread_is_in_step_over_chain (tp));
	}
      else
	{
	  infrun_debug_printf ("[%s] was NOT resumed.",
			       tp->ptid.to_string ().c_str ());
	  gdb_assert (thread_is_in_step_over_chain (tp));
	}

      /* If we started a new in-line step-over, we're done.  */
      if (step_over_info_valid_p ())
	{
	  gdb_assert (tp->control.trap_expected);
	  started = true;
	  break;
	}

      if (!target_is_non_stop_p ())
	{
	  /* On all-stop, shouldn't have resumed unless we needed a
	     step over.  */
	  gdb_assert (tp->control.trap_expected
		      || tp->step_after_step_resume_breakpoint);

	  /* With remote targets (at least), in all-stop, we can't
	     issue any further remote commands until the program stops
	     again.  */
	  started = true;
	  break;
	}

      /* Either the thread no longer needed a step-over, or a new
	 displaced stepping sequence started.  Even in the latter
	 case, continue looking.  Maybe we can also start another
	 displaced step on a thread of other process.  */
    }

  return started;
}
2313
2314 /* Update global variables holding ptids to hold NEW_PTID if they were
2315 holding OLD_PTID. */
2316 static void
2317 infrun_thread_ptid_changed (process_stratum_target *target,
2318 ptid_t old_ptid, ptid_t new_ptid)
2319 {
2320 if (inferior_ptid == old_ptid
2321 && current_inferior ()->process_target () == target)
2322 inferior_ptid = new_ptid;
2323 }
2324
2325 \f
2326
/* Possible values for the "set scheduler-locking" setting.  */
static const char schedlock_off[] = "off";
static const char schedlock_on[] = "on";
static const char schedlock_step[] = "step";
static const char schedlock_replay[] = "replay";
/* Enumeration passed to add_setshow_enum_cmd for "set
   scheduler-locking"; must be nullptr-terminated.  */
static const char *const scheduler_enums[] = {
  schedlock_off,
  schedlock_on,
  schedlock_step,
  schedlock_replay,
  nullptr
};
/* Current scheduler-locking mode.  The default is "replay": behave
   like "off" during normal execution, but lock the scheduler while
   replaying a recorded execution.  */
static const char *scheduler_mode = schedlock_replay;
/* Implement "show scheduler-locking".  Print VALUE, the current
   scheduler-locking mode, to FILE.  */
static void
show_scheduler_mode (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  gdb_printf (file,
	      _("Mode for locking scheduler "
		"during execution is \"%s\".\n"),
	      value);
}
2348
2349 static void
2350 set_schedlock_func (const char *args, int from_tty, struct cmd_list_element *c)
2351 {
2352 if (!target_can_lock_scheduler ())
2353 {
2354 scheduler_mode = schedlock_off;
2355 error (_("Target '%s' cannot support this command."),
2356 target_shortname ());
2357 }
2358 }
2359
2360 /* True if execution commands resume all threads of all processes by
2361 default; otherwise, resume only threads of the current inferior
2362 process. */
2363 bool sched_multi = false;
2364
2365 /* Try to setup for software single stepping. Return true if target_resume()
2366 should use hardware single step.
2367
2368 GDBARCH the current gdbarch. */
2369
2370 static bool
2371 maybe_software_singlestep (struct gdbarch *gdbarch)
2372 {
2373 bool hw_step = true;
2374
2375 if (execution_direction == EXEC_FORWARD
2376 && gdbarch_software_single_step_p (gdbarch))
2377 hw_step = !insert_single_step_breakpoints (gdbarch);
2378
2379 return hw_step;
2380 }
2381
2382 /* See infrun.h. */
2383
2384 ptid_t
2385 user_visible_resume_ptid (int step)
2386 {
2387 ptid_t resume_ptid;
2388
2389 if (non_stop)
2390 {
2391 /* With non-stop mode on, threads are always handled
2392 individually. */
2393 resume_ptid = inferior_ptid;
2394 }
2395 else if ((scheduler_mode == schedlock_on)
2396 || (scheduler_mode == schedlock_step && step))
2397 {
2398 /* User-settable 'scheduler' mode requires solo thread
2399 resume. */
2400 resume_ptid = inferior_ptid;
2401 }
2402 else if ((scheduler_mode == schedlock_replay)
2403 && target_record_will_replay (minus_one_ptid, execution_direction))
2404 {
2405 /* User-settable 'scheduler' mode requires solo thread resume in replay
2406 mode. */
2407 resume_ptid = inferior_ptid;
2408 }
2409 else if (!sched_multi && target_supports_multi_process ())
2410 {
2411 /* Resume all threads of the current process (and none of other
2412 processes). */
2413 resume_ptid = ptid_t (inferior_ptid.pid ());
2414 }
2415 else
2416 {
2417 /* Resume all threads of all processes. */
2418 resume_ptid = RESUME_ALL;
2419 }
2420
2421 return resume_ptid;
2422 }
2423
2424 /* See infrun.h. */
2425
2426 process_stratum_target *
2427 user_visible_resume_target (ptid_t resume_ptid)
2428 {
2429 return (resume_ptid == minus_one_ptid && sched_multi
2430 ? nullptr
2431 : current_inferior ()->process_target ());
2432 }
2433
2434 /* Find a thread from the inferiors that we'll resume that is waiting
2435 for a vfork-done event. */
2436
2437 static thread_info *
2438 find_thread_waiting_for_vfork_done ()
2439 {
2440 gdb_assert (!target_is_non_stop_p ());
2441
2442 if (sched_multi)
2443 {
2444 for (inferior *inf : all_non_exited_inferiors ())
2445 if (inf->thread_waiting_for_vfork_done != nullptr)
2446 return inf->thread_waiting_for_vfork_done;
2447 }
2448 else
2449 {
2450 inferior *cur_inf = current_inferior ();
2451 if (cur_inf->thread_waiting_for_vfork_done != nullptr)
2452 return cur_inf->thread_waiting_for_vfork_done;
2453 }
2454 return nullptr;
2455 }
2456
2457 /* Return a ptid representing the set of threads that we will resume,
2458 in the perspective of the target, assuming run control handling
2459 does not require leaving some threads stopped (e.g., stepping past
2460 breakpoint). USER_STEP indicates whether we're about to start the
2461 target for a stepping command. */
2462
static ptid_t
internal_resume_ptid (int user_step)
{
  /* In non-stop, we always control threads individually.  Note that
     the target may always work in non-stop mode even with "set
     non-stop off", in which case user_visible_resume_ptid could
     return a wildcard ptid.  */
  if (target_is_non_stop_p ())
    return inferior_ptid;

  /* The rest of the function assumes non-stop==off and
     target-non-stop==off.

     If a thread is waiting for a vfork-done event, it means breakpoints are out
     for this inferior (well, program space in fact).  We don't want to resume
     any thread other than the one waiting for vfork done, otherwise these other
     threads could miss breakpoints.  So if a thread in the resumption set is
     waiting for a vfork-done event, resume only that thread.

     The resumption set width depends on whether schedule-multiple is on or off.

     Note that if the target_resume interface was more flexible, we could be
     smarter here when schedule-multiple is on.  For example, imagine 3
     inferiors with 2 threads each (1.1, 1.2, 2.1, 2.2, 3.1 and 3.2).  Threads
     2.1 and 3.2 are both waiting for a vfork-done event.  Then we could ask the
     target(s) to resume:

     - All threads of inferior 1
     - Thread 2.1
     - Thread 3.2

     Since we don't have that flexibility (we can only pass one ptid), just
     resume the first thread waiting for a vfork-done event we find (e.g. thread
     2.1).  */
  thread_info *thr = find_thread_waiting_for_vfork_done ();
  if (thr != nullptr)
    {
      /* If we have a thread that is waiting for a vfork-done event,
	 then we should have switched to it earlier.  Calling
	 target_resume with thread scope is only possible when the
	 current thread matches the thread scope.  */
      gdb_assert (thr->ptid == inferior_ptid);
      gdb_assert (thr->inf->process_target ()
		  == inferior_thread ()->inf->process_target ());
      return thr->ptid;
    }

  /* No vfork waiter: fall back to the user-visible resumption set.  */
  return user_visible_resume_ptid (user_step);
}
2512
/* Wrapper for target_resume, that handles infrun-specific
   bookkeeping: terminal modes, stop-signal reset, signal-pass
   advice, and thread-event option management.  RESUME_PTID is the
   set of threads to resume, STEP requests a hardware single-step,
   and SIG is the signal to deliver (GDB_SIGNAL_0 for none).  */

static void
do_target_resume (ptid_t resume_ptid, bool step, enum gdb_signal sig)
{
  struct thread_info *tp = inferior_thread ();

  gdb_assert (!tp->stop_requested);

  /* Install inferior's terminal modes.  */
  target_terminal::inferior ();

  /* Avoid confusing the next resume, if the next stop/resume
     happens to apply to another thread.  */
  tp->set_stop_signal (GDB_SIGNAL_0);

  /* Advise target which signals may be handled silently.

     If we have removed breakpoints because we are stepping over one
     in-line (in any thread), we need to receive all signals to avoid
     accidentally skipping a breakpoint during execution of a signal
     handler.

     Likewise if we're displaced stepping, otherwise a trap for a
     breakpoint in a signal handler might be confused with the
     displaced step finishing.  We don't make the displaced_step_finish
     step distinguish the cases instead, because:

     - a backtrace while stopped in the signal handler would show the
       scratch pad as frame older than the signal handler, instead of
       the real mainline code.

     - when the thread is later resumed, the signal handler would
       return to the scratch pad area, which would no longer be
       valid.  */
  if (step_over_info_valid_p ()
      || displaced_step_in_progress (tp->inf))
    target_pass_signals ({});
  else
    target_pass_signals (signal_pass);

  /* Request that the target report thread-{created,cloned,exited}
     events in the following situations:

     - If we are performing an in-line step-over-breakpoint, then we
       will remove a breakpoint from the target and only run the
       current thread.  We don't want any new thread (spawned by the
       step) to start running, as it might miss the breakpoint.  We
       need to clear the step-over state if the stepped thread exits,
       so we also enable thread-exit events.

     - If we are stepping over a breakpoint out of line (displaced
       stepping) then we won't remove a breakpoint from the target,
       but, if the step spawns a new clone thread, then we will need
       to fixup the $pc address in the clone child too, so we need it
       to start stopped.  We need to release the displaced stepping
       buffer if the stepped thread exits, so we also enable
       thread-exit events.

     - If scheduler-locking applies, threads that the current thread
       spawns should remain halted.  It's not strictly necessary to
       enable thread-exit events in this case, but it doesn't hurt.
   */
  if (step_over_info_valid_p ()
      || displaced_step_in_progress_thread (tp)
      || schedlock_applies (tp))
    {
      gdb_thread_options options
	= GDB_THREAD_OPTION_CLONE | GDB_THREAD_OPTION_EXIT;
      if (target_supports_set_thread_options (options))
	tp->set_thread_options (options);
      else
	target_thread_events (true);
    }
  else if (tp->thread_fsm () != nullptr)
    {
      /* A thread running an internal state machine only needs exit
	 events, to clean up the machine if the thread goes away.  */
      gdb_thread_options options = GDB_THREAD_OPTION_EXIT;
      if (target_supports_set_thread_options (options))
	tp->set_thread_options (options);
      else
	target_thread_events (true);
    }
  else
    {
      /* No special events needed for TP; clear its options, or turn
	 off target-wide events if no other thread needs them.  */
      if (target_supports_set_thread_options (0))
	tp->set_thread_options (0);
      else
	{
	  process_stratum_target *resume_target = tp->inf->process_target ();
	  if (!any_thread_needs_target_thread_events (resume_target,
						      resume_ptid))
	    target_thread_events (false);
	}
    }

  /* If we're resuming more than one thread simultaneously, then any
     thread other than the leader is being set to run free.  Clear any
     previous thread option for those threads.  */
  if (resume_ptid != inferior_ptid && target_supports_set_thread_options (0))
    {
      process_stratum_target *resume_target = tp->inf->process_target ();
      for (thread_info *thr_iter : all_non_exited_threads (resume_target,
							   resume_ptid))
	if (thr_iter != tp)
	  thr_iter->set_thread_options (0);
    }

  infrun_debug_printf ("resume_ptid=%s, step=%d, sig=%s",
		       resume_ptid.to_string ().c_str (),
		       step, gdb_signal_to_symbol_string (sig));

  target_resume (resume_ptid, step, sig);
}
2627
/* Resume the inferior.  SIG is the signal to give the inferior
   (GDB_SIGNAL_0 for none).  Note: don't call this directly; instead
   call 'resume', which handles exceptions.  */

static void
resume_1 (enum gdb_signal sig)
{
  struct thread_info *tp = inferior_thread ();
  regcache *regcache = get_thread_regcache (tp);
  struct gdbarch *gdbarch = regcache->arch ();
  ptid_t resume_ptid;
  /* This represents the user's step vs continue request.  When
     deciding whether "set scheduler-locking step" applies, it's the
     user's intention that counts.  */
  const int user_step = tp->control.stepping_command;
  /* This represents what we'll actually request the target to do.
     This can decay from a step to a continue, if e.g., we need to
     implement single-stepping with breakpoints (software
     single-step).  */
  bool step;

  gdb_assert (!tp->stop_requested);
  gdb_assert (!thread_is_in_step_over_chain (tp));

  /* If the thread already has an event to report, don't actually run
     it on the target; just mark it resumed and arrange for the event
     loop to collect and process the pending event.  */
  if (tp->has_pending_waitstatus ())
    {
      infrun_debug_printf
	("thread %s has pending wait "
	 "status %s (currently_stepping=%d).",
	 tp->ptid.to_string ().c_str (),
	 tp->pending_waitstatus ().to_string ().c_str (),
	 currently_stepping (tp));

      tp->inf->process_target ()->threads_executing = true;
      tp->set_resumed (true);

      /* FIXME: What should we do if we are supposed to resume this
	 thread with a signal?  Maybe we should maintain a queue of
	 pending signals to deliver.  */
      if (sig != GDB_SIGNAL_0)
	{
	  warning (_("Couldn't deliver signal %s to %s."),
		   gdb_signal_to_name (sig),
		   tp->ptid.to_string ().c_str ());
	}

      tp->set_stop_signal (GDB_SIGNAL_0);

      if (target_can_async_p ())
	{
	  target_async (true);
	  /* Tell the event loop we have an event to process.  */
	  mark_async_event_handler (infrun_async_inferior_event_token);
	}
      return;
    }

  tp->stepped_breakpoint = 0;

  /* Depends on stepped_breakpoint.  */
  step = currently_stepping (tp);

  if (current_inferior ()->thread_waiting_for_vfork_done != nullptr)
    {
      /* Don't try to single-step a vfork parent that is waiting for
	 the child to get out of the shared memory region (by exec'ing
	 or exiting).  This is particularly important on software
	 single-step archs, as the child process would trip on the
	 software single step breakpoint inserted for the parent
	 process.  Since the parent will not actually execute any
	 instruction until the child is out of the shared region (such
	 are vfork's semantics), it is safe to simply continue it.
	 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
	 the parent, and tell it to `keep_going', which automatically
	 re-sets it stepping.  */
      infrun_debug_printf ("resume : clear step");
      step = false;
    }

  CORE_ADDR pc = regcache_read_pc (regcache);

  infrun_debug_printf ("step=%d, signal=%s, trap_expected=%d, "
		       "current thread [%s] at %s",
		       step, gdb_signal_to_symbol_string (sig),
		       tp->control.trap_expected,
		       inferior_ptid.to_string ().c_str (),
		       paddress (gdbarch, pc));

  const address_space *aspace = tp->inf->aspace.get ();

  /* Normally, by the time we reach `resume', the breakpoints are either
     removed or inserted, as appropriate.  The exception is if we're sitting
     at a permanent breakpoint; we need to step over it, but permanent
     breakpoints can't be removed.  So we have to test for it here.  */
  if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
    {
      if (sig != GDB_SIGNAL_0)
	{
	  /* We have a signal to pass to the inferior.  The resume
	     may, or may not take us to the signal handler.  If this
	     is a step, we'll need to stop in the signal handler, if
	     there's one, (if the target supports stepping into
	     handlers), or in the next mainline instruction, if
	     there's no handler.  If this is a continue, we need to be
	     sure to run the handler with all breakpoints inserted.
	     In all cases, set a breakpoint at the current address
	     (where the handler returns to), and once that breakpoint
	     is hit, resume skipping the permanent breakpoint.  If
	     that breakpoint isn't hit, then we've stepped into the
	     signal handler (or hit some other event).  We'll delete
	     the step-resume breakpoint then.  */

	  infrun_debug_printf ("resume: skipping permanent breakpoint, "
			       "deliver signal first");

	  clear_step_over_info ();
	  tp->control.trap_expected = 0;

	  if (tp->control.step_resume_breakpoint == nullptr)
	    {
	      /* Set a "high-priority" step-resume, as we don't want
		 user breakpoints at PC to trigger (again) when this
		 hits.  */
	      insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
	      gdb_assert (tp->control.step_resume_breakpoint->first_loc ()
			  .permanent);

	      tp->step_after_step_resume_breakpoint = step;
	    }

	  insert_breakpoints ();
	}
      else
	{
	  /* There's no signal to pass, we can go ahead and skip the
	     permanent breakpoint manually.  */
	  infrun_debug_printf ("skipping permanent breakpoint");
	  gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
	  /* Update pc to reflect the new address from which we will
	     execute instructions.  */
	  pc = regcache_read_pc (regcache);

	  if (step)
	    {
	      /* We've already advanced the PC, so the stepping part
		 is done.  Now we need to arrange for a trap to be
		 reported to handle_inferior_event.  Set a breakpoint
		 at the current PC, and run to it.  Don't update
		 prev_pc, because if we end in
		 switch_back_to_stepped_thread, we want the "expected
		 thread advanced also" branch to be taken.  IOW, we
		 don't want this thread to step further from PC
		 (overstep).  */
	      gdb_assert (!step_over_info_valid_p ());
	      insert_single_step_breakpoint (gdbarch, aspace, pc);
	      insert_breakpoints ();

	      resume_ptid = internal_resume_ptid (user_step);
	      do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
	      tp->set_resumed (true);
	      return;
	    }
	}
    }

  /* If we have a breakpoint to step over, make sure to do a single
     step only.  Same if we have software watchpoints.  */
  if (tp->control.trap_expected || bpstat_should_step ())
    tp->control.may_range_step = 0;

  /* If displaced stepping is enabled, step over breakpoints by executing a
     copy of the instruction at a different address.

     We can't use displaced stepping when we have a signal to deliver;
     the comments for displaced_step_prepare explain why.  The
     comments in the handle_inferior event for dealing with 'random
     signals' explain what we do instead.

     We can't use displaced stepping when we are waiting for vfork_done
     event, displaced stepping breaks the vfork child similarly as single
     step software breakpoint.  */
  if (tp->control.trap_expected
      && use_displaced_stepping (tp)
      && !step_over_info_valid_p ()
      && sig == GDB_SIGNAL_0
      && current_inferior ()->thread_waiting_for_vfork_done == nullptr)
    {
      displaced_step_prepare_status prepare_status
	= displaced_step_prepare (tp);

      if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
	{
	  infrun_debug_printf ("Got placed in step-over queue");

	  tp->control.trap_expected = 0;
	  return;
	}
      else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_CANT)
	{
	  /* Fallback to stepping over the breakpoint in-line.  */

	  if (target_is_non_stop_p ())
	    stop_all_threads ("displaced stepping falling back on inline stepping");

	  set_step_over_info (aspace, regcache_read_pc (regcache), 0,
			      tp->global_num);

	  step = maybe_software_singlestep (gdbarch);

	  insert_breakpoints ();
	}
      else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_OK)
	{
	  /* Update pc to reflect the new address from which we will
	     execute instructions due to displaced stepping.  */
	  pc = regcache_read_pc (get_thread_regcache (tp));

	  step = gdbarch_displaced_step_hw_singlestep (gdbarch);
	}
      else
	gdb_assert_not_reached ("Invalid displaced_step_prepare_status "
				"value.");
    }

  /* Do we need to do it the hard way, w/temp breakpoints?  */
  else if (step)
    step = maybe_software_singlestep (gdbarch);

  /* Currently, our software single-step implementation leads to different
     results than hardware single-stepping in one situation: when stepping
     into delivering a signal which has an associated signal handler,
     hardware single-step will stop at the first instruction of the handler,
     while software single-step will simply skip execution of the handler.

     For now, this difference in behavior is accepted since there is no
     easy way to actually implement single-stepping into a signal handler
     without kernel support.

     However, there is one scenario where this difference leads to follow-on
     problems: if we're stepping off a breakpoint by removing all breakpoints
     and then single-stepping.  In this case, the software single-step
     behavior means that even if there is a *breakpoint* in the signal
     handler, GDB still would not stop.

     Fortunately, we can at least fix this particular issue.  We detect
     here the case where we are about to deliver a signal while software
     single-stepping with breakpoints removed.  In this situation, we
     revert the decisions to remove all breakpoints and insert single-
     step breakpoints, and instead we install a step-resume breakpoint
     at the current address, deliver the signal without stepping, and
     once we arrive back at the step-resume breakpoint, actually step
     over the breakpoint we originally wanted to step over.  */
  if (thread_has_single_step_breakpoints_set (tp)
      && sig != GDB_SIGNAL_0
      && step_over_info_valid_p ())
    {
      /* If we have nested signals or a pending signal is delivered
	 immediately after a handler returns, might already have
	 a step-resume breakpoint set on the earlier handler.  We cannot
	 set another step-resume breakpoint; just continue on until the
	 original breakpoint is hit.  */
      if (tp->control.step_resume_breakpoint == nullptr)
	{
	  insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
	  tp->step_after_step_resume_breakpoint = 1;
	}

      delete_single_step_breakpoints (tp);

      clear_step_over_info ();
      tp->control.trap_expected = 0;

      insert_breakpoints ();
    }

  /* If STEP is set, it's a request to use hardware stepping
     facilities.  But in that case, we should never
     use singlestep breakpoint.  */
  gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));

  /* Decide the set of threads to ask the target to resume.  */
  if (tp->control.trap_expected)
    {
      /* We're allowing a thread to run past a breakpoint it has
	 hit, either by single-stepping the thread with the breakpoint
	 removed, or by displaced stepping, with the breakpoint inserted.
	 In the former case, we need to single-step only this thread,
	 and keep others stopped, as they can miss this breakpoint if
	 allowed to run.  That's not really a problem for displaced
	 stepping, but, we still keep other threads stopped, in case
	 another thread is also stopped for a breakpoint waiting for
	 its turn in the displaced stepping queue.  */
      resume_ptid = inferior_ptid;
    }
  else
    resume_ptid = internal_resume_ptid (user_step);

  if (execution_direction != EXEC_REVERSE
      && step && breakpoint_inserted_here_p (aspace, pc))
    {
      /* There are two cases where we currently need to step a
	 breakpoint instruction when we have a signal to deliver:

	 - See handle_signal_stop where we handle random signals that
	 could take out us out of the stepping range.  Normally, in
	 that case we end up continuing (instead of stepping) over the
	 signal handler with a breakpoint at PC, but there are cases
	 where we should _always_ single-step, even if we have a
	 step-resume breakpoint, like when a software watchpoint is
	 set.  Assuming single-stepping and delivering a signal at the
	 same time would takes us to the signal handler, then we could
	 have removed the breakpoint at PC to step over it.  However,
	 some hardware step targets (like e.g., Mac OS) can't step
	 into signal handlers, and for those, we need to leave the
	 breakpoint at PC inserted, as otherwise if the handler
	 recurses and executes PC again, it'll miss the breakpoint.
	 So we leave the breakpoint inserted anyway, but we need to
	 record that we tried to step a breakpoint instruction, so
	 that adjust_pc_after_break doesn't end up confused.

	 - In non-stop if we insert a breakpoint (e.g., a step-resume)
	 in one thread after another thread that was stepping had been
	 momentarily paused for a step-over.  When we re-resume the
	 stepping thread, it may be resumed from that address with a
	 breakpoint that hasn't trapped yet.  Seen with
	 gdb.threads/non-stop-fair-events.exp, on targets that don't
	 do displaced stepping.  */

      infrun_debug_printf ("resume: [%s] stepped breakpoint",
			   tp->ptid.to_string ().c_str ());

      tp->stepped_breakpoint = 1;

      /* Most targets can step a breakpoint instruction, thus
	 executing it normally.  But if this one cannot, just
	 continue and we will hit it anyway.  */
      if (gdbarch_cannot_step_breakpoint (gdbarch))
	step = false;
    }

  if (tp->control.may_range_step)
    {
      /* If we're resuming a thread with the PC out of the step
	 range, then we're doing some nested/finer run control
	 operation, like stepping the thread out of the dynamic
	 linker or the displaced stepping scratch pad.  We
	 shouldn't have allowed a range step then.  */
      gdb_assert (pc_in_thread_step_range (pc, tp));
    }

  /* All decisions are made; hand the resume request to the target.  */
  do_target_resume (resume_ptid, step, sig);
  tp->set_resumed (true);
}
2981
/* Resume the inferior.  SIG is the signal to give the inferior
   (GDB_SIGNAL_0 for none).  This is a wrapper around 'resume_1' that
   rolls back state on error.  */

static void
resume (gdb_signal sig)
{
  try
    {
      resume_1 (sig);
    }
  catch (const gdb_exception &ex)
    {
      /* If resuming is being aborted for any reason, delete any
	 single-step breakpoint resume_1 may have created, to avoid
	 confusing the following resumption, and to avoid leaving
	 single-step breakpoints perturbing other threads, in case
	 we're running in non-stop mode.  */
      /* No selected thread presumably means the inferior is gone and
	 there is nothing to clean up.  */
      if (inferior_ptid != null_ptid)
	delete_single_step_breakpoints (inferior_thread ());
      throw;
    }
}
3005
3006 \f
3007 /* Proceeding. */
3008
3009 /* See infrun.h. */
3010
/* Counter that tracks number of user visible stops.  This can be used
   to tell whether a command has proceeded the inferior past the
   current location.  This allows e.g., inferior function calls in
   breakpoint commands to not interrupt the command list.  When the
   call finishes successfully, the inferior is standing at the same
   breakpoint as if nothing happened (and so we don't call
   normal_stop).  Incremented by new_stop_id, read via get_stop_id.  */
static ULONGEST current_stop_id;
3019
/* See infrun.h.  */

ULONGEST
get_stop_id (void)
{
  /* Return the id of the most recent user-visible stop.  */
  return current_stop_id;
}
3027
3028 /* Called when we report a user visible stop. */
3029
3030 static void
3031 new_stop_id (void)
3032 {
3033 current_stop_id++;
3034 }
3035
/* Clear out all variables saying what to do when inferior is continued.
   First do this, then set the ones you want, then call `proceed'.  */

static void
clear_proceed_status_thread (struct thread_info *tp)
{
  infrun_debug_printf ("%s", tp->ptid.to_string ().c_str ());

  /* If we're starting a new sequence, then the previous finished
     single-step is no longer relevant.  */
  if (tp->has_pending_waitstatus ())
    {
      if (tp->stop_reason () == TARGET_STOPPED_BY_SINGLE_STEP)
	{
	  infrun_debug_printf ("pending event of %s was a finished step. "
			       "Discarding.",
			       tp->ptid.to_string ().c_str ());

	  tp->clear_pending_waitstatus ();
	  tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
	}
      else
	{
	  /* Any other pending event is kept; just log it.  */
	  infrun_debug_printf
	    ("thread %s has pending wait status %s (currently_stepping=%d).",
	     tp->ptid.to_string ().c_str (),
	     tp->pending_waitstatus ().to_string ().c_str (),
	     currently_stepping (tp));
	}
    }

  /* If this signal should not be seen by program, give it zero.
     Used for debugging signals.  */
  if (!signal_pass_state (tp->stop_signal ()))
    tp->set_stop_signal (GDB_SIGNAL_0);

  tp->release_thread_fsm ();

  /* Reset all per-thread run-control state ahead of the next
     resumption.  */
  tp->control.trap_expected = 0;
  tp->control.step_range_start = 0;
  tp->control.step_range_end = 0;
  tp->control.may_range_step = 0;
  tp->control.step_frame_id = null_frame_id;
  tp->control.step_stack_frame_id = null_frame_id;
  tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
  tp->control.step_start_function = nullptr;
  tp->stop_requested = 0;

  tp->control.stop_step = 0;

  tp->control.proceed_to_finish = 0;

  tp->control.stepping_command = 0;

  /* Discard any remaining commands or status from previous stop.  */
  bpstat_clear (&tp->control.stop_bpstat);
}
3093
/* Notify the current interpreter and observers that the target is about to
   proceed.  */

static void
notify_about_to_proceed ()
{
  /* The top-level interpreter is told first, then all registered
     about_to_proceed observers.  */
  top_level_interpreter ()->on_about_to_proceed ();
  gdb::observers::about_to_proceed.notify ();
}
3103
3104 void
3105 clear_proceed_status (int step)
3106 {
3107 /* With scheduler-locking replay, stop replaying other threads if we're
3108 not replaying the user-visible resume ptid.
3109
3110 This is a convenience feature to not require the user to explicitly
3111 stop replaying the other threads. We're assuming that the user's
3112 intent is to resume tracing the recorded process. */
3113 if (!non_stop && scheduler_mode == schedlock_replay
3114 && target_record_is_replaying (minus_one_ptid)
3115 && !target_record_will_replay (user_visible_resume_ptid (step),
3116 execution_direction))
3117 target_record_stop_replaying ();
3118
3119 if (!non_stop && inferior_ptid != null_ptid)
3120 {
3121 ptid_t resume_ptid = user_visible_resume_ptid (step);
3122 process_stratum_target *resume_target
3123 = user_visible_resume_target (resume_ptid);
3124
3125 /* In all-stop mode, delete the per-thread status of all threads
3126 we're about to resume, implicitly and explicitly. */
3127 for (thread_info *tp : all_non_exited_threads (resume_target, resume_ptid))
3128 clear_proceed_status_thread (tp);
3129 }
3130
3131 if (inferior_ptid != null_ptid)
3132 {
3133 struct inferior *inferior;
3134
3135 if (non_stop)
3136 {
3137 /* If in non-stop mode, only delete the per-thread status of
3138 the current thread. */
3139 clear_proceed_status_thread (inferior_thread ());
3140 }
3141
3142 inferior = current_inferior ();
3143 inferior->control.stop_soon = NO_STOP_QUIETLY;
3144 }
3145
3146 notify_about_to_proceed ();
3147 }
3148
3149 /* Returns true if TP is still stopped at a breakpoint that needs
3150 stepping-over in order to make progress. If the breakpoint is gone
3151 meanwhile, we can skip the whole step-over dance. */
3152
3153 static bool
3154 thread_still_needs_step_over_bp (struct thread_info *tp)
3155 {
3156 if (tp->stepping_over_breakpoint)
3157 {
3158 struct regcache *regcache = get_thread_regcache (tp);
3159
3160 if (breakpoint_here_p (tp->inf->aspace.get (),
3161 regcache_read_pc (regcache))
3162 == ordinary_breakpoint_here)
3163 return true;
3164
3165 tp->stepping_over_breakpoint = 0;
3166 }
3167
3168 return false;
3169 }
3170
3171 /* Check whether thread TP still needs to start a step-over in order
3172 to make progress when resumed. Returns an bitwise or of enum
3173 step_over_what bits, indicating what needs to be stepped over. */
3174
3175 static step_over_what
3176 thread_still_needs_step_over (struct thread_info *tp)
3177 {
3178 step_over_what what = 0;
3179
3180 if (thread_still_needs_step_over_bp (tp))
3181 what |= STEP_OVER_BREAKPOINT;
3182
3183 if (tp->stepping_over_watchpoint
3184 && !target_have_steppable_watchpoint ())
3185 what |= STEP_OVER_WATCHPOINT;
3186
3187 return what;
3188 }
3189
3190 /* Returns true if scheduler locking applies. STEP indicates whether
3191 we're about to do a step/next-like command to a thread. */
3192
3193 static bool
3194 schedlock_applies (struct thread_info *tp)
3195 {
3196 return (scheduler_mode == schedlock_on
3197 || (scheduler_mode == schedlock_step
3198 && tp->control.stepping_command)
3199 || (scheduler_mode == schedlock_replay
3200 && target_record_will_replay (minus_one_ptid,
3201 execution_direction)));
3202 }
3203
3204 /* Set process_stratum_target::COMMIT_RESUMED_STATE in all target
3205 stacks that have threads executing and don't have threads with
3206 pending events. */
3207
3208 static void
3209 maybe_set_commit_resumed_all_targets ()
3210 {
3211 scoped_restore_current_thread restore_thread;
3212
3213 for (inferior *inf : all_non_exited_inferiors ())
3214 {
3215 process_stratum_target *proc_target = inf->process_target ();
3216
3217 if (proc_target->commit_resumed_state)
3218 {
3219 /* We already set this in a previous iteration, via another
3220 inferior sharing the process_stratum target. */
3221 continue;
3222 }
3223
3224 /* If the target has no resumed threads, it would be useless to
3225 ask it to commit the resumed threads. */
3226 if (!proc_target->threads_executing)
3227 {
3228 infrun_debug_printf ("not requesting commit-resumed for target "
3229 "%s, no resumed threads",
3230 proc_target->shortname ());
3231 continue;
3232 }
3233
3234 /* As an optimization, if a thread from this target has some
3235 status to report, handle it before requiring the target to
3236 commit its resumed threads: handling the status might lead to
3237 resuming more threads. */
3238 if (proc_target->has_resumed_with_pending_wait_status ())
3239 {
3240 infrun_debug_printf ("not requesting commit-resumed for target %s, a"
3241 " thread has a pending waitstatus",
3242 proc_target->shortname ());
3243 continue;
3244 }
3245
3246 switch_to_inferior_no_thread (inf);
3247
3248 if (target_has_pending_events ())
3249 {
3250 infrun_debug_printf ("not requesting commit-resumed for target %s, "
3251 "target has pending events",
3252 proc_target->shortname ());
3253 continue;
3254 }
3255
3256 infrun_debug_printf ("enabling commit-resumed for target %s",
3257 proc_target->shortname ());
3258
3259 proc_target->commit_resumed_state = true;
3260 }
3261 }
3262
3263 /* See infrun.h. */
3264
3265 void
3266 maybe_call_commit_resumed_all_targets ()
3267 {
3268 scoped_restore_current_thread restore_thread;
3269
3270 for (inferior *inf : all_non_exited_inferiors ())
3271 {
3272 process_stratum_target *proc_target = inf->process_target ();
3273
3274 if (!proc_target->commit_resumed_state)
3275 continue;
3276
3277 switch_to_inferior_no_thread (inf);
3278
3279 infrun_debug_printf ("calling commit_resumed for target %s",
3280 proc_target->shortname());
3281
3282 target_commit_resumed ();
3283 }
3284 }
3285
/* To track nesting of scoped_disable_commit_resumed objects, ensuring
   that only the outermost one attempts to re-enable
   commit-resumed.  True while no scoped_disable_commit_resumed is
   live; the constructor clears it and reset restores the previous
   value.  */
static bool enable_commit_resumed = true;
3290
3291 /* See infrun.h. */
3292
3293 scoped_disable_commit_resumed::scoped_disable_commit_resumed
3294 (const char *reason)
3295 : m_reason (reason),
3296 m_prev_enable_commit_resumed (enable_commit_resumed)
3297 {
3298 infrun_debug_printf ("reason=%s", m_reason);
3299
3300 enable_commit_resumed = false;
3301
3302 for (inferior *inf : all_non_exited_inferiors ())
3303 {
3304 process_stratum_target *proc_target = inf->process_target ();
3305
3306 if (m_prev_enable_commit_resumed)
3307 {
3308 /* This is the outermost instance: force all
3309 COMMIT_RESUMED_STATE to false. */
3310 proc_target->commit_resumed_state = false;
3311 }
3312 else
3313 {
3314 /* This is not the outermost instance, we expect
3315 COMMIT_RESUMED_STATE to have been cleared by the
3316 outermost instance. */
3317 gdb_assert (!proc_target->commit_resumed_state);
3318 }
3319 }
3320 }
3321
3322 /* See infrun.h. */
3323
3324 void
3325 scoped_disable_commit_resumed::reset ()
3326 {
3327 if (m_reset)
3328 return;
3329 m_reset = true;
3330
3331 infrun_debug_printf ("reason=%s", m_reason);
3332
3333 gdb_assert (!enable_commit_resumed);
3334
3335 enable_commit_resumed = m_prev_enable_commit_resumed;
3336
3337 if (m_prev_enable_commit_resumed)
3338 {
3339 /* This is the outermost instance, re-enable
3340 COMMIT_RESUMED_STATE on the targets where it's possible. */
3341 maybe_set_commit_resumed_all_targets ();
3342 }
3343 else
3344 {
3345 /* This is not the outermost instance, we expect
3346 COMMIT_RESUMED_STATE to still be false. */
3347 for (inferior *inf : all_non_exited_inferiors ())
3348 {
3349 process_stratum_target *proc_target = inf->process_target ();
3350 gdb_assert (!proc_target->commit_resumed_state);
3351 }
3352 }
3353 }
3354
/* See infrun.h.  */

scoped_disable_commit_resumed::~scoped_disable_commit_resumed ()
{
  /* Undo the disable, unless reset was already called explicitly
     (reset is a no-op the second time).  */
  reset ();
}
3361
/* See infrun.h.  */

void
scoped_disable_commit_resumed::reset_and_commit ()
{
  /* Re-enable commit-resumed, then immediately ask the targets to
     commit their resumed threads.  */
  reset ();
  maybe_call_commit_resumed_all_targets ();
}
3370
3371 /* See infrun.h. */
3372
3373 scoped_enable_commit_resumed::scoped_enable_commit_resumed
3374 (const char *reason)
3375 : m_reason (reason),
3376 m_prev_enable_commit_resumed (enable_commit_resumed)
3377 {
3378 infrun_debug_printf ("reason=%s", m_reason);
3379
3380 if (!enable_commit_resumed)
3381 {
3382 enable_commit_resumed = true;
3383
3384 /* Re-enable COMMIT_RESUMED_STATE on the targets where it's
3385 possible. */
3386 maybe_set_commit_resumed_all_targets ();
3387
3388 maybe_call_commit_resumed_all_targets ();
3389 }
3390 }
3391
3392 /* See infrun.h. */
3393
3394 scoped_enable_commit_resumed::~scoped_enable_commit_resumed ()
3395 {
3396 infrun_debug_printf ("reason=%s", m_reason);
3397
3398 gdb_assert (enable_commit_resumed);
3399
3400 enable_commit_resumed = m_prev_enable_commit_resumed;
3401
3402 if (!enable_commit_resumed)
3403 {
3404 /* Force all COMMIT_RESUMED_STATE back to false. */
3405 for (inferior *inf : all_non_exited_inferiors ())
3406 {
3407 process_stratum_target *proc_target = inf->process_target ();
3408 proc_target->commit_resumed_state = false;
3409 }
3410 }
3411 }
3412
/* Check that all the targets we're about to resume are in non-stop
   mode.  Ideally, we'd only care whether all targets support
   target-async, but we're not there yet.  E.g., stop_all_threads
   doesn't know how to handle all-stop targets.  Also, the remote
   protocol in all-stop mode is synchronous, irrespective of
   target-async, which means that things like a breakpoint re-set
   triggered by one target would try to read memory from all targets
   and fail.  Errors out if an unsupported combination is found.  */

static void
check_multi_target_resumption (process_stratum_target *resume_target)
{
  /* A non-null RESUME_TARGET means a single connection is being
     resumed, which is always fine; so is everything in non-stop.  */
  if (!non_stop && resume_target == nullptr)
    {
      scoped_restore_current_thread restore_thread;

      /* This is used to track whether we're resuming more than one
	 target.  */
      process_stratum_target *first_connection = nullptr;

      /* The first inferior we see with a target that does not work in
	 always-non-stop mode.  */
      inferior *first_not_non_stop = nullptr;

      for (inferior *inf : all_non_exited_inferiors ())
	{
	  switch_to_inferior_no_thread (inf);

	  if (!target_has_execution ())
	    continue;

	  process_stratum_target *proc_target
	    = current_inferior ()->process_target();

	  if (!target_is_non_stop_p ())
	    first_not_non_stop = inf;

	  if (first_connection == nullptr)
	    first_connection = proc_target;
	  else if (first_connection != proc_target
		   && first_not_non_stop != nullptr)
	    {
	      /* We're resuming more than one connection and at least
		 one of them isn't always-non-stop; switch to the
		 offending inferior so the error names its
		 connection.  */
	      switch_to_inferior_no_thread (first_not_non_stop);

	      proc_target = current_inferior ()->process_target();

	      error (_("Connection %d (%s) does not support "
		       "multi-target resumption."),
		     proc_target->connection_number,
		     make_target_connection_string (proc_target).c_str ());
	    }
	}
    }
}
3467
/* Helper function for `proceed`.  Check if thread TP is suitable for
   resuming, and, if it is, switch to the thread and call
   `keep_going_pass_signal`.  If TP is not suitable for resuming then this
   function will just return without switching threads.  */

static void
proceed_resume_thread_checked (thread_info *tp)
{
  if (!tp->inf->has_execution ())
    {
      infrun_debug_printf ("[%s] target has no execution",
			   tp->ptid.to_string ().c_str ());
      return;
    }

  if (tp->resumed ())
    {
      infrun_debug_printf ("[%s] resumed",
			   tp->ptid.to_string ().c_str ());
      gdb_assert (tp->executing () || tp->has_pending_waitstatus ());
      return;
    }

  if (thread_is_in_step_over_chain (tp))
    {
      infrun_debug_printf ("[%s] needs step-over",
			   tp->ptid.to_string ().c_str ());
      return;
    }

  /* When handling a vfork GDB removes all breakpoints from the program
     space in which the vfork is being handled.  If we are following the
     parent then GDB will set the thread_waiting_for_vfork_done member of
     the parent inferior.  In this case we should take care to only resume
     the vfork parent thread, the kernel will hold this thread suspended
     until the vfork child has exited or execd, at which point the parent
     will be resumed and a VFORK_DONE event sent to GDB.  */
  if (tp->inf->thread_waiting_for_vfork_done != nullptr)
    {
      if (target_is_non_stop_p ())
	{
	  /* For non-stop targets, regardless of whether GDB is using
	     all-stop or non-stop mode, threads are controlled
	     individually.

	     When a thread is handling a vfork, breakpoints are removed
	     from the inferior (well, program space in fact), so it is
	     critical that we don't try to resume any thread other than the
	     vfork parent.  */
	  if (tp != tp->inf->thread_waiting_for_vfork_done)
	    {
	      infrun_debug_printf ("[%s] thread %s of this inferior is "
				   "waiting for vfork-done",
				   tp->ptid.to_string ().c_str (),
				   tp->inf->thread_waiting_for_vfork_done
				     ->ptid.to_string ().c_str ());
	      return;
	    }
	}
      else
	{
	  /* For all-stop targets, when we attempt to resume the inferior,
	     we will only resume the vfork parent thread, this is handled
	     in internal_resume_ptid.

	     Additionally, we will always be called with the vfork parent
	     thread as the current thread (TP) thanks to follow_fork, as
	     such the following assertion should hold.

	     Beyond this there is nothing more that needs to be done
	     here.  */
	  gdb_assert (tp == tp->inf->thread_waiting_for_vfork_done);
	}
    }

  /* When handling a vfork GDB removes all breakpoints from the program
     space in which the vfork is being handled.  If we are following the
     child then GDB will set vfork_child member of the vfork parent
     inferior.  Once the child has either exited or execd then GDB will
     detach from the parent process.  Until that point GDB should not
     resume any thread in the parent process.  */
  if (tp->inf->vfork_child != nullptr)
    {
      infrun_debug_printf ("[%s] thread is part of a vfork parent, child is %d",
			   tp->ptid.to_string ().c_str (),
			   tp->inf->vfork_child->pid);
      return;
    }

  infrun_debug_printf ("resuming %s",
		       tp->ptid.to_string ().c_str ());

  /* All checks passed: make TP current and set it going.  */
  execution_control_state ecs (tp);
  switch_to_thread (tp);
  keep_going_pass_signal (&ecs);
  /* NOTE(review): wait_some_more left clear here apparently means
     keep_going_pass_signal failed to set the thread going — confirm
     against its implementation.  */
  if (!ecs.wait_some_more)
    error (_("Command aborted."));
}
3566
/* Basic routine for continuing the program in various fashions.

   ADDR is the address to resume at, or -1 for resume where stopped.
   SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
   or GDB_SIGNAL_DEFAULT for act according to how it stopped.

   You should call clear_proceed_status before calling proceed.  */

void
proceed (CORE_ADDR addr, enum gdb_signal siggnal)
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  struct gdbarch *gdbarch;
  CORE_ADDR pc;

  /* If we're stopped at a fork/vfork, switch to either the parent or child
     thread as defined by the "set follow-fork-mode" command, or, if both
     the parent and child are controlled by GDB, and schedule-multiple is
     on, follow the child.  If none of the above apply then we just proceed
     resuming the current thread.  */
  if (!follow_fork ())
    {
      /* The target for some reason decided not to resume.  */
      normal_stop ();
      if (target_can_async_p ())
	inferior_event_handler (INF_EXEC_COMPLETE);
      return;
    }

  /* We'll update this if & when we switch to a new thread.  */
  update_previous_thread ();

  /* Note: follow_fork above may have switched threads, so only read
     the current thread from here on.  */
  thread_info *cur_thr = inferior_thread ();
  infrun_debug_printf ("cur_thr = %s", cur_thr->ptid.to_string ().c_str ());

  regcache *regcache = get_thread_regcache (cur_thr);
  gdbarch = regcache->arch ();
  pc = regcache_read_pc_protected (regcache);

  /* Fill in with reasonable starting values.  */
  init_thread_stepping_state (cur_thr);

  gdb_assert (!thread_is_in_step_over_chain (cur_thr));

  /* Figure out which threads the resumption applies to, from the
     user/frontend's point of view (depends on scheduler-locking,
     non-stop mode, etc.).  */
  ptid_t resume_ptid
    = user_visible_resume_ptid (cur_thr->control.stepping_command);
  process_stratum_target *resume_target
    = user_visible_resume_target (resume_ptid);

  check_multi_target_resumption (resume_target);

  if (addr == (CORE_ADDR) -1)
    {
      /* Resuming at the stop address; decide whether we must first
	 step over a breakpoint sitting at that very address.  */
      const address_space *aspace = cur_thr->inf->aspace.get ();

      if (cur_thr->stop_pc_p ()
	  && pc == cur_thr->stop_pc ()
	  && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
	  && execution_direction != EXEC_REVERSE)
	/* There is a breakpoint at the address we will resume at,
	   step one instruction before inserting breakpoints so that
	   we do not stop right away (and report a second hit at this
	   breakpoint).

	   Note, we don't do this in reverse, because we won't
	   actually be executing the breakpoint insn anyway.
	   We'll be (un-)executing the previous instruction.  */
	cur_thr->stepping_over_breakpoint = 1;
      else if (gdbarch_single_step_through_delay_p (gdbarch)
	       && gdbarch_single_step_through_delay (gdbarch,
						     get_current_frame ()))
	/* We stepped onto an instruction that needs to be stepped
	   again before re-inserting the breakpoint, do so.  */
	cur_thr->stepping_over_breakpoint = 1;
    }
  else
    {
      regcache_write_pc (regcache, addr);
    }

  if (siggnal != GDB_SIGNAL_DEFAULT)
    cur_thr->set_stop_signal (siggnal);

  /* If an exception is thrown from this point on, make sure to
     propagate GDB's knowledge of the executing state to the
     frontend/user running state.  */
  scoped_finish_thread_state finish_state (resume_target, resume_ptid);

  /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
     threads (e.g., we might need to set threads stepping over
     breakpoints first), from the user/frontend's point of view, all
     threads in RESUME_PTID are now running.  Unless we're calling an
     inferior function, as in that case we pretend the inferior
     doesn't run at all.  */
  if (!cur_thr->control.in_infcall)
    set_running (resume_target, resume_ptid, true);

  infrun_debug_printf ("addr=%s, signal=%s, resume_ptid=%s",
		       paddress (gdbarch, addr),
		       gdb_signal_to_symbol_string (siggnal),
		       resume_ptid.to_string ().c_str ());

  annotate_starting ();

  /* Make sure that output from GDB appears before output from the
     inferior.  */
  gdb_flush (gdb_stdout);

  /* Since we've marked the inferior running, give it the terminal.  A
     QUIT/Ctrl-C from here on is forwarded to the target (which can
     still detect attempts to unblock a stuck connection with repeated
     Ctrl-C from within target_pass_ctrlc).  */
  target_terminal::inferior ();

  /* In a multi-threaded task we may select another thread and
     then continue or step.

     But if a thread that we're resuming had stopped at a breakpoint,
     it will immediately cause another breakpoint stop without any
     execution (i.e. it will report a breakpoint hit incorrectly).  So
     we must step over it first.

     Look for threads other than the current (TP) that reported a
     breakpoint hit and haven't been resumed yet since.  */

  /* If scheduler locking applies, we can avoid iterating over all
     threads.  */
  if (!non_stop && !schedlock_applies (cur_thr))
    {
      for (thread_info *tp : all_non_exited_threads (resume_target,
						     resume_ptid))
	{
	  switch_to_thread_no_regs (tp);

	  /* Ignore the current thread here.  It's handled
	     afterwards.  */
	  if (tp == cur_thr)
	    continue;

	  if (!thread_still_needs_step_over (tp))
	    continue;

	  gdb_assert (!thread_is_in_step_over_chain (tp));

	  infrun_debug_printf ("need to step-over [%s] first",
			       tp->ptid.to_string ().c_str ());

	  global_thread_step_over_chain_enqueue (tp);
	}

      switch_to_thread (cur_thr);
    }

  /* Enqueue the current thread last, so that we move all other
     threads over their breakpoints first.  */
  if (cur_thr->stepping_over_breakpoint)
    global_thread_step_over_chain_enqueue (cur_thr);

  /* If the thread isn't started, we'll still need to set its prev_pc,
     so that switch_back_to_stepped_thread knows the thread hasn't
     advanced.  Must do this before resuming any thread, as in
     all-stop/remote, once we resume we can't send any other packet
     until the target stops again.  */
  cur_thr->prev_pc = regcache_read_pc_protected (regcache);

  {
    /* Batch resumption requests; they are committed to the target in
       one go when this scope exits.  */
    scoped_disable_commit_resumed disable_commit_resumed ("proceeding");
    bool step_over_started = start_step_over ();

    if (step_over_info_valid_p ())
      {
	/* Either this thread started a new in-line step over, or some
	   other thread was already doing one.  In either case, don't
	   resume anything else until the step-over is finished.  */
      }
    else if (step_over_started && !target_is_non_stop_p ())
      {
	/* A new displaced stepping sequence was started.  In all-stop,
	   we can't talk to the target anymore until it next stops.  */
      }
    else if (!non_stop && target_is_non_stop_p ())
      {
	INFRUN_SCOPED_DEBUG_START_END
	  ("resuming threads, all-stop-on-top-of-non-stop");

	/* In all-stop, but the target is always in non-stop mode.
	   Start all other threads that are implicitly resumed too.  */
	for (thread_info *tp : all_non_exited_threads (resume_target,
						       resume_ptid))
	  {
	    switch_to_thread_no_regs (tp);
	    proceed_resume_thread_checked (tp);
	  }
      }
    else
      proceed_resume_thread_checked (cur_thr);

    disable_commit_resumed.reset_and_commit ();
  }

  finish_state.release ();

  /* If we've switched threads above, switch back to the previously
     current thread.  We don't want the user to see a different
     selected thread.  */
  switch_to_thread (cur_thr);

  /* Tell the event loop to wait for it to stop.  If the target
     supports asynchronous execution, it'll do this from within
     target_resume.  */
  if (!target_can_async_p ())
    mark_async_event_handler (infrun_async_inferior_event_token);
}
3781 \f
3782
3783 /* Start remote-debugging of a machine over a serial link. */
3784
3785 void
3786 start_remote (int from_tty)
3787 {
3788 inferior *inf = current_inferior ();
3789 inf->control.stop_soon = STOP_QUIETLY_REMOTE;
3790
3791 /* Always go on waiting for the target, regardless of the mode. */
3792 /* FIXME: cagney/1999-09-23: At present it isn't possible to
3793 indicate to wait_for_inferior that a target should timeout if
3794 nothing is returned (instead of just blocking). Because of this,
3795 targets expecting an immediate response need to, internally, set
3796 things up so that the target_wait() is forced to eventually
3797 timeout. */
3798 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
3799 differentiate to its caller what the state of the target is after
3800 the initial open has been performed. Here we're assuming that
3801 the target has stopped. It should be possible to eventually have
3802 target_open() return to the caller an indication that the target
3803 is currently running and GDB state should be set to the same as
3804 for an async run. */
3805 wait_for_inferior (inf);
3806
3807 /* Now that the inferior has stopped, do any bookkeeping like
3808 loading shared libraries. We want to do this before normal_stop,
3809 so that the displayed frame is up to date. */
3810 post_create_inferior (from_tty);
3811
3812 normal_stop ();
3813 }
3814
/* Initialize static vars when a new inferior begins.  */

void
init_wait_for_inferior (void)
{
  /* These are meaningless until the first time through wait_for_inferior.  */

  /* Reset breakpoint bookkeeping for the current inferior, marking it
     as just starting.  */
  breakpoint_init_inferior (current_inferior (), inf_starting);

  clear_proceed_status (0);

  /* Forget the last reported event's ptid.  */
  nullify_last_target_wait_ptid ();

  update_previous_thread ();
}
3830
3831 \f
3832
3833 static void handle_inferior_event (struct execution_control_state *ecs);
3834
3835 static void handle_step_into_function (struct gdbarch *gdbarch,
3836 struct execution_control_state *ecs);
3837 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
3838 struct execution_control_state *ecs);
3839 static void handle_signal_stop (struct execution_control_state *ecs);
3840 static void check_exception_resume (struct execution_control_state *,
3841 const frame_info_ptr &);
3842
3843 static void end_stepping_range (struct execution_control_state *ecs);
3844 static void stop_waiting (struct execution_control_state *ecs);
3845 static void keep_going (struct execution_control_state *ecs);
3846 static void process_event_stop_test (struct execution_control_state *ecs);
3847 static bool switch_back_to_stepped_thread (struct execution_control_state *ecs);
3848
/* This function is attached as a "thread_stop_requested" observer.
   Cleanup local state that assumed the PTID was to be resumed, and
   report the stop to the frontend.  */

static void
infrun_thread_stop_requested (ptid_t ptid)
{
  process_stratum_target *curr_target = current_inferior ()->process_target ();

  /* PTID was requested to stop.  If the thread was already stopped,
     but the user/frontend doesn't know about that yet (e.g., the
     thread had been temporarily paused for some step-over), set up
     for reporting the stop now.  */
  for (thread_info *tp : all_threads (curr_target, ptid))
    {
      /* Only threads that the frontend believes are running are of
	 interest here.  */
      if (tp->state != THREAD_RUNNING)
	continue;
      /* Threads actually executing on the target will report their
	 stop through the normal event path.  */
      if (tp->executing ())
	continue;

      /* Remove matching threads from the step-over queue, so
	 start_step_over doesn't try to resume them
	 automatically.  */
      if (thread_is_in_step_over_chain (tp))
	global_thread_step_over_chain_remove (tp);

      /* If the thread is stopped, but the user/frontend doesn't
	 know about that yet, queue a pending event, as if the
	 thread had just stopped now.  Unless the thread already had
	 a pending event.  */
      if (!tp->has_pending_waitstatus ())
	{
	  target_waitstatus ws;
	  ws.set_stopped (GDB_SIGNAL_0);
	  tp->set_pending_waitstatus (ws);
	}

      /* Clear the inline-frame state, since we're re-processing the
	 stop.  */
      clear_inline_frame_state (tp);

      /* If this thread was paused because some other thread was
	 doing an inline-step over, let that finish first.  Once
	 that happens, we'll restart all threads and consume pending
	 stop events then.  */
      if (step_over_info_valid_p ())
	continue;

      /* Otherwise we can process the (new) pending event now.  Set
	 it so this pending event is considered by
	 do_target_wait.  */
      tp->set_resumed (true);
    }
}
3903
3904 /* Delete the step resume, single-step and longjmp/exception resume
3905 breakpoints of TP. */
3906
3907 static void
3908 delete_thread_infrun_breakpoints (struct thread_info *tp)
3909 {
3910 delete_step_resume_breakpoint (tp);
3911 delete_exception_resume_breakpoint (tp);
3912 delete_single_step_breakpoints (tp);
3913 }
3914
3915 /* If the target still has execution, call FUNC for each thread that
3916 just stopped. In all-stop, that's all the non-exited threads; in
3917 non-stop, that's the current thread, only. */
3918
3919 typedef void (*for_each_just_stopped_thread_callback_func)
3920 (struct thread_info *tp);
3921
3922 static void
3923 for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
3924 {
3925 if (!target_has_execution () || inferior_ptid == null_ptid)
3926 return;
3927
3928 if (target_is_non_stop_p ())
3929 {
3930 /* If in non-stop mode, only the current thread stopped. */
3931 func (inferior_thread ());
3932 }
3933 else
3934 {
3935 /* In all-stop mode, all threads have stopped. */
3936 for (thread_info *tp : all_non_exited_threads ())
3937 func (tp);
3938 }
3939 }
3940
/* Delete the step resume and longjmp/exception resume breakpoints of
   the threads that just stopped.  (Also deletes their single-step
   breakpoints, via delete_thread_infrun_breakpoints.)  */

static void
delete_just_stopped_threads_infrun_breakpoints (void)
{
  for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
}
3949
/* Delete the single-step breakpoints of the threads that just
   stopped.  */

static void
delete_just_stopped_threads_single_step_breakpoints (void)
{
  for_each_just_stopped_thread (delete_single_step_breakpoints);
}
3958
3959 /* See infrun.h. */
3960
3961 void
3962 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
3963 const struct target_waitstatus &ws)
3964 {
3965 infrun_debug_printf ("target_wait (%s [%s], status) =",
3966 waiton_ptid.to_string ().c_str (),
3967 target_pid_to_str (waiton_ptid).c_str ());
3968 infrun_debug_printf (" %s [%s],",
3969 result_ptid.to_string ().c_str (),
3970 target_pid_to_str (result_ptid).c_str ());
3971 infrun_debug_printf (" %s", ws.to_string ().c_str ());
3972 }
3973
3974 /* Select a thread at random, out of those which are resumed and have
3975 had events. */
3976
3977 static struct thread_info *
3978 random_pending_event_thread (inferior *inf, ptid_t waiton_ptid)
3979 {
3980 process_stratum_target *proc_target = inf->process_target ();
3981 thread_info *thread
3982 = proc_target->random_resumed_with_pending_wait_status (inf, waiton_ptid);
3983
3984 if (thread == nullptr)
3985 {
3986 infrun_debug_printf ("None found.");
3987 return nullptr;
3988 }
3989
3990 infrun_debug_printf ("Found %s.", thread->ptid.to_string ().c_str ());
3991 gdb_assert (thread->resumed ());
3992 gdb_assert (thread->has_pending_waitstatus ());
3993
3994 return thread;
3995 }
3996
/* Wrapper for target_wait that first checks whether threads have
   pending statuses to report before actually asking the target for
   more events.  INF is the inferior we're using to call target_wait
   on.  PTID selects which thread(s) to wait on; STATUS receives the
   reported waitstatus; OPTIONS are the target_wait flags.  Returns
   the ptid of the thread the event is for.  */

static ptid_t
do_target_wait_1 (inferior *inf, ptid_t ptid,
		  target_waitstatus *status, target_wait_flags options)
{
  struct thread_info *tp;

  /* We know that we are looking for an event in the target of inferior
     INF, but we don't know which thread the event might come from.  As
     such we want to make sure that INFERIOR_PTID is reset so that none of
     the wait code relies on it - doing so is always a mistake.  */
  switch_to_inferior_no_thread (inf);

  /* First check if there is a resumed thread with a wait status
     pending.  */
  if (ptid == minus_one_ptid || ptid.is_pid ())
    {
      /* Wildcard or whole-process wait: pick a pending-event thread
	 at random, for fairness.  */
      tp = random_pending_event_thread (inf, ptid);
    }
  else
    {
      infrun_debug_printf ("Waiting for specific thread %s.",
			   ptid.to_string ().c_str ());

      /* We have a specific thread to check.  */
      tp = inf->find_thread (ptid);
      gdb_assert (tp != nullptr);
      if (!tp->has_pending_waitstatus ())
	tp = nullptr;
    }

  /* If the pending event was a breakpoint stop, re-validate it: the
     breakpoint may have been deleted, or the thread's PC may have
     been changed (e.g. by the user) since the event was recorded.  */
  if (tp != nullptr
      && (tp->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || tp->stop_reason () == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct regcache *regcache = get_thread_regcache (tp);
      struct gdbarch *gdbarch = regcache->arch ();
      CORE_ADDR pc;
      int discard = 0;

      pc = regcache_read_pc (regcache);

      if (pc != tp->stop_pc ())
	{
	  infrun_debug_printf ("PC of %s changed. was=%s, now=%s",
			       tp->ptid.to_string ().c_str (),
			       paddress (gdbarch, tp->stop_pc ()),
			       paddress (gdbarch, pc));
	  discard = 1;
	}
      else if (!breakpoint_inserted_here_p (tp->inf->aspace.get (), pc))
	{
	  infrun_debug_printf ("previous breakpoint of %s, at %s gone",
			       tp->ptid.to_string ().c_str (),
			       paddress (gdbarch, pc));

	  discard = 1;
	}

      if (discard)
	{
	  infrun_debug_printf ("pending event of %s cancelled.",
			       tp->ptid.to_string ().c_str ());

	  /* Replace the stale breakpoint event with a spurious stop,
	     so the thread is still resumed/re-examined normally.  */
	  tp->clear_pending_waitstatus ();
	  target_waitstatus ws;
	  ws.set_spurious ();
	  tp->set_pending_waitstatus (ws);
	  tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
	}
    }

  if (tp != nullptr)
    {
      infrun_debug_printf ("Using pending wait status %s for %s.",
			   tp->pending_waitstatus ().to_string ().c_str (),
			   tp->ptid.to_string ().c_str ());

      /* Now that we've selected our final event LWP, un-adjust its PC
	 if it was a software breakpoint (and the target doesn't
	 always adjust the PC itself).  */
      if (tp->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
	  && !target_supports_stopped_by_sw_breakpoint ())
	{
	  struct regcache *regcache;
	  struct gdbarch *gdbarch;
	  int decr_pc;

	  regcache = get_thread_regcache (tp);
	  gdbarch = regcache->arch ();

	  decr_pc = gdbarch_decr_pc_after_break (gdbarch);
	  if (decr_pc != 0)
	    {
	      CORE_ADDR pc;

	      pc = regcache_read_pc (regcache);
	      regcache_write_pc (regcache, pc + decr_pc);
	    }
	}

      /* Consume the pending event: hand it to the caller and clear it
	 from the thread.  */
      tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
      *status = tp->pending_waitstatus ();
      tp->clear_pending_waitstatus ();

      /* Wake up the event loop again, until all pending events are
	 processed.  */
      if (target_is_async_p ())
	mark_async_event_handler (infrun_async_inferior_event_token);
      return tp->ptid;
    }

  /* But if we don't find one, we'll have to wait.  */

  /* We can't ask a non-async target to do a non-blocking wait, so this will be
     a blocking wait.  */
  if (!target_can_async_p ())
    options &= ~TARGET_WNOHANG;

  return target_wait (ptid, status, options);
}
4122
/* Wrapper for target_wait that first checks whether threads have
   pending statuses to report before actually asking the target for
   more events.  Polls for events from all inferiors/targets.  Fills
   ECS with the event found; returns true if an event was found,
   false otherwise (in which case ECS->ws is set to "ignore").  */

static bool
do_target_wait (execution_control_state *ecs, target_wait_flags options)
{
  int num_inferiors = 0;
  int random_selector;

  /* For fairness, we pick the first inferior/target to poll at random
     out of all inferiors that may report events, and then continue
     polling the rest of the inferior list starting from that one in a
     circular fashion until the whole list is polled once.  */

  /* An inferior can report events iff it is bound to a process
     target.  */
  auto inferior_matches = [] (inferior *inf)
  {
    return inf->process_target () != nullptr;
  };

  /* First see how many matching inferiors we have.  */
  for (inferior *inf : all_inferiors ())
    if (inferior_matches (inf))
      num_inferiors++;

  if (num_inferiors == 0)
    {
      ecs->ws.set_ignore ();
      return false;
    }

  /* Now randomly pick an inferior out of those that matched.  */
  random_selector = (int)
    ((num_inferiors * (double) rand ()) / (RAND_MAX + 1.0));

  if (num_inferiors > 1)
    infrun_debug_printf ("Found %d inferiors, starting at #%d",
			 num_inferiors, random_selector);

  /* Select the Nth inferior that matched.  */

  inferior *selected = nullptr;

  for (inferior *inf : all_inferiors ())
    if (inferior_matches (inf))
      if (random_selector-- == 0)
	{
	  selected = inf;
	  break;
	}

  /* Now poll for events out of each of the matching inferior's
     targets, starting from the selected one.  */

  /* Poll one inferior's target (non-blocking relative to OPTIONS);
     returns true if it reported a real event.  */
  auto do_wait = [&] (inferior *inf)
  {
    ecs->ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs->ws, options);
    ecs->target = inf->process_target ();
    return (ecs->ws.kind () != TARGET_WAITKIND_IGNORE);
  };

  /* Needed in 'all-stop + target-non-stop' mode, because we end up
     here spuriously after the target is all stopped and we've already
     reported the stop to the user, polling for events.  */
  scoped_restore_current_thread restore_thread;

  intrusive_list_iterator<inferior> start
    = inferior_list.iterator_to (*selected);

  /* First leg of the circular walk: from the selected inferior to the
     end of the list.  */
  for (intrusive_list_iterator<inferior> it = start;
       it != inferior_list.end ();
       ++it)
    {
      inferior *inf = &*it;

      if (inferior_matches (inf) && do_wait (inf))
	return true;
    }

  /* Second leg: from the beginning of the list back up to (but not
     including) the selected inferior.  */
  for (intrusive_list_iterator<inferior> it = inferior_list.begin ();
       it != start;
       ++it)
    {
      inferior *inf = &*it;

      if (inferior_matches (inf) && do_wait (inf))
	return true;
    }

  ecs->ws.set_ignore ();
  return false;
}
4215
/* An event reported by wait_one.  Bundles together everything needed
   to identify and process one target event.  */

struct wait_one_event
{
  /* The target the event came out of.  */
  process_stratum_target *target;

  /* The PTID the event was for.  */
  ptid_t ptid;

  /* The waitstatus.  */
  target_waitstatus ws;
};
4229
4230 static bool handle_one (const wait_one_event &event);
4231 static int finish_step_over (struct execution_control_state *ecs);
4232
/* Prepare and stabilize the inferior for detaching it.  E.g.,
   detaching while a thread is displaced stepping is a recipe for
   crashing it, as nothing would readjust the PC out of the scratch
   pad.  */

void
prepare_for_detach (void)
{
  struct inferior *inf = current_inferior ();
  ptid_t pid_ptid = ptid_t (inf->pid);
  scoped_restore_current_thread restore_thread;

  /* Flag the inferior as detaching for the duration of this
     function.  */
  scoped_restore restore_detaching = make_scoped_restore (&inf->detaching, true);

  /* Remove all threads of INF from the global step-over chain.  We
     want to stop any ongoing step-over, not start any new one.  */
  thread_step_over_list_safe_range range
    = make_thread_step_over_list_safe_range (global_thread_step_over_list);

  for (thread_info *tp : range)
    if (tp->inf == inf)
      {
	infrun_debug_printf ("removing thread %s from global step over chain",
			     tp->ptid.to_string ().c_str ());
	global_thread_step_over_chain_remove (tp);
      }

  /* If we were already in the middle of an inline step-over, and the
     thread stepping belongs to the inferior we're detaching, we need
     to restart the threads of other inferiors.  */
  if (step_over_info.thread != -1)
    {
      infrun_debug_printf ("inline step-over in-process while detaching");

      thread_info *thr = find_thread_global_id (step_over_info.thread);
      if (thr->inf == inf)
	{
	  /* Since we removed threads of INF from the step-over chain,
	     we know this won't start a step-over for INF.  */
	  clear_step_over_info ();

	  if (target_is_non_stop_p ())
	    {
	      /* Start a new step-over in another thread if there's
		 one that needs it.  */
	      start_step_over ();

	      /* Restart all other threads (except the
		 previously-stepping thread, since that one is still
		 running).  */
	      if (!step_over_info_valid_p ())
		restart_threads (thr);
	    }
	}
    }

  if (displaced_step_in_progress (inf))
    {
      infrun_debug_printf ("displaced-stepping in-process while detaching");

      /* Stop threads currently displaced stepping, aborting it.  */

      for (thread_info *thr : inf->non_exited_threads ())
	{
	  if (thr->displaced_step_state.in_progress ())
	    {
	      if (thr->executing ())
		{
		  /* Ask the target to stop the thread; don't re-request
		     if a stop is already pending.  */
		  if (!thr->stop_requested)
		    {
		      target_stop (thr->ptid);
		      thr->stop_requested = true;
		    }
		}
	      else
		thr->set_resumed (false);
	    }
	}

      /* Consume events until no thread of INF is displaced stepping
	 anymore; handle_one finishes each step-over as its stop event
	 arrives.  */
      while (displaced_step_in_progress (inf))
	{
	  wait_one_event event;

	  event.target = inf->process_target ();
	  event.ptid = do_target_wait_1 (inf, pid_ptid, &event.ws, 0);

	  if (debug_infrun)
	    print_target_wait_results (pid_ptid, event.ptid, event.ws);

	  handle_one (event);
	}

      /* It's OK to leave some of the threads of INF stopped, since
	 they'll be detached shortly.  */
    }
}
4329
4330 /* If all-stop, but there exists a non-stop target, stop all threads
4331 now that we're presenting the stop to the user. */
4332
4333 static void
4334 stop_all_threads_if_all_stop_mode ()
4335 {
4336 if (!non_stop && exists_non_stop_target ())
4337 stop_all_threads ("presenting stop to user in all-stop");
4338 }
4339
/* Wait for control to return from inferior to debugger.

   If inferior gets a signal, we may decide to start it up again
   instead of returning.  That is why there is a loop in this function.
   When this function actually returns it means the inferior
   should be left stopped and GDB should read more commands.  */

static void
wait_for_inferior (inferior *inf)
{
  infrun_debug_printf ("wait_for_inferior ()");

  SCOPE_EXIT { delete_just_stopped_threads_infrun_breakpoints (); };

  /* If an error happens while handling the event, propagate GDB's
     knowledge of the executing state to the frontend/user running
     state.  */
  scoped_finish_thread_state finish_state
    (inf->process_target (), minus_one_ptid);

  while (1)
    {
      execution_control_state ecs;

      overlay_cache_invalid = 1;

      /* Flush target cache before starting to handle each event.
	 Target was running and cache could be stale.  This is just a
	 heuristic.  Running threads may modify target memory, but we
	 don't get any event.  */
      target_dcache_invalidate (current_program_space->aspace);

      /* Block waiting for the next event from the target.  */
      ecs.ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs.ws, 0);
      ecs.target = inf->process_target ();

      if (debug_infrun)
	print_target_wait_results (minus_one_ptid, ecs.ptid, ecs.ws);

      /* Now figure out what to do with the result of the wait.  */
      handle_inferior_event (&ecs);

      if (!ecs.wait_some_more)
	break;
    }

  stop_all_threads_if_all_stop_mode ();

  /* No error, don't finish the state yet.  */
  finish_state.release ();
}
4390
4391 /* Cleanup that reinstalls the readline callback handler, if the
4392 target is running in the background. If while handling the target
4393 event something triggered a secondary prompt, like e.g., a
4394 pagination prompt, we'll have removed the callback handler (see
4395 gdb_readline_wrapper_line). Need to do this as we go back to the
4396 event loop, ready to process further input. Note this has no
4397 effect if the handler hasn't actually been removed, because calling
4398 rl_callback_handler_install resets the line buffer, thus losing
4399 input. */
4400
4401 static void
4402 reinstall_readline_callback_handler_cleanup ()
4403 {
4404 struct ui *ui = current_ui;
4405
4406 if (!ui->async)
4407 {
4408 /* We're not going back to the top level event loop yet. Don't
4409 install the readline callback, as it'd prep the terminal,
4410 readline-style (raw, noecho) (e.g., --batch). We'll install
4411 it the next time the prompt is displayed, when we're ready
4412 for input. */
4413 return;
4414 }
4415
4416 if (ui->command_editing && ui->prompt_state != PROMPT_BLOCKED)
4417 gdb_rl_callback_handler_reinstall ();
4418 }
4419
/* Clean up the FSMs of threads that are now stopped.  In non-stop,
   that's just the event thread.  In all-stop, that's all threads.  In
   all-stop, threads that had a pending exit no longer have a reason
   to be around, as their FSMs/commands are canceled, so we delete
   them.  This avoids "info threads" listing such threads as if they
   were alive (and failing to read their registers), the user being
   able to select and resume them (and that failing), etc.  */

static void
clean_up_just_stopped_threads_fsms (struct execution_control_state *ecs)
{
  /* The first clean_up call below assumes the event thread is the current
     one.  */
  if (ecs->event_thread != nullptr)
    gdb_assert (ecs->event_thread == inferior_thread ());

  if (ecs->event_thread != nullptr
      && ecs->event_thread->thread_fsm () != nullptr)
    ecs->event_thread->thread_fsm ()->clean_up (ecs->event_thread);

  if (!non_stop)
    {
      /* Restore the selected thread afterwards, as the loop below
	 switches threads.  */
      scoped_restore_current_thread restore_thread;

      /* Use the _safe variant, as the loop may delete threads.  */
      for (thread_info *thr : all_threads_safe ())
	{
	  if (thr->state == THREAD_EXITED)
	    continue;

	  /* The event thread was already handled above.  */
	  if (thr == ecs->event_thread)
	    continue;

	  if (thr->thread_fsm () != nullptr)
	    {
	      switch_to_thread (thr);
	      thr->thread_fsm ()->clean_up (thr);
	    }

	  /* As we are cancelling the command/FSM of this thread,
	     whatever was the reason we needed to report a thread
	     exited event to the user, that reason is gone.  Delete
	     the thread, so that the user doesn't see it in the thread
	     list, the next proceed doesn't try to resume it, etc.  */
	  if (thr->has_pending_waitstatus ()
	      && (thr->pending_waitstatus ().kind ()
		  == TARGET_WAITKIND_THREAD_EXITED))
	    delete_thread (thr);
	}
    }
}
4470
4471 /* Helper for all_uis_check_sync_execution_done that works on the
4472 current UI. */
4473
4474 static void
4475 check_curr_ui_sync_execution_done (void)
4476 {
4477 struct ui *ui = current_ui;
4478
4479 if (ui->prompt_state == PROMPT_NEEDED
4480 && ui->async
4481 && !gdb_in_secondary_prompt_p (ui))
4482 {
4483 target_terminal::ours ();
4484 top_level_interpreter ()->on_sync_execution_done ();
4485 ui->register_file_handler ();
4486 }
4487 }
4488
/* See infrun.h.  */

void
all_uis_check_sync_execution_done (void)
{
  /* Run the per-UI check with each UI temporarily made current.  */
  SWITCH_THRU_ALL_UIS ()
    {
      check_curr_ui_sync_execution_done ();
    }
}
4499
/* See infrun.h.  */

void
all_uis_on_sync_execution_starting (void)
{
  /* For every UI that is waiting to display a prompt, suppress stdin
     handling while the synchronous execution is in progress.  */
  SWITCH_THRU_ALL_UIS ()
    {
      if (current_ui->prompt_state == PROMPT_NEEDED)
	async_disable_stdin ();
    }
}
4511
4512 /* A quit_handler callback installed while we're handling inferior
4513 events. */
4514
4515 static void
4516 infrun_quit_handler ()
4517 {
4518 if (target_terminal::is_ours ())
4519 {
4520 /* Do nothing.
4521
4522 default_quit_handler would throw a quit in this case, but if
4523 we're handling an event while we have the terminal, it means
4524 the target is running a background execution command, and
4525 thus when users press Ctrl-C, they're wanting to interrupt
4526 whatever command they were executing in the command line.
4527 E.g.:
4528
4529 (gdb) c&
4530 (gdb) foo bar whatever<ctrl-c>
4531
4532 That Ctrl-C should clear the input line, not interrupt event
4533 handling if it happens that the user types Ctrl-C at just the
4534 "wrong" time!
4535
4536 It's as-if background event handling was handled by a
4537 separate background thread.
4538
4539 To be clear, the Ctrl-C is not lost -- it will be processed
4540 by the next QUIT call once we're out of fetch_inferior_event
4541 again. */
4542 }
4543 else
4544 {
4545 if (check_quit_flag ())
4546 target_pass_ctrlc ();
4547 }
4548 }
4549
4550 /* Asynchronous version of wait_for_inferior. It is called by the
4551 event loop whenever a change of state is detected on the file
4552 descriptor corresponding to the target. It can be called more than
4553 once to complete a single execution command. In such cases we need
4554 to keep the state in a global variable ECSS. If it is the last time
4555 that this function is called for a single execution command, then
4556 report to the user that the inferior has stopped, and do the
4557 necessary cleanups. */
4558
void
fetch_inferior_event ()
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  /* Per-event state, threaded through the handling below.  */
  execution_control_state ecs;
  /* Set when an execution command fully completed during this event.  */
  int cmd_done = 0;

  /* Events are always processed with the main UI as current UI.  This
     way, warnings, debug output, etc. are always consistently sent to
     the main console.  */
  scoped_restore save_ui = make_scoped_restore (&current_ui, main_ui);

  /* Temporarily disable pagination.  Otherwise, the user would be
     given an option to press 'q' to quit, which would cause an early
     exit and could leave GDB in a half-baked state.  */
  scoped_restore save_pagination
    = make_scoped_restore (&pagination_enabled, false);

  /* Install a quit handler that does nothing if we have the terminal
     (meaning the target is running a background execution command),
     so that Ctrl-C never interrupts GDB before the event is fully
     handled.  */
  scoped_restore restore_quit_handler
    = make_scoped_restore (&quit_handler, infrun_quit_handler);

  /* Make sure a SIGINT does not interrupt an extension language while
     we're handling an event.  That could interrupt a Python unwinder
     or a Python observer or some such.  A Ctrl-C should either be
     forwarded to the inferior if the inferior has the terminal, or,
     if GDB has the terminal, should interrupt the command the user is
     typing in the CLI.  */
  scoped_disable_cooperative_sigint_handling restore_coop_sigint;

  /* End up with readline processing input, if necessary.  */
  {
    SCOPE_EXIT { reinstall_readline_callback_handler_cleanup (); };

    /* We're handling a live event, so make sure we're doing live
       debugging.  If we're looking at traceframes while the target is
       running, we're going to need to get back to that mode after
       handling the event.  */
    std::optional<scoped_restore_current_traceframe> maybe_restore_traceframe;
    if (non_stop)
      {
	maybe_restore_traceframe.emplace ();
	set_current_traceframe (-1);
      }

    /* The user/frontend should not notice a thread switch due to
       internal events.  Make sure we revert to the user selected
       thread and frame after handling the event and running any
       breakpoint commands.  */
    scoped_restore_current_thread restore_thread;

    overlay_cache_invalid = 1;
    /* Flush target cache before starting to handle each event.  Target
       was running and cache could be stale.  This is just a heuristic.
       Running threads may modify target memory, but we don't get any
       event.  */
    target_dcache_invalidate (current_program_space->aspace);

    scoped_restore save_exec_dir
      = make_scoped_restore (&execution_direction,
			     target_execution_direction ());

    /* Allow targets to pause their resumed threads while we handle
       the event.  */
    scoped_disable_commit_resumed disable_commit_resumed ("handling event");

    /* Poll (TARGET_WNOHANG); if no event is actually pending, let the
       targets resume and bail out.  */
    if (!do_target_wait (&ecs, TARGET_WNOHANG))
      {
	infrun_debug_printf ("do_target_wait returned no event");
	disable_commit_resumed.reset_and_commit ();
	return;
      }

    gdb_assert (ecs.ws.kind () != TARGET_WAITKIND_IGNORE);

    /* Switch to the inferior that generated the event, so we can do
       target calls.  If the event was not associated to a ptid, just
       switch to the event's target, with no thread/inferior
       selected.  */
    if (ecs.ptid != null_ptid
	&& ecs.ptid != minus_one_ptid)
      switch_to_inferior_no_thread (find_inferior_ptid (ecs.target, ecs.ptid));
    else
      switch_to_target_no_thread (ecs.target);

    if (debug_infrun)
      print_target_wait_results (minus_one_ptid, ecs.ptid, ecs.ws);

    /* If an error happens while handling the event, propagate GDB's
       knowledge of the executing state to the frontend/user running
       state.  */
    ptid_t finish_ptid = !target_is_non_stop_p () ? minus_one_ptid : ecs.ptid;
    scoped_finish_thread_state finish_state (ecs.target, finish_ptid);

    /* Get executed before scoped_restore_current_thread above to apply
       still for the thread which has thrown the exception.  */
    auto defer_bpstat_clear
      = make_scope_exit (bpstat_clear_actions);
    auto defer_delete_threads
      = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints);

    /* Sample the stop-id before handling the event, so that afterwards
       we can tell whether handle_inferior_event already presented a
       stop to the user.  */
    int stop_id = get_stop_id ();

    /* Now figure out what to do with the event.  */
    handle_inferior_event (&ecs);

    if (!ecs.wait_some_more)
      {
	struct inferior *inf = find_inferior_ptid (ecs.target, ecs.ptid);
	bool should_stop = true;
	struct thread_info *thr = ecs.event_thread;

	delete_just_stopped_threads_infrun_breakpoints ();

	/* Let the thread's state machine (e.g. a "finish" or "until"
	   command) decide whether this event really stops the
	   command.  */
	if (thr != nullptr && thr->thread_fsm () != nullptr)
	  should_stop = thr->thread_fsm ()->should_stop (thr);

	if (!should_stop)
	  {
	    keep_going (&ecs);
	  }
	else
	  {
	    bool should_notify_stop = true;
	    bool proceeded = false;

	    stop_all_threads_if_all_stop_mode ();

	    clean_up_just_stopped_threads_fsms (&ecs);

	    if (stop_id != get_stop_id ())
	      {
		/* If the stop-id has changed then a stop has already been
		   presented to the user in handle_inferior_event, this is
		   likely a failed inferior call.  As the stop has already
		   been announced then we should not notify again.

		   Also, if the prompt state is not PROMPT_NEEDED then GDB
		   will not be ready for user input after this function.  */
		should_notify_stop = false;
		gdb_assert (current_ui->prompt_state == PROMPT_NEEDED);
	      }
	    else if (thr != nullptr && thr->thread_fsm () != nullptr)
	      should_notify_stop
		= thr->thread_fsm ()->should_notify_stop ();

	    if (should_notify_stop)
	      {
		/* We may not find an inferior if this was a process exit.  */
		if (inf == nullptr || inf->control.stop_soon == NO_STOP_QUIETLY)
		  proceeded = normal_stop ();
	      }

	    if (!proceeded)
	      {
		inferior_event_handler (INF_EXEC_COMPLETE);
		cmd_done = 1;
	      }

	    /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
	       previously selected thread is gone.  We have two
	       choices - switch to no thread selected, or restore the
	       previously selected thread (now exited).  We chose the
	       later, just because that's what GDB used to do.  After
	       this, "info threads" says "The current thread <Thread
	       ID 2> has terminated." instead of "No thread
	       selected.".  */
	    if (!non_stop
		&& cmd_done
		&& ecs.ws.kind () != TARGET_WAITKIND_NO_RESUMED)
	      restore_thread.dont_restore ();
	  }
      }

    defer_delete_threads.release ();
    defer_bpstat_clear.release ();

    /* No error, don't finish the thread states yet.  */
    finish_state.release ();

    disable_commit_resumed.reset_and_commit ();

    /* This scope is used to ensure that readline callbacks are
       reinstalled here.  */
  }

  /* Handling this event might have caused some inferiors to become prunable.
     For example, the exit of an inferior that was automatically added.  Try
     to get rid of them.  Keeping those around slows down things linearly.

     Note that this never removes the current inferior.  Therefore, call this
     after RESTORE_THREAD went out of scope, in case the event inferior (which was
     temporarily made the current inferior) is meant to be deleted.

     Call this before all_uis_check_sync_execution_done, so that notifications about
     removed inferiors appear before the prompt.  */
  prune_inferiors ();

  /* If a UI was in sync execution mode, and now isn't, restore its
     prompt (a synchronous execution command has finished, and we're
     ready for input).  */
  all_uis_check_sync_execution_done ();

  if (cmd_done
      && exec_done_display_p
      && (inferior_ptid == null_ptid
	  || inferior_thread ()->state != THREAD_RUNNING))
    gdb_printf (_("completed.\n"));
}
4770
4771 /* See infrun.h. */
4772
4773 void
4774 set_step_info (thread_info *tp, const frame_info_ptr &frame,
4775 struct symtab_and_line sal)
4776 {
4777 /* This can be removed once this function no longer implicitly relies on the
4778 inferior_ptid value. */
4779 gdb_assert (inferior_ptid == tp->ptid);
4780
4781 tp->control.step_frame_id = get_frame_id (frame);
4782 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
4783
4784 tp->current_symtab = sal.symtab;
4785 tp->current_line = sal.line;
4786
4787 infrun_debug_printf
4788 ("symtab = %s, line = %d, step_frame_id = %s, step_stack_frame_id = %s",
4789 tp->current_symtab != nullptr ? tp->current_symtab->filename : "<null>",
4790 tp->current_line,
4791 tp->control.step_frame_id.to_string ().c_str (),
4792 tp->control.step_stack_frame_id.to_string ().c_str ());
4793 }
4794
4795 /* Clear context switchable stepping state. */
4796
void
init_thread_stepping_state (struct thread_info *tss)
{
  /* Reset all of TSS's per-thread flags related to stepping over
     breakpoints and watchpoints.  */
  tss->stepped_breakpoint = 0;
  tss->stepping_over_breakpoint = 0;
  tss->stepping_over_watchpoint = 0;
  tss->step_after_step_resume_breakpoint = 0;
}
4805
4806 /* See infrun.h. */
4807
void
set_last_target_status (process_stratum_target *target, ptid_t ptid,
			const target_waitstatus &status)
{
  /* Record the target, ptid and waitstatus of the most recent wait
     event, for later retrieval by get_last_target_status.  */
  target_last_proc_target = target;
  target_last_wait_ptid = ptid;
  target_last_waitstatus = status;
}
4816
4817 /* See infrun.h. */
4818
void
get_last_target_status (process_stratum_target **target, ptid_t *ptid,
			target_waitstatus *status)
{
  /* Each out-parameter is optional; only fill in the ones the caller
     passed.  */
  if (target != nullptr)
    *target = target_last_proc_target;
  if (ptid != nullptr)
    *ptid = target_last_wait_ptid;
  if (status != nullptr)
    *status = target_last_waitstatus;
}
4830
4831 /* See infrun.h. */
4832
void
nullify_last_target_wait_ptid (void)
{
  /* Forget the last recorded wait event: no target, wildcard ptid,
     and a default-constructed (empty) waitstatus.  */
  target_last_proc_target = nullptr;
  target_last_wait_ptid = minus_one_ptid;
  target_last_waitstatus = {};
}
4840
4841 /* Switch thread contexts. */
4842
4843 static void
4844 context_switch (execution_control_state *ecs)
4845 {
4846 if (ecs->ptid != inferior_ptid
4847 && (inferior_ptid == null_ptid
4848 || ecs->event_thread != inferior_thread ()))
4849 {
4850 infrun_debug_printf ("Switching context from %s to %s",
4851 inferior_ptid.to_string ().c_str (),
4852 ecs->ptid.to_string ().c_str ());
4853 }
4854
4855 switch_to_thread (ecs->event_thread);
4856 }
4857
4858 /* If the target can't tell whether we've hit breakpoints
4859 (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
4860 check whether that could have been caused by a breakpoint. If so,
4861 adjust the PC, per gdbarch_decr_pc_after_break. */
4862
static void
adjust_pc_after_break (struct thread_info *thread,
		       const target_waitstatus &ws)
{
  struct regcache *regcache;
  struct gdbarch *gdbarch;
  CORE_ADDR breakpoint_pc, decr_pc;

  /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP.  If
     we aren't, just return.

     We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
     affected by gdbarch_decr_pc_after_break.  Other waitkinds which are
     implemented by software breakpoints should be handled through the normal
     breakpoint layer.

     NOTE drow/2004-01-31: On some targets, breakpoints may generate
     different signals (SIGILL or SIGEMT for instance), but it is less
     clear where the PC is pointing afterwards.  It may not match
     gdbarch_decr_pc_after_break.  I don't know any specific target that
     generates these signals at breakpoints (the code has been in GDB since at
     least 1992) so I can not guess how to handle them here.

     In earlier versions of GDB, a target with
     gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
     watchpoint affected by gdbarch_decr_pc_after_break.  I haven't found any
     target with both of these set in GDB history, and it seems unlikely to be
     correct, so gdbarch_have_nonsteppable_watchpoint is not checked here.  */

  if (ws.kind () != TARGET_WAITKIND_STOPPED)
    return;

  if (ws.sig () != GDB_SIGNAL_TRAP)
    return;

  /* In reverse execution, when a breakpoint is hit, the instruction
     under it has already been de-executed.  The reported PC always
     points at the breakpoint address, so adjusting it further would
     be wrong.  E.g., consider this case on a decr_pc_after_break == 1
     architecture:

       B1         0x08000000 :   INSN1
       B2         0x08000001 :   INSN2
		  0x08000002 :   INSN3
       PC ->      0x08000003 :   INSN4

     Say you're stopped at 0x08000003 as above.  Reverse continuing
     from that point should hit B2 as below.  Reading the PC when the
     SIGTRAP is reported should read 0x08000001 and INSN2 should have
     been de-executed already.

       B1         0x08000000 :   INSN1
       B2   PC -> 0x08000001 :   INSN2
		  0x08000002 :   INSN3
		  0x08000003 :   INSN4

     We can't apply the same logic as for forward execution, because
     we would wrongly adjust the PC to 0x08000000, since there's a
     breakpoint at PC - 1.  We'd then report a hit on B1, although
     INSN1 hadn't been de-executed yet.  Doing nothing is the correct
     behaviour.  */
  if (execution_direction == EXEC_REVERSE)
    return;

  /* If the target can tell whether the thread hit a SW breakpoint,
     trust it.  Targets that can tell also adjust the PC
     themselves.  */
  if (target_supports_stopped_by_sw_breakpoint ())
    return;

  /* Note that relying on whether a breakpoint is planted in memory to
     determine this can fail.  E.g,. the breakpoint could have been
     removed since.  Or the thread could have been told to step an
     instruction the size of a breakpoint instruction, and only
     _after_ was a breakpoint inserted at its address.  */

  /* If this target does not decrement the PC after breakpoints, then
     we have nothing to do.  */
  regcache = get_thread_regcache (thread);
  gdbarch = regcache->arch ();

  decr_pc = gdbarch_decr_pc_after_break (gdbarch);
  if (decr_pc == 0)
    return;

  const address_space *aspace = thread->inf->aspace.get ();

  /* Find the location where (if we've hit a breakpoint) the
     breakpoint would be.  */
  breakpoint_pc = regcache_read_pc (regcache) - decr_pc;

  /* If the target can't tell whether a software breakpoint triggered,
     fallback to figuring it out based on breakpoints we think were
     inserted in the target, and on whether the thread was stepped or
     continued.  */

  /* Check whether there actually is a software breakpoint inserted at
     that location.

     If in non-stop mode, a race condition is possible where we've
     removed a breakpoint, but stop events for that breakpoint were
     already queued and arrive later.  To suppress those spurious
     SIGTRAPs, we keep a list of such breakpoint locations for a bit,
     and retire them after a number of stop events are reported.  Note
     this is an heuristic and can thus get confused.  The real fix is
     to get the "stopped by SW BP and needs adjustment" info out of
     the target/kernel (and thus never reach here; see above).  */
  if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
      || (target_is_non_stop_p ()
	  && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
    {
      std::optional<scoped_restore_tmpl<int>> restore_operation_disable;

      /* Don't let the PC write below be recorded by the record-full
	 target as an inferior operation.  */
      if (record_full_is_used ())
	restore_operation_disable.emplace
	  (record_full_gdb_operation_disable_set ());

      /* When using hardware single-step, a SIGTRAP is reported for both
	 a completed single-step and a software breakpoint.  Need to
	 differentiate between the two, as the latter needs adjusting
	 but the former does not.

	 The SIGTRAP can be due to a completed hardware single-step only if
	  - we didn't insert software single-step breakpoints
	  - this thread is currently being stepped

	 If any of these events did not occur, we must have stopped due
	 to hitting a software breakpoint, and have to back up to the
	 breakpoint address.

	 As a special case, we could have hardware single-stepped a
	 software breakpoint.  In this case (prev_pc == breakpoint_pc),
	 we also need to back up to the breakpoint address.  */

      if (thread_has_single_step_breakpoints_set (thread)
	  || !currently_stepping (thread)
	  || (thread->stepped_breakpoint
	      && thread->prev_pc == breakpoint_pc))
	regcache_write_pc (regcache, breakpoint_pc);
    }
}
5004
5005 static bool
5006 stepped_in_from (const frame_info_ptr &initial_frame, frame_id step_frame_id)
5007 {
5008 frame_info_ptr frame = initial_frame;
5009
5010 for (frame = get_prev_frame (frame);
5011 frame != nullptr;
5012 frame = get_prev_frame (frame))
5013 {
5014 if (get_frame_id (frame) == step_frame_id)
5015 return true;
5016
5017 if (get_frame_type (frame) != INLINE_FRAME)
5018 break;
5019 }
5020
5021 return false;
5022 }
5023
5024 /* Look for an inline frame that is marked for skip.
5025 If PREV_FRAME is TRUE start at the previous frame,
5026 otherwise start at the current frame. Stop at the
5027 first non-inline frame, or at the frame where the
5028 step started. */
5029
5030 static bool
5031 inline_frame_is_marked_for_skip (bool prev_frame, struct thread_info *tp)
5032 {
5033 frame_info_ptr frame = get_current_frame ();
5034
5035 if (prev_frame)
5036 frame = get_prev_frame (frame);
5037
5038 for (; frame != nullptr; frame = get_prev_frame (frame))
5039 {
5040 const char *fn = nullptr;
5041 symtab_and_line sal;
5042 struct symbol *sym;
5043
5044 if (get_frame_id (frame) == tp->control.step_frame_id)
5045 break;
5046 if (get_frame_type (frame) != INLINE_FRAME)
5047 break;
5048
5049 sal = find_frame_sal (frame);
5050 sym = get_frame_function (frame);
5051
5052 if (sym != nullptr)
5053 fn = sym->print_name ();
5054
5055 if (sal.line != 0
5056 && function_name_is_marked_for_skip (fn, sal))
5057 return true;
5058 }
5059
5060 return false;
5061 }
5062
5063 /* If the event thread has the stop requested flag set, pretend it
5064 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
5065 target_stop). */
5066
5067 static bool
5068 handle_stop_requested (struct execution_control_state *ecs)
5069 {
5070 if (ecs->event_thread->stop_requested)
5071 {
5072 ecs->ws.set_stopped (GDB_SIGNAL_0);
5073 handle_signal_stop (ecs);
5074 return true;
5075 }
5076 return false;
5077 }
5078
5079 /* Auxiliary function that handles syscall entry/return events.
5080 It returns true if the inferior should keep going (and GDB
5081 should ignore the event), or false if the event deserves to be
5082 processed. */
5083
5084 static bool
5085 handle_syscall_event (struct execution_control_state *ecs)
5086 {
5087 struct regcache *regcache;
5088 int syscall_number;
5089
5090 context_switch (ecs);
5091
5092 regcache = get_thread_regcache (ecs->event_thread);
5093 syscall_number = ecs->ws.syscall_number ();
5094 ecs->event_thread->set_stop_pc (regcache_read_pc (regcache));
5095
5096 if (catch_syscall_enabled ()
5097 && catching_syscall_number (syscall_number))
5098 {
5099 infrun_debug_printf ("syscall number=%d", syscall_number);
5100
5101 ecs->event_thread->control.stop_bpstat
5102 = bpstat_stop_status_nowatch (ecs->event_thread->inf->aspace.get (),
5103 ecs->event_thread->stop_pc (),
5104 ecs->event_thread, ecs->ws);
5105
5106 if (handle_stop_requested (ecs))
5107 return false;
5108
5109 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
5110 {
5111 /* Catchpoint hit. */
5112 return false;
5113 }
5114 }
5115
5116 if (handle_stop_requested (ecs))
5117 return false;
5118
5119 /* If no catchpoint triggered for this, then keep going. */
5120 keep_going (ecs);
5121
5122 return true;
5123 }
5124
5125 /* Lazily fill in the execution_control_state's stop_func_* fields. */
5126
5127 static void
5128 fill_in_stop_func (struct gdbarch *gdbarch,
5129 struct execution_control_state *ecs)
5130 {
5131 if (!ecs->stop_func_filled_in)
5132 {
5133 const block *block;
5134 const general_symbol_info *gsi;
5135
5136 /* Don't care about return value; stop_func_start and stop_func_name
5137 will both be 0 if it doesn't work. */
5138 find_pc_partial_function_sym (ecs->event_thread->stop_pc (),
5139 &gsi,
5140 &ecs->stop_func_start,
5141 &ecs->stop_func_end,
5142 &block);
5143 ecs->stop_func_name = gsi == nullptr ? nullptr : gsi->print_name ();
5144
5145 /* The call to find_pc_partial_function, above, will set
5146 stop_func_start and stop_func_end to the start and end
5147 of the range containing the stop pc. If this range
5148 contains the entry pc for the block (which is always the
5149 case for contiguous blocks), advance stop_func_start past
5150 the function's start offset and entrypoint. Note that
5151 stop_func_start is NOT advanced when in a range of a
5152 non-contiguous block that does not contain the entry pc. */
5153 if (block != nullptr
5154 && ecs->stop_func_start <= block->entry_pc ()
5155 && block->entry_pc () < ecs->stop_func_end)
5156 {
5157 ecs->stop_func_start
5158 += gdbarch_deprecated_function_start_offset (gdbarch);
5159
5160 /* PowerPC functions have a Local Entry Point (LEP) and a Global
5161 Entry Point (GEP). There is only one Entry Point (GEP = LEP) for
5162 other architectures. */
5163 ecs->stop_func_alt_start = ecs->stop_func_start;
5164
5165 if (gdbarch_skip_entrypoint_p (gdbarch))
5166 ecs->stop_func_start
5167 = gdbarch_skip_entrypoint (gdbarch, ecs->stop_func_start);
5168 }
5169
5170 ecs->stop_func_filled_in = 1;
5171 }
5172 }
5173
5174
5175 /* Return the STOP_SOON field of the inferior pointed at by ECS. */
5176
5177 static enum stop_kind
5178 get_inferior_stop_soon (execution_control_state *ecs)
5179 {
5180 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
5181
5182 gdb_assert (inf != nullptr);
5183 return inf->control.stop_soon;
5184 }
5185
5186 /* Poll for one event out of the current target. Store the resulting
5187 waitstatus in WS, and return the event ptid. Does not block. */
5188
5189 static ptid_t
5190 poll_one_curr_target (struct target_waitstatus *ws)
5191 {
5192 ptid_t event_ptid;
5193
5194 overlay_cache_invalid = 1;
5195
5196 /* Flush target cache before starting to handle each event.
5197 Target was running and cache could be stale. This is just a
5198 heuristic. Running threads may modify target memory, but we
5199 don't get any event. */
5200 target_dcache_invalidate (current_program_space->aspace);
5201
5202 event_ptid = target_wait (minus_one_ptid, ws, TARGET_WNOHANG);
5203
5204 if (debug_infrun)
5205 print_target_wait_results (minus_one_ptid, event_ptid, *ws);
5206
5207 return event_ptid;
5208 }
5209
5210 /* Wait for one event out of any target. */
5211
static wait_one_event
wait_one ()
{
  while (1)
    {
      /* First, poll each async target that still has executing
	 threads, returning the first interesting event found.  */
      for (inferior *inf : all_inferiors ())
	{
	  process_stratum_target *target = inf->process_target ();
	  if (target == nullptr
	      || !target->is_async_p ()
	      || !target->threads_executing)
	    continue;

	  switch_to_inferior_no_thread (inf);

	  wait_one_event event;
	  event.target = target;
	  event.ptid = poll_one_curr_target (&event.ws);

	  if (event.ws.kind () == TARGET_WAITKIND_NO_RESUMED)
	    {
	      /* If nothing is resumed, remove the target from the
		 event loop.  */
	      target_async (false);
	    }
	  else if (event.ws.kind () != TARGET_WAITKIND_IGNORE)
	    return event;
	}

      /* Block waiting for some event.  */

      fd_set readfds;
      int nfds = 0;

      FD_ZERO (&readfds);

      /* Collect the async event file descriptors of all waitable
	 targets into the select set.  */
      for (inferior *inf : all_inferiors ())
	{
	  process_stratum_target *target = inf->process_target ();
	  if (target == nullptr
	      || !target->is_async_p ()
	      || !target->threads_executing)
	    continue;

	  int fd = target->async_wait_fd ();
	  FD_SET (fd, &readfds);
	  if (nfds <= fd)
	    nfds = fd + 1;
	}

      if (nfds == 0)
	{
	  /* No waitable targets left.  All must be stopped.  */
	  infrun_debug_printf ("no waitable targets left");

	  target_waitstatus ws;
	  ws.set_no_resumed ();
	  return {nullptr, minus_one_ptid, std::move (ws)};
	}

      QUIT;

      /* Wait for at least one target fd to become readable, then loop
	 back and poll again.  */
      int numfds = interruptible_select (nfds, &readfds, 0, nullptr, 0);
      if (numfds < 0)
	{
	  if (errno == EINTR)
	    continue;
	  else
	    perror_with_name ("interruptible_select");
	}
    }
}
5284
5285 /* Save the thread's event and stop reason to process it later. */
5286
static void
save_waitstatus (struct thread_info *tp, const target_waitstatus &ws)
{
  infrun_debug_printf ("saving status %s for %s",
		       ws.to_string ().c_str (),
		       tp->ptid.to_string ().c_str ());

  /* Record for later.  */
  tp->set_pending_waitstatus (ws);

  /* For SIGTRAP stops, also work out and record why the thread
     stopped (watchpoint, SW/HW breakpoint, single-step).  */
  if (ws.kind () == TARGET_WAITKIND_STOPPED
      && ws.sig () == GDB_SIGNAL_TRAP)
    {
      struct regcache *regcache = get_thread_regcache (tp);
      const address_space *aspace = tp->inf->aspace.get ();
      /* NOTE(review): PC is sampled before adjust_pc_after_break below,
	 so the inserted-breakpoint checks further down use the
	 unadjusted PC — confirm this ordering is intended.  */
      CORE_ADDR pc = regcache_read_pc (regcache);

      adjust_pc_after_break (tp, tp->pending_waitstatus ());

      /* The target_stopped_by_* queries below operate on the current
	 thread; temporarily switch to TP.  */
      scoped_restore_current_thread restore_thread;
      switch_to_thread (tp);

      if (target_stopped_by_watchpoint ())
	tp->set_stop_reason (TARGET_STOPPED_BY_WATCHPOINT);
      else if (target_supports_stopped_by_sw_breakpoint ()
	       && target_stopped_by_sw_breakpoint ())
	tp->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT);
      else if (target_supports_stopped_by_hw_breakpoint ()
	       && target_stopped_by_hw_breakpoint ())
	tp->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT);
      else if (!target_supports_stopped_by_hw_breakpoint ()
	       && hardware_breakpoint_inserted_here_p (aspace, pc))
	tp->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT);
      else if (!target_supports_stopped_by_sw_breakpoint ()
	       && software_breakpoint_inserted_here_p (aspace, pc))
	tp->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT);
      else if (!thread_has_single_step_breakpoints_set (tp)
	       && currently_stepping (tp))
	tp->set_stop_reason (TARGET_STOPPED_BY_SINGLE_STEP);
    }
}
5328
5329 /* Mark the non-executing threads accordingly. In all-stop, all
5330 threads of all processes are stopped when we get any event
5331 reported. In non-stop mode, only the event thread stops. */
5332
5333 static void
5334 mark_non_executing_threads (process_stratum_target *target,
5335 ptid_t event_ptid,
5336 const target_waitstatus &ws)
5337 {
5338 ptid_t mark_ptid;
5339
5340 if (!target_is_non_stop_p ())
5341 mark_ptid = minus_one_ptid;
5342 else if (ws.kind () == TARGET_WAITKIND_SIGNALLED
5343 || ws.kind () == TARGET_WAITKIND_EXITED)
5344 {
5345 /* If we're handling a process exit in non-stop mode, even
5346 though threads haven't been deleted yet, one would think
5347 that there is nothing to do, as threads of the dead process
5348 will be soon deleted, and threads of any other process were
5349 left running. However, on some targets, threads survive a
5350 process exit event. E.g., for the "checkpoint" command,
5351 when the current checkpoint/fork exits, linux-fork.c
5352 automatically switches to another fork from within
5353 target_mourn_inferior, by associating the same
5354 inferior/thread to another fork. We haven't mourned yet at
5355 this point, but we must mark any threads left in the
5356 process as not-executing so that finish_thread_state marks
5357 them stopped (in the user's perspective) if/when we present
5358 the stop to the user. */
5359 mark_ptid = ptid_t (event_ptid.pid ());
5360 }
5361 else
5362 mark_ptid = event_ptid;
5363
5364 set_executing (target, mark_ptid, false);
5365
5366 /* Likewise the resumed flag. */
5367 set_resumed (target, mark_ptid, false);
5368 }
5369
5370 /* Handle one event after stopping threads. If the eventing thread
5371 reports back any interesting event, we leave it pending. If the
5372 eventing thread was in the middle of a displaced step, we
5373 cancel/finish it, and unless the thread's inferior is being
5374 detached, put the thread back in the step-over chain. Returns true
5375 if there are no resumed threads left in the target (thus there's no
5376 point in waiting further), false otherwise. */
5377
static bool
handle_one (const wait_one_event &event)
{
  infrun_debug_printf
    ("%s %s", event.ws.to_string ().c_str (),
     event.ptid.to_string ().c_str ());

  if (event.ws.kind () == TARGET_WAITKIND_NO_RESUMED)
    {
      /* All resumed threads exited.  */
      return true;
    }
  else if (event.ws.kind () == TARGET_WAITKIND_THREAD_EXITED
	   || event.ws.kind () == TARGET_WAITKIND_EXITED
	   || event.ws.kind () == TARGET_WAITKIND_SIGNALLED)
    {
      /* One thread/process exited/signalled.  */

      thread_info *t = nullptr;

      /* The target may have reported just a pid.  If so, try
	 the first non-exited thread.  */
      if (event.ptid.is_pid ())
	{
	  int pid = event.ptid.pid ();
	  inferior *inf = find_inferior_pid (event.target, pid);
	  for (thread_info *tp : inf->non_exited_threads ())
	    {
	      t = tp;
	      break;
	    }

	  /* If there is no available thread, the event would
	     have to be appended to a per-inferior event list,
	     which does not exist (and if it did, we'd have
	     to adjust run control command to be able to
	     resume such an inferior).  We assert here instead
	     of going into an infinite loop.  */
	  gdb_assert (t != nullptr);

	  infrun_debug_printf
	    ("using %s", t->ptid.to_string ().c_str ());
	}
      else
	{
	  t = event.target->find_thread (event.ptid);
	  /* Check if this is the first time we see this thread.
	     Don't bother adding if it individually exited.  */
	  if (t == nullptr
	      && event.ws.kind () != TARGET_WAITKIND_THREAD_EXITED)
	    t = add_thread (event.target, event.ptid);
	}

      if (t != nullptr)
	{
	  /* Set the threads as non-executing to avoid
	     another stop attempt on them.  */
	  switch_to_thread_no_regs (t);
	  mark_non_executing_threads (event.target, event.ptid,
				      event.ws);
	  /* Leave the exit event pending on the thread.  */
	  save_waitstatus (t, event.ws);
	  t->stop_requested = false;

	  if (event.ws.kind () == TARGET_WAITKIND_THREAD_EXITED)
	    {
	      /* An exited thread cannot have been mid-displaced-step
		 in a way that fails to finish.  */
	      if (displaced_step_finish (t, event.ws)
		  != DISPLACED_STEP_FINISH_STATUS_OK)
		{
		  gdb_assert_not_reached ("displaced_step_finish on "
					  "exited thread failed");
		}
	    }
	}
    }
  else
    {
      /* An ordinary stop event for one thread.  */
      thread_info *t = event.target->find_thread (event.ptid);
      if (t == nullptr)
	t = add_thread (event.target, event.ptid);

      t->stop_requested = 0;
      t->set_executing (false);
      t->set_resumed (false);
      t->control.may_range_step = 0;

      /* This may be the first time we see the inferior report
	 a stop.  */
      if (t->inf->needs_setup)
	{
	  switch_to_thread_no_regs (t);
	  setup_inferior (0);
	}

      if (event.ws.kind () == TARGET_WAITKIND_STOPPED
	  && event.ws.sig () == GDB_SIGNAL_0)
	{
	  /* We caught the event that we intended to catch, so
	     there's no event to save as pending.  */

	  if (displaced_step_finish (t, event.ws)
	      == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
	    {
	      /* Add it back to the step-over queue.  */
	      infrun_debug_printf
		("displaced-step of %s canceled",
		 t->ptid.to_string ().c_str ());

	      t->control.trap_expected = 0;
	      if (!t->inf->detaching)
		global_thread_step_over_chain_enqueue (t);
	    }
	}
      else
	{
	  /* The thread reported something other than the stop we
	     asked for; leave it pending for later processing.  */
	  struct regcache *regcache;

	  infrun_debug_printf
	    ("target_wait %s, saving status for %s",
	     event.ws.to_string ().c_str (),
	     t->ptid.to_string ().c_str ());

	  /* Record for later.  */
	  save_waitstatus (t, event.ws);

	  if (displaced_step_finish (t, event.ws)
	      == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
	    {
	      /* Add it back to the step-over queue.  */
	      t->control.trap_expected = 0;
	      if (!t->inf->detaching)
		global_thread_step_over_chain_enqueue (t);
	    }

	  regcache = get_thread_regcache (t);
	  t->set_stop_pc (regcache_read_pc (regcache));

	  infrun_debug_printf ("saved stop_pc=%s for %s "
			       "(currently_stepping=%d)",
			       paddress (current_inferior ()->arch (),
					 t->stop_pc ()),
			       t->ptid.to_string ().c_str (),
			       currently_stepping (t));
	}
    }

  return false;
}
5525
/* Helper for stop_all_threads.  wait_one waits for events until it
   sees a TARGET_WAITKIND_NO_RESUMED event.  When it sees one, it
   disables target_async for the target to stop waiting for events
   from it.  TARGET_WAITKIND_NO_RESUMED can be delayed though,
   consider, debugging against gdbserver:

    #1 - Threads 1-5 are running, and thread 1 hits a breakpoint.

    #2 - gdb processes the breakpoint hit for thread 1, stops all
	 threads, and steps thread 1 over the breakpoint.  While
	 stopping threads, some other threads reported interesting
	 events, which were left pending in the thread's objects
	 (infrun's queue).

    #3 - Thread 1 exits (it stepped an exit syscall), and gdbserver
	 reports the thread exit for thread 1.  The event ends up in
	 remote's stop reply queue.

    #4 - That was the last resumed thread, so gdbserver reports
	 no-resumed, and that event also ends up in remote's stop
	 reply queue, queued after the thread exit from #3.

    #5 - gdb processes the thread exit event, which finishes the
	 step-over, and so gdb restarts all threads (threads with
	 pending events are left marked resumed, but aren't set
	 executing).  The no-resumed event is still left pending in
	 the remote stop reply queue.

    #6 - Since there are now resumed threads with pending breakpoint
	 hits, gdb picks one at random to process next.

    #7 - gdb picks the breakpoint hit for thread 2 this time, and that
	 breakpoint also needs to be stepped over, so gdb stops all
	 threads again.

    #8 - stop_all_threads counts number of expected stops and calls
	 wait_one once for each.

    #9 - The first wait_one call collects the no-resumed event from #4
	 above.

    #10 - Seeing the no-resumed event, wait_one disables target async
	  for the remote target, to stop waiting for events from it.
	  wait_one from here on always returns no-resumed directly
	  without reaching the target.

    #11 - stop_all_threads still hasn't seen all the stops it expects,
	  so it does another pass.

    #12 - Since the remote target is not async (disabled in #10),
	  wait_one doesn't wait on it, so it won't see the expected
	  stops, and instead returns no-resumed directly.

    #13 - stop_all_threads still hasn't seen all the stops, so it
	  does another pass.  goto #12, looping forever.

   To handle this, we explicitly (re-)enable target async on all
   targets that can async every time stop_all_threads goes to wait
   for the expected stops.  */
5585
5586 static void
5587 reenable_target_async ()
5588 {
5589 for (inferior *inf : all_inferiors ())
5590 {
5591 process_stratum_target *target = inf->process_target ();
5592 if (target != nullptr
5593 && target->threads_executing
5594 && target->can_async_p ()
5595 && !target->is_async_p ())
5596 {
5597 switch_to_inferior_no_thread (inf);
5598 target_async (1);
5599 }
5600 }
5601 }
5602
/* See infrun.h.  */

void
stop_all_threads (const char *reason, inferior *inf)
{
  /* We may need multiple passes to discover all threads.  */
  int pass;
  int iterations = 0;

  /* Stopping "all threads" from here only makes sense when at least
     one target runs in non-stop mode; on a pure all-stop setup the
     target stops everything by itself.  */
  gdb_assert (exists_non_stop_target ());

  INFRUN_SCOPED_DEBUG_START_END ("reason=%s, inf=%d", reason,
				 inf != nullptr ? inf->num : -1);

  infrun_debug_show_threads ("non-exited threads",
			     all_non_exited_threads ());

  scoped_restore_current_thread restore_thread;

  /* Enable thread events on relevant targets.  */
  for (auto *target : all_non_exited_process_targets ())
    {
      /* If asked to stop only INF's threads, skip unrelated
	 targets.  */
      if (inf != nullptr && inf->process_target () != target)
	continue;

      switch_to_target_no_thread (target);
      target_thread_events (true);
    }

  SCOPE_EXIT
    {
      /* Disable thread events on relevant targets.  */
      for (auto *target : all_non_exited_process_targets ())
	{
	  if (inf != nullptr && inf->process_target () != target)
	    continue;

	  switch_to_target_no_thread (target);
	  target_thread_events (false);
	}

      /* Use debug_prefixed_printf directly to get a meaningful function
	 name.  */
      if (debug_infrun)
	debug_prefixed_printf ("infrun", "stop_all_threads", "done");
    };

  /* Request threads to stop, and then wait for the stops.  Because
     threads we already know about can spawn more threads while we're
     trying to stop them, and we only learn about new threads when we
     update the thread list, do this in a loop, and keep iterating
     until two passes find no threads that need to be stopped.  */
  for (pass = 0; pass < 2; pass++, iterations++)
    {
      infrun_debug_printf ("pass=%d, iterations=%d", pass, iterations);
      while (1)
	{
	  int waits_needed = 0;

	  /* Refresh thread lists first, so that threads spawned since
	     the last iteration become visible below.  */
	  for (auto *target : all_non_exited_process_targets ())
	    {
	      if (inf != nullptr && inf->process_target () != target)
		continue;

	      switch_to_target_no_thread (target);
	      update_thread_list ();
	    }

	  /* Go through all threads looking for threads that we need
	     to tell the target to stop.  */
	  for (thread_info *t : all_non_exited_threads ())
	    {
	      if (inf != nullptr && t->inf != inf)
		continue;

	      /* For a single-target setting with an all-stop target,
		 we would not even arrive here.  For a multi-target
		 setting, until GDB is able to handle a mixture of
		 all-stop and non-stop targets, simply skip all-stop
		 targets' threads.  This should be fine due to the
		 protection of 'check_multi_target_resumption'.  */

	      switch_to_thread_no_regs (t);
	      if (!target_is_non_stop_p ())
		continue;

	      if (t->executing ())
		{
		  /* If already stopping, don't request a stop again.
		     We just haven't seen the notification yet.  */
		  if (!t->stop_requested)
		    {
		      infrun_debug_printf (" %s executing, need stop",
					   t->ptid.to_string ().c_str ());
		      target_stop (t->ptid);
		      t->stop_requested = 1;
		    }
		  else
		    {
		      infrun_debug_printf (" %s executing, already stopping",
					   t->ptid.to_string ().c_str ());
		    }

		  /* Each outstanding stop request means one stop event
		     we must collect below.  */
		  if (t->stop_requested)
		    waits_needed++;
		}
	      else
		{
		  infrun_debug_printf (" %s not executing",
				       t->ptid.to_string ().c_str ());

		  /* The thread may be not executing, but still be
		     resumed with a pending status to process.  */
		  t->set_resumed (false);
		}
	    }

	  if (waits_needed == 0)
	    break;

	  /* If we find new threads on the second iteration, restart
	     over.  We want to see two iterations in a row with all
	     threads stopped.  */
	  if (pass > 0)
	    pass = -1;

	  /* Make sure no target has async disabled (a previous
	     no-resumed event may have disabled it; see the comment
	     above reenable_target_async).  */
	  reenable_target_async ();

	  for (int i = 0; i < waits_needed; i++)
	    {
	      wait_one_event event = wait_one ();
	      /* A true return means no more stop events are coming for
		 this round; stop collecting.  */
	      if (handle_one (event))
		break;
	    }
	}
    }
}
5740
/* Handle a TARGET_WAITKIND_NO_RESUMED event.  Return true if we
   handled the event and should continue waiting.  Return false if we
   should stop and report the event to the user.  */

static bool
handle_no_resumed (struct execution_control_state *ecs)
{
  if (target_can_async_p ())
    {
      /* Whether any UI is synchronously blocked waiting for an
	 execution command to finish.  */
      bool any_sync = false;

      for (ui *ui : all_uis ())
	{
	  if (ui->prompt_state == PROMPT_BLOCKED)
	    {
	      any_sync = true;
	      break;
	    }
	}
      if (!any_sync)
	{
	  /* There were no unwaited-for children left in the target, but,
	     we're not synchronously waiting for events either.  Just
	     ignore.  */

	  infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED (ignoring: bg)");
	  prepare_to_wait (ecs);
	  return true;
	}
    }

  /* Otherwise, if we were running a synchronous execution command, we
     may need to cancel it and give the user back the terminal.

     In non-stop mode, the target can't tell whether we've already
     consumed previous stop events, so it can end up sending us a
     no-resumed event like so:

       #0 - thread 1 is left stopped

       #1 - thread 2 is resumed and hits breakpoint
	      -> TARGET_WAITKIND_STOPPED

       #2 - thread 3 is resumed and exits
	    this is the last resumed thread, so
	      -> TARGET_WAITKIND_NO_RESUMED

       #3 - gdb processes stop for thread 2 and decides to re-resume
	    it.

       #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
	    thread 2 is now resumed, so the event should be ignored.

     IOW, if the stop for thread 2 doesn't end a foreground command,
     then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
     event.  But it could be that the event meant that thread 2 itself
     (or whatever other thread was the last resumed thread) exited.

     To address this we refresh the thread list and check whether we
     have resumed threads _now_.  In the example above, this removes
     thread 3 from the thread list.  If thread 2 was re-resumed, we
     ignore this event.  If we find no thread resumed, then we cancel
     the synchronous command and show "no unwaited-for children left"
     to the user.  */

  inferior *curr_inf = current_inferior ();

  scoped_restore_current_thread restore_thread;
  update_thread_list ();

  /* If:

       - the current target has no thread executing, and
       - the current inferior is native, and
       - the current inferior is the one which has the terminal, and
       - we did nothing,

     then a Ctrl-C from this point on would remain stuck in the
     kernel, until a thread resumes and dequeues it.  That would
     result in the GDB CLI not reacting to Ctrl-C, not able to
     interrupt the program.  To address this, if the current inferior
     no longer has any thread executing, we give the terminal to some
     other inferior that has at least one thread executing.  */
  bool swap_terminal = true;

  /* Whether to ignore this TARGET_WAITKIND_NO_RESUMED event, or
     whether to report it to the user.  */
  bool ignore_event = false;

  for (thread_info *thread : all_non_exited_threads ())
    {
      /* First executing thread found on another inferior gets the
	 terminal (see the Ctrl-C rationale above).  */
      if (swap_terminal && thread->executing ())
	{
	  if (thread->inf != curr_inf)
	    {
	      target_terminal::ours ();

	      switch_to_thread (thread);
	      target_terminal::inferior ();
	    }
	  swap_terminal = false;
	}

      if (!ignore_event && thread->resumed ())
	{
	  /* Either there were no unwaited-for children left in the
	     target at some point, but there are now, or some target
	     other than the eventing one has unwaited-for children
	     left.  Just ignore.  */
	  infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED "
			       "(ignoring: found resumed)");

	  ignore_event = true;
	}

      /* Once both decisions are settled, stop scanning early.  */
      if (ignore_event && !swap_terminal)
	break;
    }

  if (ignore_event)
    {
      switch_to_inferior_no_thread (curr_inf);
      prepare_to_wait (ecs);
      return true;
    }

  /* Go ahead and report the event.  */
  return false;
}
5870
/* Handle a TARGET_WAITKIND_THREAD_EXITED event.  Return true if we
   handled the event and should continue waiting.  Return false if we
   should stop and report the event to the user.  */

static bool
handle_thread_exited (execution_control_state *ecs)
{
  context_switch (ecs);

  /* Clear these so we don't re-start the thread stepping over a
     breakpoint/watchpoint.  */
  ecs->event_thread->stepping_over_breakpoint = 0;
  ecs->event_thread->stepping_over_watchpoint = 0;

  /* If the thread had an FSM, then abort the command.  But only after
     finishing the step over, as in non-stop mode, aborting this
     thread's command should not interfere with other threads.  We
     must check this before finish_step over, however, which may
     update the thread list and delete the event thread.  */
  bool abort_cmd = (ecs->event_thread->thread_fsm () != nullptr);

  /* Mark the thread exited right now, because finish_step_over may
     update the thread list and that may delete the thread silently
     (depending on target), while we always want to emit the "[Thread
     ... exited]" notification.  Don't actually delete the thread yet,
     because we need to pass its pointer down to finish_step_over.  */
  set_thread_exited (ecs->event_thread);

  /* Maybe the thread was doing a step-over, if so release
     resources and start any further pending step-overs.

     If we are on a non-stop target and the thread was doing an
     in-line step, this also restarts the other threads.  */
  int ret = finish_step_over (ecs);

  /* finish_step_over returns true if it moves ecs' wait status
     back into the thread, so that we go handle another pending
     event before this one.  But we know it never does that if
     the event thread has exited.  */
  gdb_assert (ret == 0);

  if (abort_cmd)
    {
      /* We're stopping for the thread exit event.  Switch to the
	 event thread again, as finish_step_over may have switched
	 threads.  */
      switch_to_thread (ecs->event_thread);
      ecs->event_thread = nullptr;
      return false;
    }

  /* If finish_step_over started a new in-line step-over, don't
     try to restart anything else.  */
  if (step_over_info_valid_p ())
    {
      delete_thread (ecs->event_thread);
      return true;
    }

  /* Maybe we are on an all-stop target and we got this event
     while doing a step-like command on another thread.  If so,
     go back to doing that.  If this thread was stepping,
     switch_back_to_stepped_thread will consider that the thread
     was interrupted mid-step and will try keep stepping it.  We
     don't want that, the thread is gone.  So clear the proceed
     status so it doesn't do that.  */
  clear_proceed_status_thread (ecs->event_thread);
  if (switch_back_to_stepped_thread (ecs))
    {
      delete_thread (ecs->event_thread);
      return true;
    }

  /* Capture what we need from the event thread before deleting it
     just below.  */
  inferior *inf = ecs->event_thread->inf;
  bool slock_applies = schedlock_applies (ecs->event_thread);

  delete_thread (ecs->event_thread);
  ecs->event_thread = nullptr;

  /* Continue handling the event as if we had gotten a
     TARGET_WAITKIND_NO_RESUMED.  */
  auto handle_as_no_resumed = [ecs] ()
  {
    /* handle_no_resumed doesn't really look at the event kind, but
       normal_stop does.  */
    ecs->ws.set_no_resumed ();
    ecs->event_thread = nullptr;
    ecs->ptid = minus_one_ptid;

    /* Re-record the last target status.  */
    set_last_target_status (ecs->target, ecs->ptid, ecs->ws);

    return handle_no_resumed (ecs);
  };

  /* If we are on an all-stop target, the target has stopped all
     threads to report the event.  We don't actually want to
     stop, so restart the threads.  */
  if (!target_is_non_stop_p ())
    {
      if (slock_applies)
	{
	  /* Since the target is !non-stop, then everything is stopped
	     at this point, and we can't assume we'll get further
	     events until we resume the target again.  Handle this
	     event like if it were a TARGET_WAITKIND_NO_RESUMED.  Note
	     this refreshes the thread list and checks whether there
	     are other resumed threads before deciding whether to
	     print "no-unwaited-for left".  This is important because
	     the user could have done:

	       (gdb) set scheduler-locking on
	       (gdb) thread 1
	       (gdb) c&
	       (gdb) thread 2
	       (gdb) c

	     ... and only one of the threads exited.  */
	  return handle_as_no_resumed ();
	}
      else
	{
	  /* Switch to the first non-exited thread we can find, and
	     resume.  */
	  auto range = inf->non_exited_threads ();
	  if (range.begin () == range.end ())
	    {
	      /* Looks like the target reported a
		 TARGET_WAITKIND_THREAD_EXITED for its last known
		 thread.  */
	      return handle_as_no_resumed ();
	    }
	  thread_info *non_exited_thread = *range.begin ();
	  switch_to_thread (non_exited_thread);
	  insert_breakpoints ();
	  resume (GDB_SIGNAL_0);
	}
    }

  prepare_to_wait (ecs);
  return true;
}
6013
6014 /* Given an execution control state that has been freshly filled in by
6015 an event from the inferior, figure out what it means and take
6016 appropriate action.
6017
6018 The alternatives are:
6019
6020 1) stop_waiting and return; to really stop and return to the
6021 debugger.
6022
6023 2) keep_going and return; to wait for the next event (set
6024 ecs->event_thread->stepping_over_breakpoint to 1 to single step
6025 once). */
6026
6027 static void
6028 handle_inferior_event (struct execution_control_state *ecs)
6029 {
6030 /* Make sure that all temporary struct value objects that were
6031 created during the handling of the event get deleted at the
6032 end. */
6033 scoped_value_mark free_values;
6034
6035 infrun_debug_printf ("%s", ecs->ws.to_string ().c_str ());
6036
6037 if (ecs->ws.kind () == TARGET_WAITKIND_IGNORE)
6038 {
6039 /* We had an event in the inferior, but we are not interested in
6040 handling it at this level. The lower layers have already
6041 done what needs to be done, if anything.
6042
6043 One of the possible circumstances for this is when the
6044 inferior produces output for the console. The inferior has
6045 not stopped, and we are ignoring the event. Another possible
6046 circumstance is any event which the lower level knows will be
6047 reported multiple times without an intervening resume. */
6048 prepare_to_wait (ecs);
6049 return;
6050 }
6051
6052 if (ecs->ws.kind () == TARGET_WAITKIND_NO_RESUMED
6053 && handle_no_resumed (ecs))
6054 return;
6055
6056 /* Cache the last target/ptid/waitstatus. */
6057 set_last_target_status (ecs->target, ecs->ptid, ecs->ws);
6058
6059 /* Always clear state belonging to the previous time we stopped. */
6060 stop_stack_dummy = STOP_NONE;
6061
6062 if (ecs->ws.kind () == TARGET_WAITKIND_NO_RESUMED)
6063 {
6064 /* No unwaited-for children left. IOW, all resumed children
6065 have exited. */
6066 stop_waiting (ecs);
6067 return;
6068 }
6069
6070 if (ecs->ws.kind () != TARGET_WAITKIND_EXITED
6071 && ecs->ws.kind () != TARGET_WAITKIND_SIGNALLED)
6072 {
6073 ecs->event_thread = ecs->target->find_thread (ecs->ptid);
6074 /* If it's a new thread, add it to the thread database. */
6075 if (ecs->event_thread == nullptr)
6076 ecs->event_thread = add_thread (ecs->target, ecs->ptid);
6077
6078 /* Disable range stepping. If the next step request could use a
6079 range, this will be end up re-enabled then. */
6080 ecs->event_thread->control.may_range_step = 0;
6081 }
6082
6083 /* Dependent on valid ECS->EVENT_THREAD. */
6084 adjust_pc_after_break (ecs->event_thread, ecs->ws);
6085
6086 /* Dependent on the current PC value modified by adjust_pc_after_break. */
6087 reinit_frame_cache ();
6088
6089 breakpoint_retire_moribund ();
6090
6091 /* First, distinguish signals caused by the debugger from signals
6092 that have to do with the program's own actions. Note that
6093 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
6094 on the operating system version. Here we detect when a SIGILL or
6095 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
6096 something similar for SIGSEGV, since a SIGSEGV will be generated
6097 when we're trying to execute a breakpoint instruction on a
6098 non-executable stack. This happens for call dummy breakpoints
6099 for architectures like SPARC that place call dummies on the
6100 stack. */
6101 if (ecs->ws.kind () == TARGET_WAITKIND_STOPPED
6102 && (ecs->ws.sig () == GDB_SIGNAL_ILL
6103 || ecs->ws.sig () == GDB_SIGNAL_SEGV
6104 || ecs->ws.sig () == GDB_SIGNAL_EMT))
6105 {
6106 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
6107
6108 if (breakpoint_inserted_here_p (ecs->event_thread->inf->aspace.get (),
6109 regcache_read_pc (regcache)))
6110 {
6111 infrun_debug_printf ("Treating signal as SIGTRAP");
6112 ecs->ws.set_stopped (GDB_SIGNAL_TRAP);
6113 }
6114 }
6115
6116 mark_non_executing_threads (ecs->target, ecs->ptid, ecs->ws);
6117
6118 switch (ecs->ws.kind ())
6119 {
6120 case TARGET_WAITKIND_LOADED:
6121 {
6122 context_switch (ecs);
6123 /* Ignore gracefully during startup of the inferior, as it might
6124 be the shell which has just loaded some objects, otherwise
6125 add the symbols for the newly loaded objects. Also ignore at
6126 the beginning of an attach or remote session; we will query
6127 the full list of libraries once the connection is
6128 established. */
6129
6130 stop_kind stop_soon = get_inferior_stop_soon (ecs);
6131 if (stop_soon == NO_STOP_QUIETLY)
6132 {
6133 struct regcache *regcache;
6134
6135 regcache = get_thread_regcache (ecs->event_thread);
6136
6137 handle_solib_event ();
6138
6139 ecs->event_thread->set_stop_pc (regcache_read_pc (regcache));
6140 address_space *aspace = ecs->event_thread->inf->aspace.get ();
6141 ecs->event_thread->control.stop_bpstat
6142 = bpstat_stop_status_nowatch (aspace,
6143 ecs->event_thread->stop_pc (),
6144 ecs->event_thread, ecs->ws);
6145
6146 if (handle_stop_requested (ecs))
6147 return;
6148
6149 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
6150 {
6151 /* A catchpoint triggered. */
6152 process_event_stop_test (ecs);
6153 return;
6154 }
6155
6156 /* If requested, stop when the dynamic linker notifies
6157 gdb of events. This allows the user to get control
6158 and place breakpoints in initializer routines for
6159 dynamically loaded objects (among other things). */
6160 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6161 if (stop_on_solib_events)
6162 {
6163 /* Make sure we print "Stopped due to solib-event" in
6164 normal_stop. */
6165 stop_print_frame = true;
6166
6167 stop_waiting (ecs);
6168 return;
6169 }
6170 }
6171
6172 /* If we are skipping through a shell, or through shared library
6173 loading that we aren't interested in, resume the program. If
6174 we're running the program normally, also resume. */
6175 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
6176 {
6177 /* Loading of shared libraries might have changed breakpoint
6178 addresses. Make sure new breakpoints are inserted. */
6179 if (stop_soon == NO_STOP_QUIETLY)
6180 insert_breakpoints ();
6181 resume (GDB_SIGNAL_0);
6182 prepare_to_wait (ecs);
6183 return;
6184 }
6185
6186 /* But stop if we're attaching or setting up a remote
6187 connection. */
6188 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
6189 || stop_soon == STOP_QUIETLY_REMOTE)
6190 {
6191 infrun_debug_printf ("quietly stopped");
6192 stop_waiting (ecs);
6193 return;
6194 }
6195
6196 internal_error (_("unhandled stop_soon: %d"), (int) stop_soon);
6197 }
6198
6199 case TARGET_WAITKIND_SPURIOUS:
6200 if (handle_stop_requested (ecs))
6201 return;
6202 context_switch (ecs);
6203 resume (GDB_SIGNAL_0);
6204 prepare_to_wait (ecs);
6205 return;
6206
6207 case TARGET_WAITKIND_THREAD_CREATED:
6208 if (handle_stop_requested (ecs))
6209 return;
6210 context_switch (ecs);
6211 if (!switch_back_to_stepped_thread (ecs))
6212 keep_going (ecs);
6213 return;
6214
6215 case TARGET_WAITKIND_THREAD_EXITED:
6216 if (handle_thread_exited (ecs))
6217 return;
6218 stop_waiting (ecs);
6219 break;
6220
6221 case TARGET_WAITKIND_EXITED:
6222 case TARGET_WAITKIND_SIGNALLED:
6223 {
6224 /* Depending on the system, ecs->ptid may point to a thread or
6225 to a process. On some targets, target_mourn_inferior may
6226 need to have access to the just-exited thread. That is the
6227 case of GNU/Linux's "checkpoint" support, for example.
6228 Call the switch_to_xxx routine as appropriate. */
6229 thread_info *thr = ecs->target->find_thread (ecs->ptid);
6230 if (thr != nullptr)
6231 switch_to_thread (thr);
6232 else
6233 {
6234 inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
6235 switch_to_inferior_no_thread (inf);
6236 }
6237 }
6238 handle_vfork_child_exec_or_exit (0);
6239 target_terminal::ours (); /* Must do this before mourn anyway. */
6240
6241 /* Clearing any previous state of convenience variables. */
6242 clear_exit_convenience_vars ();
6243
6244 if (ecs->ws.kind () == TARGET_WAITKIND_EXITED)
6245 {
6246 /* Record the exit code in the convenience variable $_exitcode, so
6247 that the user can inspect this again later. */
6248 set_internalvar_integer (lookup_internalvar ("_exitcode"),
6249 (LONGEST) ecs->ws.exit_status ());
6250
6251 /* Also record this in the inferior itself. */
6252 current_inferior ()->has_exit_code = true;
6253 current_inferior ()->exit_code = (LONGEST) ecs->ws.exit_status ();
6254
6255 /* Support the --return-child-result option. */
6256 return_child_result_value = ecs->ws.exit_status ();
6257
6258 interps_notify_exited (ecs->ws.exit_status ());
6259 }
6260 else
6261 {
6262 struct gdbarch *gdbarch = current_inferior ()->arch ();
6263
6264 if (gdbarch_gdb_signal_to_target_p (gdbarch))
6265 {
6266 /* Set the value of the internal variable $_exitsignal,
6267 which holds the signal uncaught by the inferior. */
6268 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
6269 gdbarch_gdb_signal_to_target (gdbarch,
6270 ecs->ws.sig ()));
6271 }
6272 else
6273 {
6274 /* We don't have access to the target's method used for
6275 converting between signal numbers (GDB's internal
6276 representation <-> target's representation).
6277 Therefore, we cannot do a good job at displaying this
6278 information to the user. It's better to just warn
6279 her about it (if infrun debugging is enabled), and
6280 give up. */
6281 infrun_debug_printf ("Cannot fill $_exitsignal with the correct "
6282 "signal number.");
6283 }
6284
6285 interps_notify_signal_exited (ecs->ws.sig ());
6286 }
6287
6288 gdb_flush (gdb_stdout);
6289 target_mourn_inferior (inferior_ptid);
6290 stop_print_frame = false;
6291 stop_waiting (ecs);
6292 return;
6293
6294 case TARGET_WAITKIND_FORKED:
6295 case TARGET_WAITKIND_VFORKED:
6296 case TARGET_WAITKIND_THREAD_CLONED:
6297
6298 displaced_step_finish (ecs->event_thread, ecs->ws);
6299
6300 /* Start a new step-over in another thread if there's one that
6301 needs it. */
6302 start_step_over ();
6303
6304 context_switch (ecs);
6305
6306 /* Immediately detach breakpoints from the child before there's
6307 any chance of letting the user delete breakpoints from the
6308 breakpoint lists. If we don't do this early, it's easy to
6309 leave left over traps in the child, vis: "break foo; catch
6310 fork; c; <fork>; del; c; <child calls foo>". We only follow
6311 the fork on the last `continue', and by that time the
6312 breakpoint at "foo" is long gone from the breakpoint table.
6313 If we vforked, then we don't need to unpatch here, since both
6314 parent and child are sharing the same memory pages; we'll
6315 need to unpatch at follow/detach time instead to be certain
6316 that new breakpoints added between catchpoint hit time and
6317 vfork follow are detached. */
6318 if (ecs->ws.kind () == TARGET_WAITKIND_FORKED)
6319 {
6320 /* This won't actually modify the breakpoint list, but will
6321 physically remove the breakpoints from the child. */
6322 detach_breakpoints (ecs->ws.child_ptid ());
6323 }
6324
6325 delete_just_stopped_threads_single_step_breakpoints ();
6326
6327 /* In case the event is caught by a catchpoint, remember that
6328 the event is to be followed at the next resume of the thread,
6329 and not immediately. */
6330 ecs->event_thread->pending_follow = ecs->ws;
6331
6332 ecs->event_thread->set_stop_pc
6333 (regcache_read_pc (get_thread_regcache (ecs->event_thread)));
6334
6335 ecs->event_thread->control.stop_bpstat
6336 = bpstat_stop_status_nowatch (ecs->event_thread->inf->aspace.get (),
6337 ecs->event_thread->stop_pc (),
6338 ecs->event_thread, ecs->ws);
6339
6340 if (handle_stop_requested (ecs))
6341 return;
6342
6343 /* If no catchpoint triggered for this, then keep going. Note
6344 that we're interested in knowing the bpstat actually causes a
6345 stop, not just if it may explain the signal. Software
6346 watchpoints, for example, always appear in the bpstat. */
6347 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
6348 {
6349 bool follow_child
6350 = (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED
6351 && follow_fork_mode_string == follow_fork_mode_child);
6352
6353 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6354
6355 process_stratum_target *targ
6356 = ecs->event_thread->inf->process_target ();
6357
6358 bool should_resume;
6359 if (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED)
6360 should_resume = follow_fork ();
6361 else
6362 {
6363 should_resume = true;
6364 inferior *inf = ecs->event_thread->inf;
6365 inf->top_target ()->follow_clone (ecs->ws.child_ptid ());
6366 ecs->event_thread->pending_follow.set_spurious ();
6367 }
6368
6369 /* Note that one of these may be an invalid pointer,
6370 depending on detach_fork. */
6371 thread_info *parent = ecs->event_thread;
6372 thread_info *child = targ->find_thread (ecs->ws.child_ptid ());
6373
6374 /* At this point, the parent is marked running, and the
6375 child is marked stopped. */
6376
6377 /* If not resuming the parent, mark it stopped. */
6378 if (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED
6379 && follow_child && !detach_fork && !non_stop && !sched_multi)
6380 parent->set_running (false);
6381
6382 /* If resuming the child, mark it running. */
6383 if ((ecs->ws.kind () == TARGET_WAITKIND_THREAD_CLONED
6384 && !schedlock_applies (ecs->event_thread))
6385 || (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED
6386 && (follow_child
6387 || (!detach_fork && (non_stop || sched_multi)))))
6388 child->set_running (true);
6389
6390 /* In non-stop mode, also resume the other branch. */
6391 if ((ecs->ws.kind () == TARGET_WAITKIND_THREAD_CLONED
6392 && target_is_non_stop_p ()
6393 && !schedlock_applies (ecs->event_thread))
6394 || (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED
6395 && (!detach_fork && (non_stop
6396 || (sched_multi
6397 && target_is_non_stop_p ())))))
6398 {
6399 if (follow_child)
6400 switch_to_thread (parent);
6401 else
6402 switch_to_thread (child);
6403
6404 ecs->event_thread = inferior_thread ();
6405 ecs->ptid = inferior_ptid;
6406 keep_going (ecs);
6407 }
6408
6409 if (follow_child)
6410 switch_to_thread (child);
6411 else
6412 switch_to_thread (parent);
6413
6414 ecs->event_thread = inferior_thread ();
6415 ecs->ptid = inferior_ptid;
6416
6417 if (should_resume)
6418 {
6419 /* Never call switch_back_to_stepped_thread if we are waiting for
6420 vfork-done (waiting for an external vfork child to exec or
6421 exit). We will resume only the vforking thread for the purpose
6422 of collecting the vfork-done event, and we will restart any
6423 step once the critical shared address space window is done. */
6424 if ((!follow_child
6425 && detach_fork
6426 && parent->inf->thread_waiting_for_vfork_done != nullptr)
6427 || !switch_back_to_stepped_thread (ecs))
6428 keep_going (ecs);
6429 }
6430 else
6431 stop_waiting (ecs);
6432 return;
6433 }
6434 process_event_stop_test (ecs);
6435 return;
6436
6437 case TARGET_WAITKIND_VFORK_DONE:
6438 /* Done with the shared memory region. Re-insert breakpoints in
6439 the parent, and keep going. */
6440
6441 context_switch (ecs);
6442
6443 handle_vfork_done (ecs->event_thread);
6444 gdb_assert (inferior_thread () == ecs->event_thread);
6445
6446 if (handle_stop_requested (ecs))
6447 return;
6448
6449 if (!switch_back_to_stepped_thread (ecs))
6450 {
6451 gdb_assert (inferior_thread () == ecs->event_thread);
6452 /* This also takes care of reinserting breakpoints in the
6453 previously locked inferior. */
6454 keep_going (ecs);
6455 }
6456 return;
6457
6458 case TARGET_WAITKIND_EXECD:
6459
6460 /* Note we can't read registers yet (the stop_pc), because we
6461 don't yet know the inferior's post-exec architecture.
6462 'stop_pc' is explicitly read below instead. */
6463 switch_to_thread_no_regs (ecs->event_thread);
6464
6465 /* Do whatever is necessary to the parent branch of the vfork. */
6466 handle_vfork_child_exec_or_exit (1);
6467
6468 /* This causes the eventpoints and symbol table to be reset.
6469 Must do this now, before trying to determine whether to
6470 stop. */
6471 follow_exec (inferior_ptid, ecs->ws.execd_pathname ());
6472
6473 /* In follow_exec we may have deleted the original thread and
6474 created a new one. Make sure that the event thread is the
6475 execd thread for that case (this is a nop otherwise). */
6476 ecs->event_thread = inferior_thread ();
6477
6478 ecs->event_thread->set_stop_pc
6479 (regcache_read_pc (get_thread_regcache (ecs->event_thread)));
6480
6481 ecs->event_thread->control.stop_bpstat
6482 = bpstat_stop_status_nowatch (ecs->event_thread->inf->aspace.get (),
6483 ecs->event_thread->stop_pc (),
6484 ecs->event_thread, ecs->ws);
6485
6486 if (handle_stop_requested (ecs))
6487 return;
6488
6489 /* If no catchpoint triggered for this, then keep going. */
6490 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
6491 {
6492 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6493 keep_going (ecs);
6494 return;
6495 }
6496 process_event_stop_test (ecs);
6497 return;
6498
6499 /* Be careful not to try to gather much state about a thread
6500 that's in a syscall. It's frequently a losing proposition. */
6501 case TARGET_WAITKIND_SYSCALL_ENTRY:
6502 /* Getting the current syscall number. */
6503 if (handle_syscall_event (ecs) == 0)
6504 process_event_stop_test (ecs);
6505 return;
6506
6507 /* Before examining the threads further, step this thread to
6508 get it entirely out of the syscall. (We get notice of the
6509 event when the thread is just on the verge of exiting a
6510 syscall. Stepping one instruction seems to get it back
6511 into user code.) */
6512 case TARGET_WAITKIND_SYSCALL_RETURN:
6513 if (handle_syscall_event (ecs) == 0)
6514 process_event_stop_test (ecs);
6515 return;
6516
6517 case TARGET_WAITKIND_STOPPED:
6518 handle_signal_stop (ecs);
6519 return;
6520
6521 case TARGET_WAITKIND_NO_HISTORY:
6522 /* Reverse execution: target ran out of history info. */
6523
6524 /* Switch to the stopped thread. */
6525 context_switch (ecs);
6526 infrun_debug_printf ("stopped");
6527
6528 delete_just_stopped_threads_single_step_breakpoints ();
6529 ecs->event_thread->set_stop_pc
6530 (regcache_read_pc (get_thread_regcache (inferior_thread ())));
6531
6532 if (handle_stop_requested (ecs))
6533 return;
6534
6535 interps_notify_no_history ();
6536 stop_waiting (ecs);
6537 return;
6538 }
6539 }
6540
6541 /* Restart threads back to what they were trying to do back when we
6542 paused them (because of an in-line step-over or vfork, for example).
6543 The EVENT_THREAD thread is ignored (not restarted).
6544
6545 If INF is non-nullptr, only resume threads from INF. */
6546
6547 static void
6548 restart_threads (struct thread_info *event_thread, inferior *inf)
6549 {
6550 INFRUN_SCOPED_DEBUG_START_END ("event_thread=%s, inf=%d",
6551 event_thread->ptid.to_string ().c_str (),
6552 inf != nullptr ? inf->num : -1);
6553
6554 gdb_assert (!step_over_info_valid_p ());
6555
6556 /* In case the instruction just stepped spawned a new thread. */
6557 update_thread_list ();
6558
6559 for (thread_info *tp : all_non_exited_threads ())
6560 {
6561 if (inf != nullptr && tp->inf != inf)
6562 continue;
6563
6564 if (tp->inf->detaching)
6565 {
6566 infrun_debug_printf ("restart threads: [%s] inferior detaching",
6567 tp->ptid.to_string ().c_str ());
6568 continue;
6569 }
6570
6571 switch_to_thread_no_regs (tp);
6572
6573 if (tp == event_thread)
6574 {
6575 infrun_debug_printf ("restart threads: [%s] is event thread",
6576 tp->ptid.to_string ().c_str ());
6577 continue;
6578 }
6579
6580 if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
6581 {
6582 infrun_debug_printf ("restart threads: [%s] not meant to be running",
6583 tp->ptid.to_string ().c_str ());
6584 continue;
6585 }
6586
6587 if (tp->resumed ())
6588 {
6589 infrun_debug_printf ("restart threads: [%s] resumed",
6590 tp->ptid.to_string ().c_str ());
6591 gdb_assert (tp->executing () || tp->has_pending_waitstatus ());
6592 continue;
6593 }
6594
6595 if (thread_is_in_step_over_chain (tp))
6596 {
6597 infrun_debug_printf ("restart threads: [%s] needs step-over",
6598 tp->ptid.to_string ().c_str ());
6599 gdb_assert (!tp->resumed ());
6600 continue;
6601 }
6602
6603
6604 if (tp->has_pending_waitstatus ())
6605 {
6606 infrun_debug_printf ("restart threads: [%s] has pending status",
6607 tp->ptid.to_string ().c_str ());
6608 tp->set_resumed (true);
6609 continue;
6610 }
6611
6612 gdb_assert (!tp->stop_requested);
6613
6614 /* If some thread needs to start a step-over at this point, it
6615 should still be in the step-over queue, and thus skipped
6616 above. */
6617 if (thread_still_needs_step_over (tp))
6618 {
6619 internal_error ("thread [%s] needs a step-over, but not in "
6620 "step-over queue\n",
6621 tp->ptid.to_string ().c_str ());
6622 }
6623
6624 if (currently_stepping (tp))
6625 {
6626 infrun_debug_printf ("restart threads: [%s] was stepping",
6627 tp->ptid.to_string ().c_str ());
6628 keep_going_stepped_thread (tp);
6629 }
6630 else
6631 {
6632 infrun_debug_printf ("restart threads: [%s] continuing",
6633 tp->ptid.to_string ().c_str ());
6634 execution_control_state ecs (tp);
6635 switch_to_thread (tp);
6636 keep_going_pass_signal (&ecs);
6637 }
6638 }
6639 }
6640
6641 /* Callback for iterate_over_threads. Find a resumed thread that has
6642 a pending waitstatus. */
6643
6644 static int
6645 resumed_thread_with_pending_status (struct thread_info *tp,
6646 void *arg)
6647 {
6648 return tp->resumed () && tp->has_pending_waitstatus ();
6649 }
6650
6651 /* Called when we get an event that may finish an in-line or
6652 out-of-line (displaced stepping) step-over started previously.
6653 Return true if the event is processed and we should go back to the
6654 event loop; false if the caller should continue processing the
6655 event. */
6656
static int
finish_step_over (struct execution_control_state *ecs)
{
  /* Finish any displaced (out-of-line) step the event thread was
     doing; this may adjust the thread's PC away from the scratch
     pad.  */
  displaced_step_finish (ecs->event_thread, ecs->ws);

  /* Remember whether an in-line step-over (all other threads held
     stopped) was in progress before we clear the global state
     below.  */
  bool had_step_over_info = step_over_info_valid_p ();

  if (had_step_over_info)
    {
      /* If we're stepping over a breakpoint with all threads locked,
	 then only the thread that was stepped should be reporting
	 back an event.  */
      gdb_assert (ecs->event_thread->control.trap_expected);

      update_thread_events_after_step_over (ecs->event_thread, ecs->ws);

      clear_step_over_info ();
    }

  /* The remainder only applies in non-stop mode, where other threads
     can be restarted independently.  */
  if (!target_is_non_stop_p ())
    return 0;

  /* Start a new step-over in another thread if there's one that
     needs it.  */
  start_step_over ();

  /* If we were stepping over a breakpoint before, and haven't started
     a new in-line step-over sequence, then restart all other threads
     (except the event thread).  We can't do this in all-stop, as then
     e.g., we wouldn't be able to issue any other remote packet until
     these other threads stop.  */
  if (had_step_over_info && !step_over_info_valid_p ())
    {
      struct thread_info *pending;

      /* If we only have threads with pending statuses, the restart
	 below won't restart any thread and so nothing re-inserts the
	 breakpoint we just stepped over.  But we need it inserted
	 when we later process the pending events, otherwise if
	 another thread has a pending event for this breakpoint too,
	 we'd discard its event (because the breakpoint that
	 originally caused the event was no longer inserted).  */
      context_switch (ecs);
      insert_breakpoints ();

      restart_threads (ecs->event_thread);

      /* If we have events pending, go through handle_inferior_event
	 again, picking up a pending event at random.  This avoids
	 thread starvation.  */

      /* But not if we just stepped over a watchpoint in order to let
	 the instruction execute so we can evaluate its expression.
	 The set of watchpoints that triggered is recorded in the
	 breakpoint objects themselves (see bp->watchpoint_triggered).
	 If we processed another event first, that other event could
	 clobber this info.  */
      if (ecs->event_thread->stepping_over_watchpoint)
	return 0;

      /* The code below is meant to avoid one thread hogging the event
	 loop by doing constant in-line step overs.  If the stepping
	 thread exited, there's no risk for this to happen, so we can
	 safely let our caller process the event immediately.  */
      if (ecs->ws.kind () == TARGET_WAITKIND_THREAD_EXITED)
	return 0;

      pending = iterate_over_threads (resumed_thread_with_pending_status,
				      nullptr);
      if (pending != nullptr)
	{
	  struct thread_info *tp = ecs->event_thread;
	  struct regcache *regcache;

	  infrun_debug_printf ("found resumed threads with "
			       "pending events, saving status");

	  gdb_assert (pending != tp);

	  /* Record the event thread's event for later.  */
	  save_waitstatus (tp, ecs->ws);
	  /* This was cleared early, by handle_inferior_event.  Set it
	     so this pending event is considered by
	     do_target_wait.  */
	  tp->set_resumed (true);

	  gdb_assert (!tp->executing ());

	  regcache = get_thread_regcache (tp);
	  tp->set_stop_pc (regcache_read_pc (regcache));

	  infrun_debug_printf ("saved stop_pc=%s for %s "
			       "(currently_stepping=%d)",
			       paddress (current_inferior ()->arch (),
					 tp->stop_pc ()),
			       tp->ptid.to_string ().c_str (),
			       currently_stepping (tp));

	  /* This in-line step-over finished; clear this so we won't
	     start a new one.  This is what handle_signal_stop would
	     do, if we returned false.  */
	  tp->stepping_over_breakpoint = 0;

	  /* Wake up the event loop again.  */
	  mark_async_event_handler (infrun_async_inferior_event_token);

	  prepare_to_wait (ecs);
	  return 1;
	}
    }

  return 0;
}
6770
6771 /* See infrun.h. */
6772
void
notify_signal_received (gdb_signal sig)
{
  /* Tell the active user interfaces (interpreters) about SIG first,
     then run the internal signal_received observers.  */
  interps_notify_signal_received (sig);
  gdb::observers::signal_received.notify (sig);
}
6779
6780 /* See infrun.h. */
6781
void
notify_normal_stop (bpstat *bs, int print_frame)
{
  /* Tell the active user interfaces (interpreters) about the stop
     first, then run the internal normal_stop observers.  */
  interps_notify_normal_stop (bs, print_frame);
  gdb::observers::normal_stop.notify (bs, print_frame);
}
6788
6789 /* See infrun.h. */
6790
6791 void notify_user_selected_context_changed (user_selected_what selection)
6792 {
6793 interps_notify_user_selected_context_changed (selection);
6794 gdb::observers::user_selected_context_changed.notify (selection);
6795 }
6796
6797 /* Come here when the program has stopped with a signal. */
6798
static void
handle_signal_stop (struct execution_control_state *ecs)
{
  frame_info_ptr frame;
  struct gdbarch *gdbarch;
  int stopped_by_watchpoint;	/* Non-zero if a watchpoint triggered.  */
  enum stop_kind stop_soon;	/* Whether to stop quietly, and why.  */
  int random_signal;		/* Non-zero if nothing explains the signal.  */

  gdb_assert (ecs->ws.kind () == TARGET_WAITKIND_STOPPED);

  ecs->event_thread->set_stop_signal (ecs->ws.sig ());

  /* Do we need to clean up the state of a thread that has
     completed a displaced single-step?  (Doing so usually affects
     the PC, so do it here, before we set stop_pc.)  */
  if (finish_step_over (ecs))
    return;

  /* If we either finished a single-step or hit a breakpoint, but
     the user wanted this thread to be stopped, pretend we got a
     SIG0 (generic unsignaled stop).  */
  if (ecs->event_thread->stop_requested
      && ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
    ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);

  ecs->event_thread->set_stop_pc
    (regcache_read_pc (get_thread_regcache (ecs->event_thread)));

  context_switch (ecs);

  if (deprecated_context_hook)
    deprecated_context_hook (ecs->event_thread->global_num);

  /* Dump the stop PC, and watchpoint data address if any, when infrun
     debugging is enabled.  */
  if (debug_infrun)
    {
      struct regcache *regcache = get_thread_regcache (ecs->event_thread);
      struct gdbarch *reg_gdbarch = regcache->arch ();

      infrun_debug_printf
	("stop_pc=%s", paddress (reg_gdbarch, ecs->event_thread->stop_pc ()));
      if (target_stopped_by_watchpoint ())
	{
	  CORE_ADDR addr;

	  infrun_debug_printf ("stopped by watchpoint");

	  if (target_stopped_data_address (current_inferior ()->top_target (),
					   &addr))
	    infrun_debug_printf ("stopped data address=%s",
				 paddress (reg_gdbarch, addr));
	  else
	    infrun_debug_printf ("(no data address available)");
	}
    }

  /* This is originated from start_remote(), start_inferior() and
     shared libraries hook functions.  */
  stop_soon = get_inferior_stop_soon (ecs);
  if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
    {
      infrun_debug_printf ("quietly stopped");
      stop_print_frame = true;
      stop_waiting (ecs);
      return;
    }

  /* This originates from attach_command().  We need to overwrite
     the stop_signal here, because some kernels don't ignore a
     SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
     See more comments in inferior.h.  On the other hand, if we
     get a non-SIGSTOP, report it to the user - assume the backend
     will handle the SIGSTOP if it should show up later.

     Also consider that the attach is complete when we see a
     SIGTRAP.  Some systems (e.g. Windows), and stubs supporting
     target extended-remote report it instead of a SIGSTOP
     (e.g. gdbserver).  We already rely on SIGTRAP being our
     signal, so this is no exception.

     Also consider that the attach is complete when we see a
     GDB_SIGNAL_0.  In non-stop mode, GDB will explicitly tell
     the target to stop all threads of the inferior, in case the
     low level attach operation doesn't stop them implicitly.  If
     they weren't stopped implicitly, then the stub will report a
     GDB_SIGNAL_0, meaning: stopped for no particular reason
     other than GDB's request.  */
  if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
      && (ecs->event_thread->stop_signal () == GDB_SIGNAL_STOP
	  || ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
	  || ecs->event_thread->stop_signal () == GDB_SIGNAL_0))
    {
      stop_print_frame = true;
      stop_waiting (ecs);
      ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
      return;
    }

  /* At this point, get hold of the now-current thread's frame.  */
  frame = get_current_frame ();
  gdbarch = get_frame_arch (frame);

  /* Pull the single step breakpoints out of the target.  */
  if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
    {
      struct regcache *regcache;
      CORE_ADDR pc;

      regcache = get_thread_regcache (ecs->event_thread);
      const address_space *aspace = ecs->event_thread->inf->aspace.get ();

      pc = regcache_read_pc (regcache);

      /* However, before doing so, if this single-step breakpoint was
	 actually for another thread, set this thread up for moving
	 past it.  */
      if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
						   aspace, pc))
	{
	  if (single_step_breakpoint_inserted_here_p (aspace, pc))
	    {
	      infrun_debug_printf ("[%s] hit another thread's single-step "
				   "breakpoint",
				   ecs->ptid.to_string ().c_str ());
	      ecs->hit_singlestep_breakpoint = 1;
	    }
	}
      else
	{
	  infrun_debug_printf ("[%s] hit its single-step breakpoint",
			       ecs->ptid.to_string ().c_str ());
	}
    }
  delete_just_stopped_threads_single_step_breakpoints ();

  /* A trap while stepping over a watchpoint is expected and is not a
     watchpoint hit; otherwise ask the target which watchpoints
     triggered.  */
  if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
      && ecs->event_thread->control.trap_expected
      && ecs->event_thread->stepping_over_watchpoint)
    stopped_by_watchpoint = 0;
  else
    stopped_by_watchpoint = watchpoints_triggered (ecs->ws);

  /* If necessary, step over this watchpoint.  We'll be back to display
     it in a moment.  */
  if (stopped_by_watchpoint
      && (target_have_steppable_watchpoint ()
	  || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
    {
      /* At this point, we are stopped at an instruction which has
	 attempted to write to a piece of memory under control of
	 a watchpoint.  The instruction hasn't actually executed
	 yet.  If we were to evaluate the watchpoint expression
	 now, we would get the old value, and therefore no change
	 would seem to have occurred.

	 In order to make watchpoints work `right', we really need
	 to complete the memory write, and then evaluate the
	 watchpoint expression.  We do this by single-stepping the
	 target.

	 It may not be necessary to disable the watchpoint to step over
	 it.  For example, the PA can (with some kernel cooperation)
	 single step over a watchpoint without disabling the watchpoint.

	 It is far more common to need to disable a watchpoint to step
	 the inferior over it.  If we have non-steppable watchpoints,
	 we must disable the current watchpoint; it's simplest to
	 disable all watchpoints.

	 Any breakpoint at PC must also be stepped over -- if there's
	 one, it will have already triggered before the watchpoint
	 triggered, and we either already reported it to the user, or
	 it didn't cause a stop and we called keep_going.  In either
	 case, if there was a breakpoint at PC, we must be trying to
	 step past it.  */
      ecs->event_thread->stepping_over_watchpoint = 1;
      keep_going (ecs);
      return;
    }

  /* Reset per-stop state before computing the stop bpstat below.  */
  ecs->event_thread->stepping_over_breakpoint = 0;
  ecs->event_thread->stepping_over_watchpoint = 0;
  bpstat_clear (&ecs->event_thread->control.stop_bpstat);
  ecs->event_thread->control.stop_step = 0;
  stop_print_frame = true;
  stopped_by_random_signal = 0;
  bpstat *stop_chain = nullptr;

  /* Hide inlined functions starting here, unless we just performed stepi or
     nexti.  After stepi and nexti, always show the innermost frame (not any
     inline function call sites).  */
  if (ecs->event_thread->control.step_range_end != 1)
    {
      const address_space *aspace = ecs->event_thread->inf->aspace.get ();

      /* skip_inline_frames is expensive, so we avoid it if we can
	 determine that the address is one where functions cannot have
	 been inlined.  This improves performance with inferiors that
	 load a lot of shared libraries, because the solib event
	 breakpoint is defined as the address of a function (i.e. not
	 inline).  Note that we have to check the previous PC as well
	 as the current one to catch cases when we have just
	 single-stepped off a breakpoint prior to reinstating it.
	 Note that we're assuming that the code we single-step to is
	 not inline, but that's not definitive: there's nothing
	 preventing the event breakpoint function from containing
	 inlined code, and the single-step ending up there.  If the
	 user had set a breakpoint on that inlined code, the missing
	 skip_inline_frames call would break things.  Fortunately
	 that's an extremely unlikely scenario.  */
      if (!pc_at_non_inline_function (aspace,
				      ecs->event_thread->stop_pc (),
				      ecs->ws)
	  && !(ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
	       && ecs->event_thread->control.trap_expected
	       && pc_at_non_inline_function (aspace,
					     ecs->event_thread->prev_pc,
					     ecs->ws)))
	{
	  stop_chain = build_bpstat_chain (aspace,
					   ecs->event_thread->stop_pc (),
					   ecs->ws);
	  skip_inline_frames (ecs->event_thread, stop_chain);

	  /* Re-fetch current thread's frame in case that invalidated
	     the frame cache.  */
	  frame = get_current_frame ();
	  gdbarch = get_frame_arch (frame);
	}
    }

  if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
      && ecs->event_thread->control.trap_expected
      && gdbarch_single_step_through_delay_p (gdbarch)
      && currently_stepping (ecs->event_thread))
    {
      /* We're trying to step off a breakpoint.  Turns out that we're
	 also on an instruction that needs to be stepped multiple
	 times before it's been fully executing.  E.g., architectures
	 with a delay slot.  It needs to be stepped twice, once for
	 the instruction and once for the delay slot.  */
      int step_through_delay
	= gdbarch_single_step_through_delay (gdbarch, frame);

      if (step_through_delay)
	infrun_debug_printf ("step through delay");

      if (ecs->event_thread->control.step_range_end == 0
	  && step_through_delay)
	{
	  /* The user issued a continue when stopped at a breakpoint.
	     Set up for another trap and get out of here.  */
	  ecs->event_thread->stepping_over_breakpoint = 1;
	  keep_going (ecs);
	  return;
	}
      else if (step_through_delay)
	{
	  /* The user issued a step when stopped at a breakpoint.
	     Maybe we should stop, maybe we should not - the delay
	     slot *might* correspond to a line of source.  In any
	     case, don't decide that here, just set
	     ecs->stepping_over_breakpoint, making sure we
	     single-step again before breakpoints are re-inserted.  */
	  ecs->event_thread->stepping_over_breakpoint = 1;
	}
    }

  /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
     handles this event.  */
  ecs->event_thread->control.stop_bpstat
    = bpstat_stop_status (ecs->event_thread->inf->aspace.get (),
			  ecs->event_thread->stop_pc (),
			  ecs->event_thread, ecs->ws, stop_chain);

  /* Following in case break condition called a
     function.  */
  stop_print_frame = true;

  /* This is where we handle "moribund" watchpoints.  Unlike
     software breakpoints traps, hardware watchpoint traps are
     always distinguishable from random traps.  If no high-level
     watchpoint is associated with the reported stop data address
     anymore, then the bpstat does not explain the signal ---
     simply make sure to ignore it if `stopped_by_watchpoint' is
     set.  */

  if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
      && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
				  GDB_SIGNAL_TRAP)
      && stopped_by_watchpoint)
    {
      infrun_debug_printf ("no user watchpoint explains watchpoint SIGTRAP, "
			   "ignoring");
    }

  /* NOTE: cagney/2003-03-29: These checks for a random signal
     at one stage in the past included checks for an inferior
     function call's call dummy's return breakpoint.  The original
     comment, that went with the test, read:

     ``End of a stack dummy.  Some systems (e.g. Sony news) give
     another signal besides SIGTRAP, so check here as well as
     above.''

     If someone ever tries to get call dummys on a
     non-executable stack to work (where the target would stop
     with something like a SIGSEGV), then those tests might need
     to be re-instated.  Given, however, that the tests were only
     enabled when momentary breakpoints were not being used, I
     suspect that it won't be the case.

     NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
     be necessary for call dummies on a non-executable stack on
     SPARC.  */

  /* See if the breakpoints module can explain the signal.  */
  random_signal
    = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
			       ecs->event_thread->stop_signal ());

  /* Maybe this was a trap for a software breakpoint that has since
     been removed.  */
  if (random_signal && target_stopped_by_sw_breakpoint ())
    {
      if (gdbarch_program_breakpoint_here_p (gdbarch,
					     ecs->event_thread->stop_pc ()))
	{
	  struct regcache *regcache;
	  int decr_pc;

	  /* Re-adjust PC to what the program would see if GDB was not
	     debugging it.  */
	  regcache = get_thread_regcache (ecs->event_thread);
	  decr_pc = gdbarch_decr_pc_after_break (gdbarch);
	  if (decr_pc != 0)
	    {
	      std::optional<scoped_restore_tmpl<int>>
		restore_operation_disable;

	      if (record_full_is_used ())
		restore_operation_disable.emplace
		  (record_full_gdb_operation_disable_set ());

	      regcache_write_pc (regcache,
				 ecs->event_thread->stop_pc () + decr_pc);
	    }
	}
      else
	{
	  /* A delayed software breakpoint event.  Ignore the trap.  */
	  infrun_debug_printf ("delayed software breakpoint trap, ignoring");
	  random_signal = 0;
	}
    }

  /* Maybe this was a trap for a hardware breakpoint/watchpoint that
     has since been removed.  */
  if (random_signal && target_stopped_by_hw_breakpoint ())
    {
      /* A delayed hardware breakpoint event.  Ignore the trap.  */
      infrun_debug_printf ("delayed hardware breakpoint/watchpoint "
			   "trap, ignoring");
      random_signal = 0;
    }

  /* If not, perhaps stepping/nexting can.  */
  if (random_signal)
    random_signal = !(ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
		      && currently_stepping (ecs->event_thread));

  /* Perhaps the thread hit a single-step breakpoint of _another_
     thread.  Single-step breakpoints are transparent to the
     breakpoints module.  */
  if (random_signal)
    random_signal = !ecs->hit_singlestep_breakpoint;

  /* No?  Perhaps we got a moribund watchpoint.  */
  if (random_signal)
    random_signal = !stopped_by_watchpoint;

  /* Always stop if the user explicitly requested this thread to
     remain stopped.  */
  if (ecs->event_thread->stop_requested)
    {
      random_signal = 1;
      infrun_debug_printf ("user-requested stop");
    }

  /* For the program's own signals, act according to
     the signal handling tables.  */

  if (random_signal)
    {
      /* Signal not for debugging purposes.  */
      enum gdb_signal stop_signal = ecs->event_thread->stop_signal ();

      infrun_debug_printf ("random signal (%s)",
			   gdb_signal_to_symbol_string (stop_signal));

      stopped_by_random_signal = 1;

      /* Always stop on signals if we're either just gaining control
	 of the program, or the user explicitly requested this thread
	 to remain stopped.  */
      if (stop_soon != NO_STOP_QUIETLY
	  || ecs->event_thread->stop_requested
	  || signal_stop_state (ecs->event_thread->stop_signal ()))
	{
	  stop_waiting (ecs);
	  return;
	}

      /* Notify observers the signal has "handle print" set.  Note we
	 returned early above if stopping; normal_stop handles the
	 printing in that case.  */
      if (signal_print[ecs->event_thread->stop_signal ()])
	{
	  /* The signal table tells us to print about this signal.  */
	  target_terminal::ours_for_output ();
	  notify_signal_received (ecs->event_thread->stop_signal ());
	  target_terminal::inferior ();
	}

      /* Clear the signal if it should not be passed.  */
      if (signal_program[ecs->event_thread->stop_signal ()] == 0)
	ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);

      if (ecs->event_thread->prev_pc == ecs->event_thread->stop_pc ()
	  && ecs->event_thread->control.trap_expected
	  && ecs->event_thread->control.step_resume_breakpoint == nullptr)
	{
	  /* We were just starting a new sequence, attempting to
	     single-step off of a breakpoint and expecting a SIGTRAP.
	     Instead this signal arrives.  This signal will take us out
	     of the stepping range so GDB needs to remember to, when
	     the signal handler returns, resume stepping off that
	     breakpoint.  */
	  /* To simplify things, "continue" is forced to use the same
	     code paths as single-step - set a breakpoint at the
	     signal return address and then, once hit, step off that
	     breakpoint.  */
	  infrun_debug_printf ("signal arrived while stepping over breakpoint");

	  insert_hp_step_resume_breakpoint_at_frame (frame);
	  ecs->event_thread->step_after_step_resume_breakpoint = 1;
	  /* Reset trap_expected to ensure breakpoints are re-inserted.  */
	  ecs->event_thread->control.trap_expected = 0;

	  /* If we were nexting/stepping some other thread, switch to
	     it, so that we don't continue it, losing control.  */
	  if (!switch_back_to_stepped_thread (ecs))
	    keep_going (ecs);
	  return;
	}

      if (ecs->event_thread->stop_signal () != GDB_SIGNAL_0
	  && (pc_in_thread_step_range (ecs->event_thread->stop_pc (),
				       ecs->event_thread)
	      || ecs->event_thread->control.step_range_end == 1)
	  && (get_stack_frame_id (frame)
	      == ecs->event_thread->control.step_stack_frame_id)
	  && ecs->event_thread->control.step_resume_breakpoint == nullptr)
	{
	  /* The inferior is about to take a signal that will take it
	     out of the single step range.  Set a breakpoint at the
	     current PC (which is presumably where the signal handler
	     will eventually return) and then allow the inferior to
	     run free.

	     Note that this is only needed for a signal delivered
	     while in the single-step range.  Nested signals aren't a
	     problem as they eventually all return.  */
	  infrun_debug_printf ("signal may take us out of single-step range");

	  clear_step_over_info ();
	  insert_hp_step_resume_breakpoint_at_frame (frame);
	  ecs->event_thread->step_after_step_resume_breakpoint = 1;
	  /* Reset trap_expected to ensure breakpoints are re-inserted.  */
	  ecs->event_thread->control.trap_expected = 0;
	  keep_going (ecs);
	  return;
	}

      /* Note: step_resume_breakpoint may be non-NULL.  This occurs
	 when either there's a nested signal, or when there's a
	 pending signal enabled just as the signal handler returns
	 (leaving the inferior at the step-resume-breakpoint without
	 actually executing it).  Either way continue until the
	 breakpoint is really hit.  */

      if (!switch_back_to_stepped_thread (ecs))
	{
	  infrun_debug_printf ("random signal, keep going");

	  keep_going (ecs);
	}
      return;
    }

  process_event_stop_test (ecs);
}
7301
7302 /* Return the address for the beginning of the line. */
7303
7304 CORE_ADDR
7305 update_line_range_start (CORE_ADDR pc, struct execution_control_state *ecs)
7306 {
7307 /* The line table may have multiple entries for the same source code line.
7308 Given the PC, check the line table and return the PC that corresponds
7309 to the line table entry for the source line that PC is in. */
7310 CORE_ADDR start_line_pc = ecs->event_thread->control.step_range_start;
7311 std::optional<CORE_ADDR> real_range_start;
7312
7313 /* Call find_line_range_start to get the smallest address in the
7314 linetable for multiple Line X entries in the line table. */
7315 real_range_start = find_line_range_start (pc);
7316
7317 if (real_range_start.has_value ())
7318 start_line_pc = *real_range_start;
7319
7320 return start_line_pc;
7321 }
7322
namespace {

/* Helper class for process_event_stop_test implementing lazy evaluation.
   The wrapped value is produced by the supplied callable on first
   access and cached for all subsequent accesses.  */
template<typename T>
class lazy_loader
{
  using fetcher_t = std::function<T ()>;

public:
  explicit lazy_loader (fetcher_t &&f) : m_loader (std::move (f))
  { }

  /* Return the value, computing it via the loader on first use.  */
  T &operator* ()
  {
    if (!m_value.has_value ())
      m_value.emplace (m_loader ());
    return *m_value;
  }

  /* Member access forwards to the (possibly freshly computed) value.  */
  T *operator-> ()
  {
    return &operator* ();
  }

private:
  /* Cached result; empty until the first access.  */
  std::optional<T> m_value;

  /* Callable that produces the value.  */
  fetcher_t m_loader;
};

}
7353
7354 /* Come here when we've got some debug event / signal we can explain
7355 (IOW, not a random signal), and test whether it should cause a
7356 stop, or whether we should resume the inferior (transparently).
7357 E.g., could be a breakpoint whose condition evaluates false; we
7358 could be still stepping within the line; etc. */
7359
static void
process_event_stop_test (struct execution_control_state *ecs)
{
  struct symtab_and_line stop_pc_sal;
  frame_info_ptr frame;
  struct gdbarch *gdbarch;
  CORE_ADDR jmp_buf_pc;
  struct bpstat_what what;

  /* Handle cases caused by hitting a breakpoint.  */

  frame = get_current_frame ();
  gdbarch = get_frame_arch (frame);

  what = bpstat_what (ecs->event_thread->control.stop_bpstat);

  if (what.call_dummy)
    {
      stop_stack_dummy = what.call_dummy;
    }

  /* A few breakpoint types have callbacks associated (e.g.,
     bp_jit_event).  Run them now.  */
  bpstat_run_callbacks (ecs->event_thread->control.stop_bpstat);

  /* If we hit an internal event that triggers symbol changes, the
     current frame will be invalidated within bpstat_what (e.g., if we
     hit an internal solib event).  Re-fetch it.  */
  frame = get_current_frame ();
  gdbarch = get_frame_arch (frame);

  /* Shorthand to make if statements smaller.  CURR_FRAME_ID is
     evaluated lazily (see lazy_loader above), so code paths that
     never compare frame IDs don't pay for get_frame_id.  */
  struct frame_id original_frame_id
    = ecs->event_thread->control.step_frame_id;
  lazy_loader<frame_id> curr_frame_id
    ([] () { return get_frame_id (get_current_frame ()); });

  switch (what.main_action)
    {
    case BPSTAT_WHAT_SET_LONGJMP_RESUME:
      /* If we hit the breakpoint at longjmp while stepping, we
	 install a momentary breakpoint at the target of the
	 jmp_buf.  */

      infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME");

      ecs->event_thread->stepping_over_breakpoint = 1;

      if (what.is_longjmp)
	{
	  struct value *arg_value;

	  /* If we set the longjmp breakpoint via a SystemTap probe,
	     then use it to extract the arguments.  The destination PC
	     is the third argument to the probe.  */
	  arg_value = probe_safe_evaluate_at_pc (frame, 2);
	  if (arg_value)
	    {
	      jmp_buf_pc = value_as_address (arg_value);
	      jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
	    }
	  else if (!gdbarch_get_longjmp_target_p (gdbarch)
		   || !gdbarch_get_longjmp_target (gdbarch,
						   frame, &jmp_buf_pc))
	    {
	      infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME "
				   "(!gdbarch_get_longjmp_target)");
	      keep_going (ecs);
	      return;
	    }

	  /* Insert a breakpoint at resume address.  */
	  insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
	}
      else
	check_exception_resume (ecs, frame);
      keep_going (ecs);
      return;

    case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
      {
	frame_info_ptr init_frame;

	/* There are several cases to consider.

	   1. The initiating frame no longer exists.  In this case we
	   must stop, because the exception or longjmp has gone too
	   far.

	   2. The initiating frame exists, and is the same as the
	   current frame.  We stop, because the exception or longjmp
	   has been caught.

	   3. The initiating frame exists and is different from the
	   current frame.  This means the exception or longjmp has
	   been caught beneath the initiating frame, so keep going.

	   4. longjmp breakpoint has been placed just to protect
	   against stale dummy frames and user is not interested in
	   stopping around longjmps.  */

	infrun_debug_printf ("BPSTAT_WHAT_CLEAR_LONGJMP_RESUME");

	gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
		    != nullptr);
	delete_exception_resume_breakpoint (ecs->event_thread);

	if (what.is_longjmp)
	  {
	    check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);

	    if (!frame_id_p (ecs->event_thread->initiating_frame))
	      {
		/* Case 4.  */
		keep_going (ecs);
		return;
	      }
	  }

	init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);

	if (init_frame)
	  {
	    if (*curr_frame_id == ecs->event_thread->initiating_frame)
	      {
		/* Case 2.  Fall through.  */
	      }
	    else
	      {
		/* Case 3.  */
		keep_going (ecs);
		return;
	      }
	  }

	/* For Cases 1 and 2, remove the step-resume breakpoint, if it
	   exists.  */
	delete_step_resume_breakpoint (ecs->event_thread);

	end_stepping_range (ecs);
      }
      return;

    case BPSTAT_WHAT_SINGLE:
      infrun_debug_printf ("BPSTAT_WHAT_SINGLE");
      ecs->event_thread->stepping_over_breakpoint = 1;
      /* Still need to check other stuff, at least the case where we
	 are stepping and step out of the right range.  */
      break;

    case BPSTAT_WHAT_STEP_RESUME:
      infrun_debug_printf ("BPSTAT_WHAT_STEP_RESUME");

      delete_step_resume_breakpoint (ecs->event_thread);
      if (ecs->event_thread->control.proceed_to_finish
	  && execution_direction == EXEC_REVERSE)
	{
	  struct thread_info *tp = ecs->event_thread;

	  /* We are finishing a function in reverse, and just hit the
	     step-resume breakpoint at the start address of the
	     function, and we're almost there -- just need to back up
	     by one more single-step, which should take us back to the
	     function call.  */
	  tp->control.step_range_start = tp->control.step_range_end = 1;
	  keep_going (ecs);
	  return;
	}
      fill_in_stop_func (gdbarch, ecs);
      if (ecs->event_thread->stop_pc () == ecs->stop_func_start
	  && execution_direction == EXEC_REVERSE)
	{
	  /* We are stepping over a function call in reverse, and just
	     hit the step-resume breakpoint at the start address of
	     the function.  Go back to single-stepping, which should
	     take us back to the function call.  */
	  ecs->event_thread->stepping_over_breakpoint = 1;
	  keep_going (ecs);
	  return;
	}
      break;

    case BPSTAT_WHAT_STOP_NOISY:
      infrun_debug_printf ("BPSTAT_WHAT_STOP_NOISY");
      stop_print_frame = true;

      /* Assume the thread stopped for a breakpoint.  We'll still check
	 whether a/the breakpoint is there when the thread is next
	 resumed.  */
      ecs->event_thread->stepping_over_breakpoint = 1;

      stop_waiting (ecs);
      return;

    case BPSTAT_WHAT_STOP_SILENT:
      infrun_debug_printf ("BPSTAT_WHAT_STOP_SILENT");
      stop_print_frame = false;

      /* Assume the thread stopped for a breakpoint.  We'll still check
	 whether a/the breakpoint is there when the thread is next
	 resumed.  */
      ecs->event_thread->stepping_over_breakpoint = 1;
      stop_waiting (ecs);
      return;

    case BPSTAT_WHAT_HP_STEP_RESUME:
      infrun_debug_printf ("BPSTAT_WHAT_HP_STEP_RESUME");

      delete_step_resume_breakpoint (ecs->event_thread);
      if (ecs->event_thread->step_after_step_resume_breakpoint)
	{
	  /* Back when the step-resume breakpoint was inserted, we
	     were trying to single-step off a breakpoint.  Go back to
	     doing that.  */
	  ecs->event_thread->step_after_step_resume_breakpoint = 0;
	  ecs->event_thread->stepping_over_breakpoint = 1;
	  keep_going (ecs);
	  return;
	}
      break;

    case BPSTAT_WHAT_KEEP_CHECKING:
      break;
    }

  /* If we stepped a permanent breakpoint and we had a high priority
     step-resume breakpoint for the address we stepped, but we didn't
     hit it, then we must have stepped into the signal handler.  The
     step-resume was only necessary to catch the case of _not_
     stepping into the handler, so delete it, and fall through to
     checking whether the step finished.  */
  if (ecs->event_thread->stepped_breakpoint)
    {
      struct breakpoint *sr_bp
	= ecs->event_thread->control.step_resume_breakpoint;

      if (sr_bp != nullptr
	  && sr_bp->first_loc ().permanent
	  && sr_bp->type == bp_hp_step_resume
	  && sr_bp->first_loc ().address == ecs->event_thread->prev_pc)
	{
	  infrun_debug_printf ("stepped permanent breakpoint, stopped in handler");
	  delete_step_resume_breakpoint (ecs->event_thread);
	  ecs->event_thread->step_after_step_resume_breakpoint = 0;
	}
    }

  /* We come here if we hit a breakpoint but should not stop for it.
     Possibly we also were stepping and should stop for that.  So fall
     through and test for stepping.  But, if not stepping, do not
     stop.  */

  /* In all-stop mode, if we're currently stepping but have stopped in
     some other thread, we need to switch back to the stepped thread.  */
  if (switch_back_to_stepped_thread (ecs))
    return;

  if (ecs->event_thread->control.step_resume_breakpoint)
    {
      infrun_debug_printf ("step-resume breakpoint is inserted");

      /* Having a step-resume breakpoint overrides anything
	 else having to do with stepping commands until
	 that breakpoint is reached.  */
      keep_going (ecs);
      return;
    }

  if (ecs->event_thread->control.step_range_end == 0)
    {
      infrun_debug_printf ("no stepping, continue");
      /* Likewise if we aren't even stepping.  */
      keep_going (ecs);
      return;
    }

  /* Re-fetch current thread's frame in case the code above caused
     the frame cache to be re-initialized, making our FRAME variable
     a dangling pointer.  */
  frame = get_current_frame ();
  gdbarch = get_frame_arch (frame);
  fill_in_stop_func (gdbarch, ecs);

  /* If stepping through a line, keep going if still within it.

     Note that step_range_end is the address of the first instruction
     beyond the step range, and NOT the address of the last instruction
     within it!

     Note also that during reverse execution, we may be stepping
     through a function epilogue and therefore must detect when
     the current-frame changes in the middle of a line.  */

  if (pc_in_thread_step_range (ecs->event_thread->stop_pc (),
			       ecs->event_thread)
      && (execution_direction != EXEC_REVERSE
	  || *curr_frame_id == original_frame_id))
    {
      infrun_debug_printf
	("stepping inside range [%s-%s]",
	 paddress (gdbarch, ecs->event_thread->control.step_range_start),
	 paddress (gdbarch, ecs->event_thread->control.step_range_end));

      /* Tentatively re-enable range stepping; `resume' disables it if
	 necessary (e.g., if we're stepping over a breakpoint or we
	 have software watchpoints).  */
      ecs->event_thread->control.may_range_step = 1;

      /* When stepping backward, stop at beginning of line range
	 (unless it's the function entry point, in which case
	 keep going back to the call point).  */
      CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
      if (stop_pc == ecs->event_thread->control.step_range_start
	  && stop_pc != ecs->stop_func_start
	  && execution_direction == EXEC_REVERSE)
	end_stepping_range (ecs);
      else
	keep_going (ecs);

      return;
    }

  /* We stepped out of the stepping range.  */

  /* If we are stepping at the source level and entered the runtime
     loader dynamic symbol resolution code...

     EXEC_FORWARD: we keep on single stepping until we exit the run
     time loader code and reach the callee's address.

     EXEC_REVERSE: we've already executed the callee (backward), and
     the runtime loader code is handled just like any other
     undebuggable function call.  Now we need only keep stepping
     backward through the trampoline code, and that's handled further
     down, so there is nothing for us to do here.  */

  if (execution_direction != EXEC_REVERSE
      && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
      && in_solib_dynsym_resolve_code (ecs->event_thread->stop_pc ())
      && (ecs->event_thread->control.step_start_function == nullptr
	  || !in_solib_dynsym_resolve_code (
	       ecs->event_thread->control.step_start_function->value_block ()
	       ->entry_pc ())))
    {
      CORE_ADDR pc_after_resolver =
	gdbarch_skip_solib_resolver (gdbarch, ecs->event_thread->stop_pc ());

      infrun_debug_printf ("stepped into dynsym resolve code");

      if (pc_after_resolver)
	{
	  /* Set up a step-resume breakpoint at the address
	     indicated by SKIP_SOLIB_RESOLVER.  */
	  symtab_and_line sr_sal;
	  sr_sal.pc = pc_after_resolver;
	  sr_sal.pspace = get_frame_program_space (frame);

	  insert_step_resume_breakpoint_at_sal (gdbarch,
						sr_sal, null_frame_id);
	}

      keep_going (ecs);
      return;
    }

  /* Step through an indirect branch thunk.  */
  if (ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
      && gdbarch_in_indirect_branch_thunk (gdbarch,
					   ecs->event_thread->stop_pc ()))
    {
      infrun_debug_printf ("stepped into indirect branch thunk");
      keep_going (ecs);
      return;
    }

  if (ecs->event_thread->control.step_range_end != 1
      && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
	  || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
      && get_frame_type (frame) == SIGTRAMP_FRAME)
    {
      infrun_debug_printf ("stepped into signal trampoline");
      /* The inferior, while doing a "step" or "next", has ended up in
	 a signal trampoline (either by a signal being delivered or by
	 the signal handler returning).  Just single-step until the
	 inferior leaves the trampoline (either by calling the handler
	 or returning).  */
      keep_going (ecs);
      return;
    }

  /* If we're in the return path from a shared library trampoline,
     we want to proceed through the trampoline when stepping.  */
  /* macro/2012-04-25: This needs to come before the subroutine
     call check below as on some targets return trampolines look
     like subroutine calls (MIPS16 return thunks).  */
  if (gdbarch_in_solib_return_trampoline (gdbarch,
					  ecs->event_thread->stop_pc (),
					  ecs->stop_func_name)
      && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
    {
      /* Determine where this trampoline returns.  */
      CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
      CORE_ADDR real_stop_pc
	= gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);

      infrun_debug_printf ("stepped into solib return tramp");

      /* Only proceed through if we know where it's going.  */
      if (real_stop_pc)
	{
	  /* And put the step-breakpoint there and go until there.  */
	  symtab_and_line sr_sal;
	  sr_sal.pc = real_stop_pc;
	  sr_sal.section = find_pc_overlay (sr_sal.pc);
	  sr_sal.pspace = get_frame_program_space (frame);

	  /* Do not specify what the fp should be when we stop since
	     on some machines the prologue is where the new fp value
	     is established.  */
	  insert_step_resume_breakpoint_at_sal (gdbarch,
						sr_sal, null_frame_id);

	  /* Restart without fiddling with the step ranges or
	     other state.  */
	  keep_going (ecs);
	  return;
	}
    }

  /* Check for subroutine calls.  The check for the current frame
     equalling the step ID is not necessary - the check of the
     previous frame's ID is sufficient - but it is a common case and
     cheaper than checking the previous frame's ID.

     NOTE: frame_id::operator== will never report two invalid frame IDs as
     being equal, so to get into this block, both the current and
     previous frame must have valid frame IDs.  */
  /* The outer_frame_id check is a heuristic to detect stepping
     through startup code.  If we step over an instruction which
     sets the stack pointer from an invalid value to a valid value,
     we may detect that as a subroutine call from the mythical
     "outermost" function.  This could be fixed by marking
     outermost frames as !stack_p,code_p,special_p.  Then the
     initial outermost frame, before sp was valid, would
     have code_addr == &_start.  See the comment in frame_id::operator==
     for more.  */

  /* We want "nexti" to step into, not over, signal handlers invoked
     by the kernel, therefore this subroutine check should not trigger
     for a signal handler invocation.  On most platforms, this is already
     not the case, as the kernel puts a signal trampoline frame onto the
     stack to handle proper return after the handler, and therefore at this
     point, the current frame is a grandchild of the step frame, not a
     child.  However, on some platforms, the kernel actually uses a
     trampoline to handle *invocation* of the handler.  In that case,
     when executing the first instruction of the trampoline, this check
     would erroneously detect the trampoline invocation as a subroutine
     call.  Fix this by checking for SIGTRAMP_FRAME.  */
  if ((get_stack_frame_id (frame)
       != ecs->event_thread->control.step_stack_frame_id)
      && get_frame_type (frame) != SIGTRAMP_FRAME
      && ((frame_unwind_caller_id (get_current_frame ())
	   == ecs->event_thread->control.step_stack_frame_id)
	  && ((ecs->event_thread->control.step_stack_frame_id
	       != outer_frame_id)
	      || (ecs->event_thread->control.step_start_function
		  != find_pc_function (ecs->event_thread->stop_pc ())))))
    {
      CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
      CORE_ADDR real_stop_pc;

      infrun_debug_printf ("stepped into subroutine");

      if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
	{
	  /* I presume that step_over_calls is only 0 when we're
	     supposed to be stepping at the assembly language level
	     ("stepi").  Just stop.  */
	  /* And this works the same backward as frontward.  MVS */
	  end_stepping_range (ecs);
	  return;
	}

      /* Reverse stepping through solib trampolines.  */

      if (execution_direction == EXEC_REVERSE
	  && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
	  && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
	      || (ecs->stop_func_start == 0
		  && in_solib_dynsym_resolve_code (stop_pc))))
	{
	  /* Any solib trampoline code can be handled in reverse
	     by simply continuing to single-step.  We have already
	     executed the solib function (backwards), and a few
	     steps will take us back through the trampoline to the
	     caller.  */
	  keep_going (ecs);
	  return;
	}

      if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
	{
	  /* We're doing a "next".

	     Normal (forward) execution: set a breakpoint at the
	     callee's return address (the address at which the caller
	     will resume).

	     Reverse (backward) execution.  set the step-resume
	     breakpoint at the start of the function that we just
	     stepped into (backwards), and continue to there.  When we
	     get there, we'll need to single-step back to the caller.  */

	  if (execution_direction == EXEC_REVERSE)
	    {
	      /* If we're already at the start of the function, we've either
		 just stepped backward into a single instruction function,
		 or stepped back out of a signal handler to the first instruction
		 of the function.  Just keep going, which will single-step back
		 to the caller.  */
	      if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
		{
		  /* Normal function call return (static or dynamic).  */
		  symtab_and_line sr_sal;
		  sr_sal.pc = ecs->stop_func_start;
		  sr_sal.pspace = get_frame_program_space (frame);
		  insert_step_resume_breakpoint_at_sal (gdbarch,
							sr_sal, get_stack_frame_id (frame));
		}
	    }
	  else
	    insert_step_resume_breakpoint_at_caller (frame);

	  keep_going (ecs);
	  return;
	}

      /* If we are in a function call trampoline (a stub between the
	 calling routine and the real function), locate the real
	 function.  That's what tells us (a) whether we want to step
	 into it at all, and (b) what prologue we want to run to the
	 end of, if we do step into it.  */
      real_stop_pc = skip_language_trampoline (frame, stop_pc);
      if (real_stop_pc == 0)
	real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
      if (real_stop_pc != 0)
	ecs->stop_func_start = real_stop_pc;

      if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
	{
	  symtab_and_line sr_sal;
	  sr_sal.pc = ecs->stop_func_start;
	  sr_sal.pspace = get_frame_program_space (frame);

	  insert_step_resume_breakpoint_at_sal (gdbarch,
						sr_sal, null_frame_id);
	  keep_going (ecs);
	  return;
	}

      /* If we have line number information for the function we are
	 thinking of stepping into and the function isn't on the skip
	 list, step into it.

	 If there are several symtabs at that PC (e.g. with include
	 files), just want to know whether *any* of them have line
	 numbers.  find_pc_line handles this.  */
      {
	struct symtab_and_line tmp_sal;

	tmp_sal = find_pc_line (ecs->stop_func_start, 0);
	if (tmp_sal.line != 0
	    && !function_name_is_marked_for_skip (ecs->stop_func_name,
						  tmp_sal)
	    && !inline_frame_is_marked_for_skip (true, ecs->event_thread))
	  {
	    if (execution_direction == EXEC_REVERSE)
	      handle_step_into_function_backward (gdbarch, ecs);
	    else
	      handle_step_into_function (gdbarch, ecs);
	    return;
	  }
      }

      /* If we have no line number and the step-stop-if-no-debug is
	 set, we stop the step so that the user has a chance to switch
	 in assembly mode.  */
      if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
	  && step_stop_if_no_debug)
	{
	  end_stepping_range (ecs);
	  return;
	}

      if (execution_direction == EXEC_REVERSE)
	{
	  /* If we're already at the start of the function, we've either just
	     stepped backward into a single instruction function without line
	     number info, or stepped back out of a signal handler to the first
	     instruction of the function without line number info.  Just keep
	     going, which will single-step back to the caller.  */
	  if (ecs->stop_func_start != stop_pc)
	    {
	      /* Set a breakpoint at callee's start address.
		 From there we can step once and be back in the caller.  */
	      symtab_and_line sr_sal;
	      sr_sal.pc = ecs->stop_func_start;
	      sr_sal.pspace = get_frame_program_space (frame);
	      insert_step_resume_breakpoint_at_sal (gdbarch,
						    sr_sal, null_frame_id);
	    }
	}
      else
	/* Set a breakpoint at callee's return address (the address
	   at which the caller will resume).  */
	insert_step_resume_breakpoint_at_caller (frame);

      keep_going (ecs);
      return;
    }

  /* Reverse stepping through solib trampolines.  */

  if (execution_direction == EXEC_REVERSE
      && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
    {
      CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();

      if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
	  || (ecs->stop_func_start == 0
	      && in_solib_dynsym_resolve_code (stop_pc)))
	{
	  /* Any solib trampoline code can be handled in reverse
	     by simply continuing to single-step.  We have already
	     executed the solib function (backwards), and a few
	     steps will take us back through the trampoline to the
	     caller.  */
	  keep_going (ecs);
	  return;
	}
      else if (in_solib_dynsym_resolve_code (stop_pc))
	{
	  /* Stepped backward into the solib dynsym resolver.
	     Set a breakpoint at its start and continue, then
	     one more step will take us out.  */
	  symtab_and_line sr_sal;
	  sr_sal.pc = ecs->stop_func_start;
	  sr_sal.pspace = get_frame_program_space (frame);
	  insert_step_resume_breakpoint_at_sal (gdbarch,
						sr_sal, null_frame_id);
	  keep_going (ecs);
	  return;
	}
    }

  /* This always returns the sal for the inner-most frame when we are in a
     stack of inlined frames, even if GDB actually believes that it is in a
     more outer frame.  This is checked for below by calls to
     inline_skipped_frames.  */
  stop_pc_sal = find_pc_line (ecs->event_thread->stop_pc (), 0);

  /* NOTE: tausq/2004-05-24: This if block used to be done before all
     the trampoline processing logic, however, there are some trampolines
     that have no names, so we should do trampoline handling first.  */
  if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
      && ecs->stop_func_name == nullptr
      && stop_pc_sal.line == 0)
    {
      infrun_debug_printf ("stepped into undebuggable function");

      /* The inferior just stepped into, or returned to, an
	 undebuggable function (where there is no debugging information
	 and no line number corresponding to the address where the
	 inferior stopped).  Since we want to skip this kind of code,
	 we keep going until the inferior returns from this
	 function - unless the user has asked us not to (via
	 set step-mode) or we no longer know how to get back
	 to the call site.  */
      if (step_stop_if_no_debug
	  || !frame_id_p (frame_unwind_caller_id (frame)))
	{
	  /* If we have no line number and the step-stop-if-no-debug
	     is set, we stop the step so that the user has a chance to
	     switch in assembly mode.  */
	  end_stepping_range (ecs);
	  return;
	}
      else
	{
	  /* Set a breakpoint at callee's return address (the address
	     at which the caller will resume).  */
	  insert_step_resume_breakpoint_at_caller (frame);
	  keep_going (ecs);
	  return;
	}
    }

  if (execution_direction == EXEC_REVERSE
      && ecs->event_thread->control.proceed_to_finish
      && ecs->event_thread->stop_pc () >= ecs->stop_func_alt_start
      && ecs->event_thread->stop_pc () < ecs->stop_func_start)
    {
      /* We are executing the reverse-finish command.
	 If the system supports multiple entry points and we are finishing a
	 function in reverse.  If we are between the entry points single-step
	 back to the alternate entry point.  If we are at the alternate entry
	 point -- just need to back up by one more single-step, which
	 should take us back to the function call.  */
      ecs->event_thread->control.step_range_start
	= ecs->event_thread->control.step_range_end = 1;
      keep_going (ecs);
      return;

    }

  if (ecs->event_thread->control.step_range_end == 1)
    {
      /* It is stepi or nexti.  We always want to stop stepping after
	 one instruction.  */
      infrun_debug_printf ("stepi/nexti");
      end_stepping_range (ecs);
      return;
    }

  if (stop_pc_sal.line == 0)
    {
      /* We have no line number information.  That means to stop
	 stepping (does this always happen right after one instruction,
	 when we do "s" in a function with no line numbers,
	 or can this happen as a result of a return or longjmp?).  */
      infrun_debug_printf ("line number info");
      end_stepping_range (ecs);
      return;
    }

  /* Look for "calls" to inlined functions, part one.  If the inline
     frame machinery detected some skipped call sites, we have entered
     a new inline function.  */

  if ((*curr_frame_id == original_frame_id)
      && inline_skipped_frames (ecs->event_thread))
    {
      infrun_debug_printf ("stepped into inlined function");

      symtab_and_line call_sal = find_frame_sal (get_current_frame ());

      if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
	{
	  /* For "step", we're going to stop.  But if the call site
	     for this inlined function is on the same source line as
	     we were previously stepping, go down into the function
	     first.  Otherwise stop at the call site.  */

	  if (call_sal.line == ecs->event_thread->current_line
	      && call_sal.symtab == ecs->event_thread->current_symtab)
	    {
	      step_into_inline_frame (ecs->event_thread);
	      if (inline_frame_is_marked_for_skip (false, ecs->event_thread))
		{
		  keep_going (ecs);
		  return;
		}
	    }

	  end_stepping_range (ecs);
	  return;
	}
      else
	{
	  /* For "next", we should stop at the call site if it is on a
	     different source line.  Otherwise continue through the
	     inlined function.  */
	  if (call_sal.line == ecs->event_thread->current_line
	      && call_sal.symtab == ecs->event_thread->current_symtab)
	    keep_going (ecs);
	  else
	    end_stepping_range (ecs);
	  return;
	}
    }

  /* Look for "calls" to inlined functions, part two.  If we are still
     in the same real function we were stepping through, but we have
     to go further up to find the exact frame ID, we are stepping
     through a more inlined call beyond its call site.  */

  if (get_frame_type (get_current_frame ()) == INLINE_FRAME
      && (*curr_frame_id != original_frame_id)
      && stepped_in_from (get_current_frame (), original_frame_id))
    {
      infrun_debug_printf ("stepping through inlined function");

      if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL
	  || inline_frame_is_marked_for_skip (false, ecs->event_thread))
	keep_going (ecs);
      else
	end_stepping_range (ecs);
      return;
    }

  /* REFRESH_STEP_INFO is cleared below on paths where we must keep
     the original stepping context (e.g. leaving a recursive call in
     reverse), so that the final set_step_info call is skipped.  */
  bool refresh_step_info = true;
  if ((ecs->event_thread->stop_pc () == stop_pc_sal.pc)
      && (ecs->event_thread->current_line != stop_pc_sal.line
	  || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
    {
      /* We are at a different line.  */

      if (stop_pc_sal.is_stmt)
	{
	  if (execution_direction == EXEC_REVERSE)
	    {
	      /* We are stepping backwards make sure we have reached the
		 beginning of the line.  */
	      CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
	      CORE_ADDR start_line_pc
		= update_line_range_start (stop_pc, ecs);

	      if (stop_pc != start_line_pc)
		{
		  /* Have not reached the beginning of the source code line.
		     Set a step range.  Execution should stop in any function
		     calls we execute back into before reaching the beginning
		     of the line.  */
		  ecs->event_thread->control.step_range_start
		    = start_line_pc;
		  ecs->event_thread->control.step_range_end = stop_pc;
		  set_step_info (ecs->event_thread, frame, stop_pc_sal);
		  keep_going (ecs);
		  return;
		}
	    }

	  /* We are at the start of a statement.

	     So stop.  Note that we don't stop if we step into the middle of a
	     statement.  That is said to make things like for (;;) statements
	     work better.  */
	  infrun_debug_printf ("stepped to a different line");
	  end_stepping_range (ecs);
	  return;
	}
      else if (*curr_frame_id == original_frame_id)
	{
	  /* We are not at the start of a statement, and we have not changed
	     frame.

	     We ignore this line table entry, and continue stepping forward,
	     looking for a better place to stop.  */
	  refresh_step_info = false;
	  infrun_debug_printf ("stepped to a different line, but "
			       "it's not the start of a statement");
	}
      else
	{
	  /* We are not the start of a statement, and we have changed frame.

	     We ignore this line table entry, and continue stepping forward,
	     looking for a better place to stop.  Keep refresh_step_info at
	     true to note that the frame has changed, but ignore the line
	     number to make sure we don't ignore a subsequent entry with the
	     same line number.  */
	  stop_pc_sal.line = 0;
	  infrun_debug_printf ("stepped to a different frame, but "
			       "it's not the start of a statement");
	}
    }
  else if (execution_direction == EXEC_REVERSE
	   && *curr_frame_id != original_frame_id
	   && original_frame_id.code_addr_p && curr_frame_id->code_addr_p
	   && original_frame_id.code_addr == curr_frame_id->code_addr)
    {
      /* If we enter here, we're leaving a recursive function call.  In this
	 situation, we shouldn't refresh the step information, because if we
	 do, we'll lose the frame_id of when we started stepping, and this
	 will make GDB not know we need to print frame information.  */
      refresh_step_info = false;
      infrun_debug_printf ("reverse stepping, left a recursive call, don't "
			   "update step info so we remember we left a frame");
    }

  /* We aren't done stepping.

     Optimize by setting the stepping range to the line.
     (We might not be in the original line, but if we entered a
     new line in mid-statement, we continue stepping.  This makes
     things like for(;;) statements work better.)

     If we entered a SAL that indicates a non-statement line table entry,
     then we update the stepping range, but we don't update the step info,
     which includes things like the line number we are stepping away from.
     This means we will stop when we find a line table entry that is marked
     as is-statement, even if it matches the non-statement one we just
     stepped into.  */

  ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
  ecs->event_thread->control.step_range_end = stop_pc_sal.end;
  ecs->event_thread->control.may_range_step = 1;
  infrun_debug_printf
    ("updated step range, start = %s, end = %s, may_range_step = %d",
     paddress (gdbarch, ecs->event_thread->control.step_range_start),
     paddress (gdbarch, ecs->event_thread->control.step_range_end),
     ecs->event_thread->control.may_range_step);
  if (refresh_step_info)
    set_step_info (ecs->event_thread, frame, stop_pc_sal);

  infrun_debug_printf ("keep going");

  if (execution_direction == EXEC_REVERSE)
    {
      CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();

      /* Make sure the stop_pc is set to the beginning of the line.  */
      if (stop_pc != ecs->event_thread->control.step_range_start)
	ecs->event_thread->control.step_range_start
	  = update_line_range_start (stop_pc, ecs);
    }

  keep_going (ecs);
}
8279
8280 static bool restart_stepped_thread (process_stratum_target *resume_target,
8281 ptid_t resume_ptid);
8282
/* In all-stop mode, if we're currently stepping but have stopped in
   some other thread, we may need to switch back to the stepped
   thread.  Returns true if we set the inferior running, false if we
   left it stopped (and the event needs further processing).  */

static bool
switch_back_to_stepped_thread (struct execution_control_state *ecs)
{
  if (!target_is_non_stop_p ())
    {
      /* If any thread is blocked on some internal breakpoint, and we
	 simply need to step over that breakpoint to get it going
	 again, do that first.  */

      /* However, if we see an event for the stepping thread, then we
	 know all other threads have been moved past their breakpoints
	 already.  Let the caller check whether the step is finished,
	 etc., before deciding to move it past a breakpoint.  */
      if (ecs->event_thread->control.step_range_end != 0)
	return false;

      /* Check if the current thread is blocked on an incomplete
	 step-over, interrupted by a random signal.  */
      if (ecs->event_thread->control.trap_expected
	  && ecs->event_thread->stop_signal () != GDB_SIGNAL_TRAP)
	{
	  infrun_debug_printf
	    ("need to finish step-over of [%s]",
	     ecs->event_thread->ptid.to_string ().c_str ());
	  keep_going (ecs);
	  return true;
	}

      /* Check if the current thread is blocked by a single-step
	 breakpoint of another thread.  */
      if (ecs->hit_singlestep_breakpoint)
	{
	  infrun_debug_printf ("need to step [%s] over single-step breakpoint",
			       ecs->ptid.to_string ().c_str ());
	  keep_going (ecs);
	  return true;
	}

      /* If this thread needs yet another step-over (e.g., stepping
	 through a delay slot), do it first before moving on to
	 another thread.  */
      if (thread_still_needs_step_over (ecs->event_thread))
	{
	  infrun_debug_printf
	    ("thread [%s] still needs step-over",
	     ecs->event_thread->ptid.to_string ().c_str ());
	  keep_going (ecs);
	  return true;
	}

      /* If scheduler locking applies even if not stepping, there's no
	 need to walk over threads.  Above we've checked whether the
	 current thread is stepping.  If some other thread not the
	 event thread is stepping, then it must be that scheduler
	 locking is not in effect.  */
      if (schedlock_applies (ecs->event_thread))
	return false;

      /* Otherwise, we no longer expect a trap in the current thread.
	 Clear the trap_expected flag before switching back -- this is
	 what keep_going does as well, if we call it.  */
      ecs->event_thread->control.trap_expected = 0;

      /* Likewise, clear the signal if it should not be passed.  */
      if (!signal_program[ecs->event_thread->stop_signal ()])
	ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);

      /* If a previously-stepping thread was restarted, wait for its
	 next event instead of reporting this one.  */
      if (restart_stepped_thread (ecs->target, ecs->ptid))
	{
	  prepare_to_wait (ecs);
	  return true;
	}

      /* No thread needed restarting; switch back to the event thread
	 so the caller can finish processing this event.  */
      switch_to_thread (ecs->event_thread);
    }

  return false;
}
8366
8367 /* Look for the thread that was stepping, and resume it.
8368 RESUME_TARGET / RESUME_PTID indicate the set of threads the caller
8369 is resuming. Return true if a thread was started, false
8370 otherwise. */
8371
8372 static bool
8373 restart_stepped_thread (process_stratum_target *resume_target,
8374 ptid_t resume_ptid)
8375 {
8376 /* Do all pending step-overs before actually proceeding with
8377 step/next/etc. */
8378 if (start_step_over ())
8379 return true;
8380
8381 for (thread_info *tp : all_threads_safe ())
8382 {
8383 if (tp->state == THREAD_EXITED)
8384 continue;
8385
8386 if (tp->has_pending_waitstatus ())
8387 continue;
8388
8389 /* Ignore threads of processes the caller is not
8390 resuming. */
8391 if (!sched_multi
8392 && (tp->inf->process_target () != resume_target
8393 || tp->inf->pid != resume_ptid.pid ()))
8394 continue;
8395
8396 if (tp->control.trap_expected)
8397 {
8398 infrun_debug_printf ("switching back to stepped thread (step-over)");
8399
8400 if (keep_going_stepped_thread (tp))
8401 return true;
8402 }
8403 }
8404
8405 for (thread_info *tp : all_threads_safe ())
8406 {
8407 if (tp->state == THREAD_EXITED)
8408 continue;
8409
8410 if (tp->has_pending_waitstatus ())
8411 continue;
8412
8413 /* Ignore threads of processes the caller is not
8414 resuming. */
8415 if (!sched_multi
8416 && (tp->inf->process_target () != resume_target
8417 || tp->inf->pid != resume_ptid.pid ()))
8418 continue;
8419
8420 /* Did we find the stepping thread? */
8421 if (tp->control.step_range_end)
8422 {
8423 infrun_debug_printf ("switching back to stepped thread (stepping)");
8424
8425 if (keep_going_stepped_thread (tp))
8426 return true;
8427 }
8428 }
8429
8430 return false;
8431 }
8432
8433 /* See infrun.h. */
8434
8435 void
8436 restart_after_all_stop_detach (process_stratum_target *proc_target)
8437 {
8438 /* Note we don't check target_is_non_stop_p() here, because the
8439 current inferior may no longer have a process_stratum target
8440 pushed, as we just detached. */
8441
8442 /* See if we have a THREAD_RUNNING thread that need to be
8443 re-resumed. If we have any thread that is already executing,
8444 then we don't need to resume the target -- it is already been
8445 resumed. With the remote target (in all-stop), it's even
8446 impossible to issue another resumption if the target is already
8447 resumed, until the target reports a stop. */
8448 for (thread_info *thr : all_threads (proc_target))
8449 {
8450 if (thr->state != THREAD_RUNNING)
8451 continue;
8452
8453 /* If we have any thread that is already executing, then we
8454 don't need to resume the target -- it is already been
8455 resumed. */
8456 if (thr->executing ())
8457 return;
8458
8459 /* If we have a pending event to process, skip resuming the
8460 target and go straight to processing it. */
8461 if (thr->resumed () && thr->has_pending_waitstatus ())
8462 return;
8463 }
8464
8465 /* Alright, we need to re-resume the target. If a thread was
8466 stepping, we need to restart it stepping. */
8467 if (restart_stepped_thread (proc_target, minus_one_ptid))
8468 return;
8469
8470 /* Otherwise, find the first THREAD_RUNNING thread and resume
8471 it. */
8472 for (thread_info *thr : all_threads (proc_target))
8473 {
8474 if (thr->state != THREAD_RUNNING)
8475 continue;
8476
8477 execution_control_state ecs (thr);
8478 switch_to_thread (thr);
8479 keep_going (&ecs);
8480 return;
8481 }
8482 }
8483
/* Set a previously stepped thread back to stepping.  Returns true on
   success, false if the resume is not possible (e.g., the thread
   vanished).  */

static bool
keep_going_stepped_thread (struct thread_info *tp)
{
  frame_info_ptr frame;

  /* If the stepping thread exited, then don't try to switch back and
     resume it, which could fail in several different ways depending
     on the target.  Instead, just keep going.

     We can find a stepping dead thread in the thread list in two
     cases:

     - The target supports thread exit events, and when the target
       tries to delete the thread from the thread list, inferior_ptid
       pointed at the exiting thread.  In such case, calling
       delete_thread does not really remove the thread from the list;
       instead, the thread is left listed, with 'exited' state.

     - The target's debug interface does not support thread exit
       events, and so we have no idea whatsoever if the previously
       stepping thread is still alive.  For that reason, we need to
       synchronously query the target now.  */

  if (tp->state == THREAD_EXITED || !target_thread_alive (tp->ptid))
    {
      infrun_debug_printf ("not resuming previously stepped thread, it has "
			   "vanished");

      delete_thread (tp);
      return false;
    }

  infrun_debug_printf ("resuming previously stepped thread");

  execution_control_state ecs (tp);
  switch_to_thread (tp);

  /* Refresh the stop PC from the target; the thread may have moved
     since we last looked at it.  */
  tp->set_stop_pc (regcache_read_pc (get_thread_regcache (tp)));
  frame = get_current_frame ();

  /* If the PC of the thread we were trying to single-step has
     changed, then that thread has trapped or been signaled, but the
     event has not been reported to GDB yet.  Re-poll the target
     looking for this particular thread's event (i.e. temporarily
     enable schedlock) by:

     - setting a break at the current PC
     - resuming that particular thread, only (by setting trap
       expected)

     This prevents us continuously moving the single-step breakpoint
     forward, one instruction at a time, overstepping.  */

  if (tp->stop_pc () != tp->prev_pc)
    {
      ptid_t resume_ptid;

      infrun_debug_printf ("expected thread advanced also (%s -> %s)",
			   paddress (current_inferior ()->arch (), tp->prev_pc),
			   paddress (current_inferior ()->arch (),
				     tp->stop_pc ()));

      /* Clear the info of the previous step-over, as it's no longer
	 valid (if the thread was trying to step over a breakpoint, it
	 has already succeeded).  It's what keep_going would do too,
	 if we called it.  Do this before trying to insert the sss
	 breakpoint, otherwise if we were previously trying to step
	 over this exact address in another thread, the breakpoint is
	 skipped.  */
      clear_step_over_info ();
      tp->control.trap_expected = 0;

      insert_single_step_breakpoint (get_frame_arch (frame),
				     get_frame_address_space (frame),
				     tp->stop_pc ());

      tp->set_resumed (true);
      resume_ptid = internal_resume_ptid (tp->control.stepping_command);
      do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
    }
  else
    {
      infrun_debug_printf ("expected thread still hasn't advanced");

      /* The thread hasn't moved; simply set it stepping again,
	 passing along any pending signal handling.  */
      keep_going_pass_signal (&ecs);
    }

  return true;
}
8577
8578 /* Is thread TP in the middle of (software or hardware)
8579 single-stepping? (Note the result of this function must never be
8580 passed directly as target_resume's STEP parameter.) */
8581
8582 static bool
8583 currently_stepping (struct thread_info *tp)
8584 {
8585 return ((tp->control.step_range_end
8586 && tp->control.step_resume_breakpoint == nullptr)
8587 || tp->control.trap_expected
8588 || tp->stepped_breakpoint
8589 || bpstat_should_step ());
8590 }
8591
/* Inferior has stepped into a subroutine call with source code that
   we should not step over.  Do step to the first line of code in
   it.  */

static void
handle_step_into_function (struct gdbarch *gdbarch,
			   struct execution_control_state *ecs)
{
  fill_in_stop_func (gdbarch, ecs);

  /* Skip the function's prologue, unless this is assembly source,
     where "prologue" has no meaning.  */
  compunit_symtab *cust
    = find_pc_compunit_symtab (ecs->event_thread->stop_pc ());
  if (cust != nullptr && cust->language () != language_asm)
    ecs->stop_func_start
      = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);

  symtab_and_line stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
  /* Use the step_resume_break to step until the end of the prologue,
     even if that involves jumps (as it seems to on the vax under
     4.2).  */
  /* If the prologue ends in the middle of a source line, continue to
     the end of that source line (if it is still within the function).
     Otherwise, just go to end of prologue.  */
  if (stop_func_sal.end
      && stop_func_sal.pc != ecs->stop_func_start
      && stop_func_sal.end < ecs->stop_func_end)
    ecs->stop_func_start = stop_func_sal.end;

  /* Architectures which require breakpoint adjustment might not be able
     to place a breakpoint at the computed address.  If so, the test
     ``ecs->stop_func_start == stop_pc'' will never succeed.  Adjust
     ecs->stop_func_start to an address at which a breakpoint may be
     legitimately placed.

     Note:  kevinb/2004-01-19: On FR-V, if this adjustment is not
     made, GDB will enter an infinite loop when stepping through
     optimized code consisting of VLIW instructions which contain
     subinstructions corresponding to different source lines.  On
     FR-V, it's not permitted to place a breakpoint on any but the
     first subinstruction of a VLIW instruction.  When a breakpoint is
     set, GDB will adjust the breakpoint address to the beginning of
     the VLIW instruction.  Thus, we need to make the corresponding
     adjustment here when computing the stop address.  */

  if (gdbarch_adjust_breakpoint_address_p (gdbarch))
    {
      ecs->stop_func_start
	= gdbarch_adjust_breakpoint_address (gdbarch,
					     ecs->stop_func_start);
    }

  if (ecs->stop_func_start == ecs->event_thread->stop_pc ())
    {
      /* We are already there: stop now.  */
      end_stepping_range (ecs);
      return;
    }
  else
    {
      /* Put the step-breakpoint there and go until there.  */
      symtab_and_line sr_sal;
      sr_sal.pc = ecs->stop_func_start;
      sr_sal.section = find_pc_overlay (ecs->stop_func_start);
      sr_sal.pspace = get_frame_program_space (get_current_frame ());

      /* Do not specify what the fp should be when we stop since on
	 some machines the prologue is where the new fp value is
	 established.  */
      insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);

      /* And make sure stepping stops right away then.  */
      ecs->event_thread->control.step_range_end
	= ecs->event_thread->control.step_range_start;
    }
  keep_going (ecs);
}
8668
8669 /* Inferior has stepped backward into a subroutine call with source
8670 code that we should not step over. Do step to the beginning of the
8671 last line of code in it. */
8672
8673 static void
8674 handle_step_into_function_backward (struct gdbarch *gdbarch,
8675 struct execution_control_state *ecs)
8676 {
8677 struct compunit_symtab *cust;
8678 struct symtab_and_line stop_func_sal;
8679
8680 fill_in_stop_func (gdbarch, ecs);
8681
8682 cust = find_pc_compunit_symtab (ecs->event_thread->stop_pc ());
8683 if (cust != nullptr && cust->language () != language_asm)
8684 ecs->stop_func_start
8685 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
8686
8687 stop_func_sal = find_pc_line (ecs->event_thread->stop_pc (), 0);
8688
8689 /* OK, we're just going to keep stepping here. */
8690 if (stop_func_sal.pc == ecs->event_thread->stop_pc ())
8691 {
8692 /* We're there already. Just stop stepping now. */
8693 end_stepping_range (ecs);
8694 }
8695 else
8696 {
8697 /* Else just reset the step range and keep going.
8698 No step-resume breakpoint, they don't work for
8699 epilogues, which can have multiple entry paths. */
8700 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
8701 ecs->event_thread->control.step_range_end = stop_func_sal.end;
8702 keep_going (ecs);
8703 }
8704 return;
8705 }
8706
/* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
   This is used both for stepping past functions and for skipping over
   regions of code.  SR_TYPE selects between a normal and a
   high-priority step-resume breakpoint.  */

static void
insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
					struct symtab_and_line sr_sal,
					struct frame_id sr_id,
					enum bptype sr_type)
{
  /* There should never be more than one step-resume or longjmp-resume
     breakpoint per thread, so we should never be setting a new
     step_resume_breakpoint when one is already active.  */
  gdb_assert (inferior_thread ()->control.step_resume_breakpoint == nullptr);
  gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);

  infrun_debug_printf ("inserting step-resume breakpoint at %s",
		       paddress (gdbarch, sr_sal.pc));

  /* Release the momentary breakpoint from its owning pointer; the
     thread's control state owns it from here on.  */
  inferior_thread ()->control.step_resume_breakpoint
    = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type).release ();
}
8728
8729 void
8730 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
8731 struct symtab_and_line sr_sal,
8732 struct frame_id sr_id)
8733 {
8734 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
8735 sr_sal, sr_id,
8736 bp_step_resume);
8737 }
8738
/* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
   This is used to skip a potential signal handler.

   This is called with the interrupted function's frame.  The signal
   handler, when it returns, will resume the interrupted function at
   RETURN_FRAME.pc.  */

static void
insert_hp_step_resume_breakpoint_at_frame (const frame_info_ptr &return_frame)
{
  gdb_assert (return_frame != nullptr);

  struct gdbarch *gdbarch = get_frame_arch (return_frame);

  /* Build the SAL for the resume address of the interrupted frame.  */
  symtab_and_line sr_sal;
  sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
  sr_sal.section = find_pc_overlay (sr_sal.pc);
  sr_sal.pspace = get_frame_program_space (return_frame);

  insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
					  get_stack_frame_id (return_frame),
					  bp_hp_step_resume);
}
8762
/* Insert a "step-resume breakpoint" at the previous frame's PC.  This
   is used to skip a function after stepping into it (for "next" or if
   the called function has no debugging information).

   The current function has almost always been reached by single
   stepping a call or return instruction.  NEXT_FRAME belongs to the
   current function, and the breakpoint will be set at the caller's
   resume address.

   This is a separate function rather than reusing
   insert_hp_step_resume_breakpoint_at_frame in order to avoid
   get_prev_frame, which may stop prematurely (see the implementation
   of frame_unwind_caller_id for an example).  */

static void
insert_step_resume_breakpoint_at_caller (const frame_info_ptr &next_frame)
{
  /* We shouldn't have gotten here if we don't know where the call site
     is.  */
  gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));

  struct gdbarch *gdbarch = frame_unwind_caller_arch (next_frame);

  /* Build the SAL for the caller's resume address, unwound from
     NEXT_FRAME without materializing the previous frame.  */
  symtab_and_line sr_sal;
  sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
					frame_unwind_caller_pc (next_frame));
  sr_sal.section = find_pc_overlay (sr_sal.pc);
  sr_sal.pspace = frame_unwind_program_space (next_frame);

  insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
					frame_unwind_caller_id (next_frame));
}
8795
8796 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
8797 new breakpoint at the target of a jmp_buf. The handling of
8798 longjmp-resume uses the same mechanisms used for handling
8799 "step-resume" breakpoints. */
8800
8801 static void
8802 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
8803 {
8804 /* There should never be more than one longjmp-resume breakpoint per
8805 thread, so we should never be setting a new
8806 longjmp_resume_breakpoint when one is already active. */
8807 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == nullptr);
8808
8809 infrun_debug_printf ("inserting longjmp-resume breakpoint at %s",
8810 paddress (gdbarch, pc));
8811
8812 inferior_thread ()->control.exception_resume_breakpoint =
8813 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume).release ();
8814 }
8815
8816 /* Insert an exception resume breakpoint. TP is the thread throwing
8817 the exception. The block B is the block of the unwinder debug hook
8818 function. FRAME is the frame corresponding to the call to this
8819 function. SYM is the symbol of the function argument holding the
8820 target PC of the exception. */
8821
8822 static void
8823 insert_exception_resume_breakpoint (struct thread_info *tp,
8824 const struct block *b,
8825 const frame_info_ptr &frame,
8826 struct symbol *sym)
8827 {
8828 try
8829 {
8830 struct block_symbol vsym;
8831 struct value *value;
8832 CORE_ADDR handler;
8833 struct breakpoint *bp;
8834
8835 vsym = lookup_symbol_search_name (sym->search_name (),
8836 b, SEARCH_VAR_DOMAIN);
8837 value = read_var_value (vsym.symbol, vsym.block, frame);
8838 /* If the value was optimized out, revert to the old behavior. */
8839 if (! value->optimized_out ())
8840 {
8841 handler = value_as_address (value);
8842
8843 infrun_debug_printf ("exception resume at %lx",
8844 (unsigned long) handler);
8845
8846 /* set_momentary_breakpoint_at_pc creates a thread-specific
8847 breakpoint for the current inferior thread. */
8848 gdb_assert (tp == inferior_thread ());
8849 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
8850 handler,
8851 bp_exception_resume).release ();
8852
8853 tp->control.exception_resume_breakpoint = bp;
8854 }
8855 }
8856 catch (const gdb_exception_error &e)
8857 {
8858 /* We want to ignore errors here. */
8859 }
8860 }
8861
8862 /* A helper for check_exception_resume that sets an
8863 exception-breakpoint based on a SystemTap probe. */
8864
8865 static void
8866 insert_exception_resume_from_probe (struct thread_info *tp,
8867 const struct bound_probe *probe,
8868 const frame_info_ptr &frame)
8869 {
8870 struct value *arg_value;
8871 CORE_ADDR handler;
8872 struct breakpoint *bp;
8873
8874 arg_value = probe_safe_evaluate_at_pc (frame, 1);
8875 if (!arg_value)
8876 return;
8877
8878 handler = value_as_address (arg_value);
8879
8880 infrun_debug_printf ("exception resume at %s",
8881 paddress (probe->objfile->arch (), handler));
8882
8883 /* set_momentary_breakpoint_at_pc creates a thread-specific breakpoint
8884 for the current inferior thread. */
8885 gdb_assert (tp == inferior_thread ());
8886 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
8887 handler, bp_exception_resume).release ();
8888 tp->control.exception_resume_breakpoint = bp;
8889 }
8890
/* This is called when an exception has been intercepted.  Check to
   see whether the exception's destination is of interest, and if so,
   set an exception resume breakpoint there.  */

static void
check_exception_resume (struct execution_control_state *ecs,
			const frame_info_ptr &frame)
{
  struct bound_probe probe;
  struct symbol *func;

  /* First see if this exception unwinding breakpoint was set via a
     SystemTap probe point.  If so, the probe has two arguments: the
     CFA and the HANDLER.  We ignore the CFA, extract the handler, and
     set a breakpoint there.  */
  probe = find_probe_by_pc (get_frame_pc (frame));
  if (probe.prob)
    {
      insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
      return;
    }

  /* No probe; fall back to inspecting the debug hook's arguments.  */
  func = get_frame_function (frame);
  if (!func)
    return;

  try
    {
      const struct block *b;
      int argno = 0;

      /* The exception breakpoint is a thread-specific breakpoint on
	 the unwinder's debug hook, declared as:

	 void _Unwind_DebugHook (void *cfa, void *handler);

	 The CFA argument indicates the frame to which control is
	 about to be transferred.  HANDLER is the destination PC.

	 We ignore the CFA and set a temporary breakpoint at HANDLER.
	 This is not extremely efficient but it avoids issues in gdb
	 with computing the DWARF CFA, and it also works even in weird
	 cases such as throwing an exception from inside a signal
	 handler.  */

      b = func->value_block ();
      for (struct symbol *sym : block_iterator_range (b))
	{
	  if (!sym->is_argument ())
	    continue;

	  /* Skip the first argument (the CFA); the second argument is
	     the handler we want.  */
	  if (argno == 0)
	    ++argno;
	  else
	    {
	      insert_exception_resume_breakpoint (ecs->event_thread,
						  b, frame, sym);
	      break;
	    }
	}
    }
  catch (const gdb_exception_error &e)
    {
      /* Errors looking up the handler are ignored; we simply don't
	 set the breakpoint.  */
    }
}
8956
/* Stop waiting for events from the inferior: the current event should
   be reported as a user-visible stop.  */

static void
stop_waiting (struct execution_control_state *ecs)
{
  infrun_debug_printf ("stop_waiting");

  /* Let callers know we don't want to wait for the inferior anymore.  */
  ecs->wait_some_more = 0;
}
8965
/* Like keep_going, but passes the signal to the inferior, even if the
   signal is set to nopass.  */

static void
keep_going_pass_signal (struct execution_control_state *ecs)
{
  gdb_assert (ecs->event_thread->ptid == inferior_ptid);
  gdb_assert (!ecs->event_thread->resumed ());

  /* Save the pc before execution, to compare with pc after stop.  */
  ecs->event_thread->prev_pc
    = regcache_read_pc_protected (get_thread_regcache (ecs->event_thread));

  if (ecs->event_thread->control.trap_expected)
    {
      struct thread_info *tp = ecs->event_thread;

      infrun_debug_printf ("%s has trap_expected set, "
			   "resuming to collect trap",
			   tp->ptid.to_string ().c_str ());

      /* We haven't yet gotten our trap, and either: intercepted a
	 non-signal event (e.g., a fork); or took a signal which we
	 are supposed to pass through to the inferior.  Simply
	 continue.  */
      resume (ecs->event_thread->stop_signal ());
    }
  else if (step_over_info_valid_p ())
    {
      /* Another thread is stepping over a breakpoint in-line.  If
	 this thread needs a step-over too, queue the request.  In
	 either case, this resume must be deferred for later.  */
      struct thread_info *tp = ecs->event_thread;

      if (ecs->hit_singlestep_breakpoint
	  || thread_still_needs_step_over (tp))
	{
	  infrun_debug_printf ("step-over already in progress: "
			       "step-over for %s deferred",
			       tp->ptid.to_string ().c_str ());
	  global_thread_step_over_chain_enqueue (tp);
	}
      else
	infrun_debug_printf ("step-over in progress: resume of %s deferred",
			     tp->ptid.to_string ().c_str ());
    }
  else
    {
      regcache *regcache = get_thread_regcache (ecs->event_thread);
      int remove_bp;
      int remove_wps;
      step_over_what step_what;

      /* Either the trap was not expected, but we are continuing
	 anyway (if we got a signal, the user asked it be passed to
	 the child)
	 -- or --
	 We got our expected trap, but decided we should resume from
	 it.

	 We're going to run this baby now!

	 Note that insert_breakpoints won't try to re-insert
	 already inserted breakpoints.  Therefore, we don't
	 care if breakpoints were already inserted, or not.  */

      /* If we need to step over a breakpoint, and we're not using
	 displaced stepping to do so, insert all breakpoints
	 (watchpoints, etc.) but the one we're stepping over, step one
	 instruction, and then re-insert the breakpoint when that step
	 is finished.  */

      step_what = thread_still_needs_step_over (ecs->event_thread);

      remove_bp = (ecs->hit_singlestep_breakpoint
		   || (step_what & STEP_OVER_BREAKPOINT));
      remove_wps = (step_what & STEP_OVER_WATCHPOINT);

      /* We can't use displaced stepping if we need to step past a
	 watchpoint.  The instruction copied to the scratch pad would
	 still trigger the watchpoint.  */
      if (remove_bp
	  && (remove_wps || !use_displaced_stepping (ecs->event_thread)))
	{
	  set_step_over_info (ecs->event_thread->inf->aspace.get (),
			      regcache_read_pc (regcache), remove_wps,
			      ecs->event_thread->global_num);
	}
      else if (remove_wps)
	set_step_over_info (nullptr, 0, remove_wps, -1);

      /* If we now need to do an in-line step-over, we need to stop
	 all other threads.  Note this must be done before
	 insert_breakpoints below, because that removes the breakpoint
	 we're about to step over, otherwise other threads could miss
	 it.  */
      if (step_over_info_valid_p () && target_is_non_stop_p ())
	stop_all_threads ("starting in-line step-over");

      /* Stop stepping if inserting breakpoints fails.  */
      try
	{
	  insert_breakpoints ();
	}
      catch (const gdb_exception_error &e)
	{
	  exception_print (gdb_stderr, e);
	  stop_waiting (ecs);
	  clear_step_over_info ();
	  return;
	}

      /* Expect a trap if we're about to step over a breakpoint or a
	 watchpoint.  */
      ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);

      resume (ecs->event_thread->stop_signal ());
    }

  prepare_to_wait (ecs);
}
9085
9086 /* Called when we should continue running the inferior, because the
9087 current event doesn't cause a user visible stop. This does the
9088 resuming part; waiting for the next event is done elsewhere. */
9089
9090 static void
9091 keep_going (struct execution_control_state *ecs)
9092 {
9093 if (ecs->event_thread->control.trap_expected
9094 && ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
9095 ecs->event_thread->control.trap_expected = 0;
9096
9097 if (!signal_program[ecs->event_thread->stop_signal ()])
9098 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
9099 keep_going_pass_signal (ecs);
9100 }
9101
/* This function normally comes after a resume, before
   handle_inferior_event exits.  It takes care of any last bits of
   housekeeping, and sets the all-important wait_some_more flag.  */

static void
prepare_to_wait (struct execution_control_state *ecs)
{
  infrun_debug_printf ("prepare_to_wait");

  /* Tell the caller to keep fetching events from the target.  */
  ecs->wait_some_more = 1;

  /* If the target can't async, emulate it by marking the infrun event
     handler such that as soon as we get back to the event-loop, we
     immediately end up in fetch_inferior_event again calling
     target_wait.  */
  if (!target_can_async_p ())
    mark_infrun_async_event_handler ();
}
9120
/* We are done with the step range of a step/next/si/ni command.
   Called once for each n of a "step n" operation.  */

static void
end_stepping_range (struct execution_control_state *ecs)
{
  /* Record that the step command finished normally, then stop
     waiting for more events.  */
  ecs->event_thread->control.stop_step = 1;
  stop_waiting (ecs);
}
9130
/* Several print_*_reason functions to print why the inferior has stopped.
   We always print something when the inferior exits, or receives a signal.
   The rest of the cases are dealt with later on in normal_stop and
   print_it_typical.  Ideally there should be a call to one of these
   print_*_reason functions from handle_inferior_event each time
   stop_waiting is called.

   Note that we don't call these directly, instead we delegate that to
   the interpreters, through observers.  Interpreters then call these
   with whatever uiout is right.  */

void
print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
{
  annotate_signalled ();
  /* MI consumers get an explicit "reason" field in addition to the
     textual output below.  */
  if (uiout->is_mi_like_p ())
    uiout->field_string
      ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
  uiout->text ("\nProgram terminated with signal ");
  annotate_signal_name ();
  uiout->field_string ("signal-name",
		       gdb_signal_to_name (siggnal));
  annotate_signal_name_end ();
  uiout->text (", ");
  annotate_signal_string ();
  uiout->field_string ("signal-meaning",
		       gdb_signal_to_string (siggnal));
  annotate_signal_string_end ();
  uiout->text (".\n");
  uiout->text ("The program no longer exists.\n");
}
9162
9163 void
9164 print_exited_reason (struct ui_out *uiout, int exitstatus)
9165 {
9166 struct inferior *inf = current_inferior ();
9167 std::string pidstr = target_pid_to_str (ptid_t (inf->pid));
9168
9169 annotate_exited (exitstatus);
9170 if (exitstatus)
9171 {
9172 if (uiout->is_mi_like_p ())
9173 uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED));
9174 std::string exit_code_str
9175 = string_printf ("0%o", (unsigned int) exitstatus);
9176 uiout->message ("[Inferior %s (%s) exited with code %pF]\n",
9177 plongest (inf->num), pidstr.c_str (),
9178 string_field ("exit-code", exit_code_str.c_str ()));
9179 }
9180 else
9181 {
9182 if (uiout->is_mi_like_p ())
9183 uiout->field_string
9184 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
9185 uiout->message ("[Inferior %s (%s) exited normally]\n",
9186 plongest (inf->num), pidstr.c_str ());
9187 }
9188 }
9189
/* Report that the inferior received signal SIGGNAL (or stopped, when
   SIGGNAL is GDB_SIGNAL_0).  Annotation calls are interleaved with
   the output in a fixed order required by the annotation protocol.  */

void
print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
{
  struct thread_info *thr = inferior_thread ();

  infrun_debug_printf ("signal = %s", gdb_signal_to_string (siggnal));

  annotate_signal ();

  /* MI emits no leading "Thread"/"Program" text; its "reason" field
     is produced below alongside the signal name.  */
  if (uiout->is_mi_like_p ())
    ;
  else if (show_thread_that_caused_stop ())
    {
      uiout->text ("\nThread ");
      uiout->field_string ("thread-id", print_thread_id (thr));

      const char *name = thread_name (thr);
      if (name != nullptr)
	{
	  uiout->text (" \"");
	  uiout->field_string ("name", name);
	  uiout->text ("\"");
	}
    }
  else
    uiout->text ("\nProgram");

  if (siggnal == GDB_SIGNAL_0 && !uiout->is_mi_like_p ())
    uiout->text (" stopped");
  else
    {
      uiout->text (" received signal ");
      annotate_signal_name ();
      if (uiout->is_mi_like_p ())
	uiout->field_string
	  ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
      uiout->field_string ("signal-name", gdb_signal_to_name (siggnal));
      annotate_signal_name_end ();
      uiout->text (", ");
      annotate_signal_string ();
      uiout->field_string ("signal-meaning", gdb_signal_to_string (siggnal));

      /* Give the architecture a chance to append extra, gdbarch-specific
	 information about the signal.  */
      regcache *regcache = get_thread_regcache (thr);
      struct gdbarch *gdbarch = regcache->arch ();
      if (gdbarch_report_signal_info_p (gdbarch))
	gdbarch_report_signal_info (gdbarch, uiout, siggnal);

      annotate_signal_string_end ();
    }
  uiout->text (".\n");
}
9241
9242 void
9243 print_no_history_reason (struct ui_out *uiout)
9244 {
9245 if (uiout->is_mi_like_p ())
9246 uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_NO_HISTORY));
9247 else
9248 uiout->text ("\nNo more reverse-execution history.\n");
9249 }
9250
9251 /* Print current location without a level number, if we have changed
9252 functions or hit a breakpoint. Print source line if we have one.
9253 bpstat_print contains the logic deciding in detail what to print,
9254 based on the event(s) that just occurred. */
9255
9256 static void
9257 print_stop_location (const target_waitstatus &ws)
9258 {
9259 int bpstat_ret;
9260 enum print_what source_flag;
9261 int do_frame_printing = 1;
9262 struct thread_info *tp = inferior_thread ();
9263
9264 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws.kind ());
9265 switch (bpstat_ret)
9266 {
9267 case PRINT_UNKNOWN:
9268 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
9269 should) carry around the function and does (or should) use
9270 that when doing a frame comparison. */
9271 if (tp->control.stop_step
9272 && (tp->control.step_frame_id
9273 == get_frame_id (get_current_frame ()))
9274 && (tp->control.step_start_function
9275 == find_pc_function (tp->stop_pc ())))
9276 {
9277 /* Finished step, just print source line. */
9278 source_flag = SRC_LINE;
9279 }
9280 else
9281 {
9282 /* Print location and source line. */
9283 source_flag = SRC_AND_LOC;
9284 }
9285 break;
9286 case PRINT_SRC_AND_LOC:
9287 /* Print location and source line. */
9288 source_flag = SRC_AND_LOC;
9289 break;
9290 case PRINT_SRC_ONLY:
9291 source_flag = SRC_LINE;
9292 break;
9293 case PRINT_NOTHING:
9294 /* Something bogus. */
9295 source_flag = SRC_LINE;
9296 do_frame_printing = 0;
9297 break;
9298 default:
9299 internal_error (_("Unknown value."));
9300 }
9301
9302 /* The behavior of this routine with respect to the source
9303 flag is:
9304 SRC_LINE: Print only source line
9305 LOCATION: Print only location
9306 SRC_AND_LOC: Print location and source line. */
9307 if (do_frame_printing)
9308 print_stack_frame (get_selected_frame (nullptr), 0, source_flag, 1);
9309 }
9310
/* See `print_stop_event` in infrun.h.  */

static void
do_print_stop_event (struct ui_out *uiout, bool displays)
{
  struct target_waitstatus last;
  struct thread_info *tp;

  get_last_target_status (nullptr, nullptr, &last);

  {
    /* Redirect current_uiout to UIOUT for the duration of the
       location/display printing, then restore it.  */
    scoped_restore save_uiout = make_scoped_restore (&current_uiout, uiout);

    print_stop_location (last);

    /* Display the auto-display expressions.  */
    if (displays)
      do_displays ();
  }

  tp = inferior_thread ();
  if (tp->thread_fsm () != nullptr
      && tp->thread_fsm ()->finished_p ())
    {
      struct return_value_info *rv;

      /* The command's state machine completed; show the recorded
	 return value, if any.  */
      rv = tp->thread_fsm ()->return_value ();
      if (rv != nullptr)
	print_return_value (uiout, rv);
    }
}
9342
/* See infrun.h.  This function itself sets up buffered output for the
   duration of do_print_stop_event, which performs the actual event
   printing.  */

void
print_stop_event (struct ui_out *uiout, bool displays)
{
  do_with_buffered_output (do_print_stop_event, uiout, displays);
}
9352
9353 /* See infrun.h. */
9354
9355 void
9356 maybe_remove_breakpoints (void)
9357 {
9358 if (!breakpoints_should_be_inserted_now () && target_has_execution ())
9359 {
9360 if (remove_breakpoints ())
9361 {
9362 target_terminal::ours_for_output ();
9363 gdb_printf (_("Cannot remove breakpoints because "
9364 "program is no longer writable.\nFurther "
9365 "execution is probably impossible.\n"));
9366 }
9367 }
9368 }
9369
/* The execution context that just caused a normal stop.  */

struct stop_context
{
  stop_context ();

  DISABLE_COPY_AND_ASSIGN (stop_context);

  /* Return true if the current context no longer matches this saved
     one (see definition below).  */
  bool changed () const;

  /* The stop ID.  */
  ULONGEST stop_id;

  /* The event PTID.  */

  ptid_t ptid;

  /* If stopped for a thread event, this is the thread that caused the
     stop.  */
  thread_info_ref thread;

  /* The inferior that caused the stop.  */
  int inf_num;
};
9394
/* Initializes a new stop context.  If stopped for a thread event, this
   takes a strong reference to the thread.  */

stop_context::stop_context ()
{
  /* Snapshot the identifiers that stop_context::changed compares.  */
  stop_id = get_stop_id ();
  ptid = inferior_ptid;
  inf_num = current_inferior ()->num;

  if (inferior_ptid != null_ptid)
    {
      /* Take a strong reference so that the thread can't be deleted
	 yet.  */
      thread = thread_info_ref::new_reference (inferior_thread ());
    }
}
9411
9412 /* Return true if the current context no longer matches the saved stop
9413 context. */
9414
9415 bool
9416 stop_context::changed () const
9417 {
9418 if (ptid != inferior_ptid)
9419 return true;
9420 if (inf_num != current_inferior ()->num)
9421 return true;
9422 if (thread != nullptr && thread->state != THREAD_STOPPED)
9423 return true;
9424 if (get_stop_id () != stop_id)
9425 return true;
9426 return false;
9427 }
9428
/* See infrun.h.  */

bool
normal_stop ()
{
  struct target_waitstatus last;

  get_last_target_status (nullptr, nullptr, &last);

  /* Bump the stop counter; this invalidates any stop_context saved
     before this point (see stop_context::changed).  */
  new_stop_id ();

  /* If an exception is thrown from this point on, make sure to
     propagate GDB's knowledge of the executing state to the
     frontend/user running state.  A QUIT is an easy exception to see
     here, so do this before any filtered output.  */

  ptid_t finish_ptid = null_ptid;

  if (!non_stop)
    finish_ptid = minus_one_ptid;
  else if (last.kind () == TARGET_WAITKIND_SIGNALLED
	   || last.kind () == TARGET_WAITKIND_EXITED)
    {
      /* On some targets, we may still have live threads in the
	 inferior when we get a process exit event.  E.g., for
	 "checkpoint", when the current checkpoint/fork exits,
	 linux-fork.c automatically switches to another fork from
	 within target_mourn_inferior.  */
      if (inferior_ptid != null_ptid)
	finish_ptid = ptid_t (inferior_ptid.pid ());
    }
  else if (last.kind () != TARGET_WAITKIND_NO_RESUMED
	   && last.kind () != TARGET_WAITKIND_THREAD_EXITED)
    finish_ptid = inferior_ptid;

  /* Deferred: only engage the finish-state cleanup when some thread
     actually needs to be marked stopped.  */
  std::optional<scoped_finish_thread_state> maybe_finish_thread_state;
  if (finish_ptid != null_ptid)
    {
      maybe_finish_thread_state.emplace
	(user_visible_resume_target (finish_ptid), finish_ptid);
    }

  /* As we're presenting a stop, and potentially removing breakpoints,
     update the thread list so we can tell whether there are threads
     running on the target.  With target remote, for example, we can
     only learn about new threads when we explicitly update the thread
     list.  Do this before notifying the interpreters about signal
     stops, end of stepping ranges, etc., so that the "new thread"
     output is emitted before e.g., "Program received signal FOO",
     instead of after.  */
  update_thread_list ();

  if (last.kind () == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
    notify_signal_received (inferior_thread ()->stop_signal ());

  /* As with the notification of thread events, we want to delay
     notifying the user that we've switched thread context until
     the inferior actually stops.

     There's no point in saying anything if the inferior has exited.
     Note that SIGNALLED here means "exited with a signal", not
     "received a signal".

     Also skip saying anything in non-stop mode.  In that mode, as we
     don't want GDB to switch threads behind the user's back, to avoid
     races where the user is typing a command to apply to thread x,
     but GDB switches to thread y before the user finishes entering
     the command, fetch_inferior_event installs a cleanup to restore
     the current thread back to the thread the user had selected right
     after this event is handled, so we're not really switching, only
     informing of a stop.  */
  if (!non_stop)
    {
      if ((last.kind () != TARGET_WAITKIND_SIGNALLED
	   && last.kind () != TARGET_WAITKIND_EXITED
	   && last.kind () != TARGET_WAITKIND_NO_RESUMED
	   && last.kind () != TARGET_WAITKIND_THREAD_EXITED)
	  && target_has_execution ()
	  && previous_thread != inferior_thread ())
	{
	  SWITCH_THRU_ALL_UIS ()
	    {
	      target_terminal::ours_for_output ();
	      gdb_printf (_("[Switching to %s]\n"),
			  target_pid_to_str (inferior_ptid).c_str ());
	      annotate_thread_changed ();
	    }
	}

      update_previous_thread ();
    }

  if (last.kind () == TARGET_WAITKIND_NO_RESUMED
      || last.kind () == TARGET_WAITKIND_THREAD_EXITED)
    {
      stop_print_frame = false;

      SWITCH_THRU_ALL_UIS ()
	if (current_ui->prompt_state == PROMPT_BLOCKED)
	  {
	    target_terminal::ours_for_output ();
	    if (last.kind () == TARGET_WAITKIND_NO_RESUMED)
	      gdb_printf (_("No unwaited-for children left.\n"));
	    else if (last.kind () == TARGET_WAITKIND_THREAD_EXITED)
	      gdb_printf (_("Command aborted, thread exited.\n"));
	    else
	      gdb_assert_not_reached ("unhandled");
	  }
    }

  /* Note: this depends on the update_thread_list call above.  */
  maybe_remove_breakpoints ();

  /* If an auto-display called a function and that got a signal,
     delete that auto-display to avoid an infinite recursion.  */

  if (stopped_by_random_signal)
    disable_current_display ();

  SWITCH_THRU_ALL_UIS ()
    {
      async_enable_stdin ();
    }

  /* Let the user/frontend see the threads as stopped.  */
  maybe_finish_thread_state.reset ();

  /* Select innermost stack frame - i.e., current frame is frame 0,
     and current location is based on that.  Handle the case where the
     dummy call is returning after being stopped.  E.g. the dummy call
     previously hit a breakpoint.  (If the dummy call returns
     normally, we won't reach here.)  Do this before the stop hook is
     run, so that it doesn't get to see the temporary dummy frame,
     which is not where we'll present the stop.  */
  if (has_stack_frames ())
    {
      if (stop_stack_dummy == STOP_STACK_DUMMY)
	{
	  /* Pop the empty frame that contains the stack dummy.  This
	     also restores inferior state prior to the call (struct
	     infcall_suspend_state).  */
	  frame_info_ptr frame = get_current_frame ();

	  gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
	  frame_pop (frame);
	  /* frame_pop calls reinit_frame_cache as the last thing it
	     does which means there's now no selected frame.  */
	}

      select_frame (get_current_frame ());

      /* Set the current source location.  */
      set_current_sal_from_frame (get_current_frame ());
    }

  /* Look up the hook_stop and run it (CLI internally handles problem
     of stop_command's pre-hook not existing).  */
  stop_context saved_context;

  try
    {
      execute_cmd_pre_hook (stop_command);
    }
  catch (const gdb_exception_error &ex)
    {
      exception_fprintf (gdb_stderr, ex,
			 "Error while running hook_stop:\n");
    }

  /* If the stop hook resumes the target, then there's no point in
     trying to notify about the previous stop; its context is
     gone.  Likewise if the command switches thread or inferior --
     the observers would print a stop for the wrong
     thread/inferior.  */
  if (saved_context.changed ())
    return true;

  /* Notify observers about the stop.  This is where the interpreters
     print the stop event.  */
  notify_normal_stop ((inferior_ptid != null_ptid
		       ? inferior_thread ()->control.stop_bpstat
		       : nullptr),
		      stop_print_frame);
  annotate_stopped ();

  if (target_has_execution ())
    {
      if (last.kind () != TARGET_WAITKIND_SIGNALLED
	  && last.kind () != TARGET_WAITKIND_EXITED
	  && last.kind () != TARGET_WAITKIND_NO_RESUMED
	  && last.kind () != TARGET_WAITKIND_THREAD_EXITED)
	/* Delete the breakpoint we stopped at, if it wants to be deleted.
	   Delete any breakpoint that is to be deleted at the next stop.  */
	breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
    }

  /* The stop was presented normally.  */
  return false;
}
9627 \f
/* Return the "stop" setting for SIGNO: non-zero if GDB stops when
   the inferior receives that (GDB-internal) signal.  */

int
signal_stop_state (int signo)
{
  return signal_stop[signo];
}
9633
/* Return the "print" setting for SIGNO: non-zero if GDB mentions the
   (GDB-internal) signal when the inferior receives it.  */

int
signal_print_state (int signo)
{
  return signal_print[signo];
}
9639
/* Return the "pass" setting for SIGNO: non-zero if the (GDB-internal)
   signal is forwarded to the program.  */

int
signal_pass_state (int signo)
{
  return signal_program[signo];
}
9645
9646 static void
9647 signal_cache_update (int signo)
9648 {
9649 if (signo == -1)
9650 {
9651 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
9652 signal_cache_update (signo);
9653
9654 return;
9655 }
9656
9657 signal_pass[signo] = (signal_stop[signo] == 0
9658 && signal_print[signo] == 0
9659 && signal_program[signo] == 1
9660 && signal_catch[signo] == 0);
9661 }
9662
9663 int
9664 signal_stop_update (int signo, int state)
9665 {
9666 int ret = signal_stop[signo];
9667
9668 signal_stop[signo] = state;
9669 signal_cache_update (signo);
9670 return ret;
9671 }
9672
9673 int
9674 signal_print_update (int signo, int state)
9675 {
9676 int ret = signal_print[signo];
9677
9678 signal_print[signo] = state;
9679 signal_cache_update (signo);
9680 return ret;
9681 }
9682
9683 int
9684 signal_pass_update (int signo, int state)
9685 {
9686 int ret = signal_program[signo];
9687
9688 signal_program[signo] = state;
9689 signal_cache_update (signo);
9690 return ret;
9691 }
9692
9693 /* Update the global 'signal_catch' from INFO and notify the
9694 target. */
9695
9696 void
9697 signal_catch_update (const unsigned int *info)
9698 {
9699 int i;
9700
9701 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
9702 signal_catch[i] = info[i] > 0;
9703 signal_cache_update (-1);
9704 target_pass_signals (signal_pass);
9705 }
9706
/* Print the column headers matching the rows that sig_print_info
   produces.  */

static void
sig_print_header (void)
{
  gdb_printf (_("Signal Stop\tPrint\tPass "
		"to program\tDescription\n"));
}
9713
9714 static void
9715 sig_print_info (enum gdb_signal oursig)
9716 {
9717 const char *name = gdb_signal_to_name (oursig);
9718 int name_padding = 13 - strlen (name);
9719
9720 if (name_padding <= 0)
9721 name_padding = 0;
9722
9723 gdb_printf ("%s", name);
9724 gdb_printf ("%*.*s ", name_padding, name_padding, " ");
9725 gdb_printf ("%s\t", signal_stop[oursig] ? "Yes" : "No");
9726 gdb_printf ("%s\t", signal_print[oursig] ? "Yes" : "No");
9727 gdb_printf ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
9728 gdb_printf ("%s\n", gdb_signal_to_string (oursig));
9729 }
9730
9731 /* Specify how various signals in the inferior should be handled. */
9732
9733 static void
9734 handle_command (const char *args, int from_tty)
9735 {
9736 int digits, wordlen;
9737 int sigfirst, siglast;
9738 enum gdb_signal oursig;
9739 int allsigs;
9740
9741 if (args == nullptr)
9742 {
9743 error_no_arg (_("signal to handle"));
9744 }
9745
9746 /* Allocate and zero an array of flags for which signals to handle. */
9747
9748 const size_t nsigs = GDB_SIGNAL_LAST;
9749 unsigned char sigs[nsigs] {};
9750
9751 /* Break the command line up into args. */
9752
9753 gdb_argv built_argv (args);
9754
9755 /* Walk through the args, looking for signal oursigs, signal names, and
9756 actions. Signal numbers and signal names may be interspersed with
9757 actions, with the actions being performed for all signals cumulatively
9758 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
9759
9760 for (char *arg : built_argv)
9761 {
9762 wordlen = strlen (arg);
9763 for (digits = 0; isdigit (arg[digits]); digits++)
9764 {;
9765 }
9766 allsigs = 0;
9767 sigfirst = siglast = -1;
9768
9769 if (wordlen >= 1 && !strncmp (arg, "all", wordlen))
9770 {
9771 /* Apply action to all signals except those used by the
9772 debugger. Silently skip those. */
9773 allsigs = 1;
9774 sigfirst = 0;
9775 siglast = nsigs - 1;
9776 }
9777 else if (wordlen >= 1 && !strncmp (arg, "stop", wordlen))
9778 {
9779 SET_SIGS (nsigs, sigs, signal_stop);
9780 SET_SIGS (nsigs, sigs, signal_print);
9781 }
9782 else if (wordlen >= 1 && !strncmp (arg, "ignore", wordlen))
9783 {
9784 UNSET_SIGS (nsigs, sigs, signal_program);
9785 }
9786 else if (wordlen >= 2 && !strncmp (arg, "print", wordlen))
9787 {
9788 SET_SIGS (nsigs, sigs, signal_print);
9789 }
9790 else if (wordlen >= 2 && !strncmp (arg, "pass", wordlen))
9791 {
9792 SET_SIGS (nsigs, sigs, signal_program);
9793 }
9794 else if (wordlen >= 3 && !strncmp (arg, "nostop", wordlen))
9795 {
9796 UNSET_SIGS (nsigs, sigs, signal_stop);
9797 }
9798 else if (wordlen >= 3 && !strncmp (arg, "noignore", wordlen))
9799 {
9800 SET_SIGS (nsigs, sigs, signal_program);
9801 }
9802 else if (wordlen >= 4 && !strncmp (arg, "noprint", wordlen))
9803 {
9804 UNSET_SIGS (nsigs, sigs, signal_print);
9805 UNSET_SIGS (nsigs, sigs, signal_stop);
9806 }
9807 else if (wordlen >= 4 && !strncmp (arg, "nopass", wordlen))
9808 {
9809 UNSET_SIGS (nsigs, sigs, signal_program);
9810 }
9811 else if (digits > 0)
9812 {
9813 /* It is numeric. The numeric signal refers to our own
9814 internal signal numbering from target.h, not to host/target
9815 signal number. This is a feature; users really should be
9816 using symbolic names anyway, and the common ones like
9817 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
9818
9819 sigfirst = siglast = (int)
9820 gdb_signal_from_command (atoi (arg));
9821 if (arg[digits] == '-')
9822 {
9823 siglast = (int)
9824 gdb_signal_from_command (atoi (arg + digits + 1));
9825 }
9826 if (sigfirst > siglast)
9827 {
9828 /* Bet he didn't figure we'd think of this case... */
9829 std::swap (sigfirst, siglast);
9830 }
9831 }
9832 else
9833 {
9834 oursig = gdb_signal_from_name (arg);
9835 if (oursig != GDB_SIGNAL_UNKNOWN)
9836 {
9837 sigfirst = siglast = (int) oursig;
9838 }
9839 else
9840 {
9841 /* Not a number and not a recognized flag word => complain. */
9842 error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg);
9843 }
9844 }
9845
9846 /* If any signal numbers or symbol names were found, set flags for
9847 which signals to apply actions to. */
9848
9849 for (int signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
9850 {
9851 switch ((enum gdb_signal) signum)
9852 {
9853 case GDB_SIGNAL_TRAP:
9854 case GDB_SIGNAL_INT:
9855 if (!allsigs && !sigs[signum])
9856 {
9857 if (query (_("%s is used by the debugger.\n\
9858 Are you sure you want to change it? "),
9859 gdb_signal_to_name ((enum gdb_signal) signum)))
9860 {
9861 sigs[signum] = 1;
9862 }
9863 else
9864 gdb_printf (_("Not confirmed, unchanged.\n"));
9865 }
9866 break;
9867 case GDB_SIGNAL_0:
9868 case GDB_SIGNAL_DEFAULT:
9869 case GDB_SIGNAL_UNKNOWN:
9870 /* Make sure that "all" doesn't print these. */
9871 break;
9872 default:
9873 sigs[signum] = 1;
9874 break;
9875 }
9876 }
9877 }
9878
9879 for (int signum = 0; signum < nsigs; signum++)
9880 if (sigs[signum])
9881 {
9882 signal_cache_update (-1);
9883 target_pass_signals (signal_pass);
9884 target_program_signals (signal_program);
9885
9886 if (from_tty)
9887 {
9888 /* Show the results. */
9889 sig_print_header ();
9890 for (; signum < nsigs; signum++)
9891 if (sigs[signum])
9892 sig_print_info ((enum gdb_signal) signum);
9893 }
9894
9895 break;
9896 }
9897 }
9898
/* Complete the "handle" command.  */

static void
handle_completer (struct cmd_list_element *ignore,
		  completion_tracker &tracker,
		  const char *text, const char *word)
{
  /* The action keywords accepted by handle_command.  */
  static const char * const keywords[] =
    {
      "all",
      "stop",
      "ignore",
      "print",
      "pass",
      "nostop",
      "noignore",
      "noprint",
      "nopass",
      nullptr,
    };

  /* Offer signal names as well as action keywords.  */
  signal_completer (ignore, tracker, text, word);
  complete_on_enum (tracker, keywords, word, word);
}
9923
9924 enum gdb_signal
9925 gdb_signal_from_command (int num)
9926 {
9927 if (num >= 1 && num <= 15)
9928 return (enum gdb_signal) num;
9929 error (_("Only signals 1-15 are valid as numeric signals.\n\
9930 Use \"info signals\" for a list of symbolic signals."));
9931 }
9932
9933 /* Print current contents of the tables set by the handle command.
9934 It is possible we should just be printing signals actually used
9935 by the current target (but for things to work right when switching
9936 targets, all signals should be in the signal tables). */
9937
9938 static void
9939 info_signals_command (const char *signum_exp, int from_tty)
9940 {
9941 enum gdb_signal oursig;
9942
9943 sig_print_header ();
9944
9945 if (signum_exp)
9946 {
9947 /* First see if this is a symbol name. */
9948 oursig = gdb_signal_from_name (signum_exp);
9949 if (oursig == GDB_SIGNAL_UNKNOWN)
9950 {
9951 /* No, try numeric. */
9952 oursig =
9953 gdb_signal_from_command (parse_and_eval_long (signum_exp));
9954 }
9955 sig_print_info (oursig);
9956 return;
9957 }
9958
9959 gdb_printf ("\n");
9960 /* These ugly casts brought to you by the native VAX compiler. */
9961 for (oursig = GDB_SIGNAL_FIRST;
9962 (int) oursig < (int) GDB_SIGNAL_LAST;
9963 oursig = (enum gdb_signal) ((int) oursig + 1))
9964 {
9965 QUIT;
9966
9967 if (oursig != GDB_SIGNAL_UNKNOWN
9968 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
9969 sig_print_info (oursig);
9970 }
9971
9972 gdb_printf (_("\nUse the \"handle\" command "
9973 "to change these tables.\n"));
9974 }
9975
9976 /* The $_siginfo convenience variable is a bit special. We don't know
9977 for sure the type of the value until we actually have a chance to
9978 fetch the data. The type can change depending on gdbarch, so it is
9979 also dependent on which thread you have selected.
9980
9981 1. making $_siginfo be an internalvar that creates a new value on
9982 access.
9983
9984 2. making the value of $_siginfo be an lval_computed value. */
9985
9986 /* This function implements the lval_computed support for reading a
9987 $_siginfo value. */
9988
9989 static void
9990 siginfo_value_read (struct value *v)
9991 {
9992 LONGEST transferred;
9993
9994 /* If we can access registers, so can we access $_siginfo. Likewise
9995 vice versa. */
9996 validate_registers_access ();
9997
9998 transferred =
9999 target_read (current_inferior ()->top_target (),
10000 TARGET_OBJECT_SIGNAL_INFO,
10001 nullptr,
10002 v->contents_all_raw ().data (),
10003 v->offset (),
10004 v->type ()->length ());
10005
10006 if (transferred != v->type ()->length ())
10007 error (_("Unable to read siginfo"));
10008 }
10009
10010 /* This function implements the lval_computed support for writing a
10011 $_siginfo value. */
10012
10013 static void
10014 siginfo_value_write (struct value *v, struct value *fromval)
10015 {
10016 LONGEST transferred;
10017
10018 /* If we can access registers, so can we access $_siginfo. Likewise
10019 vice versa. */
10020 validate_registers_access ();
10021
10022 transferred = target_write (current_inferior ()->top_target (),
10023 TARGET_OBJECT_SIGNAL_INFO,
10024 nullptr,
10025 fromval->contents_all_raw ().data (),
10026 v->offset (),
10027 fromval->type ()->length ());
10028
10029 if (transferred != fromval->type ()->length ())
10030 error (_("Unable to write siginfo"));
10031 }
10032
/* The lval_computed callbacks backing the $_siginfo convenience
   variable.  */

static const struct lval_funcs siginfo_value_funcs =
  {
    siginfo_value_read,
    siginfo_value_write
  };
10038
10039 /* Return a new value with the correct type for the siginfo object of
10040 the current thread using architecture GDBARCH. Return a void value
10041 if there's no object available. */
10042
10043 static struct value *
10044 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
10045 void *ignore)
10046 {
10047 if (target_has_stack ()
10048 && inferior_ptid != null_ptid
10049 && gdbarch_get_siginfo_type_p (gdbarch))
10050 {
10051 struct type *type = gdbarch_get_siginfo_type (gdbarch);
10052
10053 return value::allocate_computed (type, &siginfo_value_funcs, nullptr);
10054 }
10055
10056 return value::allocate (builtin_type (gdbarch)->builtin_void);
10057 }
10058
10059 \f
/* infcall_suspend_state contains state about the program itself like its
   registers and any signal it received when it last stopped.
   This state must be restored regardless of how the inferior function call
   ends (either successfully, or after it hits a breakpoint or signal)
   if the program is to properly continue where it left off.  */

class infcall_suspend_state
{
public:
  /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
     once the inferior function call has finished.  */
  infcall_suspend_state (struct gdbarch *gdbarch,
			 const struct thread_info *tp,
			 struct regcache *regcache)
    : m_registers (new readonly_detached_regcache (*regcache))
  {
    tp->save_suspend_to (m_thread_suspend);

    gdb::unique_xmalloc_ptr<gdb_byte> siginfo_data;

    if (gdbarch_get_siginfo_type_p (gdbarch))
      {
	struct type *type = gdbarch_get_siginfo_type (gdbarch);
	size_t len = type->length ();

	siginfo_data.reset ((gdb_byte *) xmalloc (len));

	/* Read the current siginfo object from the target; on any
	   short read, act as if no siginfo were available.  */
	if (target_read (current_inferior ()->top_target (),
			 TARGET_OBJECT_SIGNAL_INFO, nullptr,
			 siginfo_data.get (), 0, len) != len)
	  {
	    /* Errors ignored.  */
	    siginfo_data.reset (nullptr);
	  }
      }

    if (siginfo_data)
      {
	m_siginfo_gdbarch = gdbarch;
	m_siginfo_data = std::move (siginfo_data);
      }
  }

  /* Return a pointer to the stored register state.  */

  readonly_detached_regcache *registers () const
  {
    return m_registers.get ();
  }

  /* Restores the stored state into GDBARCH, TP, and REGCACHE.  */

  void restore (struct gdbarch *gdbarch,
		struct thread_info *tp,
		struct regcache *regcache) const
  {
    tp->restore_suspend_from (m_thread_suspend);

    /* Only write siginfo back when it was captured for this same
       architecture -- the buffer layout is gdbarch-specific.  */
    if (m_siginfo_gdbarch == gdbarch)
      {
	struct type *type = gdbarch_get_siginfo_type (gdbarch);

	/* Errors ignored.  */
	target_write (current_inferior ()->top_target (),
		      TARGET_OBJECT_SIGNAL_INFO, nullptr,
		      m_siginfo_data.get (), 0, type->length ());
      }

    /* The inferior can be gone if the user types "print exit(0)"
       (and perhaps other times).  */
    if (target_has_execution ())
      /* NB: The register write goes through to the target.  */
      regcache->restore (registers ());
  }

private:
  /* How the current thread stopped before the inferior function call was
     executed.  */
  struct thread_suspend_state m_thread_suspend;

  /* The registers before the inferior function call was executed.  */
  std::unique_ptr<readonly_detached_regcache> m_registers;

  /* Format of SIGINFO_DATA or NULL if it is not present.  */
  struct gdbarch *m_siginfo_gdbarch = nullptr;

  /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
     gdbarch_get_siginfo_type ()->length ().  For different gdbarch the
     content would be invalid.  */
  gdb::unique_xmalloc_ptr<gdb_byte> m_siginfo_data;
};
10151
10152 infcall_suspend_state_up
10153 save_infcall_suspend_state ()
10154 {
10155 struct thread_info *tp = inferior_thread ();
10156 regcache *regcache = get_thread_regcache (tp);
10157 struct gdbarch *gdbarch = regcache->arch ();
10158
10159 infcall_suspend_state_up inf_state
10160 (new struct infcall_suspend_state (gdbarch, tp, regcache));
10161
10162 /* Having saved the current state, adjust the thread state, discarding
10163 any stop signal information. The stop signal is not useful when
10164 starting an inferior function call, and run_inferior_call will not use
10165 the signal due to its `proceed' call with GDB_SIGNAL_0. */
10166 tp->set_stop_signal (GDB_SIGNAL_0);
10167
10168 return inf_state;
10169 }
10170
10171 /* Restore inferior session state to INF_STATE. */
10172
10173 void
10174 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
10175 {
10176 struct thread_info *tp = inferior_thread ();
10177 regcache *regcache = get_thread_regcache (inferior_thread ());
10178 struct gdbarch *gdbarch = regcache->arch ();
10179
10180 inf_state->restore (gdbarch, tp, regcache);
10181 discard_infcall_suspend_state (inf_state);
10182 }
10183
/* Release INF_STATE without restoring it; used when the infcall's
   effects should be kept.  */

void
discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  delete inf_state;
}
10189
/* Return the register snapshot captured in INF_STATE; the snapshot
   remains owned by INF_STATE.  */

readonly_detached_regcache *
get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
{
  return inf_state->registers ();
}
10195
/* infcall_control_state contains state regarding gdb's control of the
   inferior itself like stepping control.  It also contains session state like
   the user's currently selected frame.  */

struct infcall_control_state
{
  struct thread_control_state thread_control;
  struct inferior_control_state inferior_control;

  /* Other fields:  */
  /* Whether the stop was at a call-dummy frame.  */
  enum stop_stack_kind stop_stack_dummy = STOP_NONE;
  /* Whether the stop was caused by an unexpected signal.  */
  int stopped_by_random_signal = 0;

  /* ID and level of the selected frame when the inferior function
     call was made.  */
  struct frame_id selected_frame_id {};
  int selected_frame_level = -1;
};
10214
10215 /* Save all of the information associated with the inferior<==>gdb
10216 connection. */
10217
10218 infcall_control_state_up
10219 save_infcall_control_state ()
10220 {
10221 infcall_control_state_up inf_status (new struct infcall_control_state);
10222 struct thread_info *tp = inferior_thread ();
10223 struct inferior *inf = current_inferior ();
10224
10225 inf_status->thread_control = tp->control;
10226 inf_status->inferior_control = inf->control;
10227
10228 tp->control.step_resume_breakpoint = nullptr;
10229 tp->control.exception_resume_breakpoint = nullptr;
10230
10231 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
10232 chain. If caller's caller is walking the chain, they'll be happier if we
10233 hand them back the original chain when restore_infcall_control_state is
10234 called. */
10235 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
10236
10237 /* Other fields: */
10238 inf_status->stop_stack_dummy = stop_stack_dummy;
10239 inf_status->stopped_by_random_signal = stopped_by_random_signal;
10240
10241 save_selected_frame (&inf_status->selected_frame_id,
10242 &inf_status->selected_frame_level);
10243
10244 return inf_status;
10245 }
10246
10247 /* Restore inferior session state to INF_STATUS. */
10248
void
restore_infcall_control_state (struct infcall_control_state *inf_status)
{
  struct thread_info *tp = inferior_thread ();
  struct inferior *inf = current_inferior ();

  /* Any resume breakpoints the thread acquired during the infcall are
     about to be replaced by the saved ones; schedule them for deletion
     at the next stop.  */
  if (tp->control.step_resume_breakpoint)
    tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;

  if (tp->control.exception_resume_breakpoint)
    tp->control.exception_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  /* Handle the bpstat_copy of the chain.  */
  bpstat_clear (&tp->control.stop_bpstat);

  /* Reinstate the saved control state (includes the original bpstat
     chain — see save_infcall_control_state).  */
  tp->control = inf_status->thread_control;
  inf->control = inf_status->inferior_control;

  /* Other fields:  */
  stop_stack_dummy = inf_status->stop_stack_dummy;
  stopped_by_random_signal = inf_status->stopped_by_random_signal;

  /* Only attempt to reselect the saved frame when there is a stack to
     walk.  */
  if (target_has_stack ())
    {
      restore_selected_frame (inf_status->selected_frame_id,
			      inf_status->selected_frame_level);
    }

  /* Restoring consumes INF_STATUS; the caller must not reuse it.  */
  delete inf_status;
}
10280
10281 void
10282 discard_infcall_control_state (struct infcall_control_state *inf_status)
10283 {
10284 if (inf_status->thread_control.step_resume_breakpoint)
10285 inf_status->thread_control.step_resume_breakpoint->disposition
10286 = disp_del_at_next_stop;
10287
10288 if (inf_status->thread_control.exception_resume_breakpoint)
10289 inf_status->thread_control.exception_resume_breakpoint->disposition
10290 = disp_del_at_next_stop;
10291
10292 /* See save_infcall_control_state for info on stop_bpstat. */
10293 bpstat_clear (&inf_status->thread_control.stop_bpstat);
10294
10295 delete inf_status;
10296 }
10297 \f
10298 /* See infrun.h. */
10299
10300 void
10301 clear_exit_convenience_vars (void)
10302 {
10303 clear_internalvar (lookup_internalvar ("_exitsignal"));
10304 clear_internalvar (lookup_internalvar ("_exitcode"));
10305 }
10306 \f
10307
/* User interface for reverse debugging:
   Set exec-direction / show exec-direction commands
   (errors out unless the target supports reverse execution,
   as reported by target_can_execute_reverse).  */
10311
/* The direction execution commands currently run in; consulted by the
   resume machinery.  */
enum exec_direction_kind execution_direction = EXEC_FORWARD;
static const char exec_forward[] = "forward";
static const char exec_reverse[] = "reverse";
/* Backing store for "set exec-direction"; translated into
   EXECUTION_DIRECTION by set_exec_direction_func.  */
static const char *exec_direction = exec_forward;
/* Valid values for "set exec-direction", nullptr-terminated.  */
static const char *const exec_direction_names[] = {
  exec_forward,
  exec_reverse,
  nullptr
};
10321
10322 static void
10323 set_exec_direction_func (const char *args, int from_tty,
10324 struct cmd_list_element *cmd)
10325 {
10326 if (target_can_execute_reverse ())
10327 {
10328 if (!strcmp (exec_direction, exec_forward))
10329 execution_direction = EXEC_FORWARD;
10330 else if (!strcmp (exec_direction, exec_reverse))
10331 execution_direction = EXEC_REVERSE;
10332 }
10333 else
10334 {
10335 exec_direction = exec_forward;
10336 error (_("Target does not support this operation."));
10337 }
10338 }
10339
10340 static void
10341 show_exec_direction_func (struct ui_file *out, int from_tty,
10342 struct cmd_list_element *cmd, const char *value)
10343 {
10344 switch (execution_direction) {
10345 case EXEC_FORWARD:
10346 gdb_printf (out, _("Forward.\n"));
10347 break;
10348 case EXEC_REVERSE:
10349 gdb_printf (out, _("Reverse.\n"));
10350 break;
10351 default:
10352 internal_error (_("bogus execution_direction value: %d"),
10353 (int) execution_direction);
10354 }
10355 }
10356
/* Handler for "show schedule-multiple".  VALUE is the current setting
   already rendered as a string.  */

static void
show_schedule_multiple (struct ui_file *file, int from_tty,
			struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Resuming the execution of threads "
		      "of all processes is %s.\n"), value);
}
10364
10365 /* Implementation of `siginfo' variable. */
10366
static const struct internalvar_funcs siginfo_funcs =
{
  /* Builds the value of $_siginfo on demand.  */
  siginfo_make_value,
  /* No compile-to-ast hook.  */
  nullptr,
};
10372
10373 /* Callback for infrun's target events source. This is marked when a
10374 thread has a pending status to process. */
10375
static void
infrun_async_inferior_event_handler (gdb_client_data data)
{
  /* Clear the token before dispatching, so a mark set while handling
     this event is not lost.  */
  clear_async_event_handler (infrun_async_inferior_event_token);
  inferior_event_handler (INF_REG_EVENT);
}
10382
#if GDB_SELF_TEST
namespace selftests
{

/* Verify that when two threads with the same ptid exist (from two different
   targets) and one of them changes ptid, we only update inferior_ptid if
   it is appropriate.  */

static void
infrun_thread_ptid_changed ()
{
  gdbarch *arch = current_inferior ()->arch ();

  /* The thread which inferior_ptid represents changes ptid.  */
  {
    scoped_restore_current_pspace_and_thread restore;

    /* Two mock targets, each with one thread at the same OLD_PTID.  */
    scoped_mock_context<test_target_ops> target1 (arch);
    scoped_mock_context<test_target_ops> target2 (arch);

    ptid_t old_ptid (111, 222);
    ptid_t new_ptid (111, 333);

    /* Keep each mock inferior's ptid->thread map consistent with its
       thread's ptid.  */
    target1.mock_inferior.pid = old_ptid.pid ();
    target1.mock_thread.ptid = old_ptid;
    target1.mock_inferior.ptid_thread_map.clear ();
    target1.mock_inferior.ptid_thread_map[old_ptid] = &target1.mock_thread;

    target2.mock_inferior.pid = old_ptid.pid ();
    target2.mock_thread.ptid = old_ptid;
    target2.mock_inferior.ptid_thread_map.clear ();
    target2.mock_inferior.ptid_thread_map[old_ptid] = &target2.mock_thread;

    /* Make target1's thread the current one, then change its ptid.  */
    auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
    set_current_inferior (&target1.mock_inferior);

    thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);

    /* inferior_ptid referred to target1's thread, so it must track the
       ptid change.  */
    gdb_assert (inferior_ptid == new_ptid);
  }

  /* A thread with the same ptid as inferior_ptid, but from another target,
     changes ptid.  */
  {
    scoped_restore_current_pspace_and_thread restore;

    scoped_mock_context<test_target_ops> target1 (arch);
    scoped_mock_context<test_target_ops> target2 (arch);

    ptid_t old_ptid (111, 222);
    ptid_t new_ptid (111, 333);

    target1.mock_inferior.pid = old_ptid.pid ();
    target1.mock_thread.ptid = old_ptid;
    target1.mock_inferior.ptid_thread_map.clear ();
    target1.mock_inferior.ptid_thread_map[old_ptid] = &target1.mock_thread;

    target2.mock_inferior.pid = old_ptid.pid ();
    target2.mock_thread.ptid = old_ptid;
    target2.mock_inferior.ptid_thread_map.clear ();
    target2.mock_inferior.ptid_thread_map[old_ptid] = &target2.mock_thread;

    /* This time target2 is current, but the ptid change happens on
       target1's thread.  */
    auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
    set_current_inferior (&target2.mock_inferior);

    thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);

    /* The change was on a different target's thread, so inferior_ptid
       must be left untouched.  */
    gdb_assert (inferior_ptid == old_ptid);
  }
}

} /* namespace selftests */

#endif /* GDB_SELF_TEST */
10457
/* Module initializer: registers infrun's async event source, its
   commands and settings, the default signal dispositions, observers,
   and the $_siginfo convenience variable.  */

void _initialize_infrun ();
void
_initialize_infrun ()
{
  struct cmd_list_element *c;

  /* Register extra event sources in the event loop.  */
  infrun_async_inferior_event_token
    = create_async_event_handler (infrun_async_inferior_event_handler, nullptr,
				  "infrun");

  cmd_list_element *info_signals_cmd
    = add_info ("signals", info_signals_command, _("\
What debugger does when program gets various signals.\n\
Specify a signal as argument to print info on that signal only."));
  add_info_alias ("handle", info_signals_cmd, 0);

  c = add_com ("handle", class_run, handle_command, _("\
Specify how to handle signals.\n\
Usage: handle SIGNAL [ACTIONS]\n\
Args are signals and actions to apply to those signals.\n\
If no actions are specified, the current settings for the specified signals\n\
will be displayed instead.\n\
\n\
Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
from 1-15 are allowed for compatibility with old versions of GDB.\n\
Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
The special arg \"all\" is recognized to mean all signals except those\n\
used by the debugger, typically SIGTRAP and SIGINT.\n\
\n\
Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
Stop means reenter debugger if this signal happens (implies print).\n\
Print means print a message if this signal happens.\n\
Pass means let program see this signal; otherwise program doesn't know.\n\
Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
Pass and Stop may be combined.\n\
\n\
Multiple signals may be specified.  Signal numbers and signal names\n\
may be interspersed with actions, with the actions being performed for\n\
all signals cumulatively specified."));
  set_cmd_completer (c, handle_completer);

  stop_command = add_cmd ("stop", class_obscure,
			  not_just_help_class_command, _("\
There is no `stop' command, but you can set a hook on `stop'.\n\
This allows you to set a list of commands to be run each time execution\n\
of the program stops."), &cmdlist);

  add_setshow_boolean_cmd
    ("infrun", class_maintenance, &debug_infrun,
     _("Set inferior debugging."),
     _("Show inferior debugging."),
     _("When non-zero, inferior specific debugging is enabled."),
     nullptr, show_debug_infrun, &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("non-stop", no_class,
			   &non_stop_1, _("\
Set whether gdb controls the inferior in non-stop mode."), _("\
Show whether gdb controls the inferior in non-stop mode."), _("\
When debugging a multi-threaded program and this setting is\n\
off (the default, also called all-stop mode), when one thread stops\n\
(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
all other threads in the program while you interact with the thread of\n\
interest.  When you continue or step a thread, you can allow the other\n\
threads to run, or have them remain stopped, but while you inspect any\n\
thread's state, all threads stop.\n\
\n\
In non-stop mode, when one thread stops, other threads can continue\n\
to run freely.  You'll be able to step each thread independently,\n\
leave it stopped or free to run as needed."),
			   set_non_stop,
			   show_non_stop,
			   &setlist,
			   &showlist);

  /* Default disposition: stop, print, and pass every signal; catch
     none.  Exceptions are carved out below.  */
  for (size_t i = 0; i < GDB_SIGNAL_LAST; i++)
    {
      signal_stop[i] = 1;
      signal_print[i] = 1;
      signal_program[i] = 1;
      signal_catch[i] = 0;
    }

  /* Signals caused by debugger's own actions should not be given to
     the program afterwards.

     Do not deliver GDB_SIGNAL_TRAP by default, except when the user
     explicitly specifies that it should be delivered to the target
     program.  Typically, that would occur when a user is debugging a
     target monitor on a simulator: the target monitor sets a
     breakpoint; the simulator encounters this breakpoint and halts
     the simulation handing control to GDB; GDB, noting that the stop
     address doesn't map to any known breakpoint, returns control back
     to the simulator; the simulator then delivers the hardware
     equivalent of a GDB_SIGNAL_TRAP to the program being
     debugged.  */
  signal_program[GDB_SIGNAL_TRAP] = 0;
  signal_program[GDB_SIGNAL_INT] = 0;

  /* Signals that are not errors should not normally enter the debugger.  */
  signal_stop[GDB_SIGNAL_ALRM] = 0;
  signal_print[GDB_SIGNAL_ALRM] = 0;
  signal_stop[GDB_SIGNAL_VTALRM] = 0;
  signal_print[GDB_SIGNAL_VTALRM] = 0;
  signal_stop[GDB_SIGNAL_PROF] = 0;
  signal_print[GDB_SIGNAL_PROF] = 0;
  signal_stop[GDB_SIGNAL_CHLD] = 0;
  signal_print[GDB_SIGNAL_CHLD] = 0;
  signal_stop[GDB_SIGNAL_IO] = 0;
  signal_print[GDB_SIGNAL_IO] = 0;
  signal_stop[GDB_SIGNAL_POLL] = 0;
  signal_print[GDB_SIGNAL_POLL] = 0;
  signal_stop[GDB_SIGNAL_URG] = 0;
  signal_print[GDB_SIGNAL_URG] = 0;
  signal_stop[GDB_SIGNAL_WINCH] = 0;
  signal_print[GDB_SIGNAL_WINCH] = 0;
  signal_stop[GDB_SIGNAL_PRIO] = 0;
  signal_print[GDB_SIGNAL_PRIO] = 0;

  /* These signals are used internally by user-level thread
     implementations.  (See signal(5) on Solaris.)  Like the above
     signals, a healthy program receives and handles them as part of
     its normal operation.  */
  signal_stop[GDB_SIGNAL_LWP] = 0;
  signal_print[GDB_SIGNAL_LWP] = 0;
  signal_stop[GDB_SIGNAL_WAITING] = 0;
  signal_print[GDB_SIGNAL_WAITING] = 0;
  signal_stop[GDB_SIGNAL_CANCEL] = 0;
  signal_print[GDB_SIGNAL_CANCEL] = 0;
  signal_stop[GDB_SIGNAL_LIBRT] = 0;
  signal_print[GDB_SIGNAL_LIBRT] = 0;

  /* Update cached state.  */
  signal_cache_update (-1);

  add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
			    &stop_on_solib_events, _("\
Set stopping for shared library events."), _("\
Show stopping for shared library events."), _("\
If nonzero, gdb will give control to the user when the dynamic linker\n\
notifies gdb of shared library events.  The most common event of interest\n\
to the user would be loading/unloading of a new library."),
			    set_stop_on_solib_events,
			    show_stop_on_solib_events,
			    &setlist, &showlist);

  add_setshow_enum_cmd ("follow-fork-mode", class_run,
			follow_fork_mode_kind_names,
			&follow_fork_mode_string, _("\
Set debugger response to a program call of fork or vfork."), _("\
Show debugger response to a program call of fork or vfork."), _("\
A fork or vfork creates a new process.  follow-fork-mode can be:\n\
  parent  - the original process is debugged after a fork\n\
  child   - the new process is debugged after a fork\n\
The unfollowed process will continue to run.\n\
By default, the debugger will follow the parent process."),
			nullptr,
			show_follow_fork_mode_string,
			&setlist, &showlist);

  add_setshow_enum_cmd ("follow-exec-mode", class_run,
			follow_exec_mode_names,
			&follow_exec_mode_string, _("\
Set debugger response to a program call of exec."), _("\
Show debugger response to a program call of exec."), _("\
An exec call replaces the program image of a process.\n\
\n\
follow-exec-mode can be:\n\
\n\
  new - the debugger creates a new inferior and rebinds the process\n\
to this new inferior.  The program the process was running before\n\
the exec call can be restarted afterwards by restarting the original\n\
inferior.\n\
\n\
  same - the debugger keeps the process bound to the same inferior.\n\
The new executable image replaces the previous executable loaded in\n\
the inferior.  Restarting the inferior after the exec call restarts\n\
the executable the process was running after the exec call.\n\
\n\
By default, the debugger will use the same inferior."),
			nullptr,
			show_follow_exec_mode_string,
			&setlist, &showlist);

  add_setshow_enum_cmd ("scheduler-locking", class_run,
			scheduler_enums, &scheduler_mode, _("\
Set mode for locking scheduler during execution."), _("\
Show mode for locking scheduler during execution."), _("\
off    == no locking (threads may preempt at any time)\n\
on     == full locking (no thread except the current thread may run)\n\
          This applies to both normal execution and replay mode.\n\
step   == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
          In this mode, other threads may run during other commands.\n\
          This applies to both normal execution and replay mode.\n\
replay == scheduler locked in replay mode and unlocked during normal execution."),
			set_schedlock_func,	/* traps on target vector */
			show_scheduler_mode,
			&setlist, &showlist);

  add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
Set mode for resuming threads of all processes."), _("\
Show mode for resuming threads of all processes."), _("\
When on, execution commands (such as 'continue' or 'next') resume all\n\
threads of all processes.  When off (which is the default), execution\n\
commands only resume the threads of the current process.  The set of\n\
threads that are resumed is further refined by the scheduler-locking\n\
mode (see help set scheduler-locking)."),
			   nullptr,
			   show_schedule_multiple,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
Set mode of the step operation."), _("\
Show mode of the step operation."), _("\
When set, doing a step over a function without debug line information\n\
will stop at the first instruction of that function. Otherwise, the\n\
function is skipped and the step command stops at a different source line."),
			   nullptr,
			   show_step_stop_if_no_debug,
			   &setlist, &showlist);

  add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
				&can_use_displaced_stepping, _("\
Set debugger's willingness to use displaced stepping."), _("\
Show debugger's willingness to use displaced stepping."), _("\
If on, gdb will use displaced stepping to step over breakpoints if it is\n\
supported by the target architecture.  If off, gdb will not use displaced\n\
stepping to step over breakpoints, even if such is supported by the target\n\
architecture.  If auto (which is the default), gdb will use displaced stepping\n\
if the target architecture supports it and non-stop mode is active, but will not\n\
use it in all-stop mode (see help set non-stop)."),
				nullptr,
				show_can_use_displaced_stepping,
				&setlist, &showlist);

  add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
			&exec_direction, _("Set direction of execution.\n\
Options are 'forward' or 'reverse'."),
			_("Show direction of execution (forward/reverse)."),
			_("Tells gdb whether to execute forward or backward."),
			set_exec_direction_func, show_exec_direction_func,
			&setlist, &showlist);

  /* Set/show detach-on-fork: user-settable mode.  */

  add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
Set whether gdb will detach the child of a fork."), _("\
Show whether gdb will detach the child of a fork."), _("\
Tells gdb whether to detach the child of a fork."),
			   nullptr, nullptr, &setlist, &showlist);

  /* Set/show disable address space randomization mode.  */

  add_setshow_boolean_cmd ("disable-randomization", class_support,
			   &disable_randomization, _("\
Set disabling of debuggee's virtual address space randomization."), _("\
Show disabling of debuggee's virtual address space randomization."), _("\
When this mode is on (which is the default), randomization of the virtual\n\
address space is disabled.  Standalone programs run with the randomization\n\
enabled by default on some platforms."),
			   &set_disable_randomization,
			   &show_disable_randomization,
			   &setlist, &showlist);

  /* ptid initializations */
  inferior_ptid = null_ptid;
  target_last_wait_ptid = minus_one_ptid;

  /* Hook infrun's handlers into thread/inferior lifecycle events.  */
  gdb::observers::thread_ptid_changed.attach (infrun_thread_ptid_changed,
					      "infrun");
  gdb::observers::thread_stop_requested.attach (infrun_thread_stop_requested,
						"infrun");
  gdb::observers::inferior_exit.attach (infrun_inferior_exit, "infrun");
  gdb::observers::inferior_execd.attach (infrun_inferior_execd, "infrun");

  /* Explicitly create without lookup, since that tries to create a
     value with a void typed value, and when we get here, gdbarch
     isn't initialized yet.  At this point, we're quite sure there
     isn't another convenience variable of the same name.  */
  create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, nullptr);

  add_setshow_boolean_cmd ("observer", no_class,
			   &observer_mode_1, _("\
Set whether gdb controls the inferior in observer mode."), _("\
Show whether gdb controls the inferior in observer mode."), _("\
In observer mode, GDB can get data from the inferior, but not\n\
affect its execution.  Registers and memory may not be changed,\n\
breakpoints may not be set, and the program cannot be interrupted\n\
or signalled."),
			   set_observer_mode,
			   show_observer_mode,
			   &setlist,
			   &showlist);

#if GDB_SELF_TEST
  selftests::register_test ("infrun_thread_ptid_changed",
			    selftests::infrun_thread_ptid_changed);
#endif
}