/* Target-struct-independent code to start (run) and stop an inferior
   process.

   Copyright (C) 1986-2024 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "displaced-stepping.h"
#include "infrun.h"
#include <ctype.h>
#include "symtab.h"
#include "frame.h"
#include "inferior.h"
#include "breakpoint.h"
#include "gdbcore.h"
#include "gdbcmd.h"
#include "target.h"
#include "target-connection.h"
#include "gdbthread.h"
#include "annotate.h"
#include "symfile.h"
#include "top.h"
#include "ui.h"
#include "inf-loop.h"
#include "regcache.h"
#include "value.h"
#include "observable.h"
#include "language.h"
#include "solib.h"
#include "main.h"
#include "block.h"
#include "mi/mi-common.h"
#include "event-top.h"
#include "record.h"
#include "record-full.h"
#include "inline-frame.h"
#include "jit.h"
#include "tracepoint.h"
#include "skip.h"
#include "probe.h"
#include "objfiles.h"
#include "completer.h"
#include "target-descriptions.h"
#include "target-dcache.h"
#include "terminal.h"
#include "solist.h"
#include "gdbsupport/event-loop.h"
#include "thread-fsm.h"
#include "gdbsupport/enum-flags.h"
#include "progspace-and-thread.h"
#include <optional>
#include "arch-utils.h"
#include "gdbsupport/scope-exit.h"
#include "gdbsupport/forward-scope-exit.h"
#include "gdbsupport/gdb_select.h"
#include <unordered_map>
#include "async-event.h"
#include "gdbsupport/selftest.h"
#include "scoped-mock-context.h"
#include "test-target.h"
#include "gdbsupport/common-debug.h"
#include "gdbsupport/buildargv.h"
#include "extension.h"
#include "disasm.h"
#include "interps.h"

/* Prototypes for local functions */

static void sig_print_info (enum gdb_signal);

static void sig_print_header (void);

static void follow_inferior_reset_breakpoints (void);

static bool currently_stepping (struct thread_info *tp);

static void insert_hp_step_resume_breakpoint_at_frame (frame_info_ptr);

static void insert_step_resume_breakpoint_at_caller (frame_info_ptr);

static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);

static bool maybe_software_singlestep (struct gdbarch *gdbarch);

static void resume (gdb_signal sig);

static void wait_for_inferior (inferior *inf);

static void restart_threads (struct thread_info *event_thread,
                             inferior *inf = nullptr);

static bool start_step_over (void);

static bool step_over_info_valid_p (void);

static bool schedlock_applies (struct thread_info *tp);

/* Asynchronous signal handler registered as event loop source for
   when we have pending events ready to be passed to the core.  */
static struct async_event_handler *infrun_async_inferior_event_token;

/* Stores whether infrun_async was previously enabled or disabled.
   Starts off as -1, indicating "never enabled/disabled".  */
static int infrun_is_async = -1;
static CORE_ADDR update_line_range_start (CORE_ADDR pc,
                                          struct execution_control_state *ecs);

/* See infrun.h.  */

void
infrun_async (int enable)
{
  if (infrun_is_async != enable)
    {
      infrun_is_async = enable;

      infrun_debug_printf ("enable=%d", enable);

      if (enable)
        mark_async_event_handler (infrun_async_inferior_event_token);
      else
        clear_async_event_handler (infrun_async_inferior_event_token);
    }
}

/* See infrun.h.  */

void
mark_infrun_async_event_handler (void)
{
  mark_async_event_handler (infrun_async_inferior_event_token);
}

/* When set, stop the 'step' command if we enter a function which has
   no line number information.  The normal behavior is that we step
   over such functions.  */
bool step_stop_if_no_debug = false;
static void
show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Mode of the step operation is %s.\n"), value);
}

/* proceed and normal_stop use this to notify the user when the
   inferior stopped in a different thread than it had been running in.
   It can also be used to find for which thread normal_stop last
   reported a stop.  */
static thread_info_ref previous_thread;

/* See infrun.h.  */

void
update_previous_thread ()
{
  if (inferior_ptid == null_ptid)
    previous_thread = nullptr;
  else
    previous_thread = thread_info_ref::new_reference (inferior_thread ());
}

/* See infrun.h.  */

thread_info *
get_previous_thread ()
{
  return previous_thread.get ();
}

/* If set (default for legacy reasons), when following a fork, GDB
   will detach from one of the fork branches, child or parent.
   Exactly which branch is detached depends on the 'set
   follow-fork-mode' setting.  */

static bool detach_fork = true;

bool debug_infrun = false;
static void
show_debug_infrun (struct ui_file *file, int from_tty,
                   struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Inferior debugging is %s.\n"), value);
}

/* Support for disabling address space randomization.  */

bool disable_randomization = true;

static void
show_disable_randomization (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  if (target_supports_disable_randomization ())
    gdb_printf (file,
                _("Disabling randomization of debuggee's "
                  "virtual address space is %s.\n"),
                value);
  else
    gdb_puts (_("Disabling randomization of debuggee's "
                "virtual address space is unsupported on\n"
                "this platform.\n"), file);
}

static void
set_disable_randomization (const char *args, int from_tty,
                           struct cmd_list_element *c)
{
  if (!target_supports_disable_randomization ())
    error (_("Disabling randomization of debuggee's "
             "virtual address space is unsupported on\n"
             "this platform."));
}

/* User interface for non-stop mode.  */

bool non_stop = false;
static bool non_stop_1 = false;

static void
set_non_stop (const char *args, int from_tty,
              struct cmd_list_element *c)
{
  if (target_has_execution ())
    {
      non_stop_1 = non_stop;
      error (_("Cannot change this setting while the inferior is running."));
    }

  non_stop = non_stop_1;
}

static void
show_non_stop (struct ui_file *file, int from_tty,
               struct cmd_list_element *c, const char *value)
{
  gdb_printf (file,
              _("Controlling the inferior in non-stop mode is %s.\n"),
              value);
}

/* "Observer mode" is somewhat like a more extreme version of
   non-stop, in which all GDB operations that might affect the
   target's execution have been disabled.  */

static bool observer_mode = false;
static bool observer_mode_1 = false;

static void
set_observer_mode (const char *args, int from_tty,
                   struct cmd_list_element *c)
{
  if (target_has_execution ())
    {
      observer_mode_1 = observer_mode;
      error (_("Cannot change this setting while the inferior is running."));
    }

  observer_mode = observer_mode_1;

  may_write_registers = !observer_mode;
  may_write_memory = !observer_mode;
  may_insert_breakpoints = !observer_mode;
  may_insert_tracepoints = !observer_mode;
  /* We can insert fast tracepoints in or out of observer mode,
     but enable them if we're going into this mode.  */
  if (observer_mode)
    may_insert_fast_tracepoints = true;
  may_stop = !observer_mode;
  update_target_permissions ();

  /* Going *into* observer mode we must force non-stop, then
     going out we leave it that way.  */
  if (observer_mode)
    {
      pagination_enabled = false;
      non_stop = non_stop_1 = true;
    }

  if (from_tty)
    gdb_printf (_("Observer mode is now %s.\n"),
                (observer_mode ? "on" : "off"));
}

static void
show_observer_mode (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Observer mode is %s.\n"), value);
}

/* This updates the value of observer mode based on changes in
   permissions.  Note that we are deliberately ignoring the values of
   may-write-registers and may-write-memory, since the user may have
   reason to enable these during a session, for instance to turn on a
   debugging-related global.  */

void
update_observer_mode (void)
{
  bool newval = (!may_insert_breakpoints
                 && !may_insert_tracepoints
                 && may_insert_fast_tracepoints
                 && !may_stop
                 && non_stop);

  /* Let the user know if things change.  */
  if (newval != observer_mode)
    gdb_printf (_("Observer mode is now %s.\n"),
                (newval ? "on" : "off"));

  observer_mode = observer_mode_1 = newval;
}

/* Tables of how to react to signals; the user sets them.  */

static unsigned char signal_stop[GDB_SIGNAL_LAST];
static unsigned char signal_print[GDB_SIGNAL_LAST];
static unsigned char signal_program[GDB_SIGNAL_LAST];

/* Table of signals that are registered with "catch signal".  A
   non-zero entry indicates that the signal is caught by some "catch
   signal" command.  */
static unsigned char signal_catch[GDB_SIGNAL_LAST];

/* Table of signals that the target may silently handle.
   This is automatically determined from the flags above,
   and simply cached here.  */
static unsigned char signal_pass[GDB_SIGNAL_LAST];

#define SET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
        (flags)[signum] = 1; \
  } while (0)

#define UNSET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
        (flags)[signum] = 0; \
  } while (0)
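
/* Usage sketch (an illustration, not a quote of an actual call site):
   given a caller-built SIGS array of flags indexed by signal number --
   as in the signal "handle" command processing elsewhere in this
   file -- SET_SIGS (nsigs, sigs, signal_stop) marks every flagged
   signal as one that stops the inferior, and
   UNSET_SIGS (nsigs, sigs, signal_stop) clears those same entries.  */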

/* Update the target's copy of SIGNAL_PROGRAM.  The sole purpose of
   this function is to avoid exporting `signal_program'.  */

void
update_signals_program_target (void)
{
  target_program_signals (signal_program);
}

/* Value to pass to target_resume() to cause all threads to resume.  */

#define RESUME_ALL minus_one_ptid
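
/* Usage sketch (not a quote of an actual call site):
   target_resume (RESUME_ALL, 0, GDB_SIGNAL_0) asks the target to resume
   every thread, without single-stepping and without delivering a
   signal.  */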

/* Command list pointer for the "stop" placeholder.  */

static struct cmd_list_element *stop_command;

/* Nonzero if we want to give control to the user when we're notified
   of shared library events by the dynamic linker.  */
int stop_on_solib_events;

/* Enable or disable optional shared library event breakpoints
   as appropriate when the above flag is changed.  */

static void
set_stop_on_solib_events (const char *args,
                          int from_tty, struct cmd_list_element *c)
{
  update_solib_breakpoints ();
}

static void
show_stop_on_solib_events (struct ui_file *file, int from_tty,
                           struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Stopping for shared library events is %s.\n"),
              value);
}

/* True after stop if current stack frame should be printed.  */

static bool stop_print_frame;

/* This is a cached copy of the target/ptid/waitstatus of the last
   event returned by target_wait().
   This information is returned by get_last_target_status().  */
static process_stratum_target *target_last_proc_target;
static ptid_t target_last_wait_ptid;
static struct target_waitstatus target_last_waitstatus;

void init_thread_stepping_state (struct thread_info *tss);

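/* Possible values for the "set follow-fork-mode" setting; GDB either
   keeps debugging the parent (the default, see below) or switches to
   the fork child.  */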
static const char follow_fork_mode_child[] = "child";
static const char follow_fork_mode_parent[] = "parent";

static const char *const follow_fork_mode_kind_names[] = {
  follow_fork_mode_child,
  follow_fork_mode_parent,
  nullptr
};

static const char *follow_fork_mode_string = follow_fork_mode_parent;
static void
show_follow_fork_mode_string (struct ui_file *file, int from_tty,
                              struct cmd_list_element *c, const char *value)
{
  gdb_printf (file,
              _("Debugger response to a program "
                "call of fork or vfork is \"%s\".\n"),
              value);
}
\f

/* Handle changes to the inferior list based on the type of fork,
   which process is being followed, and whether the other process
   should be detached.  On entry inferior_ptid must be the ptid of
   the fork parent.  At return inferior_ptid is the ptid of the
   followed inferior.  */

static bool
follow_fork_inferior (bool follow_child, bool detach_fork)
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  infrun_debug_printf ("follow_child = %d, detach_fork = %d",
                       follow_child, detach_fork);

  target_waitkind fork_kind = inferior_thread ()->pending_follow.kind ();
  gdb_assert (fork_kind == TARGET_WAITKIND_FORKED
              || fork_kind == TARGET_WAITKIND_VFORKED);
  bool has_vforked = fork_kind == TARGET_WAITKIND_VFORKED;
  ptid_t parent_ptid = inferior_ptid;
  ptid_t child_ptid = inferior_thread ()->pending_follow.child_ptid ();

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && current_ui->prompt_state == PROMPT_BLOCKED
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
         child execs or exits.  If we don't let the child run, then
         the parent stays blocked.  If we're telling the parent to run
         in the foreground, the user will not be able to ctrl-c to get
         back the terminal, effectively hanging the debug session.  */
      gdb_printf (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      return true;
    }

  inferior *parent_inf = current_inferior ();
  inferior *child_inf = nullptr;

  gdb_assert (parent_inf->thread_waiting_for_vfork_done == nullptr);

  if (!follow_child)
    {
      /* Detach new forked process?  */
      if (detach_fork)
        {
          /* Before detaching from the child, remove all breakpoints
             from it.  If we forked, then this has already been taken
             care of by infrun.c.  If we vforked however, any
             breakpoint inserted in the parent is visible in the
             child, even those added while stopped in a vfork
             catchpoint.  This will remove the breakpoints from the
             parent also, but they'll be reinserted below.  */
          if (has_vforked)
            {
              /* Keep breakpoints list in sync.  */
              remove_breakpoints_inf (current_inferior ());
            }

          if (print_inferior_events)
            {
              /* Ensure that we have a process ptid.  */
              ptid_t process_ptid = ptid_t (child_ptid.pid ());

              target_terminal::ours_for_output ();
              gdb_printf (_("[Detaching after %s from child %s]\n"),
                          has_vforked ? "vfork" : "fork",
                          target_pid_to_str (process_ptid).c_str ());
            }
        }
      else
        {
          /* Add process to GDB's tables.  */
          child_inf = add_inferior (child_ptid.pid ());

          child_inf->attach_flag = parent_inf->attach_flag;
          copy_terminal_info (child_inf, parent_inf);
          child_inf->set_arch (parent_inf->arch ());
          child_inf->tdesc_info = parent_inf->tdesc_info;

          child_inf->symfile_flags = SYMFILE_NO_READ;

          /* If this is a vfork child, then the address-space is
             shared with the parent.  */
          if (has_vforked)
            {
              child_inf->pspace = parent_inf->pspace;
              child_inf->aspace = parent_inf->aspace;

              exec_on_vfork (child_inf);

              /* The parent will be frozen until the child is done
                 with the shared region.  Keep track of the
                 parent.  */
              child_inf->vfork_parent = parent_inf;
              child_inf->pending_detach = false;
              parent_inf->vfork_child = child_inf;
              parent_inf->pending_detach = false;
            }
          else
            {
              child_inf->pspace = new program_space (new_address_space ());
              child_inf->aspace = child_inf->pspace->aspace;
              child_inf->removable = true;
              clone_program_space (child_inf->pspace, parent_inf->pspace);
            }
        }

      if (has_vforked)
        {
          /* If we detached from the child, then we have to be careful
             to not insert breakpoints in the parent until the child
             is done with the shared memory region.  However, if we're
             staying attached to the child, then we can and should
             insert breakpoints, so that we can debug it.  A
             subsequent child exec or exit is enough to know when the
             child stops using the parent's address space.  */
          parent_inf->thread_waiting_for_vfork_done
            = detach_fork ? inferior_thread () : nullptr;
          parent_inf->pspace->breakpoints_not_allowed = detach_fork;

          infrun_debug_printf
            ("parent_inf->thread_waiting_for_vfork_done == %s",
             (parent_inf->thread_waiting_for_vfork_done == nullptr
              ? "nullptr"
              : (parent_inf->thread_waiting_for_vfork_done
                 ->ptid.to_string ().c_str ())));
        }
    }
  else
    {
      /* Follow the child.  */

      if (print_inferior_events)
        {
          std::string parent_pid = target_pid_to_str (parent_ptid);
          std::string child_pid = target_pid_to_str (child_ptid);

          target_terminal::ours_for_output ();
          gdb_printf (_("[Attaching after %s %s to child %s]\n"),
                      parent_pid.c_str (),
                      has_vforked ? "vfork" : "fork",
                      child_pid.c_str ());
        }

      /* Add the new inferior first, so that the target_detach below
         doesn't unpush the target.  */

      child_inf = add_inferior (child_ptid.pid ());

      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);
      child_inf->set_arch (parent_inf->arch ());
      child_inf->tdesc_info = parent_inf->tdesc_info;

      if (has_vforked)
        {
          /* If this is a vfork child, then the address-space is shared
             with the parent.  */
          child_inf->aspace = parent_inf->aspace;
          child_inf->pspace = parent_inf->pspace;

          exec_on_vfork (child_inf);
        }
      else if (detach_fork)
        {
          /* We follow the child and detach from the parent: move the parent's
             program space to the child.  This simplifies some things, like
             doing "next" over fork() and landing on the expected line in the
             child (note, that is broken with "set detach-on-fork off").

             Before assigning brand new spaces for the parent, remove
             breakpoints from it: because the new pspace won't match
             currently inserted locations, the normal detach procedure
             wouldn't remove them, and we would leave them inserted when
             detaching.  */
          remove_breakpoints_inf (parent_inf);

          child_inf->aspace = parent_inf->aspace;
          child_inf->pspace = parent_inf->pspace;
          parent_inf->pspace = new program_space (new_address_space ());
          parent_inf->aspace = parent_inf->pspace->aspace;
          clone_program_space (parent_inf->pspace, child_inf->pspace);

          /* The parent inferior is still the current one, so keep things
             in sync.  */
          set_current_program_space (parent_inf->pspace);
        }
      else
        {
          child_inf->pspace = new program_space (new_address_space ());
          child_inf->aspace = child_inf->pspace->aspace;
          child_inf->removable = true;
          child_inf->symfile_flags = SYMFILE_NO_READ;
          clone_program_space (child_inf->pspace, parent_inf->pspace);
        }
    }

  gdb_assert (current_inferior () == parent_inf);

  /* If we are setting up an inferior for the child, target_follow_fork is
     responsible for pushing the appropriate targets on the new inferior's
     target stack and adding the initial thread (with ptid CHILD_PTID).

     If we are not setting up an inferior for the child (because following
     the parent and detach_fork is true), it is responsible for detaching
     from CHILD_PTID.  */
  target_follow_fork (child_inf, child_ptid, fork_kind, follow_child,
                      detach_fork);

  gdb::observers::inferior_forked.notify (parent_inf, child_inf, fork_kind);

  /* target_follow_fork must leave the parent as the current inferior.  If we
     want to follow the child, we make it the current one below.  */
  gdb_assert (current_inferior () == parent_inf);

  /* If there is a child inferior, target_follow_fork must have created a thread
     for it.  */
  if (child_inf != nullptr)
    gdb_assert (!child_inf->thread_list.empty ());

  /* Clear the parent thread's pending follow field.  Do this before calling
     target_detach, so that the target can differentiate the two following
     cases:

     - We continue past a fork with "follow-fork-mode == child" &&
       "detach-on-fork on", and therefore detach the parent.  In that
       case the target should not detach the fork child.
     - We run to a fork catchpoint and the user types "detach".  In that
       case, the target should detach the fork child in addition to the
       parent.

     The former case will have pending_follow cleared, the latter will have
     pending_follow set.  */
  thread_info *parent_thread = parent_inf->find_thread (parent_ptid);
  gdb_assert (parent_thread != nullptr);
  parent_thread->pending_follow.set_spurious ();

  /* Detach the parent if needed.  */
  if (follow_child)
    {
      /* If we're vforking, we want to hold on to the parent until
         the child exits or execs.  At child exec or exit time we
         can remove the old breakpoints from the parent and detach
         or resume debugging it.  Otherwise, detach the parent now;
         we'll want to reuse its program/address spaces, but we
         can't set them to the child before removing breakpoints
         from the parent, otherwise, the breakpoints module could
         decide to remove breakpoints from the wrong process (since
         they'd be assigned to the same address space).  */

      if (has_vforked)
        {
          gdb_assert (child_inf->vfork_parent == nullptr);
          gdb_assert (parent_inf->vfork_child == nullptr);
          child_inf->vfork_parent = parent_inf;
          child_inf->pending_detach = false;
          parent_inf->vfork_child = child_inf;
          parent_inf->pending_detach = detach_fork;
        }
      else if (detach_fork)
        {
          if (print_inferior_events)
            {
              /* Ensure that we have a process ptid.  */
              ptid_t process_ptid = ptid_t (parent_ptid.pid ());

              target_terminal::ours_for_output ();
              gdb_printf (_("[Detaching after fork from "
                            "parent %s]\n"),
                          target_pid_to_str (process_ptid).c_str ());
            }

          target_detach (parent_inf, 0);
        }
    }

  /* If we ended up creating a new inferior, call post_create_inferior to inform
     the various subcomponents.  */
  if (child_inf != nullptr)
    {
      /* If FOLLOW_CHILD, we leave CHILD_INF as the current inferior
         (do not restore the parent as the current inferior).  */
      std::optional<scoped_restore_current_thread> maybe_restore;

      if (!follow_child && !sched_multi)
        maybe_restore.emplace ();

      switch_to_thread (*child_inf->threads ().begin ());
      post_create_inferior (0);
    }

  return false;
}

/* Set the last target status as TP having stopped.  */

static void
set_last_target_status_stopped (thread_info *tp)
{
  set_last_target_status (tp->inf->process_target (), tp->ptid,
                          target_waitstatus {}.set_stopped (GDB_SIGNAL_0));
}

/* Tell the target to follow the fork we're stopped at.  Returns true
   if the inferior should be resumed; false, if the target for some
   reason decided it's best not to resume.  */

static bool
follow_fork ()
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  bool follow_child = (follow_fork_mode_string == follow_fork_mode_child);
  bool should_resume = true;

  /* Copy user stepping state to the new inferior thread.  FIXME: the
     followed fork child thread should have a copy of most of the
     parent thread structure's run control related fields, not just these.
     Initialized to avoid "may be used uninitialized" warnings from gcc.  */
  struct breakpoint *step_resume_breakpoint = nullptr;
  struct breakpoint *exception_resume_breakpoint = nullptr;
  CORE_ADDR step_range_start = 0;
  CORE_ADDR step_range_end = 0;
  int current_line = 0;
  symtab *current_symtab = nullptr;
  struct frame_id step_frame_id = { 0 };

  if (!non_stop)
    {
      thread_info *cur_thr = inferior_thread ();

      ptid_t resume_ptid
        = user_visible_resume_ptid (cur_thr->control.stepping_command);
      process_stratum_target *resume_target
        = user_visible_resume_target (resume_ptid);

      /* Check if there's a thread that we're about to resume, other
         than the current, with an unfollowed fork/vfork.  If so,
         switch back to it, to tell the target to follow it (in either
         direction).  We'll afterwards refuse to resume, and inform
         the user what happened.  */
      for (thread_info *tp : all_non_exited_threads (resume_target,
                                                     resume_ptid))
        {
          if (tp == cur_thr)
            continue;

          /* follow_fork_inferior clears tp->pending_follow, and below
             we'll need the value after the follow_fork_inferior
             call.  */
          target_waitkind kind = tp->pending_follow.kind ();

          if (kind != TARGET_WAITKIND_SPURIOUS)
            {
              infrun_debug_printf ("need to follow-fork [%s] first",
                                   tp->ptid.to_string ().c_str ());

              switch_to_thread (tp);

              /* Set up inferior(s) as specified by the caller, and
                 tell the target to do whatever is necessary to follow
                 either parent or child.  */
              if (follow_child)
                {
                  /* The thread that started the execution command
                     won't exist in the child.  Abort the command and
                     immediately stop in this thread, in the child,
                     inside fork.  */
                  should_resume = false;
                }
              else
                {
                  /* Following the parent, so let the thread fork its
                     child freely, it won't influence the current
                     execution command.  */
                  if (follow_fork_inferior (follow_child, detach_fork))
                    {
                      /* Target refused to follow, or there's some
                         other reason we shouldn't resume.  */
                      switch_to_thread (cur_thr);
                      set_last_target_status_stopped (cur_thr);
                      return false;
                    }

                  /* If we're following a vfork, then we need to leave
                     the just-forked thread as selected, as we need to
                     solo-resume it to collect the VFORK_DONE event.
                     If we're following a fork, however, switch back
                     to the original thread so that we continue stepping
                     it, etc.  */
                  if (kind != TARGET_WAITKIND_VFORKED)
                    {
                      gdb_assert (kind == TARGET_WAITKIND_FORKED);
                      switch_to_thread (cur_thr);
                    }
                }

              break;
            }
        }
    }

  thread_info *tp = inferior_thread ();

  /* If there were any forks/vforks that were caught and are now to be
     followed, then do so now.  */
  switch (tp->pending_follow.kind ())
    {
    case TARGET_WAITKIND_FORKED:
    case TARGET_WAITKIND_VFORKED:
      {
        ptid_t parent, child;
        std::unique_ptr<struct thread_fsm> thread_fsm;

        /* If the user did a next/step, etc, over a fork call,
           preserve the stepping state in the fork child.  */
        if (follow_child && should_resume)
          {
            step_resume_breakpoint = clone_momentary_breakpoint
              (tp->control.step_resume_breakpoint);
            step_range_start = tp->control.step_range_start;
            step_range_end = tp->control.step_range_end;
            current_line = tp->current_line;
            current_symtab = tp->current_symtab;
            step_frame_id = tp->control.step_frame_id;
            exception_resume_breakpoint
              = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
            thread_fsm = tp->release_thread_fsm ();

            /* For now, delete the parent's sr breakpoint, otherwise,
               parent/child sr breakpoints are considered duplicates,
               and the child version will not be installed.  Remove
               this when the breakpoints module becomes aware of
               inferiors and address spaces.  */
            delete_step_resume_breakpoint (tp);
            tp->control.step_range_start = 0;
            tp->control.step_range_end = 0;
            tp->control.step_frame_id = null_frame_id;
            delete_exception_resume_breakpoint (tp);
          }

        parent = inferior_ptid;
        child = tp->pending_follow.child_ptid ();

        /* If handling a vfork, stop all the inferior's threads, they will be
           restarted when the vfork shared region is complete.  */
        if (tp->pending_follow.kind () == TARGET_WAITKIND_VFORKED
            && target_is_non_stop_p ())
          stop_all_threads ("handling vfork", tp->inf);

        process_stratum_target *parent_targ = tp->inf->process_target ();
        /* Set up inferior(s) as specified by the caller, and tell the
           target to do whatever is necessary to follow either parent
           or child.  */
        if (follow_fork_inferior (follow_child, detach_fork))
          {
            /* Target refused to follow, or there's some other reason
               we shouldn't resume.  */
            should_resume = 0;
          }
        else
          {
            /* If we followed the child, switch to it...  */
            if (follow_child)
              {
                tp = parent_targ->find_thread (child);
                switch_to_thread (tp);

                /* ... and preserve the stepping state, in case the
                   user was stepping over the fork call.  */
                if (should_resume)
                  {
                    tp->control.step_resume_breakpoint
                      = step_resume_breakpoint;
                    tp->control.step_range_start = step_range_start;
                    tp->control.step_range_end = step_range_end;
                    tp->current_line = current_line;
                    tp->current_symtab = current_symtab;
                    tp->control.step_frame_id = step_frame_id;
                    tp->control.exception_resume_breakpoint
                      = exception_resume_breakpoint;
                    tp->set_thread_fsm (std::move (thread_fsm));
                  }
                else
                  {
                    /* If we get here, it was because we're trying to
                       resume from a fork catchpoint, but, the user
                       has switched threads away from the thread that
                       forked.  In that case, the resume command
                       issued is most likely not applicable to the
                       child, so just warn, and refuse to resume.  */
                    warning (_("Not resuming: switched threads "
                               "before following fork child."));
                  }

                /* Reset breakpoints in the child as appropriate.  */
                follow_inferior_reset_breakpoints ();
              }
          }
      }
      break;
    case TARGET_WAITKIND_SPURIOUS:
      /* Nothing to follow.  */
      break;
    default:
      internal_error ("Unexpected pending_follow.kind %d\n",
                      tp->pending_follow.kind ());
      break;
    }

  if (!should_resume)
    set_last_target_status_stopped (tp);
  return should_resume;
}

static void
follow_inferior_reset_breakpoints (void)
{
  struct thread_info *tp = inferior_thread ();

  /* Was there a step_resume breakpoint?  (There was if the user
     did a "next" at the fork() call.)  If so, explicitly reset its
     thread number.  Cloned step_resume breakpoints are disabled on
     creation, so enable it here now that it is associated with the
     correct thread.

     step_resumes are a form of bp that are made to be per-thread.
     Since we created the step_resume bp when the parent process
     was being debugged, and now are switching to the child process,
     from the breakpoint package's viewpoint, that's a switch of
     "threads".  We must update the bp's notion of which thread
     it is for, or it'll be ignored when it triggers.  */

  if (tp->control.step_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
      tp->control.step_resume_breakpoint->first_loc ().enabled = 1;
    }

  /* Treat exception_resume breakpoints like step_resume breakpoints.  */
  if (tp->control.exception_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
      tp->control.exception_resume_breakpoint->first_loc ().enabled = 1;
    }

  /* Reinsert all breakpoints in the child.  The user may have set
     breakpoints after catching the fork, in which case those
     were never set in the child, but only in the parent.  This makes
     sure the inserted breakpoints match the breakpoint list.  */

  breakpoint_re_set ();
  insert_breakpoints ();
}

/* The child has exited or execed: resume THREAD, a thread of the parent,
   if it was meant to be executing.  */

static void
proceed_after_vfork_done (thread_info *thread)
{
  if (thread->state == THREAD_RUNNING
      && !thread->executing ()
      && !thread->stop_requested
      && thread->stop_signal () == GDB_SIGNAL_0)
    {
      infrun_debug_printf ("resuming vfork parent thread %s",
                           thread->ptid.to_string ().c_str ());

      switch_to_thread (thread);
      clear_proceed_status (0);
      proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
    }
}

/* Called whenever we notice an exec or exit event, to handle
   detaching or resuming a vfork parent.  */

static void
handle_vfork_child_exec_or_exit (int exec)
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  struct inferior *inf = current_inferior ();

  if (inf->vfork_parent)
    {
      inferior *resume_parent = nullptr;

      /* This exec or exit marks the end of the shared memory region
         between the parent and the child.  Break the bonds.  */
      inferior *vfork_parent = inf->vfork_parent;
      inf->vfork_parent->vfork_child = nullptr;
      inf->vfork_parent = nullptr;

      /* If the user wanted to detach from the parent, now is the
         time.  */
      if (vfork_parent->pending_detach)
        {
          struct program_space *pspace;

          /* follow-fork child, detach-on-fork on.  */

          vfork_parent->pending_detach = false;

          scoped_restore_current_pspace_and_thread restore_thread;

          /* We're letting loose of the parent.  */
          thread_info *tp = any_live_thread_of_inferior (vfork_parent);
          switch_to_thread (tp);

          /* We're about to detach from the parent, which implicitly
             removes breakpoints from its address space.  There's a
             catch here: we want to reuse the spaces for the child,
             but, parent/child are still sharing the pspace at this
             point, although the exec in reality makes the kernel give
             the child a fresh set of new pages.  The problem here is
             that the breakpoints module, being unaware of this, would
             likely choose the child process to write to the parent
             address space.  Swapping the child temporarily away from
             the spaces has the desired effect.  Yes, this is "sort
             of" a hack.  */

          pspace = inf->pspace;
          inf->pspace = nullptr;
          address_space_ref_ptr aspace = std::move (inf->aspace);

          if (print_inferior_events)
            {
              std::string pidstr
                = target_pid_to_str (ptid_t (vfork_parent->pid));

              target_terminal::ours_for_output ();

              if (exec)
                {
                  gdb_printf (_("[Detaching vfork parent %s "
                                "after child exec]\n"), pidstr.c_str ());
                }
              else
                {
                  gdb_printf (_("[Detaching vfork parent %s "
                                "after child exit]\n"), pidstr.c_str ());
                }
            }

          target_detach (vfork_parent, 0);

          /* Put it back.  */
          inf->pspace = pspace;
          inf->aspace = aspace;
        }
      else if (exec)
        {
          /* We're staying attached to the parent, so, really give the
             child a new address space.  */
          inf->pspace = new program_space (maybe_new_address_space ());
          inf->aspace = inf->pspace->aspace;
          inf->removable = true;
          set_current_program_space (inf->pspace);

          resume_parent = vfork_parent;
        }
      else
        {
          /* If this is a vfork child exiting, then the pspace and
             aspaces were shared with the parent.  Since we're
             reporting the process exit, we'll be mourning all that is
             found in the address space, and switching to null_ptid,
             preparing to start a new inferior.  But, since we don't
             want to clobber the parent's address/program spaces, we
             go ahead and create a new one for this exiting
             inferior.  */

          scoped_restore_current_thread restore_thread;

          /* Temporarily switch to the vfork parent, to facilitate ptrace
             calls done during maybe_new_address_space.  */
          switch_to_thread (any_live_thread_of_inferior (vfork_parent));
          address_space_ref_ptr aspace = maybe_new_address_space ();

          /* Switch back to the vfork child inferior.  Switch to no-thread
             while running clone_program_space, so that clone_program_space
             doesn't want to read the selected frame of a dead process.  */
          switch_to_inferior_no_thread (inf);

          inf->pspace = new program_space (std::move (aspace));
          inf->aspace = inf->pspace->aspace;
          set_current_program_space (inf->pspace);
          inf->removable = true;
          inf->symfile_flags = SYMFILE_NO_READ;
          clone_program_space (inf->pspace, vfork_parent->pspace);

          resume_parent = vfork_parent;
        }

      gdb_assert (current_program_space == inf->pspace);

      if (non_stop && resume_parent != nullptr)
        {
          /* If the user wanted the parent to be running, let it go
             free now.  */
          scoped_restore_current_thread restore_thread;

          infrun_debug_printf ("resuming vfork parent process %d",
                               resume_parent->pid);

          for (thread_info *thread : resume_parent->threads ())
            proceed_after_vfork_done (thread);
        }
    }
}

/* Handle TARGET_WAITKIND_VFORK_DONE.  */

static void
handle_vfork_done (thread_info *event_thread)
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  /* We only care about this event if inferior::thread_waiting_for_vfork_done is
     set, that is if we are waiting for a vfork child not under our control
     (because we detached it) to exec or exit.

     If an inferior has vforked and we are debugging the child, we don't use
     the vfork-done event to get notified about the end of the shared address
     space window.  We rely instead on the child's exec or exit event, and the
     inferior::vfork_{parent,child} fields are used instead.  See
     handle_vfork_child_exec_or_exit for that.  */
  if (event_thread->inf->thread_waiting_for_vfork_done == nullptr)
    {
      infrun_debug_printf ("not waiting for a vfork-done event");
      return;
    }

  /* We stopped all threads (other than the vforking thread) of the inferior in
     follow_fork and kept them stopped until now.  It should therefore not be
     possible for another thread to have reported a vfork during that window.
     If THREAD_WAITING_FOR_VFORK_DONE is set, it has to be the same thread whose
     vfork-done we are handling right now.  */
  gdb_assert (event_thread->inf->thread_waiting_for_vfork_done == event_thread);

  event_thread->inf->thread_waiting_for_vfork_done = nullptr;
  event_thread->inf->pspace->breakpoints_not_allowed = 0;

  /* On non-stop targets, we stopped all the inferior's threads in follow_fork,
     resume them now.  On all-stop targets, everything that needs to be resumed
     will be when we resume the event thread.  */
  if (target_is_non_stop_p ())
    {
      /* restart_threads and start_step_over may change the current thread, make
         sure we leave the event thread as the current thread.  */
      scoped_restore_current_thread restore_thread;

      insert_breakpoints ();
      start_step_over ();

      if (!step_over_info_valid_p ())
        restart_threads (event_thread, event_thread->inf);
    }
}

/* Enum strings for "set|show follow-exec-mode".  */

static const char follow_exec_mode_new[] = "new";
static const char follow_exec_mode_same[] = "same";
static const char *const follow_exec_mode_names[] =
{
  follow_exec_mode_new,
  follow_exec_mode_same,
  nullptr,
};
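
/* With "new", the pre-exec inferior and its program space are kept
   around and a fresh inferior is created for the post-exec program;
   with "same" (the default), GDB keeps debugging the same inferior
   across the exec.  See follow_exec below.  */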

static const char *follow_exec_mode_string = follow_exec_mode_same;
static void
show_follow_exec_mode_string (struct ui_file *file, int from_tty,
                              struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Follow exec mode is \"%s\".\n"), value);
}

/* EXEC_FILE_TARGET is assumed to be non-NULL.  */

static void
follow_exec (ptid_t ptid, const char *exec_file_target)
{
  int pid = ptid.pid ();
  ptid_t process_ptid;

  /* Switch terminal for any messages produced e.g. by
     breakpoint_re_set.  */
  target_terminal::ours_for_output ();

  /* This is an exec event that we actually wish to pay attention to.
     Refresh our symbol table to the newly exec'd program, remove any
     momentary bp's, etc.

     If there are breakpoints, they aren't really inserted now,
     since the exec() transformed our inferior into a fresh set
     of instructions.

     We want to preserve symbolic breakpoints on the list, since
     we have hopes that they can be reset after the new a.out's
     symbol table is read.

     However, any "raw" breakpoints must be removed from the list
     (e.g., the solib bp's), since their address is probably invalid
     now.

     And, we DON'T want to call delete_breakpoints() here, since
     that may write the bp's "shadow contents" (the instruction
     value that was overwritten with a TRAP instruction).  Since
     we now have a new a.out, those shadow contents aren't valid.  */

  mark_breakpoints_out ();

  /* The target reports the exec event to the main thread, even if
     some other thread does the exec, and even if the main thread was
     stopped or already gone.  We may still have non-leader threads of
     the process on our list.  E.g., on targets that don't have thread
     exit events (like remote) and nothing forces an update of the
     thread list up to here.  When debugging remotely, it's best to
     avoid extra traffic, when possible, so avoid syncing the thread
     list with the target, and instead go ahead and delete all threads
     of the process but the one that reported the event.  Note this must
     be done before calling update_breakpoints_after_exec, as
     otherwise clearing the threads' resources would reference stale
     thread breakpoints -- it may have been one of these threads that
     stepped across the exec.  We could just clear their stepping
     states, but as long as we're iterating, might as well delete
     them.  Deleting them now rather than at the next user-visible
     stop provides a nicer sequence of events for user and MI
     notifications.  */
  for (thread_info *th : all_threads_safe ())
    if (th->ptid.pid () == pid && th->ptid != ptid)
      delete_thread (th);

  /* We also need to clear any left over stale state for the
     leader/event thread.  E.g., if there was any step-resume
     breakpoint or similar, it's gone now.  We cannot truly
     step-to-next statement through an exec().  */
  thread_info *th = inferior_thread ();
  th->control.step_resume_breakpoint = nullptr;
  th->control.exception_resume_breakpoint = nullptr;
  th->control.single_step_breakpoints = nullptr;
  th->control.step_range_start = 0;
  th->control.step_range_end = 0;

  /* The user may have had the main thread held stopped in the
     previous image (e.g., schedlock on, or non-stop).  Release
     it now.  */
  th->stop_requested = 0;

  update_breakpoints_after_exec ();

  /* What is this a.out's name?  */
  process_ptid = ptid_t (pid);
  gdb_printf (_("%s is executing new program: %s\n"),
              target_pid_to_str (process_ptid).c_str (),
              exec_file_target);

  /* We've followed the inferior through an exec.  Therefore, the
     inferior has essentially been killed & reborn.  */

  breakpoint_init_inferior (inf_execd);

  gdb::unique_xmalloc_ptr<char> exec_file_host
    = exec_file_find (exec_file_target, nullptr);

  /* If we were unable to map the executable target pathname onto a host
     pathname, tell the user that.  Otherwise GDB's subsequent behavior
     is confusing.  Maybe it would even be better to stop at this point
     so that the user can specify a file manually before continuing.  */
  if (exec_file_host == nullptr)
    warning (_("Could not load symbols for executable %s.\n"
               "Do you need \"set sysroot\"?"),
             exec_file_target);

  /* Reset the shared library package.  This ensures that we get a
     shlib event when the child reaches "_start", at which point the
     dld will have had a chance to initialize the child.  */
  /* Also, loading a symbol file below may trigger symbol lookups, and
     we don't want those to be satisfied by the libraries of the
     previous incarnation of this process.  */
  no_shared_libraries (nullptr, 0);

  inferior *execing_inferior = current_inferior ();
  inferior *following_inferior;

  if (follow_exec_mode_string == follow_exec_mode_new)
    {
      /* The user wants to keep the old inferior and program spaces
         around.  Create a new fresh one, and switch to it.  */

      /* Do exit processing for the original inferior before setting the new
         inferior's pid.  Having two inferiors with the same pid would confuse
         find_inferior_p(t)id.  Transfer the terminal state and info from the
         old to the new inferior.  */
      following_inferior = add_inferior_with_spaces ();

      swap_terminal_info (following_inferior, execing_inferior);
      exit_inferior (execing_inferior);

      following_inferior->pid = pid;
    }
  else
    {
      /* follow-exec-mode is "same", we continue execution in the execing
         inferior.  */
      following_inferior = execing_inferior;

      /* The old description may no longer be fit for the new image.
         E.g., a 64-bit process exec'ed a 32-bit process.  Clear the
         old description; we'll read a new one below.  No need to do
         this on "follow-exec-mode new", as the old inferior stays
         around (its description is later cleared/refetched on
         restart).  */
      target_clear_description ();
    }

  target_follow_exec (following_inferior, ptid, exec_file_target);

  gdb_assert (current_inferior () == following_inferior);
  gdb_assert (current_program_space == following_inferior->pspace);

  /* Attempt to open the exec file.  SYMFILE_DEFER_BP_RESET is used
     because the proper displacement for a PIE (Position Independent
     Executable) main symbol file will only be computed by
     solib_create_inferior_hook below.  breakpoint_re_set would fail
     to insert the breakpoints with the zero displacement.  */
  try_open_exec_file (exec_file_host.get (), following_inferior,
                      SYMFILE_DEFER_BP_RESET);

  /* If the target can specify a description, read it.  Must do this
     after flipping to the new executable (because the target supplied
     description must be compatible with the executable's
     architecture, and the old executable may e.g., be 32-bit, while
     the new one 64-bit), and before anything involving memory or
     registers.  */
  target_find_description ();

  gdb::observers::inferior_execd.notify (execing_inferior, following_inferior);

  breakpoint_re_set ();

  /* Reinsert all breakpoints.  (Those which were symbolic have
     been reset to the proper address in the new a.out, thanks
     to symbol_file_command...).  */
  insert_breakpoints ();

  /* The next resume of this inferior should bring it to the shlib
     startup breakpoints.  (If the user had also set bp's on
     "main" from the old (parent) process, then they'll auto-
     matically get reset there in the new process.).  */
}

28d5518b 1392/* The chain of threads that need to do a step-over operation to get
c2829269
PA
1393 past e.g., a breakpoint. What technique is used to step over the
1394 breakpoint/watchpoint does not matter -- all threads end up in the
1395 same queue, to maintain rough temporal order of execution, in order
1396 to avoid starvation, otherwise, we could e.g., find ourselves
1397 constantly stepping the same couple threads past their breakpoints
1398 over and over, if the single-step finish fast enough. */
8b6a69b2 1399thread_step_over_list global_thread_step_over_list;
c2829269 1400
6c4cfb24
PA
1401/* Bit flags indicating what the thread needs to step over. */
1402
8d297bbf 1403enum step_over_what_flag
6c4cfb24
PA
1404 {
1405 /* Step over a breakpoint. */
1406 STEP_OVER_BREAKPOINT = 1,
1407
1408 /* Step past a non-continuable watchpoint, in order to let the
1409 instruction execute so we can evaluate the watchpoint
1410 expression. */
1411 STEP_OVER_WATCHPOINT = 2
1412 };
8d297bbf 1413DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag, step_over_what);
6c4cfb24 1414
963f9c80 1415/* Info about an instruction that is being stepped over. */
31e77af2
PA
1416
1417struct step_over_info
1418{
963f9c80
PA
1419 /* If we're stepping past a breakpoint, this is the address space
1420 and address of the instruction the breakpoint is set at. We'll
1421 skip inserting all breakpoints here. Valid iff ASPACE is
1422 non-NULL. */
ac7d717c
PA
1423 const address_space *aspace = nullptr;
1424 CORE_ADDR address = 0;
963f9c80
PA
1425
1426 /* The instruction being stepped over triggers a nonsteppable
1427 watchpoint. If true, we'll skip inserting watchpoints. */
ac7d717c 1428 int nonsteppable_watchpoint_p = 0;
21edc42f
YQ
1429
1430 /* The thread's global number. */
ac7d717c 1431 int thread = -1;
31e77af2
PA
1432};
1433
1434/* The step-over info of the location that is being stepped over.
1435
1436 Note that with async/breakpoint always-inserted mode, a user might
1437 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
1438 being stepped over. As setting a new breakpoint inserts all
1439 breakpoints, we need to make sure the breakpoint being stepped over
1440 isn't inserted then. We do that by only clearing the step-over
1441 info when the step-over is actually finished (or aborted).
1442
1443 Presently GDB can only step over one breakpoint at any given time.
1444 Given threads that can't run code in the same address space as the
1445 breakpoint's can't really miss the breakpoint, GDB could be taught
1446 to step-over at most one breakpoint per address space (so this info
1447 could move to the address space object if/when GDB is extended).
1448 The set of breakpoints being stepped over will normally be much
1449 smaller than the set of all breakpoints, so a flag in the
1450 breakpoint location structure would be wasteful. A separate list
1451 also saves complexity and run-time, as otherwise we'd have to go
1452 through all breakpoint locations clearing their flag whenever we
1453 start a new sequence. Similar considerations weigh against storing
1454 this info in the thread object. Plus, not all step overs actually
1455 have breakpoint locations -- e.g., stepping past a single-step
1456 breakpoint, or stepping to complete a non-continuable
1457 watchpoint. */
1458static struct step_over_info step_over_info;
1459
1460/* Record the address of the breakpoint/instruction we're currently
ce0db137
DE
1461 stepping over.
1462 N.B. We record the aspace and address now, instead of say just the thread,
1463 because when we need the info later the thread may be running. */
31e77af2
PA
1464
1465static void
8b86c959 1466set_step_over_info (const address_space *aspace, CORE_ADDR address,
21edc42f
YQ
1467 int nonsteppable_watchpoint_p,
1468 int thread)
31e77af2
PA
1469{
1470 step_over_info.aspace = aspace;
1471 step_over_info.address = address;
963f9c80 1472 step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
21edc42f 1473 step_over_info.thread = thread;
31e77af2
PA
1474}
1475
 1476/* Called when we're no longer stepping over a breakpoint / an
1477 instruction, so all breakpoints are free to be (re)inserted. */
1478
1479static void
1480clear_step_over_info (void)
1481{
1eb8556f 1482 infrun_debug_printf ("clearing step over info");
03acd4d8 1483 step_over_info.aspace = nullptr;
31e77af2 1484 step_over_info.address = 0;
963f9c80 1485 step_over_info.nonsteppable_watchpoint_p = 0;
21edc42f 1486 step_over_info.thread = -1;
31e77af2
PA
1487}
1488
7f89fd65 1489/* See infrun.h. */
31e77af2
PA
1490
1491int
1492stepping_past_instruction_at (struct address_space *aspace,
1493 CORE_ADDR address)
1494{
03acd4d8 1495 return (step_over_info.aspace != nullptr
31e77af2
PA
1496 && breakpoint_address_match (aspace, address,
1497 step_over_info.aspace,
1498 step_over_info.address));
1499}
1500
963f9c80
PA
1501/* See infrun.h. */
1502
21edc42f
YQ
1503int
1504thread_is_stepping_over_breakpoint (int thread)
1505{
1506 return (step_over_info.thread != -1
1507 && thread == step_over_info.thread);
1508}
1509
1510/* See infrun.h. */
1511
963f9c80
PA
1512int
1513stepping_past_nonsteppable_watchpoint (void)
1514{
1515 return step_over_info.nonsteppable_watchpoint_p;
1516}
1517
6cc83d2a
PA
1518/* Returns true if step-over info is valid. */
1519
c4464ade 1520static bool
6cc83d2a
PA
1521step_over_info_valid_p (void)
1522{
03acd4d8 1523 return (step_over_info.aspace != nullptr
963f9c80 1524 || stepping_past_nonsteppable_watchpoint ());
6cc83d2a
PA
1525}
1526
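   (To make the set/clear bracket above concrete, here is a small
   self-contained model -- hypothetical names, not GDB's actual code -- in
   which the info is set before a step-over, consulted by a simplified
   breakpoint-insertion path, and cleared only once the step-over has
   finished.)

#include <cstdint>
#include <cstdio>

struct step_over_info_model
{
  bool valid = false;
  std::uint64_t address = 0;
};

static step_over_info_model model;

static bool
stepping_past_instruction_at_model (std::uint64_t addr)
{
  return model.valid && addr == model.address;
}

static void
insert_breakpoint_model (std::uint64_t addr)
{
  /* Skip the location currently being stepped over, as infrun does.  */
  if (stepping_past_instruction_at_model (addr))
    std::printf ("skip inserting breakpoint at 0x%llx\n",
		 (unsigned long long) addr);
  else
    std::printf ("insert breakpoint at 0x%llx\n",
		 (unsigned long long) addr);
}

int
main ()
{
  model = { true, 0x1000 };		/* like set_step_over_info */
  insert_breakpoint_model (0x1000);	/* skipped while stepping over */
  insert_breakpoint_model (0x2000);	/* inserted normally */
  model = {};				/* like clear_step_over_info */
  return 0;
}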
c906108c 1527\f
237fc4c9
PA
1528/* Displaced stepping. */
1529
1530/* In non-stop debugging mode, we must take special care to manage
1531 breakpoints properly; in particular, the traditional strategy for
1532 stepping a thread past a breakpoint it has hit is unsuitable.
1533 'Displaced stepping' is a tactic for stepping one thread past a
1534 breakpoint it has hit while ensuring that other threads running
1535 concurrently will hit the breakpoint as they should.
1536
1537 The traditional way to step a thread T off a breakpoint in a
1538 multi-threaded program in all-stop mode is as follows:
1539
1540 a0) Initially, all threads are stopped, and breakpoints are not
1541 inserted.
1542 a1) We single-step T, leaving breakpoints uninserted.
1543 a2) We insert breakpoints, and resume all threads.
1544
1545 In non-stop debugging, however, this strategy is unsuitable: we
1546 don't want to have to stop all threads in the system in order to
1547 continue or step T past a breakpoint. Instead, we use displaced
1548 stepping:
1549
1550 n0) Initially, T is stopped, other threads are running, and
1551 breakpoints are inserted.
1552 n1) We copy the instruction "under" the breakpoint to a separate
1553 location, outside the main code stream, making any adjustments
1554 to the instruction, register, and memory state as directed by
1555 T's architecture.
1556 n2) We single-step T over the instruction at its new location.
1557 n3) We adjust the resulting register and memory state as directed
1558 by T's architecture. This includes resetting T's PC to point
1559 back into the main instruction stream.
1560 n4) We resume T.
1561
1562 This approach depends on the following gdbarch methods:
1563
1564 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1565 indicate where to copy the instruction, and how much space must
1566 be reserved there. We use these in step n1.
1567
 1568 - gdbarch_displaced_step_copy_insn copies an instruction to a new
1569 address, and makes any necessary adjustments to the instruction,
1570 register contents, and memory. We use this in step n1.
1571
1572 - gdbarch_displaced_step_fixup adjusts registers and memory after
85102364 1573 we have successfully single-stepped the instruction, to yield the
237fc4c9
PA
1574 same effect the instruction would have had if we had executed it
1575 at its original address. We use this in step n3.
1576
237fc4c9
PA
1577 The gdbarch_displaced_step_copy_insn and
1578 gdbarch_displaced_step_fixup functions must be written so that
1579 copying an instruction with gdbarch_displaced_step_copy_insn,
1580 single-stepping across the copied instruction, and then applying
 1581 gdbarch_displaced_step_fixup should have the same effects on the
1582 thread's memory and registers as stepping the instruction in place
1583 would have. Exactly which responsibilities fall to the copy and
1584 which fall to the fixup is up to the author of those functions.
1585
1586 See the comments in gdbarch.sh for details.
1587
1588 Note that displaced stepping and software single-step cannot
1589 currently be used in combination, although with some care I think
1590 they could be made to. Software single-step works by placing
1591 breakpoints on all possible subsequent instructions; if the
1592 displaced instruction is a PC-relative jump, those breakpoints
1593 could fall in very strange places --- on pages that aren't
1594 executable, or at addresses that are not proper instruction
1595 boundaries. (We do generally let other threads run while we wait
1596 to hit the software single-step breakpoint, and they might
1597 encounter such a corrupted instruction.) One way to work around
1598 this would be to have gdbarch_displaced_step_copy_insn fully
1599 simulate the effect of PC-relative instructions (and return NULL)
1600 on architectures that use software single-stepping.
1601
1602 In non-stop mode, we can have independent and simultaneous step
1603 requests, so more than one thread may need to simultaneously step
1604 over a breakpoint. The current implementation assumes there is
1605 only one scratch space per process. In this case, we have to
1606 serialize access to the scratch space. If thread A wants to step
1607 over a breakpoint, but we are currently waiting for some other
1608 thread to complete a displaced step, we leave thread A stopped and
1609 place it in the displaced_step_request_queue. Whenever a displaced
1610 step finishes, we pick the next thread in the queue and start a new
1611 displaced step operation on it. See displaced_step_prepare and
7def77a1 1612 displaced_step_finish for details. */
237fc4c9 1613
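   (A rough, self-contained illustration of steps n1-n4 follows.  It uses a
   toy byte array in place of the inferior's memory, and the scratch-pad
   offset and fixup rule are invented for the example rather than taken from
   any real gdbarch.)

#include <cstdint>
#include <cstring>
#include <cstdio>

int
main ()
{
  std::uint8_t memory[64] = {};		/* pretend instruction stream */
  std::uint64_t pc = 8;			/* breakpointed instruction */
  const std::uint64_t scratch = 48;	/* displaced-step buffer */
  const std::size_t insn_len = 4;

  /* n1: copy the instruction "under" the breakpoint to the scratch pad.  */
  std::memcpy (&memory[scratch], &memory[pc], insn_len);

  /* n2: single-step the copy (simulated here by advancing a PC).  */
  std::uint64_t stepped_pc = scratch + insn_len;

  /* n3: fix up state so it looks as if the original instruction ran in
     place: point the PC back into the main instruction stream.  */
  pc += stepped_pc - scratch;

  /* n4: the thread would now be resumed at PC.  */
  std::printf ("resuming at offset %llu\n", (unsigned long long) pc);
  return 0;
}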
a46d1843 1614/* Return true if THREAD is doing a displaced step. */
c0987663 1615
c4464ade 1616static bool
00431a78 1617displaced_step_in_progress_thread (thread_info *thread)
c0987663 1618{
03acd4d8 1619 gdb_assert (thread != nullptr);
c0987663 1620
187b041e 1621 return thread->displaced_step_state.in_progress ();
c0987663
YQ
1622}
1623
a46d1843 1624/* Return true if INF has a thread doing a displaced step. */
8f572e5c 1625
c4464ade 1626static bool
00431a78 1627displaced_step_in_progress (inferior *inf)
8f572e5c 1628{
187b041e 1629 return inf->displaced_step_state.in_progress_count > 0;
fc1cf338
PA
1630}
1631
187b041e 1632/* Return true if any thread is doing a displaced step. */
a42244db 1633
187b041e
SM
1634static bool
1635displaced_step_in_progress_any_thread ()
a42244db 1636{
187b041e
SM
1637 for (inferior *inf : all_non_exited_inferiors ())
1638 {
1639 if (displaced_step_in_progress (inf))
1640 return true;
1641 }
a42244db 1642
187b041e 1643 return false;
a42244db
YQ
1644}
1645
fc1cf338
PA
1646static void
1647infrun_inferior_exit (struct inferior *inf)
1648{
d20172fc 1649 inf->displaced_step_state.reset ();
6f5d514f 1650 inf->thread_waiting_for_vfork_done = nullptr;
fc1cf338 1651}
237fc4c9 1652
3b7a962d 1653static void
4a1283c8 1654infrun_inferior_execd (inferior *exec_inf, inferior *follow_inf)
3b7a962d 1655{
187b041e
SM
 1656 /* If some threads were doing a displaced step in this inferior at the
1657 moment of the exec, they no longer exist. Even if the exec'ing thread
3b7a962d
SM
 1658 was doing a displaced step, we don't want to do any fixup nor restore displaced
1659 stepping buffer bytes. */
4a1283c8 1660 follow_inf->displaced_step_state.reset ();
3b7a962d 1661
4a1283c8 1662 for (thread_info *thread : follow_inf->threads ())
187b041e
SM
1663 thread->displaced_step_state.reset ();
1664
3b7a962d
SM
1665 /* Since an in-line step is done with everything else stopped, if there was
1666 one in progress at the time of the exec, it must have been the exec'ing
1667 thread. */
1668 clear_step_over_info ();
6f5d514f 1669
4a1283c8 1670 follow_inf->thread_waiting_for_vfork_done = nullptr;
3b7a962d
SM
1671}
1672
fff08868
HZ
1673/* If ON, and the architecture supports it, GDB will use displaced
1674 stepping to step over breakpoints. If OFF, or if the architecture
1675 doesn't support it, GDB will instead use the traditional
1676 hold-and-step approach. If AUTO (which is the default), GDB will
1677 decide which technique to use to step over breakpoints depending on
9822cb57 1678 whether the target works in a non-stop way (see use_displaced_stepping). */
fff08868 1679
72d0e2c5 1680static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
fff08868 1681
237fc4c9
PA
1682static void
1683show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1684 struct cmd_list_element *c,
1685 const char *value)
1686{
72d0e2c5 1687 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
6cb06a8c
TT
1688 gdb_printf (file,
1689 _("Debugger's willingness to use displaced stepping "
1690 "to step over breakpoints is %s (currently %s).\n"),
1691 value, target_is_non_stop_p () ? "on" : "off");
fff08868 1692 else
6cb06a8c
TT
1693 gdb_printf (file,
1694 _("Debugger's willingness to use displaced stepping "
1695 "to step over breakpoints is %s.\n"), value);
237fc4c9
PA
1696}
1697
9822cb57
SM
1698/* Return true if the gdbarch implements the required methods to use
1699 displaced stepping. */
1700
1701static bool
1702gdbarch_supports_displaced_stepping (gdbarch *arch)
1703{
187b041e
SM
1704 /* Only check for the presence of `prepare`. The gdbarch verification ensures
1705 that if `prepare` is provided, so is `finish`. */
1706 return gdbarch_displaced_step_prepare_p (arch);
9822cb57
SM
1707}
1708
fff08868 1709/* Return non-zero if displaced stepping can/should be used to step
3fc8eb30 1710 over breakpoints of thread TP. */
fff08868 1711
9822cb57
SM
1712static bool
1713use_displaced_stepping (thread_info *tp)
237fc4c9 1714{
9822cb57
SM
1715 /* If the user disabled it explicitly, don't use displaced stepping. */
1716 if (can_use_displaced_stepping == AUTO_BOOLEAN_FALSE)
1717 return false;
1718
1719 /* If "auto", only use displaced stepping if the target operates in a non-stop
1720 way. */
1721 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
1722 && !target_is_non_stop_p ())
1723 return false;
1724
1725 gdbarch *gdbarch = get_thread_regcache (tp)->arch ();
1726
1727 /* If the architecture doesn't implement displaced stepping, don't use
1728 it. */
1729 if (!gdbarch_supports_displaced_stepping (gdbarch))
1730 return false;
1731
1732 /* If recording, don't use displaced stepping. */
1733 if (find_record_target () != nullptr)
1734 return false;
1735
9822cb57
SM
1736 /* If displaced stepping failed before for this inferior, don't bother trying
1737 again. */
f5f01699 1738 if (tp->inf->displaced_step_state.failed_before)
9822cb57
SM
1739 return false;
1740
1741 return true;
237fc4c9
PA
1742}
1743
187b041e 1744/* Simple function wrapper around displaced_step_thread_state::reset. */
d8d83535 1745
237fc4c9 1746static void
187b041e 1747displaced_step_reset (displaced_step_thread_state *displaced)
237fc4c9 1748{
d8d83535 1749 displaced->reset ();
237fc4c9
PA
1750}
1751
d8d83535
SM
1752/* A cleanup that wraps displaced_step_reset. We use this instead of, say,
1753 SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()". */
1754
1755using displaced_step_reset_cleanup = FORWARD_SCOPE_EXIT (displaced_step_reset);
237fc4c9 1756
237fc4c9
PA
1757/* Prepare to single-step, using displaced stepping.
1758
1759 Note that we cannot use displaced stepping when we have a signal to
1760 deliver. If we have a signal to deliver and an instruction to step
1761 over, then after the step, there will be no indication from the
1762 target whether the thread entered a signal handler or ignored the
1763 signal and stepped over the instruction successfully --- both cases
1764 result in a simple SIGTRAP. In the first case we mustn't do a
1765 fixup, and in the second case we must --- but we can't tell which.
1766 Comments in the code for 'random signals' in handle_inferior_event
1767 explain how we handle this case instead.
1768
bab37966
SM
1769 Returns DISPLACED_STEP_PREPARE_STATUS_OK if preparing was successful -- this
1770 thread is going to be stepped now; DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
1771 if displaced stepping this thread got queued; or
1772 DISPLACED_STEP_PREPARE_STATUS_CANT if this instruction can't be displaced
1773 stepped. */
7f03bd92 1774
bab37966 1775static displaced_step_prepare_status
00431a78 1776displaced_step_prepare_throw (thread_info *tp)
237fc4c9 1777{
00431a78 1778 regcache *regcache = get_thread_regcache (tp);
ac7936df 1779 struct gdbarch *gdbarch = regcache->arch ();
187b041e
SM
1780 displaced_step_thread_state &disp_step_thread_state
1781 = tp->displaced_step_state;
237fc4c9
PA
1782
1783 /* We should never reach this function if the architecture does not
1784 support displaced stepping. */
9822cb57 1785 gdb_assert (gdbarch_supports_displaced_stepping (gdbarch));
237fc4c9 1786
c2829269
PA
1787 /* Nor if the thread isn't meant to step over a breakpoint. */
1788 gdb_assert (tp->control.trap_expected);
1789
c1e36e3e
PA
1790 /* Disable range stepping while executing in the scratch pad. We
1791 want a single-step even if executing the displaced instruction in
1792 the scratch buffer lands within the stepping range (e.g., a
1793 jump/branch). */
1794 tp->control.may_range_step = 0;
1795
187b041e
SM
1796 /* We are about to start a displaced step for this thread. If one is already
1797 in progress, something's wrong. */
1798 gdb_assert (!disp_step_thread_state.in_progress ());
237fc4c9 1799
187b041e 1800 if (tp->inf->displaced_step_state.unavailable)
237fc4c9 1801 {
187b041e
SM
1802 /* The gdbarch tells us it's not worth asking to try a prepare because
1803 it is likely that it will return unavailable, so don't bother asking. */
237fc4c9 1804
136821d9 1805 displaced_debug_printf ("deferring step of %s",
0fab7955 1806 tp->ptid.to_string ().c_str ());
237fc4c9 1807
28d5518b 1808 global_thread_step_over_chain_enqueue (tp);
bab37966 1809 return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
237fc4c9 1810 }
237fc4c9 1811
187b041e 1812 displaced_debug_printf ("displaced-stepping %s now",
0fab7955 1813 tp->ptid.to_string ().c_str ());
237fc4c9 1814
00431a78
PA
1815 scoped_restore_current_thread restore_thread;
1816
1817 switch_to_thread (tp);
ad53cd71 1818
187b041e
SM
1819 CORE_ADDR original_pc = regcache_read_pc (regcache);
1820 CORE_ADDR displaced_pc;
237fc4c9 1821
6d84a385
AB
1822 /* Display the instruction we are going to displaced step. */
1823 if (debug_displaced)
1824 {
1825 string_file tmp_stream;
1826 int dislen = gdb_print_insn (gdbarch, original_pc, &tmp_stream,
1827 nullptr);
1828
1829 if (dislen > 0)
1830 {
1831 gdb::byte_vector insn_buf (dislen);
1832 read_memory (original_pc, insn_buf.data (), insn_buf.size ());
1833
a6e5abae 1834 std::string insn_bytes = bytes_to_string (insn_buf);
6d84a385
AB
1835
1836 displaced_debug_printf ("original insn %s: %s \t %s",
1837 paddress (gdbarch, original_pc),
1838 insn_bytes.c_str (),
1839 tmp_stream.string ().c_str ());
1840 }
1841 else
1842 displaced_debug_printf ("original insn %s: invalid length: %d",
1843 paddress (gdbarch, original_pc), dislen);
1844 }
1845
187b041e
SM
1846 displaced_step_prepare_status status
1847 = gdbarch_displaced_step_prepare (gdbarch, tp, displaced_pc);
237fc4c9 1848
187b041e 1849 if (status == DISPLACED_STEP_PREPARE_STATUS_CANT)
d35ae833 1850 {
187b041e 1851 displaced_debug_printf ("failed to prepare (%s)",
0fab7955 1852 tp->ptid.to_string ().c_str ());
d35ae833 1853
bab37966 1854 return DISPLACED_STEP_PREPARE_STATUS_CANT;
d35ae833 1855 }
187b041e 1856 else if (status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
7f03bd92 1857 {
187b041e
SM
1858 /* Not enough displaced stepping resources available, defer this
 1859 request by placing it in the queue. */
1860
1861 displaced_debug_printf ("not enough resources available, "
1862 "deferring step of %s",
0fab7955 1863 tp->ptid.to_string ().c_str ());
187b041e
SM
1864
1865 global_thread_step_over_chain_enqueue (tp);
1866
1867 return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
7f03bd92 1868 }
237fc4c9 1869
187b041e
SM
1870 gdb_assert (status == DISPLACED_STEP_PREPARE_STATUS_OK);
1871
9f5a595d
UW
1872 /* Save the information we need to fix things up if the step
1873 succeeds. */
187b041e 1874 disp_step_thread_state.set (gdbarch);
9f5a595d 1875
187b041e 1876 tp->inf->displaced_step_state.in_progress_count++;
ad53cd71 1877
187b041e
SM
1878 displaced_debug_printf ("prepared successfully thread=%s, "
1879 "original_pc=%s, displaced_pc=%s",
0fab7955 1880 tp->ptid.to_string ().c_str (),
187b041e
SM
1881 paddress (gdbarch, original_pc),
1882 paddress (gdbarch, displaced_pc));
237fc4c9 1883
6d84a385
AB
1884 /* Display the new displaced instruction(s). */
1885 if (debug_displaced)
1886 {
1887 string_file tmp_stream;
1888 CORE_ADDR addr = displaced_pc;
1889
1890 /* If displaced stepping is going to use h/w single step then we know
1891 that the replacement instruction can only be a single instruction,
1892 in that case set the end address at the next byte.
1893
1894 Otherwise the displaced stepping copy instruction routine could
1895 have generated multiple instructions, and all we know is that they
1896 must fit within the LEN bytes of the buffer. */
1897 CORE_ADDR end
1898 = addr + (gdbarch_displaced_step_hw_singlestep (gdbarch)
1899 ? 1 : gdbarch_displaced_step_buffer_length (gdbarch));
1900
1901 while (addr < end)
1902 {
1903 int dislen = gdb_print_insn (gdbarch, addr, &tmp_stream, nullptr);
1904 if (dislen <= 0)
1905 {
1906 displaced_debug_printf
1907 ("replacement insn %s: invalid length: %d",
1908 paddress (gdbarch, addr), dislen);
1909 break;
1910 }
1911
1912 gdb::byte_vector insn_buf (dislen);
1913 read_memory (addr, insn_buf.data (), insn_buf.size ());
1914
a6e5abae 1915 std::string insn_bytes = bytes_to_string (insn_buf);
6d84a385
AB
1916 std::string insn_str = tmp_stream.release ();
1917 displaced_debug_printf ("replacement insn %s: %s \t %s",
1918 paddress (gdbarch, addr),
1919 insn_bytes.c_str (),
1920 insn_str.c_str ());
1921 addr += dislen;
1922 }
1923 }
1924
bab37966 1925 return DISPLACED_STEP_PREPARE_STATUS_OK;
237fc4c9
PA
1926}
1927
3fc8eb30
PA
 1928/* Wrapper for displaced_step_prepare_throw that disables further
1929 attempts at displaced stepping if we get a memory error. */
1930
bab37966 1931static displaced_step_prepare_status
00431a78 1932displaced_step_prepare (thread_info *thread)
3fc8eb30 1933{
bab37966
SM
1934 displaced_step_prepare_status status
1935 = DISPLACED_STEP_PREPARE_STATUS_CANT;
3fc8eb30 1936
a70b8144 1937 try
3fc8eb30 1938 {
bab37966 1939 status = displaced_step_prepare_throw (thread);
3fc8eb30 1940 }
230d2906 1941 catch (const gdb_exception_error &ex)
3fc8eb30 1942 {
16b41842
PA
1943 if (ex.error != MEMORY_ERROR
1944 && ex.error != NOT_SUPPORTED_ERROR)
eedc3f4f 1945 throw;
3fc8eb30 1946
1eb8556f
SM
1947 infrun_debug_printf ("caught exception, disabling displaced stepping: %s",
1948 ex.what ());
3fc8eb30
PA
1949
1950 /* Be verbose if "set displaced-stepping" is "on", silent if
1951 "auto". */
1952 if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1953 {
fd7dcb94 1954 warning (_("disabling displaced stepping: %s"),
3d6e9d23 1955 ex.what ());
3fc8eb30
PA
1956 }
1957
1958 /* Disable further displaced stepping attempts. */
f5f01699 1959 thread->inf->displaced_step_state.failed_before = 1;
3fc8eb30 1960 }
3fc8eb30 1961
bab37966 1962 return status;
3fc8eb30
PA
1963}
1964
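   (The wrapper above follows a simple "try once, and on a memory error latch
   a per-inferior flag so we never try again" shape.  The self-contained
   sketch below mimics that shape only; std::runtime_error stands in for
   gdb_exception_error and the helper names are made up.)

#include <stdexcept>
#include <cstdio>

static bool failed_before = false;

static bool
prepare_throw (bool simulate_memory_error)
{
  if (simulate_memory_error)
    throw std::runtime_error ("cannot access scratch pad");
  return true;
}

static bool
prepare (bool simulate_memory_error)
{
  if (failed_before)
    return false;		/* don't even bother trying again */

  try
    {
      return prepare_throw (simulate_memory_error);
    }
  catch (const std::runtime_error &ex)
    {
      std::fprintf (stderr, "disabling displaced stepping: %s\n", ex.what ());
      failed_before = true;	/* latch the failure, as failed_before does */
      return false;
    }
}

int
main ()
{
  prepare (true);	/* fails and latches failed_before */
  prepare (false);	/* short-circuits even though it would succeed */
  return 0;
}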
9488c327
PA
1965/* True if any thread of TARGET that matches RESUME_PTID requires
1966 target_thread_events enabled. This assumes TARGET does not support
1967 target thread options. */
1968
1969static bool
1970any_thread_needs_target_thread_events (process_stratum_target *target,
1971 ptid_t resume_ptid)
1972{
1973 for (thread_info *tp : all_non_exited_threads (target, resume_ptid))
1974 if (displaced_step_in_progress_thread (tp)
1975 || schedlock_applies (tp)
1976 || tp->thread_fsm () != nullptr)
1977 return true;
1978 return false;
1979}
1980
65c459ab
PA
1981/* Maybe disable thread-{cloned,created,exited} event reporting after
1982 a step-over (either in-line or displaced) finishes. */
1983
1984static void
21d48304
PA
1985update_thread_events_after_step_over (thread_info *event_thread,
1986 const target_waitstatus &event_status)
65c459ab 1987{
7ac958f2
PA
1988 if (schedlock_applies (event_thread))
1989 {
1990 /* If scheduler-locking applies, continue reporting
1991 thread-created/thread-cloned events. */
1992 return;
1993 }
1994 else if (target_supports_set_thread_options (0))
65c459ab
PA
1995 {
1996 /* We can control per-thread options. Disable events for the
21d48304
PA
1997 event thread, unless the thread is gone. */
1998 if (event_status.kind () != TARGET_WAITKIND_THREAD_EXITED)
1999 event_thread->set_thread_options (0);
65c459ab
PA
2000 }
2001 else
2002 {
2003 /* We can only control the target-wide target_thread_events
9488c327
PA
2004 setting. Disable it, but only if other threads in the target
2005 don't need it enabled. */
2006 process_stratum_target *target = event_thread->inf->process_target ();
2007 if (!any_thread_needs_target_thread_events (target, minus_one_ptid))
65c459ab
PA
2008 target_thread_events (false);
2009 }
2010}
2011
bab37966
SM
2012/* If we displaced stepped an instruction successfully, adjust registers and
2013 memory to yield the same effect the instruction would have had if we had
2014 executed it at its original address, and return
2015 DISPLACED_STEP_FINISH_STATUS_OK. If the instruction didn't complete,
2016 relocate the PC and return DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED.
372316f1 2017
bab37966
SM
2018 If the thread wasn't displaced stepping, return
2019 DISPLACED_STEP_FINISH_STATUS_OK as well. */
2020
2021static displaced_step_finish_status
58c01087
PA
2022displaced_step_finish (thread_info *event_thread,
2023 const target_waitstatus &event_status)
237fc4c9 2024{
0d36baa9 2025 /* Check whether the parent is displaced stepping. */
0d36baa9
PA
2026 inferior *parent_inf = event_thread->inf;
2027
2028 /* If this was a fork/vfork/clone, this event indicates that the
2029 displaced stepping of the syscall instruction has been done, so
2030 we perform cleanup for parent here. Also note that this
2031 operation also cleans up the child for vfork, because their pages
2032 are shared. */
2033
2034 /* If this is a fork (child gets its own address space copy) and
2035 some displaced step buffers were in use at the time of the fork,
2036 restore the displaced step buffer bytes in the child process.
2037
2038 Architectures which support displaced stepping and fork events
2039 must supply an implementation of
2040 gdbarch_displaced_step_restore_all_in_ptid. This is not enforced
2041 during gdbarch validation to support architectures which support
2042 displaced stepping but not forks. */
249d0812
PA
2043 if (event_status.kind () == TARGET_WAITKIND_FORKED)
2044 {
2045 struct regcache *parent_regcache = get_thread_regcache (event_thread);
2046 struct gdbarch *gdbarch = parent_regcache->arch ();
2047
2048 if (gdbarch_supports_displaced_stepping (gdbarch))
2049 gdbarch_displaced_step_restore_all_in_ptid
2050 (gdbarch, parent_inf, event_status.child_ptid ());
2051 }
0d36baa9 2052
187b041e 2053 displaced_step_thread_state *displaced = &event_thread->displaced_step_state;
fc1cf338 2054
187b041e
SM
2055 /* Was this thread performing a displaced step? */
2056 if (!displaced->in_progress ())
bab37966 2057 return DISPLACED_STEP_FINISH_STATUS_OK;
237fc4c9 2058
21d48304 2059 update_thread_events_after_step_over (event_thread, event_status);
65c459ab 2060
187b041e
SM
2061 gdb_assert (event_thread->inf->displaced_step_state.in_progress_count > 0);
2062 event_thread->inf->displaced_step_state.in_progress_count--;
2063
cb71640d
PA
2064 /* Fixup may need to read memory/registers. Switch to the thread
2065 that we're fixing up. Also, target_stopped_by_watchpoint checks
d43b7a2d 2066 the current thread, and displaced_step_restore performs ptid-dependent
328d42d8 2067 memory accesses using current_inferior(). */
00431a78 2068 switch_to_thread (event_thread);
cb71640d 2069
d43b7a2d
TBA
2070 displaced_step_reset_cleanup cleanup (displaced);
2071
187b041e
SM
2072 /* Do the fixup, and release the resources acquired to do the displaced
2073 step. */
0d36baa9
PA
2074 displaced_step_finish_status status
2075 = gdbarch_displaced_step_finish (displaced->get_original_gdbarch (),
2076 event_thread, event_status);
2077
2078 if (event_status.kind () == TARGET_WAITKIND_FORKED
2079 || event_status.kind () == TARGET_WAITKIND_VFORKED
2080 || event_status.kind () == TARGET_WAITKIND_THREAD_CLONED)
2081 {
2082 /* Since the vfork/fork/clone syscall instruction was executed
2083 in the scratchpad, the child's PC is also within the
2084 scratchpad. Set the child's PC to the parent's PC value,
2085 which has already been fixed up. Note: we use the parent's
2086 aspace here, although we're touching the child, because the
2087 child hasn't been added to the inferior list yet at this
2088 point. */
2089
249d0812
PA
2090 struct regcache *parent_regcache = get_thread_regcache (event_thread);
2091 struct gdbarch *gdbarch = parent_regcache->arch ();
0d36baa9 2092 struct regcache *child_regcache
74387712
SM
2093 = get_thread_arch_regcache (parent_inf, event_status.child_ptid (),
2094 gdbarch);
0d36baa9 2095 /* Read PC value of parent. */
249d0812 2096 CORE_ADDR parent_pc = regcache_read_pc (parent_regcache);
0d36baa9
PA
2097
2098 displaced_debug_printf ("write child pc from %s to %s",
2099 paddress (gdbarch,
2100 regcache_read_pc (child_regcache)),
2101 paddress (gdbarch, parent_pc));
2102
2103 regcache_write_pc (child_regcache, parent_pc);
2104 }
2105
2106 return status;
c2829269 2107}
1c5cfe86 2108
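   (The child-PC fixup for fork/vfork/clone events boils down to copying the
   parent's already fixed-up PC into the child's register cache.  The toy
   model below, with a plain struct standing in for a regcache, shows just
   that one step.)

#include <cstdint>
#include <cstdio>

struct toy_regcache { std::uint64_t pc; };

int
main ()
{
  toy_regcache parent = { 0x401000 };	/* parent PC, already fixed up */
  toy_regcache child = { 0x7f0000 };	/* child PC, still in the scratch pad */

  std::printf ("write child pc from 0x%llx to 0x%llx\n",
	       (unsigned long long) child.pc,
	       (unsigned long long) parent.pc);
  child.pc = parent.pc;			/* like regcache_write_pc */
  return 0;
}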
4d9d9d04
PA
2109/* Data to be passed around while handling an event. This data is
2110 discarded between events. */
2111struct execution_control_state
2112{
aa563d16
TT
2113 explicit execution_control_state (thread_info *thr = nullptr)
2114 : ptid (thr == nullptr ? null_ptid : thr->ptid),
2115 event_thread (thr)
183be222 2116 {
183be222
SM
2117 }
2118
aa563d16 2119 process_stratum_target *target = nullptr;
4d9d9d04
PA
2120 ptid_t ptid;
2121 /* The thread that got the event, if this was a thread event; NULL
2122 otherwise. */
2123 struct thread_info *event_thread;
2124
2125 struct target_waitstatus ws;
aa563d16 2126 int stop_func_filled_in = 0;
2a8339b7 2127 CORE_ADDR stop_func_alt_start = 0;
aa563d16
TT
2128 CORE_ADDR stop_func_start = 0;
2129 CORE_ADDR stop_func_end = 0;
2130 const char *stop_func_name = nullptr;
2131 int wait_some_more = 0;
4d9d9d04
PA
2132
2133 /* True if the event thread hit the single-step breakpoint of
2134 another thread. Thus the event doesn't cause a stop, the thread
2135 needs to be single-stepped past the single-step breakpoint before
2136 we can switch back to the original stepping thread. */
aa563d16 2137 int hit_singlestep_breakpoint = 0;
4d9d9d04
PA
2138};
2139
4d9d9d04
PA
2140static void keep_going_pass_signal (struct execution_control_state *ecs);
2141static void prepare_to_wait (struct execution_control_state *ecs);
c4464ade 2142static bool keep_going_stepped_thread (struct thread_info *tp);
8d297bbf 2143static step_over_what thread_still_needs_step_over (struct thread_info *tp);
4d9d9d04
PA
2144
2145/* Are there any pending step-over requests? If so, run all we can
2146 now and return true. Otherwise, return false. */
2147
c4464ade 2148static bool
c2829269
PA
2149start_step_over (void)
2150{
3ec3145c
SM
2151 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
2152
372316f1
PA
2153 /* Don't start a new step-over if we already have an in-line
2154 step-over operation ongoing. */
2155 if (step_over_info_valid_p ())
c4464ade 2156 return false;
372316f1 2157
187b041e
SM
2158 /* Steal the global thread step over chain. As we try to initiate displaced
2159 steps, threads will be enqueued in the global chain if no buffers are
2160 available. If we iterated on the global chain directly, we might iterate
2161 indefinitely. */
8b6a69b2
SM
2162 thread_step_over_list threads_to_step
2163 = std::move (global_thread_step_over_list);
187b041e
SM
2164
2165 infrun_debug_printf ("stealing global queue of threads to step, length = %d",
2166 thread_step_over_chain_length (threads_to_step));
2167
2168 bool started = false;
2169
2170 /* On scope exit (whatever the reason, return or exception), if there are
2171 threads left in the THREADS_TO_STEP chain, put back these threads in the
2172 global list. */
2173 SCOPE_EXIT
2174 {
8b6a69b2 2175 if (threads_to_step.empty ())
187b041e
SM
2176 infrun_debug_printf ("step-over queue now empty");
2177 else
2178 {
2179 infrun_debug_printf ("putting back %d threads to step in global queue",
2180 thread_step_over_chain_length (threads_to_step));
2181
8b6a69b2
SM
2182 global_thread_step_over_chain_enqueue_chain
2183 (std::move (threads_to_step));
187b041e
SM
2184 }
2185 };
2186
8b6a69b2
SM
2187 thread_step_over_list_safe_range range
2188 = make_thread_step_over_list_safe_range (threads_to_step);
2189
2190 for (thread_info *tp : range)
237fc4c9 2191 {
8d297bbf 2192 step_over_what step_what;
372316f1 2193 int must_be_in_line;
c2829269 2194
c65d6b55
PA
2195 gdb_assert (!tp->stop_requested);
2196
187b041e
SM
2197 if (tp->inf->displaced_step_state.unavailable)
2198 {
2199 /* The arch told us to not even try preparing another displaced step
 2200 for this inferior. Just leave the thread in THREADS_TO_STEP; it
2201 will get moved to the global chain on scope exit. */
2202 continue;
2203 }
2204
d8bbae6e
SM
2205 if (tp->inf->thread_waiting_for_vfork_done != nullptr)
2206 {
2207 /* When we stop all threads, handling a vfork, any thread in the step
2208 over chain remains there. A user could also try to continue a
2209 thread stopped at a breakpoint while another thread is waiting for
2210 a vfork-done event. In any case, we don't want to start a step
2211 over right now. */
2212 continue;
2213 }
2214
187b041e
SM
2215 /* Remove thread from the THREADS_TO_STEP chain. If anything goes wrong
2216 while we try to prepare the displaced step, we don't add it back to
2217 the global step over chain. This is to avoid a thread staying in the
 2218 step over chain indefinitely if something goes wrong when resuming it.
2219 If the error is intermittent and it still needs a step over, it will
2220 get enqueued again when we try to resume it normally. */
8b6a69b2 2221 threads_to_step.erase (threads_to_step.iterator_to (*tp));
c2829269 2222
372316f1
PA
2223 step_what = thread_still_needs_step_over (tp);
2224 must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT)
2225 || ((step_what & STEP_OVER_BREAKPOINT)
3fc8eb30 2226 && !use_displaced_stepping (tp)));
372316f1
PA
2227
2228 /* We currently stop all threads of all processes to step-over
2229 in-line. If we need to start a new in-line step-over, let
2230 any pending displaced steps finish first. */
187b041e
SM
2231 if (must_be_in_line && displaced_step_in_progress_any_thread ())
2232 {
2233 global_thread_step_over_chain_enqueue (tp);
2234 continue;
2235 }
c2829269 2236
372316f1 2237 if (tp->control.trap_expected
7846f3aa 2238 || tp->resumed ()
611841bb 2239 || tp->executing ())
ad53cd71 2240 {
f34652de 2241 internal_error ("[%s] has inconsistent state: "
372316f1 2242 "trap_expected=%d, resumed=%d, executing=%d\n",
0fab7955 2243 tp->ptid.to_string ().c_str (),
4d9d9d04 2244 tp->control.trap_expected,
7846f3aa 2245 tp->resumed (),
611841bb 2246 tp->executing ());
ad53cd71 2247 }
1c5cfe86 2248
1eb8556f 2249 infrun_debug_printf ("resuming [%s] for step-over",
0fab7955 2250 tp->ptid.to_string ().c_str ());
4d9d9d04
PA
2251
2252 /* keep_going_pass_signal skips the step-over if the breakpoint
2253 is no longer inserted. In all-stop, we want to keep looking
2254 for a thread that needs a step-over instead of resuming TP,
2255 because we wouldn't be able to resume anything else until the
2256 target stops again. In non-stop, the resume always resumes
2257 only TP, so it's OK to let the thread resume freely. */
fbea99ea 2258 if (!target_is_non_stop_p () && !step_what)
4d9d9d04 2259 continue;
8550d3b3 2260
00431a78 2261 switch_to_thread (tp);
aa563d16
TT
2262 execution_control_state ecs (tp);
2263 keep_going_pass_signal (&ecs);
1c5cfe86 2264
aa563d16 2265 if (!ecs.wait_some_more)
4d9d9d04 2266 error (_("Command aborted."));
1c5cfe86 2267
187b041e
SM
2268 /* If the thread's step over could not be initiated because no buffers
2269 were available, it was re-added to the global step over chain. */
7846f3aa 2270 if (tp->resumed ())
187b041e
SM
2271 {
2272 infrun_debug_printf ("[%s] was resumed.",
0fab7955 2273 tp->ptid.to_string ().c_str ());
187b041e
SM
2274 gdb_assert (!thread_is_in_step_over_chain (tp));
2275 }
2276 else
2277 {
2278 infrun_debug_printf ("[%s] was NOT resumed.",
0fab7955 2279 tp->ptid.to_string ().c_str ());
187b041e
SM
2280 gdb_assert (thread_is_in_step_over_chain (tp));
2281 }
372316f1
PA
2282
2283 /* If we started a new in-line step-over, we're done. */
2284 if (step_over_info_valid_p ())
2285 {
2286 gdb_assert (tp->control.trap_expected);
187b041e
SM
2287 started = true;
2288 break;
372316f1
PA
2289 }
2290
fbea99ea 2291 if (!target_is_non_stop_p ())
4d9d9d04
PA
2292 {
2293 /* On all-stop, shouldn't have resumed unless we needed a
2294 step over. */
2295 gdb_assert (tp->control.trap_expected
2296 || tp->step_after_step_resume_breakpoint);
2297
2298 /* With remote targets (at least), in all-stop, we can't
2299 issue any further remote commands until the program stops
2300 again. */
187b041e
SM
2301 started = true;
2302 break;
1c5cfe86 2303 }
c2829269 2304
4d9d9d04
PA
2305 /* Either the thread no longer needed a step-over, or a new
2306 displaced stepping sequence started. Even in the latter
2307 case, continue looking. Maybe we can also start another
2308 displaced step on a thread of other process. */
237fc4c9 2309 }
4d9d9d04 2310
187b041e 2311 return started;
237fc4c9
PA
2312}
2313
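   (The queue-stealing shape of start_step_over can be seen in isolation in
   the condensed sketch below.  std::deque and a tiny RAII guard stand in for
   GDB's intrusive thread list and SCOPE_EXIT, and the "can this thread get a
   buffer" test is made up.)

#include <deque>
#include <cstdio>

static std::deque<int> global_queue;	/* thread ids needing a step-over */

static bool
try_start_step_over (int thread)
{
  /* Pretend only even-numbered threads can get a displaced-step buffer.  */
  return (thread % 2) == 0;
}

static void
process_step_overs ()
{
  /* Steal the whole global queue so re-queued threads can't make us loop
     forever.  */
  std::deque<int> stolen = std::move (global_queue);
  global_queue.clear ();

  /* On scope exit, put whatever is left back on the global queue.  */
  struct put_back
  {
    std::deque<int> &left;
    ~put_back ()
    {
      for (int t : left)
	global_queue.push_back (t);
    }
  } guard { stolen };

  while (!stolen.empty ())
    {
      int thread = stolen.front ();
      stolen.pop_front ();

      if (!try_start_step_over (thread))
	{
	  /* No buffer available: re-queue globally, like
	     global_thread_step_over_chain_enqueue.  */
	  global_queue.push_back (thread);
	  continue;
	}

      std::printf ("started step-over for thread %d\n", thread);
      break;	/* e.g. an in-line step-over started: stop here and let the
		   guard put the remaining threads back.  */
    }
}

int
main ()
{
  global_queue = { 1, 2, 3, 4 };
  process_step_overs ();
  std::printf ("%zu threads left queued\n", global_queue.size ());
  return 0;
}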
5231c1fd
PA
2314/* Update global variables holding ptids to hold NEW_PTID if they were
2315 holding OLD_PTID. */
2316static void
b161a60d
SM
2317infrun_thread_ptid_changed (process_stratum_target *target,
2318 ptid_t old_ptid, ptid_t new_ptid)
5231c1fd 2319{
b161a60d
SM
2320 if (inferior_ptid == old_ptid
2321 && current_inferior ()->process_target () == target)
5231c1fd 2322 inferior_ptid = new_ptid;
5231c1fd
PA
2323}
2324
237fc4c9 2325\f
c906108c 2326
53904c9e
AC
2327static const char schedlock_off[] = "off";
2328static const char schedlock_on[] = "on";
2329static const char schedlock_step[] = "step";
f2665db5 2330static const char schedlock_replay[] = "replay";
40478521 2331static const char *const scheduler_enums[] = {
ef346e04
AC
2332 schedlock_off,
2333 schedlock_on,
2334 schedlock_step,
f2665db5 2335 schedlock_replay,
03acd4d8 2336 nullptr
ef346e04 2337};
f2665db5 2338static const char *scheduler_mode = schedlock_replay;
920d2a44
AC
2339static void
2340show_scheduler_mode (struct ui_file *file, int from_tty,
2341 struct cmd_list_element *c, const char *value)
2342{
6cb06a8c
TT
2343 gdb_printf (file,
2344 _("Mode for locking scheduler "
2345 "during execution is \"%s\".\n"),
2346 value);
920d2a44 2347}
c906108c
SS
2348
2349static void
eb4c3f4a 2350set_schedlock_func (const char *args, int from_tty, struct cmd_list_element *c)
c906108c 2351{
8a3ecb79 2352 if (!target_can_lock_scheduler ())
eefe576e
AC
2353 {
2354 scheduler_mode = schedlock_off;
d777bf0d
SM
2355 error (_("Target '%s' cannot support this command."),
2356 target_shortname ());
eefe576e 2357 }
c906108c
SS
2358}
2359
d4db2f36
PA
2360/* True if execution commands resume all threads of all processes by
2361 default; otherwise, resume only threads of the current inferior
2362 process. */
491144b5 2363bool sched_multi = false;
d4db2f36 2364
22b11ba9
LS
 2365/* Try to set up for software single stepping. Return true if target_resume()
2366 should use hardware single step.
2facfe5c 2367
22b11ba9 2368 GDBARCH the current gdbarch. */
2facfe5c 2369
c4464ade 2370static bool
22b11ba9 2371maybe_software_singlestep (struct gdbarch *gdbarch)
2facfe5c 2372{
c4464ade 2373 bool hw_step = true;
2facfe5c 2374
f02253f1 2375 if (execution_direction == EXEC_FORWARD
93f9a11f
YQ
2376 && gdbarch_software_single_step_p (gdbarch))
2377 hw_step = !insert_single_step_breakpoints (gdbarch);
2378
2facfe5c
DD
2379 return hw_step;
2380}
c906108c 2381
f3263aa4
PA
2382/* See infrun.h. */
2383
09cee04b
PA
2384ptid_t
2385user_visible_resume_ptid (int step)
2386{
f3263aa4 2387 ptid_t resume_ptid;
09cee04b 2388
09cee04b
PA
2389 if (non_stop)
2390 {
2391 /* With non-stop mode on, threads are always handled
2392 individually. */
2393 resume_ptid = inferior_ptid;
2394 }
2395 else if ((scheduler_mode == schedlock_on)
03d46957 2396 || (scheduler_mode == schedlock_step && step))
09cee04b 2397 {
f3263aa4
PA
2398 /* User-settable 'scheduler' mode requires solo thread
2399 resume. */
09cee04b
PA
2400 resume_ptid = inferior_ptid;
2401 }
f2665db5
MM
2402 else if ((scheduler_mode == schedlock_replay)
2403 && target_record_will_replay (minus_one_ptid, execution_direction))
2404 {
2405 /* User-settable 'scheduler' mode requires solo thread resume in replay
2406 mode. */
2407 resume_ptid = inferior_ptid;
2408 }
f3263aa4
PA
2409 else if (!sched_multi && target_supports_multi_process ())
2410 {
2411 /* Resume all threads of the current process (and none of other
2412 processes). */
e99b03dc 2413 resume_ptid = ptid_t (inferior_ptid.pid ());
f3263aa4
PA
2414 }
2415 else
2416 {
2417 /* Resume all threads of all processes. */
2418 resume_ptid = RESUME_ALL;
2419 }
09cee04b
PA
2420
2421 return resume_ptid;
2422}
2423
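   (Reduced to its essentials, and leaving out the record/replay case, the
   decision above looks like the standalone sketch below.  The enum and the
   returned strings are illustrative only; the real function returns a
   ptid_t.)

#include <cstdio>
#include <string>

enum class schedlock { off, on, step, replay };

static std::string
resume_scope (bool non_stop, schedlock mode, bool stepping, bool sched_multi)
{
  if (non_stop)
    return "current thread only";	/* threads handled individually */
  if (mode == schedlock::on || (mode == schedlock::step && stepping))
    return "current thread only";	/* scheduler locking applies */
  if (!sched_multi)
    return "all threads of current process";
  return "all threads of all processes";
}

int
main ()
{
  std::printf ("%s\n",
	       resume_scope (false, schedlock::step, true, false).c_str ());
  return 0;
}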
5b6d1e4f
PA
2424/* See infrun.h. */
2425
2426process_stratum_target *
2427user_visible_resume_target (ptid_t resume_ptid)
2428{
2429 return (resume_ptid == minus_one_ptid && sched_multi
03acd4d8 2430 ? nullptr
5b6d1e4f
PA
2431 : current_inferior ()->process_target ());
2432}
2433
bd9482bc
PA
2434/* Find a thread from the inferiors that we'll resume that is waiting
2435 for a vfork-done event. */
2436
2437static thread_info *
2438find_thread_waiting_for_vfork_done ()
2439{
2440 gdb_assert (!target_is_non_stop_p ());
2441
2442 if (sched_multi)
2443 {
2444 for (inferior *inf : all_non_exited_inferiors ())
2445 if (inf->thread_waiting_for_vfork_done != nullptr)
2446 return inf->thread_waiting_for_vfork_done;
2447 }
2448 else
2449 {
2450 inferior *cur_inf = current_inferior ();
2451 if (cur_inf->thread_waiting_for_vfork_done != nullptr)
2452 return cur_inf->thread_waiting_for_vfork_done;
2453 }
2454 return nullptr;
2455}
2456
fbea99ea
PA
2457/* Return a ptid representing the set of threads that we will resume,
2458 in the perspective of the target, assuming run control handling
2459 does not require leaving some threads stopped (e.g., stepping past
2460 breakpoint). USER_STEP indicates whether we're about to start the
2461 target for a stepping command. */
2462
2463static ptid_t
2464internal_resume_ptid (int user_step)
2465{
2466 /* In non-stop, we always control threads individually. Note that
2467 the target may always work in non-stop mode even with "set
2468 non-stop off", in which case user_visible_resume_ptid could
2469 return a wildcard ptid. */
2470 if (target_is_non_stop_p ())
2471 return inferior_ptid;
d8bbae6e
SM
2472
2473 /* The rest of the function assumes non-stop==off and
2474 target-non-stop==off.
2475
2476 If a thread is waiting for a vfork-done event, it means breakpoints are out
2477 for this inferior (well, program space in fact). We don't want to resume
2478 any thread other than the one waiting for vfork done, otherwise these other
2479 threads could miss breakpoints. So if a thread in the resumption set is
2480 waiting for a vfork-done event, resume only that thread.
2481
2482 The resumption set width depends on whether schedule-multiple is on or off.
2483
2484 Note that if the target_resume interface was more flexible, we could be
2485 smarter here when schedule-multiple is on. For example, imagine 3
2486 inferiors with 2 threads each (1.1, 1.2, 2.1, 2.2, 3.1 and 3.2). Threads
2487 2.1 and 3.2 are both waiting for a vfork-done event. Then we could ask the
2488 target(s) to resume:
2489
2490 - All threads of inferior 1
2491 - Thread 2.1
2492 - Thread 3.2
2493
2494 Since we don't have that flexibility (we can only pass one ptid), just
2495 resume the first thread waiting for a vfork-done event we find (e.g. thread
2496 2.1). */
bd9482bc
PA
2497 thread_info *thr = find_thread_waiting_for_vfork_done ();
2498 if (thr != nullptr)
d8bbae6e 2499 {
bd9482bc
PA
2500 /* If we have a thread that is waiting for a vfork-done event,
2501 then we should have switched to it earlier. Calling
2502 target_resume with thread scope is only possible when the
2503 current thread matches the thread scope. */
2504 gdb_assert (thr->ptid == inferior_ptid);
2505 gdb_assert (thr->inf->process_target ()
2506 == inferior_thread ()->inf->process_target ());
2507 return thr->ptid;
d8bbae6e 2508 }
d8bbae6e
SM
2509
2510 return user_visible_resume_ptid (user_step);
fbea99ea
PA
2511}
2512
64ce06e4
PA
2513/* Wrapper for target_resume, that handles infrun-specific
2514 bookkeeping. */
2515
2516static void
c4464ade 2517do_target_resume (ptid_t resume_ptid, bool step, enum gdb_signal sig)
64ce06e4
PA
2518{
2519 struct thread_info *tp = inferior_thread ();
2520
c65d6b55
PA
2521 gdb_assert (!tp->stop_requested);
2522
64ce06e4 2523 /* Install inferior's terminal modes. */
223ffa71 2524 target_terminal::inferior ();
64ce06e4
PA
2525
2526 /* Avoid confusing the next resume, if the next stop/resume
2527 happens to apply to another thread. */
1edb66d8 2528 tp->set_stop_signal (GDB_SIGNAL_0);
64ce06e4 2529
8f572e5c
PA
2530 /* Advise target which signals may be handled silently.
2531
2532 If we have removed breakpoints because we are stepping over one
2533 in-line (in any thread), we need to receive all signals to avoid
2534 accidentally skipping a breakpoint during execution of a signal
2535 handler.
2536
2537 Likewise if we're displaced stepping, otherwise a trap for a
2538 breakpoint in a signal handler might be confused with the
7def77a1 2539 displaced step finishing. We don't make the displaced_step_finish
8f572e5c
PA
2540 step distinguish the cases instead, because:
2541
2542 - a backtrace while stopped in the signal handler would show the
2543 scratch pad as frame older than the signal handler, instead of
2544 the real mainline code.
2545
2546 - when the thread is later resumed, the signal handler would
2547 return to the scratch pad area, which would no longer be
2548 valid. */
2549 if (step_over_info_valid_p ()
00431a78 2550 || displaced_step_in_progress (tp->inf))
adc6a863 2551 target_pass_signals ({});
64ce06e4 2552 else
adc6a863 2553 target_pass_signals (signal_pass);
64ce06e4 2554
d8d96409
PA
2555 /* Request that the target report thread-{created,cloned,exited}
2556 events in the following situations:
65c459ab
PA
2557
2558 - If we are performing an in-line step-over-breakpoint, then we
2559 will remove a breakpoint from the target and only run the
2560 current thread. We don't want any new thread (spawned by the
d8d96409
PA
2561 step) to start running, as it might miss the breakpoint. We
2562 need to clear the step-over state if the stepped thread exits,
2563 so we also enable thread-exit events.
65c459ab
PA
2564
2565 - If we are stepping over a breakpoint out of line (displaced
2566 stepping) then we won't remove a breakpoint from the target,
2567 but, if the step spawns a new clone thread, then we will need
2568 to fixup the $pc address in the clone child too, so we need it
d8d96409
PA
2569 to start stopped. We need to release the displaced stepping
2570 buffer if the stepped thread exits, so we also enable
2571 thread-exit events.
7ac958f2
PA
2572
2573 - If scheduler-locking applies, threads that the current thread
2574 spawns should remain halted. It's not strictly necessary to
2575 enable thread-exit events in this case, but it doesn't hurt.
65c459ab
PA
2576 */
2577 if (step_over_info_valid_p ()
7ac958f2
PA
2578 || displaced_step_in_progress_thread (tp)
2579 || schedlock_applies (tp))
65c459ab 2580 {
d8d96409
PA
2581 gdb_thread_options options
2582 = GDB_THREAD_OPTION_CLONE | GDB_THREAD_OPTION_EXIT;
65c459ab
PA
2583 if (target_supports_set_thread_options (options))
2584 tp->set_thread_options (options);
2585 else
2586 target_thread_events (true);
2587 }
9488c327
PA
2588 else if (tp->thread_fsm () != nullptr)
2589 {
2590 gdb_thread_options options = GDB_THREAD_OPTION_EXIT;
2591 if (target_supports_set_thread_options (options))
2592 tp->set_thread_options (options);
2593 else
2594 target_thread_events (true);
2595 }
7ac958f2
PA
2596 else
2597 {
2598 if (target_supports_set_thread_options (0))
2599 tp->set_thread_options (0);
9488c327
PA
2600 else
2601 {
2602 process_stratum_target *resume_target = tp->inf->process_target ();
2603 if (!any_thread_needs_target_thread_events (resume_target,
2604 resume_ptid))
2605 target_thread_events (false);
2606 }
7ac958f2 2607 }
65c459ab
PA
2608
2609 /* If we're resuming more than one thread simultaneously, then any
2610 thread other than the leader is being set to run free. Clear any
2611 previous thread option for those threads. */
2612 if (resume_ptid != inferior_ptid && target_supports_set_thread_options (0))
2613 {
2614 process_stratum_target *resume_target = tp->inf->process_target ();
2615 for (thread_info *thr_iter : all_non_exited_threads (resume_target,
2616 resume_ptid))
2617 if (thr_iter != tp)
2618 thr_iter->set_thread_options (0);
2619 }
2620
05d65a7a
SM
2621 infrun_debug_printf ("resume_ptid=%s, step=%d, sig=%s",
2622 resume_ptid.to_string ().c_str (),
2623 step, gdb_signal_to_symbol_string (sig));
2624
64ce06e4
PA
2625 target_resume (resume_ptid, step, sig);
2626}
2627
d930703d 2628/* Resume the inferior. SIG is the signal to give the inferior
71d378ae
PA
2629 (GDB_SIGNAL_0 for none). Note: don't call this directly; instead
2630 call 'resume', which handles exceptions. */
c906108c 2631
71d378ae
PA
2632static void
2633resume_1 (enum gdb_signal sig)
c906108c 2634{
4e1c45ea 2635 struct thread_info *tp = inferior_thread ();
9c742269
SM
2636 regcache *regcache = get_thread_regcache (tp);
2637 struct gdbarch *gdbarch = regcache->arch ();
b0f16a3e 2638 ptid_t resume_ptid;
856e7dd6
PA
2639 /* This represents the user's step vs continue request. When
2640 deciding whether "set scheduler-locking step" applies, it's the
2641 user's intention that counts. */
2642 const int user_step = tp->control.stepping_command;
64ce06e4
PA
2643 /* This represents what we'll actually request the target to do.
2644 This can decay from a step to a continue, if e.g., we need to
2645 implement single-stepping with breakpoints (software
2646 single-step). */
c4464ade 2647 bool step;
c7e8a53c 2648
c65d6b55 2649 gdb_assert (!tp->stop_requested);
c2829269
PA
2650 gdb_assert (!thread_is_in_step_over_chain (tp));
2651
1edb66d8 2652 if (tp->has_pending_waitstatus ())
372316f1 2653 {
1eb8556f
SM
2654 infrun_debug_printf
2655 ("thread %s has pending wait "
2656 "status %s (currently_stepping=%d).",
0fab7955 2657 tp->ptid.to_string ().c_str (),
7dca2ea7 2658 tp->pending_waitstatus ().to_string ().c_str (),
1eb8556f 2659 currently_stepping (tp));
372316f1 2660
5b6d1e4f 2661 tp->inf->process_target ()->threads_executing = true;
7846f3aa 2662 tp->set_resumed (true);
372316f1
PA
2663
2664 /* FIXME: What should we do if we are supposed to resume this
2665 thread with a signal? Maybe we should maintain a queue of
2666 pending signals to deliver. */
2667 if (sig != GDB_SIGNAL_0)
2668 {
fd7dcb94 2669 warning (_("Couldn't deliver signal %s to %s."),
a068643d 2670 gdb_signal_to_name (sig),
0fab7955 2671 tp->ptid.to_string ().c_str ());
372316f1
PA
2672 }
2673
1edb66d8 2674 tp->set_stop_signal (GDB_SIGNAL_0);
372316f1
PA
2675
2676 if (target_can_async_p ())
9516f85a 2677 {
4a570176 2678 target_async (true);
9516f85a
AB
2679 /* Tell the event loop we have an event to process. */
2680 mark_async_event_handler (infrun_async_inferior_event_token);
2681 }
372316f1
PA
2682 return;
2683 }
2684
2685 tp->stepped_breakpoint = 0;
2686
6b403daa
PA
2687 /* Depends on stepped_breakpoint. */
2688 step = currently_stepping (tp);
2689
6f5d514f 2690 if (current_inferior ()->thread_waiting_for_vfork_done != nullptr)
74609e71 2691 {
48f9886d
PA
2692 /* Don't try to single-step a vfork parent that is waiting for
2693 the child to get out of the shared memory region (by exec'ing
2694 or exiting). This is particularly important on software
2695 single-step archs, as the child process would trip on the
2696 software single step breakpoint inserted for the parent
2697 process. Since the parent will not actually execute any
2698 instruction until the child is out of the shared region (such
2699 are vfork's semantics), it is safe to simply continue it.
2700 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2701 the parent, and tell it to `keep_going', which automatically
2702 re-sets it stepping. */
1eb8556f 2703 infrun_debug_printf ("resume : clear step");
c4464ade 2704 step = false;
74609e71
YQ
2705 }
2706
7ca9b62a
TBA
2707 CORE_ADDR pc = regcache_read_pc (regcache);
2708
1eb8556f
SM
2709 infrun_debug_printf ("step=%d, signal=%s, trap_expected=%d, "
2710 "current thread [%s] at %s",
2711 step, gdb_signal_to_symbol_string (sig),
2712 tp->control.trap_expected,
0fab7955 2713 inferior_ptid.to_string ().c_str (),
1eb8556f 2714 paddress (gdbarch, pc));
c906108c 2715
f9582a22 2716 const address_space *aspace = tp->inf->aspace.get ();
74387712 2717
c2c6d25f
JM
2718 /* Normally, by the time we reach `resume', the breakpoints are either
2719 removed or inserted, as appropriate. The exception is if we're sitting
2720 at a permanent breakpoint; we need to step over it, but permanent
2721 breakpoints can't be removed. So we have to test for it here. */
6c95b8df 2722 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
6d350bb5 2723 {
af48d08f
PA
2724 if (sig != GDB_SIGNAL_0)
2725 {
2726 /* We have a signal to pass to the inferior. The resume
2727 may, or may not take us to the signal handler. If this
2728 is a step, we'll need to stop in the signal handler, if
2729 there's one, (if the target supports stepping into
2730 handlers), or in the next mainline instruction, if
2731 there's no handler. If this is a continue, we need to be
2732 sure to run the handler with all breakpoints inserted.
2733 In all cases, set a breakpoint at the current address
2734 (where the handler returns to), and once that breakpoint
2735 is hit, resume skipping the permanent breakpoint. If
2736 that breakpoint isn't hit, then we've stepped into the
2737 signal handler (or hit some other event). We'll delete
2738 the step-resume breakpoint then. */
2739
1eb8556f
SM
2740 infrun_debug_printf ("resume: skipping permanent breakpoint, "
2741 "deliver signal first");
af48d08f
PA
2742
2743 clear_step_over_info ();
2744 tp->control.trap_expected = 0;
2745
03acd4d8 2746 if (tp->control.step_resume_breakpoint == nullptr)
af48d08f
PA
2747 {
2748 /* Set a "high-priority" step-resume, as we don't want
2749 user breakpoints at PC to trigger (again) when this
2750 hits. */
2751 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
f5951b9f
SM
2752 gdb_assert (tp->control.step_resume_breakpoint->first_loc ()
2753 .permanent);
af48d08f
PA
2754
2755 tp->step_after_step_resume_breakpoint = step;
2756 }
2757
2758 insert_breakpoints ();
2759 }
2760 else
2761 {
2762 /* There's no signal to pass, we can go ahead and skip the
2763 permanent breakpoint manually. */
1eb8556f 2764 infrun_debug_printf ("skipping permanent breakpoint");
af48d08f
PA
2765 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
2766 /* Update pc to reflect the new address from which we will
2767 execute instructions. */
2768 pc = regcache_read_pc (regcache);
2769
2770 if (step)
2771 {
2772 /* We've already advanced the PC, so the stepping part
2773 is done. Now we need to arrange for a trap to be
2774 reported to handle_inferior_event. Set a breakpoint
2775 at the current PC, and run to it. Don't update
2776 prev_pc, because if we end in
44a1ee51
PA
2777 switch_back_to_stepped_thread, we want the "expected
2778 thread advanced also" branch to be taken. IOW, we
2779 don't want this thread to step further from PC
af48d08f 2780 (overstep). */
1ac806b8 2781 gdb_assert (!step_over_info_valid_p ());
af48d08f
PA
2782 insert_single_step_breakpoint (gdbarch, aspace, pc);
2783 insert_breakpoints ();
2784
fbea99ea 2785 resume_ptid = internal_resume_ptid (user_step);
c4464ade 2786 do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
7846f3aa 2787 tp->set_resumed (true);
af48d08f
PA
2788 return;
2789 }
2790 }
6d350bb5 2791 }
c2c6d25f 2792
c1e36e3e
PA
2793 /* If we have a breakpoint to step over, make sure to do a single
2794 step only. Same if we have software watchpoints. */
2795 if (tp->control.trap_expected || bpstat_should_step ())
2796 tp->control.may_range_step = 0;
2797
7da6a5b9
LM
2798 /* If displaced stepping is enabled, step over breakpoints by executing a
2799 copy of the instruction at a different address.
237fc4c9
PA
2800
2801 We can't use displaced stepping when we have a signal to deliver;
2802 the comments for displaced_step_prepare explain why. The
2803 comments in the handle_inferior event for dealing with 'random
74609e71
YQ
2804 signals' explain what we do instead.
2805
2806 We can't use displaced stepping when we are waiting for vfork_done
2807 event, displaced stepping breaks the vfork child similarly as single
2808 step software breakpoint. */
3fc8eb30
PA
2809 if (tp->control.trap_expected
2810 && use_displaced_stepping (tp)
cb71640d 2811 && !step_over_info_valid_p ()
a493e3e2 2812 && sig == GDB_SIGNAL_0
6f5d514f 2813 && current_inferior ()->thread_waiting_for_vfork_done == nullptr)
237fc4c9 2814 {
bab37966
SM
2815 displaced_step_prepare_status prepare_status
2816 = displaced_step_prepare (tp);
fc1cf338 2817
bab37966 2818 if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
d56b7306 2819 {
1eb8556f 2820 infrun_debug_printf ("Got placed in step-over queue");
4d9d9d04
PA
2821
2822 tp->control.trap_expected = 0;
d56b7306
VP
2823 return;
2824 }
bab37966 2825 else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_CANT)
3fc8eb30
PA
2826 {
2827 /* Fallback to stepping over the breakpoint in-line. */
2828
2829 if (target_is_non_stop_p ())
4f5539f0 2830 stop_all_threads ("displaced stepping falling back on inline stepping");
3fc8eb30 2831
74387712
SM
2832 set_step_over_info (aspace, regcache_read_pc (regcache), 0,
2833 tp->global_num);
3fc8eb30 2834
22b11ba9 2835 step = maybe_software_singlestep (gdbarch);
3fc8eb30
PA
2836
2837 insert_breakpoints ();
2838 }
bab37966 2839 else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_OK)
3fc8eb30 2840 {
3fc8eb30
PA
2841 /* Update pc to reflect the new address from which we will
2842 execute instructions due to displaced stepping. */
00431a78 2843 pc = regcache_read_pc (get_thread_regcache (tp));
ca7781d2 2844
40a53766 2845 step = gdbarch_displaced_step_hw_singlestep (gdbarch);
3fc8eb30 2846 }
bab37966 2847 else
557b4d76
SM
2848 gdb_assert_not_reached ("Invalid displaced_step_prepare_status "
2849 "value.");
237fc4c9
PA
2850 }
2851
2facfe5c 2852 /* Do we need to do it the hard way, w/temp breakpoints? */
99e40580 2853 else if (step)
22b11ba9 2854 step = maybe_software_singlestep (gdbarch);
c906108c 2855
30852783
UW
2856 /* Currently, our software single-step implementation leads to different
2857 results than hardware single-stepping in one situation: when stepping
2858 into delivering a signal which has an associated signal handler,
2859 hardware single-step will stop at the first instruction of the handler,
2860 while software single-step will simply skip execution of the handler.
2861
2862 For now, this difference in behavior is accepted since there is no
2863 easy way to actually implement single-stepping into a signal handler
2864 without kernel support.
2865
2866 However, there is one scenario where this difference leads to follow-on
2867 problems: if we're stepping off a breakpoint by removing all breakpoints
2868 and then single-stepping. In this case, the software single-step
2869 behavior means that even if there is a *breakpoint* in the signal
2870 handler, GDB still would not stop.
2871
2872 Fortunately, we can at least fix this particular issue. We detect
2873 here the case where we are about to deliver a signal while software
2874 single-stepping with breakpoints removed. In this situation, we
2875 revert the decisions to remove all breakpoints and insert single-
2876 step breakpoints, and instead we install a step-resume breakpoint
2877 at the current address, deliver the signal without stepping, and
2878 once we arrive back at the step-resume breakpoint, actually step
2879 over the breakpoint we originally wanted to step over. */
34b7e8a6 2880 if (thread_has_single_step_breakpoints_set (tp)
6cc83d2a
PA
2881 && sig != GDB_SIGNAL_0
2882 && step_over_info_valid_p ())
30852783
UW
2883 {
2884 /* If we have nested signals or a pending signal is delivered
7da6a5b9 2885 immediately after a handler returns, we might already have
30852783
UW
2886 a step-resume breakpoint set on the earlier handler. We cannot
2887 set another step-resume breakpoint; just continue on until the
2888 original breakpoint is hit. */
03acd4d8 2889 if (tp->control.step_resume_breakpoint == nullptr)
30852783 2890 {
2c03e5be 2891 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
30852783
UW
2892 tp->step_after_step_resume_breakpoint = 1;
2893 }
2894
34b7e8a6 2895 delete_single_step_breakpoints (tp);
30852783 2896
31e77af2 2897 clear_step_over_info ();
30852783 2898 tp->control.trap_expected = 0;
31e77af2
PA
2899
2900 insert_breakpoints ();
30852783
UW
2901 }
2902
b0f16a3e
SM
2903 /* If STEP is set, it's a request to use hardware stepping
2904 facilities. But in that case, we should never
2905 use a single-step breakpoint. */
34b7e8a6 2906 gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));
dfcd3bfb 2907
fbea99ea 2908 /* Decide the set of threads to ask the target to resume. */
1946c4cc 2909 if (tp->control.trap_expected)
b0f16a3e
SM
2910 {
2911 /* We're allowing a thread to run past a breakpoint it has
1946c4cc
YQ
2912 hit, either by single-stepping the thread with the breakpoint
2913 removed, or by displaced stepping, with the breakpoint inserted.
2914 In the former case, we need to single-step only this thread,
2915 and keep others stopped, as they can miss this breakpoint if
2916 allowed to run. That's not really a problem for displaced
2917 stepping, but we still keep other threads stopped, in case
2918 another thread is also stopped for a breakpoint waiting for
2919 its turn in the displaced stepping queue. */
b0f16a3e
SM
2920 resume_ptid = inferior_ptid;
2921 }
fbea99ea
PA
2922 else
2923 resume_ptid = internal_resume_ptid (user_step);
d4db2f36 2924
7f5ef605
PA
2925 if (execution_direction != EXEC_REVERSE
2926 && step && breakpoint_inserted_here_p (aspace, pc))
b0f16a3e 2927 {
372316f1
PA
2928 /* There are two cases where we currently need to step a
2929 breakpoint instruction when we have a signal to deliver:
2930
2931 - See handle_signal_stop where we handle random signals that
2932 could take us out of the stepping range. Normally, in
2933 that case we end up continuing (instead of stepping) over the
7f5ef605
PA
2934 signal handler with a breakpoint at PC, but there are cases
2935 where we should _always_ single-step, even if we have a
2936 step-resume breakpoint, like when a software watchpoint is
2937 set. Assuming single-stepping and delivering a signal at the
2938 same time would take us to the signal handler, then we could
2939 have removed the breakpoint at PC to step over it. However,
2940 some hardware step targets (e.g., Mac OS) can't step
2941 into signal handlers, and for those, we need to leave the
2942 breakpoint at PC inserted, as otherwise if the handler
2943 recurses and executes PC again, it'll miss the breakpoint.
2944 So we leave the breakpoint inserted anyway, but we need to
2945 record that we tried to step a breakpoint instruction, so
372316f1
PA
2946 that adjust_pc_after_break doesn't end up confused.
2947
dda83cd7 2948 - In non-stop if we insert a breakpoint (e.g., a step-resume)
372316f1
PA
2949 in one thread after another thread that was stepping had been
2950 momentarily paused for a step-over. When we re-resume the
2951 stepping thread, it may be resumed from that address with a
2952 breakpoint that hasn't trapped yet. Seen with
2953 gdb.threads/non-stop-fair-events.exp, on targets that don't
2954 do displaced stepping. */
2955
1eb8556f 2956 infrun_debug_printf ("resume: [%s] stepped breakpoint",
0fab7955 2957 tp->ptid.to_string ().c_str ());
7f5ef605
PA
2958
2959 tp->stepped_breakpoint = 1;
2960
b0f16a3e
SM
2961 /* Most targets can step a breakpoint instruction, thus
2962 executing it normally. But if this one cannot, just
2963 continue and we will hit it anyway. */
7f5ef605 2964 if (gdbarch_cannot_step_breakpoint (gdbarch))
c4464ade 2965 step = false;
b0f16a3e 2966 }
ef5cf84e 2967
b0f16a3e
SM
2968 if (tp->control.may_range_step)
2969 {
2970 /* If we're resuming a thread with the PC out of the step
2971 range, then we're doing some nested/finer run control
2972 operation, like stepping the thread out of the dynamic
2973 linker or the displaced stepping scratch pad. We
2974 shouldn't have allowed a range step then. */
2975 gdb_assert (pc_in_thread_step_range (pc, tp));
2976 }
c1e36e3e 2977
64ce06e4 2978 do_target_resume (resume_ptid, step, sig);
7846f3aa 2979 tp->set_resumed (true);
c906108c 2980}
71d378ae
PA
2981
2982/* Resume the inferior. SIG is the signal to give the inferior
2983 (GDB_SIGNAL_0 for none). This is a wrapper around 'resume_1' that
2984 rolls back state on error. */
2985
aff4e175 2986static void
71d378ae
PA
2987resume (gdb_signal sig)
2988{
a70b8144 2989 try
71d378ae
PA
2990 {
2991 resume_1 (sig);
2992 }
230d2906 2993 catch (const gdb_exception &ex)
71d378ae
PA
2994 {
2995 /* If resuming is being aborted for any reason, delete any
2996 single-step breakpoint resume_1 may have created, to avoid
2997 confusing the following resumption, and to avoid leaving
2998 single-step breakpoints perturbing other threads, in case
2999 we're running in non-stop mode. */
3000 if (inferior_ptid != null_ptid)
3001 delete_single_step_breakpoints (inferior_thread ());
eedc3f4f 3002 throw;
71d378ae 3003 }
71d378ae
PA
3004}
3005
c906108c 3006\f
237fc4c9 3007/* Proceeding. */
c906108c 3008
4c2f2a79
PA
3009/* See infrun.h. */
3010
3011/* Counter that tracks number of user visible stops. This can be used
3012 to tell whether a command has proceeded the inferior past the
3013 current location. This allows e.g., inferior function calls in
3014 breakpoint commands to not interrupt the command list. When the
3015 call finishes successfully, the inferior is standing at the same
3016 breakpoint as if nothing happened (and so we don't call
3017 normal_stop). */
3018static ULONGEST current_stop_id;
3019
3020/* See infrun.h. */
3021
3022ULONGEST
3023get_stop_id (void)
3024{
3025 return current_stop_id;
3026}
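/* Illustrative sketch (not code from this file): a caller that wants
   to know whether a command proceeded the inferior past the current
   location could snapshot the counter and compare it afterwards:

     ULONGEST before = get_stop_id ();
     ... resume the inferior and wait for it to stop ...
     if (get_stop_id () != before)
       -> a user-visible stop was reported in the meantime.  */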
3027
3028/* Called when we report a user visible stop. */
3029
3030static void
3031new_stop_id (void)
3032{
3033 current_stop_id++;
3034}
3035
c906108c
SS
3036/* Clear out all variables saying what to do when inferior is continued.
3037 First do this, then set the ones you want, then call `proceed'. */
3038
a7212384
UW
3039static void
3040clear_proceed_status_thread (struct thread_info *tp)
c906108c 3041{
0fab7955 3042 infrun_debug_printf ("%s", tp->ptid.to_string ().c_str ());
d6b48e9c 3043
372316f1
PA
3044 /* If we're starting a new sequence, then the previous finished
3045 single-step is no longer relevant. */
1edb66d8 3046 if (tp->has_pending_waitstatus ())
372316f1 3047 {
1edb66d8 3048 if (tp->stop_reason () == TARGET_STOPPED_BY_SINGLE_STEP)
372316f1 3049 {
1eb8556f
SM
3050 infrun_debug_printf ("pending event of %s was a finished step. "
3051 "Discarding.",
0fab7955 3052 tp->ptid.to_string ().c_str ());
372316f1 3053
1edb66d8
SM
3054 tp->clear_pending_waitstatus ();
3055 tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
372316f1 3056 }
1eb8556f 3057 else
372316f1 3058 {
1eb8556f
SM
3059 infrun_debug_printf
3060 ("thread %s has pending wait status %s (currently_stepping=%d).",
0fab7955 3061 tp->ptid.to_string ().c_str (),
7dca2ea7 3062 tp->pending_waitstatus ().to_string ().c_str (),
1eb8556f 3063 currently_stepping (tp));
372316f1
PA
3064 }
3065 }
3066
70509625
PA
3067 /* If this signal should not be seen by program, give it zero.
3068 Used for debugging signals. */
1edb66d8
SM
3069 if (!signal_pass_state (tp->stop_signal ()))
3070 tp->set_stop_signal (GDB_SIGNAL_0);
70509625 3071
573269a8 3072 tp->release_thread_fsm ();
243a9253 3073
16c381f0
JK
3074 tp->control.trap_expected = 0;
3075 tp->control.step_range_start = 0;
3076 tp->control.step_range_end = 0;
c1e36e3e 3077 tp->control.may_range_step = 0;
16c381f0
JK
3078 tp->control.step_frame_id = null_frame_id;
3079 tp->control.step_stack_frame_id = null_frame_id;
3080 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
03acd4d8 3081 tp->control.step_start_function = nullptr;
a7212384 3082 tp->stop_requested = 0;
4e1c45ea 3083
16c381f0 3084 tp->control.stop_step = 0;
32400beb 3085
b986eec5
CL
3086 tp->control.proceed_to_finish = 0;
3087
856e7dd6 3088 tp->control.stepping_command = 0;
17b2616c 3089
a7212384 3090 /* Discard any remaining commands or status from previous stop. */
16c381f0 3091 bpstat_clear (&tp->control.stop_bpstat);
a7212384 3092}
32400beb 3093
7603ea6a
SM
3094/* Notify the current interpreter and observers that the target is about to
3095 proceed. */
3096
3097static void
3098notify_about_to_proceed ()
3099{
3100 top_level_interpreter ()->on_about_to_proceed ();
3101 gdb::observers::about_to_proceed.notify ();
3102}
3103
a7212384 3104void
70509625 3105clear_proceed_status (int step)
a7212384 3106{
f2665db5
MM
3107 /* With scheduler-locking replay, stop replaying other threads if we're
3108 not replaying the user-visible resume ptid.
3109
3110 This is a convenience feature to not require the user to explicitly
3111 stop replaying the other threads. We're assuming that the user's
3112 intent is to resume tracing the recorded process. */
3113 if (!non_stop && scheduler_mode == schedlock_replay
3114 && target_record_is_replaying (minus_one_ptid)
3115 && !target_record_will_replay (user_visible_resume_ptid (step),
3116 execution_direction))
3117 target_record_stop_replaying ();
3118
08036331 3119 if (!non_stop && inferior_ptid != null_ptid)
6c95b8df 3120 {
08036331 3121 ptid_t resume_ptid = user_visible_resume_ptid (step);
5b6d1e4f
PA
3122 process_stratum_target *resume_target
3123 = user_visible_resume_target (resume_ptid);
70509625
PA
3124
3125 /* In all-stop mode, delete the per-thread status of all threads
3126 we're about to resume, implicitly and explicitly. */
5b6d1e4f 3127 for (thread_info *tp : all_non_exited_threads (resume_target, resume_ptid))
08036331 3128 clear_proceed_status_thread (tp);
6c95b8df
PA
3129 }
3130
d7e15655 3131 if (inferior_ptid != null_ptid)
a7212384
UW
3132 {
3133 struct inferior *inferior;
3134
3135 if (non_stop)
3136 {
6c95b8df
PA
3137 /* If in non-stop mode, only delete the per-thread status of
3138 the current thread. */
a7212384
UW
3139 clear_proceed_status_thread (inferior_thread ());
3140 }
6c95b8df 3141
d6b48e9c 3142 inferior = current_inferior ();
16c381f0 3143 inferior->control.stop_soon = NO_STOP_QUIETLY;
4e1c45ea
PA
3144 }
3145
7603ea6a 3146 notify_about_to_proceed ();
c906108c
SS
3147}
3148
99619bea
PA
3149/* Returns true if TP is still stopped at a breakpoint that needs
3150 stepping-over in order to make progress. If the breakpoint is gone
3151 meanwhile, we can skip the whole step-over dance. */
ea67f13b 3152
c4464ade 3153static bool
6c4cfb24 3154thread_still_needs_step_over_bp (struct thread_info *tp)
99619bea
PA
3155{
3156 if (tp->stepping_over_breakpoint)
3157 {
00431a78 3158 struct regcache *regcache = get_thread_regcache (tp);
99619bea 3159
f9582a22 3160 if (breakpoint_here_p (tp->inf->aspace.get (),
af48d08f
PA
3161 regcache_read_pc (regcache))
3162 == ordinary_breakpoint_here)
c4464ade 3163 return true;
99619bea
PA
3164
3165 tp->stepping_over_breakpoint = 0;
3166 }
3167
c4464ade 3168 return false;
99619bea
PA
3169}
3170
6c4cfb24
PA
3171/* Check whether thread TP still needs to start a step-over in order
3172 to make progress when resumed. Returns a bitwise OR of enum
3173 step_over_what bits, indicating what needs to be stepped over. */
3174
8d297bbf 3175static step_over_what
6c4cfb24
PA
3176thread_still_needs_step_over (struct thread_info *tp)
3177{
8d297bbf 3178 step_over_what what = 0;
6c4cfb24
PA
3179
3180 if (thread_still_needs_step_over_bp (tp))
3181 what |= STEP_OVER_BREAKPOINT;
3182
3183 if (tp->stepping_over_watchpoint
9aed480c 3184 && !target_have_steppable_watchpoint ())
6c4cfb24
PA
3185 what |= STEP_OVER_WATCHPOINT;
3186
3187 return what;
3188}
3189
483805cf
PA
3190/* Returns true if scheduler locking applies to thread TP, which we
3191 may be about to resume with a step/next-like command. */
3192
c4464ade 3193static bool
856e7dd6 3194schedlock_applies (struct thread_info *tp)
483805cf
PA
3195{
3196 return (scheduler_mode == schedlock_on
3197 || (scheduler_mode == schedlock_step
f2665db5
MM
3198 && tp->control.stepping_command)
3199 || (scheduler_mode == schedlock_replay
3200 && target_record_will_replay (minus_one_ptid,
3201 execution_direction)));
483805cf
PA
3202}
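/* In other words, as encoded by the predicate above: "on" locks the
   scheduler unconditionally, "step" locks it only while TP is running
   a stepping command, and "replay" locks it while the record target
   is replaying execution in the current direction.  */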
3203
1192f124
SM
3204/* Set process_stratum_target::COMMIT_RESUMED_STATE in all target
3205 stacks that have threads executing and don't have threads with
3206 pending events. */
5b6d1e4f
PA
3207
3208static void
1192f124
SM
3209maybe_set_commit_resumed_all_targets ()
3210{
b4b1a226
SM
3211 scoped_restore_current_thread restore_thread;
3212
1192f124
SM
3213 for (inferior *inf : all_non_exited_inferiors ())
3214 {
3215 process_stratum_target *proc_target = inf->process_target ();
3216
3217 if (proc_target->commit_resumed_state)
3218 {
3219 /* We already set this in a previous iteration, via another
3220 inferior sharing the process_stratum target. */
3221 continue;
3222 }
3223
3224 /* If the target has no resumed threads, it would be useless to
3225 ask it to commit the resumed threads. */
3226 if (!proc_target->threads_executing)
3227 {
3228 infrun_debug_printf ("not requesting commit-resumed for target "
3229 "%s, no resumed threads",
3230 proc_target->shortname ());
3231 continue;
3232 }
3233
3234 /* As an optimization, if a thread from this target has some
3235 status to report, handle it before requiring the target to
3236 commit its resumed threads: handling the status might lead to
3237 resuming more threads. */
273dadf2 3238 if (proc_target->has_resumed_with_pending_wait_status ())
1192f124
SM
3239 {
3240 infrun_debug_printf ("not requesting commit-resumed for target %s, a"
3241 " thread has a pending waitstatus",
3242 proc_target->shortname ());
3243 continue;
3244 }
3245
b4b1a226
SM
3246 switch_to_inferior_no_thread (inf);
3247
3248 if (target_has_pending_events ())
3249 {
3250 infrun_debug_printf ("not requesting commit-resumed for target %s, "
3251 "target has pending events",
3252 proc_target->shortname ());
3253 continue;
3254 }
3255
1192f124
SM
3256 infrun_debug_printf ("enabling commit-resumed for target %s",
3257 proc_target->shortname ());
3258
3259 proc_target->commit_resumed_state = true;
3260 }
3261}
3262
3263/* See infrun.h. */
3264
3265void
3266maybe_call_commit_resumed_all_targets ()
5b6d1e4f
PA
3267{
3268 scoped_restore_current_thread restore_thread;
3269
1192f124
SM
3270 for (inferior *inf : all_non_exited_inferiors ())
3271 {
3272 process_stratum_target *proc_target = inf->process_target ();
3273
3274 if (!proc_target->commit_resumed_state)
3275 continue;
3276
3277 switch_to_inferior_no_thread (inf);
3278
3279 infrun_debug_printf ("calling commit_resumed for target %s",
3280 proc_target->shortname());
3281
3282 target_commit_resumed ();
3283 }
3284}
3285
3286/* To track nesting of scoped_disable_commit_resumed objects, ensuring
3287 that only the outermost one attempts to re-enable
3288 commit-resumed. */
3289static bool enable_commit_resumed = true;
3290
3291/* See infrun.h. */
3292
3293scoped_disable_commit_resumed::scoped_disable_commit_resumed
3294 (const char *reason)
3295 : m_reason (reason),
3296 m_prev_enable_commit_resumed (enable_commit_resumed)
3297{
3298 infrun_debug_printf ("reason=%s", m_reason);
3299
3300 enable_commit_resumed = false;
5b6d1e4f
PA
3301
3302 for (inferior *inf : all_non_exited_inferiors ())
1192f124
SM
3303 {
3304 process_stratum_target *proc_target = inf->process_target ();
5b6d1e4f 3305
1192f124
SM
3306 if (m_prev_enable_commit_resumed)
3307 {
3308 /* This is the outermost instance: force all
3309 COMMIT_RESUMED_STATE to false. */
3310 proc_target->commit_resumed_state = false;
3311 }
3312 else
3313 {
3314 /* This is not the outermost instance, we expect
3315 COMMIT_RESUMED_STATE to have been cleared by the
3316 outermost instance. */
3317 gdb_assert (!proc_target->commit_resumed_state);
3318 }
3319 }
3320}
3321
3322/* See infrun.h. */
3323
3324void
3325scoped_disable_commit_resumed::reset ()
3326{
3327 if (m_reset)
3328 return;
3329 m_reset = true;
3330
3331 infrun_debug_printf ("reason=%s", m_reason);
3332
3333 gdb_assert (!enable_commit_resumed);
3334
3335 enable_commit_resumed = m_prev_enable_commit_resumed;
3336
3337 if (m_prev_enable_commit_resumed)
5b6d1e4f 3338 {
1192f124 3339 /* This is the outermost instance, re-enable
287de656 3340 COMMIT_RESUMED_STATE on the targets where it's possible. */
1192f124
SM
3341 maybe_set_commit_resumed_all_targets ();
3342 }
3343 else
3344 {
3345 /* This is not the outermost instance, we expect
3346 COMMIT_RESUMED_STATE to still be false. */
3347 for (inferior *inf : all_non_exited_inferiors ())
3348 {
3349 process_stratum_target *proc_target = inf->process_target ();
3350 gdb_assert (!proc_target->commit_resumed_state);
3351 }
3352 }
3353}
3354
3355/* See infrun.h. */
3356
3357scoped_disable_commit_resumed::~scoped_disable_commit_resumed ()
3358{
3359 reset ();
3360}
3361
3362/* See infrun.h. */
3363
3364void
3365scoped_disable_commit_resumed::reset_and_commit ()
3366{
3367 reset ();
3368 maybe_call_commit_resumed_all_targets ();
3369}
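/* Typical usage, modeled on proceed () further below (sketch only,
   not compiled code):

     {
       scoped_disable_commit_resumed disable_commit_resumed ("reason");

       ... mark threads resumed, queue step-overs, etc. ...

       disable_commit_resumed.reset_and_commit ();
     }

   The destructor falls back to reset (), but only reset_and_commit ()
   also asks the affected targets to commit their resumed threads.  */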
3370
3371/* See infrun.h. */
3372
3373scoped_enable_commit_resumed::scoped_enable_commit_resumed
3374 (const char *reason)
3375 : m_reason (reason),
3376 m_prev_enable_commit_resumed (enable_commit_resumed)
3377{
3378 infrun_debug_printf ("reason=%s", m_reason);
3379
3380 if (!enable_commit_resumed)
3381 {
3382 enable_commit_resumed = true;
3383
3384 /* Re-enable COMMIT_RESUMED_STATE on the targets where it's
3385 possible. */
3386 maybe_set_commit_resumed_all_targets ();
3387
3388 maybe_call_commit_resumed_all_targets ();
3389 }
3390}
3391
3392/* See infrun.h. */
3393
3394scoped_enable_commit_resumed::~scoped_enable_commit_resumed ()
3395{
3396 infrun_debug_printf ("reason=%s", m_reason);
3397
3398 gdb_assert (enable_commit_resumed);
3399
3400 enable_commit_resumed = m_prev_enable_commit_resumed;
3401
3402 if (!enable_commit_resumed)
3403 {
3404 /* Force all COMMIT_RESUMED_STATE back to false. */
3405 for (inferior *inf : all_non_exited_inferiors ())
3406 {
3407 process_stratum_target *proc_target = inf->process_target ();
3408 proc_target->commit_resumed_state = false;
3409 }
5b6d1e4f
PA
3410 }
3411}
3412
2f4fcf00
PA
3413/* Check that all the targets we're about to resume are in non-stop
3414 mode. Ideally, we'd only care whether all targets support
3415 target-async, but we're not there yet. E.g., stop_all_threads
3416 doesn't know how to handle all-stop targets. Also, the remote
3417 protocol in all-stop mode is synchronous, irrespective of
3418 target-async, which means that things like a breakpoint re-set
3419 triggered by one target would try to read memory from all targets
3420 and fail. */
3421
3422static void
3423check_multi_target_resumption (process_stratum_target *resume_target)
3424{
3425 if (!non_stop && resume_target == nullptr)
3426 {
3427 scoped_restore_current_thread restore_thread;
3428
3429 /* This is used to track whether we're resuming more than one
3430 target. */
3431 process_stratum_target *first_connection = nullptr;
3432
3433 /* The first inferior we see with a target that does not work in
3434 always-non-stop mode. */
3435 inferior *first_not_non_stop = nullptr;
3436
f058c521 3437 for (inferior *inf : all_non_exited_inferiors ())
2f4fcf00
PA
3438 {
3439 switch_to_inferior_no_thread (inf);
3440
55f6301a 3441 if (!target_has_execution ())
2f4fcf00
PA
3442 continue;
3443
3444 process_stratum_target *proc_target
3445 = current_inferior ()->process_target();
3446
3447 if (!target_is_non_stop_p ())
3448 first_not_non_stop = inf;
3449
3450 if (first_connection == nullptr)
3451 first_connection = proc_target;
3452 else if (first_connection != proc_target
3453 && first_not_non_stop != nullptr)
3454 {
3455 switch_to_inferior_no_thread (first_not_non_stop);
3456
3457 proc_target = current_inferior ()->process_target();
3458
3459 error (_("Connection %d (%s) does not support "
3460 "multi-target resumption."),
3461 proc_target->connection_number,
3462 make_target_connection_string (proc_target).c_str ());
3463 }
3464 }
3465 }
3466}
3467
e07d892c
MS
3468/* Helper function for `proceed`. Check if thread TP is suitable for
3469 resuming, and, if it is, switch to the thread and call
3470 `keep_going_pass_signal`. If TP is not suitable for resuming then this
3471 function will just return without switching threads. */
3472
3473static void
3474proceed_resume_thread_checked (thread_info *tp)
3475{
3476 if (!tp->inf->has_execution ())
3477 {
3478 infrun_debug_printf ("[%s] target has no execution",
3479 tp->ptid.to_string ().c_str ());
3480 return;
3481 }
3482
3483 if (tp->resumed ())
3484 {
3485 infrun_debug_printf ("[%s] resumed",
3486 tp->ptid.to_string ().c_str ());
3487 gdb_assert (tp->executing () || tp->has_pending_waitstatus ());
3488 return;
3489 }
3490
3491 if (thread_is_in_step_over_chain (tp))
3492 {
3493 infrun_debug_printf ("[%s] needs step-over",
3494 tp->ptid.to_string ().c_str ());
3495 return;
3496 }
3497
3498 /* When handling a vfork GDB removes all breakpoints from the program
b1e0126e
AB
3499 space in which the vfork is being handled. If we are following the
3500 parent then GDB will set the thread_waiting_for_vfork_done member of
3501 the parent inferior. In this case we should take care to only resume
3502 the vfork parent thread, the kernel will hold this thread suspended
3503 until the vfork child has exited or execd, at which point the parent
3504 will be resumed and a VFORK_DONE event sent to GDB. */
e07d892c
MS
3505 if (tp->inf->thread_waiting_for_vfork_done != nullptr)
3506 {
3507 if (target_is_non_stop_p ())
3508 {
3509 /* For non-stop targets, regardless of whether GDB is using
3510 all-stop or non-stop mode, threads are controlled
3511 individually.
3512
3513 When a thread is handling a vfork, breakpoints are removed
3514 from the inferior (well, program space in fact), so it is
3515 critical that we don't try to resume any thread other than the
3516 vfork parent. */
3517 if (tp != tp->inf->thread_waiting_for_vfork_done)
3518 {
3519 infrun_debug_printf ("[%s] thread %s of this inferior is "
3520 "waiting for vfork-done",
3521 tp->ptid.to_string ().c_str (),
3522 tp->inf->thread_waiting_for_vfork_done
3523 ->ptid.to_string ().c_str ());
3524 return;
3525 }
3526 }
3527 else
3528 {
3529 /* For all-stop targets, when we attempt to resume the inferior,
3530 we will only resume the vfork parent thread, this is handled
3531 in internal_resume_ptid.
3532
3533 Additionally, we will always be called with the vfork parent
3534 thread as the current thread (TP) thanks to follow_fork, as
3535 such the following assertion should hold.
3536
3537 Beyond this there is nothing more that needs to be done
3538 here. */
3539 gdb_assert (tp == tp->inf->thread_waiting_for_vfork_done);
3540 }
3541 }
3542
b1e0126e
AB
3543 /* When handling a vfork GDB removes all breakpoints from the program
3544 space in which the vfork is being handled. If we are following the
3545 child then GDB will set vfork_child member of the vfork parent
3546 inferior. Once the child has either exited or execd then GDB will
3547 detach from the parent process. Until that point GDB should not
3548 resume any thread in the parent process. */
3549 if (tp->inf->vfork_child != nullptr)
3550 {
3551 infrun_debug_printf ("[%s] thread is part of a vfork parent, child is %d",
3552 tp->ptid.to_string ().c_str (),
3553 tp->inf->vfork_child->pid);
3554 return;
3555 }
3556
e07d892c
MS
3557 infrun_debug_printf ("resuming %s",
3558 tp->ptid.to_string ().c_str ());
3559
3560 execution_control_state ecs (tp);
3561 switch_to_thread (tp);
3562 keep_going_pass_signal (&ecs);
3563 if (!ecs.wait_some_more)
3564 error (_("Command aborted."));
3565}
3566
c906108c
SS
3567/* Basic routine for continuing the program in various fashions.
3568
3569 ADDR is the address to resume at, or -1 for resume where stopped.
aff4e175
AB
3570 SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
3571 or GDB_SIGNAL_DEFAULT for act according to how it stopped.
c906108c
SS
3572
3573 You should call clear_proceed_status before calling proceed. */
3574
3575void
64ce06e4 3576proceed (CORE_ADDR addr, enum gdb_signal siggnal)
c906108c 3577{
3ec3145c
SM
3578 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
3579
e58b0e63 3580 struct gdbarch *gdbarch;
e58b0e63 3581 CORE_ADDR pc;
c906108c 3582
05e1cac2
AB
3583 /* If we're stopped at a fork/vfork, switch to either the parent or child
3584 thread as defined by the "set follow-fork-mode" command, or, if both
3585 the parent and child are controlled by GDB, and schedule-multiple is
3586 on, follow the child. If none of the above apply then we just proceed
e58b0e63
PA
3587 resuming the current thread. */
3588 if (!follow_fork ())
3589 {
3590 /* The target for some reason decided not to resume. */
3591 normal_stop ();
f148b27e 3592 if (target_can_async_p ())
b1a35af2 3593 inferior_event_handler (INF_EXEC_COMPLETE);
e58b0e63
PA
3594 return;
3595 }
3596
842951eb 3597 /* We'll update this if & when we switch to a new thread. */
a81871f7 3598 update_previous_thread ();
842951eb 3599
08036331 3600 thread_info *cur_thr = inferior_thread ();
b26b06dd
AB
3601 infrun_debug_printf ("cur_thr = %s", cur_thr->ptid.to_string ().c_str ());
3602
9c742269
SM
3603 regcache *regcache = get_thread_regcache (cur_thr);
3604 gdbarch = regcache->arch ();
3605 pc = regcache_read_pc_protected (regcache);
3606
99619bea 3607 /* Fill in with reasonable starting values. */
08036331 3608 init_thread_stepping_state (cur_thr);
99619bea 3609
08036331 3610 gdb_assert (!thread_is_in_step_over_chain (cur_thr));
c2829269 3611
5b6d1e4f
PA
3612 ptid_t resume_ptid
3613 = user_visible_resume_ptid (cur_thr->control.stepping_command);
3614 process_stratum_target *resume_target
3615 = user_visible_resume_target (resume_ptid);
3616
2f4fcf00
PA
3617 check_multi_target_resumption (resume_target);
3618
2acceee2 3619 if (addr == (CORE_ADDR) -1)
c906108c 3620 {
f9582a22 3621 const address_space *aspace = cur_thr->inf->aspace.get ();
74387712 3622
351031f2
AB
3623 if (cur_thr->stop_pc_p ()
3624 && pc == cur_thr->stop_pc ()
af48d08f 3625 && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
b2175913 3626 && execution_direction != EXEC_REVERSE)
3352ef37
AC
3627 /* There is a breakpoint at the address we will resume at,
3628 step one instruction before inserting breakpoints so that
3629 we do not stop right away (and report a second hit at this
b2175913
MS
3630 breakpoint).
3631
3632 Note, we don't do this in reverse, because we won't
3633 actually be executing the breakpoint insn anyway.
3634 We'll be (un-)executing the previous instruction. */
08036331 3635 cur_thr->stepping_over_breakpoint = 1;
515630c5
UW
3636 else if (gdbarch_single_step_through_delay_p (gdbarch)
3637 && gdbarch_single_step_through_delay (gdbarch,
3638 get_current_frame ()))
3352ef37
AC
3639 /* We stepped onto an instruction that needs to be stepped
3640 again before re-inserting the breakpoint, do so. */
08036331 3641 cur_thr->stepping_over_breakpoint = 1;
c906108c
SS
3642 }
3643 else
3644 {
515630c5 3645 regcache_write_pc (regcache, addr);
c906108c
SS
3646 }
3647
70509625 3648 if (siggnal != GDB_SIGNAL_DEFAULT)
1edb66d8 3649 cur_thr->set_stop_signal (siggnal);
70509625 3650
4d9d9d04
PA
3651 /* If an exception is thrown from this point on, make sure to
3652 propagate GDB's knowledge of the executing state to the
3653 frontend/user running state. */
5b6d1e4f 3654 scoped_finish_thread_state finish_state (resume_target, resume_ptid);
4d9d9d04
PA
3655
3656 /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
3657 threads (e.g., we might need to set threads stepping over
3658 breakpoints first), from the user/frontend's point of view, all
3659 threads in RESUME_PTID are now running. Unless we're calling an
3660 inferior function, as in that case we pretend the inferior
3661 doesn't run at all. */
08036331 3662 if (!cur_thr->control.in_infcall)
719546c4 3663 set_running (resume_target, resume_ptid, true);
17b2616c 3664
b26b06dd
AB
3665 infrun_debug_printf ("addr=%s, signal=%s, resume_ptid=%s",
3666 paddress (gdbarch, addr),
3667 gdb_signal_to_symbol_string (siggnal),
3668 resume_ptid.to_string ().c_str ());
527159b7 3669
4d9d9d04
PA
3670 annotate_starting ();
3671
3672 /* Make sure that output from GDB appears before output from the
3673 inferior. */
3674 gdb_flush (gdb_stdout);
3675
d930703d
PA
3676 /* Since we've marked the inferior running, give it the terminal. A
3677 QUIT/Ctrl-C from here on is forwarded to the target (which can
3678 still detect attempts to unblock a stuck connection with repeated
3679 Ctrl-C from within target_pass_ctrlc). */
3680 target_terminal::inferior ();
3681
4d9d9d04
PA
3682 /* In a multi-threaded task we may select another thread and
3683 then continue or step.
3684
3685 But if a thread that we're resuming had stopped at a breakpoint,
3686 it will immediately cause another breakpoint stop without any
3687 execution (i.e. it will report a breakpoint hit incorrectly). So
3688 we must step over it first.
3689
3690 Look for threads other than the current (TP) that reported a
3691 breakpoint hit and haven't been resumed yet since. */
3692
3693 /* If scheduler locking applies, we can avoid iterating over all
3694 threads. */
08036331 3695 if (!non_stop && !schedlock_applies (cur_thr))
94cc34af 3696 {
5b6d1e4f
PA
3697 for (thread_info *tp : all_non_exited_threads (resume_target,
3698 resume_ptid))
08036331 3699 {
f3f8ece4
PA
3700 switch_to_thread_no_regs (tp);
3701
4d9d9d04
PA
3702 /* Ignore the current thread here. It's handled
3703 afterwards. */
08036331 3704 if (tp == cur_thr)
4d9d9d04 3705 continue;
c906108c 3706
4d9d9d04
PA
3707 if (!thread_still_needs_step_over (tp))
3708 continue;
3709
3710 gdb_assert (!thread_is_in_step_over_chain (tp));
c906108c 3711
1eb8556f 3712 infrun_debug_printf ("need to step-over [%s] first",
0fab7955 3713 tp->ptid.to_string ().c_str ());
99619bea 3714
28d5518b 3715 global_thread_step_over_chain_enqueue (tp);
2adfaa28 3716 }
f3f8ece4
PA
3717
3718 switch_to_thread (cur_thr);
30852783
UW
3719 }
3720
4d9d9d04
PA
3721 /* Enqueue the current thread last, so that we move all other
3722 threads over their breakpoints first. */
08036331 3723 if (cur_thr->stepping_over_breakpoint)
28d5518b 3724 global_thread_step_over_chain_enqueue (cur_thr);
30852783 3725
4d9d9d04
PA
3726 /* If the thread isn't started, we'll still need to set its prev_pc,
3727 so that switch_back_to_stepped_thread knows the thread hasn't
3728 advanced. Must do this before resuming any thread, as in
3729 all-stop/remote, once we resume we can't send any other packet
3730 until the target stops again. */
fc75c28b 3731 cur_thr->prev_pc = regcache_read_pc_protected (regcache);
99619bea 3732
a9bc57b9 3733 {
1192f124 3734 scoped_disable_commit_resumed disable_commit_resumed ("proceeding");
8bf10e2e 3735 bool step_over_started = start_step_over ();
c906108c 3736
a9bc57b9
TT
3737 if (step_over_info_valid_p ())
3738 {
3739 /* Either this thread started a new in-line step over, or some
3740 other thread was already doing one. In either case, don't
3741 resume anything else until the step-over is finished. */
3742 }
8bf10e2e 3743 else if (step_over_started && !target_is_non_stop_p ())
a9bc57b9
TT
3744 {
3745 /* A new displaced stepping sequence was started. In all-stop,
3746 we can't talk to the target anymore until it next stops. */
3747 }
3748 else if (!non_stop && target_is_non_stop_p ())
3749 {
3ec3145c
SM
3750 INFRUN_SCOPED_DEBUG_START_END
3751 ("resuming threads, all-stop-on-top-of-non-stop");
3752
a9bc57b9
TT
3753 /* In all-stop, but the target is always in non-stop mode.
3754 Start all other threads that are implicitly resumed too. */
5b6d1e4f
PA
3755 for (thread_info *tp : all_non_exited_threads (resume_target,
3756 resume_ptid))
3757 {
3758 switch_to_thread_no_regs (tp);
e07d892c 3759 proceed_resume_thread_checked (tp);
d5f5a83a 3760 }
a9bc57b9 3761 }
e07d892c
MS
3762 else
3763 proceed_resume_thread_checked (cur_thr);
c906108c 3764
1192f124
SM
3765 disable_commit_resumed.reset_and_commit ();
3766 }
85ad3aaf 3767
731f534f 3768 finish_state.release ();
c906108c 3769
873657b9
PA
3770 /* If we've switched threads above, switch back to the previously
3771 current thread. We don't want the user to see a different
3772 selected thread. */
3773 switch_to_thread (cur_thr);
3774
0b333c5e
PA
3775 /* Tell the event loop to wait for it to stop. If the target
3776 supports asynchronous execution, it'll do this from within
3777 target_resume. */
362646f5 3778 if (!target_can_async_p ())
0b333c5e 3779 mark_async_event_handler (infrun_async_inferior_event_token);
c906108c 3780}
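/* Recap of the ordering implemented in proceed () above: threads that
   still need a step-over are queued first and the current thread last;
   start_step_over then kicks off at most one in-line step-over (or
   displaced step-overs, where available); only threads not waiting for
   a step-over are actually resumed; and commit-resumed is re-enabled
   at the end so the target sees the resumptions as one batch.  */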
c906108c
SS
3781\f
3782
3783/* Start remote-debugging of a machine over a serial link. */
96baa820 3784
c906108c 3785void
8621d6a9 3786start_remote (int from_tty)
c906108c 3787{
5b6d1e4f
PA
3788 inferior *inf = current_inferior ();
3789 inf->control.stop_soon = STOP_QUIETLY_REMOTE;
43ff13b4 3790
1777feb0 3791 /* Always go on waiting for the target, regardless of the mode. */
6426a772 3792 /* FIXME: cagney/1999-09-23: At present it isn't possible to
7e73cedf 3793 indicate to wait_for_inferior that a target should timeout if
6426a772
JM
3794 nothing is returned (instead of just blocking). Because of this,
3795 targets expecting an immediate response need to, internally, set
3796 things up so that the target_wait() is forced to eventually
1777feb0 3797 timeout. */
6426a772
JM
3798 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
3799 differentiate to its caller what the state of the target is after
3800 the initial open has been performed. Here we're assuming that
3801 the target has stopped. It should be possible to eventually have
3802 target_open() return to the caller an indication that the target
3803 is currently running and GDB state should be set to the same as
1777feb0 3804 for an async run. */
5b6d1e4f 3805 wait_for_inferior (inf);
8621d6a9
DJ
3806
3807 /* Now that the inferior has stopped, do any bookkeeping like
3808 loading shared libraries. We want to do this before normal_stop,
3809 so that the displayed frame is up to date. */
a7aba266 3810 post_create_inferior (from_tty);
8621d6a9 3811
6426a772 3812 normal_stop ();
c906108c
SS
3813}
3814
3815/* Initialize static vars when a new inferior begins. */
3816
3817void
96baa820 3818init_wait_for_inferior (void)
c906108c
SS
3819{
3820 /* These are meaningless until the first time through wait_for_inferior. */
c906108c 3821
c906108c
SS
3822 breakpoint_init_inferior (inf_starting);
3823
70509625 3824 clear_proceed_status (0);
9f976b41 3825
ab1ddbcf 3826 nullify_last_target_wait_ptid ();
237fc4c9 3827
a81871f7 3828 update_previous_thread ();
c906108c 3829}
237fc4c9 3830
c906108c 3831\f
488f131b 3832
ec9499be 3833static void handle_inferior_event (struct execution_control_state *ecs);
cd0fc7c3 3834
568d6575
UW
3835static void handle_step_into_function (struct gdbarch *gdbarch,
3836 struct execution_control_state *ecs);
3837static void handle_step_into_function_backward (struct gdbarch *gdbarch,
3838 struct execution_control_state *ecs);
4f5d7f63 3839static void handle_signal_stop (struct execution_control_state *ecs);
186c406b 3840static void check_exception_resume (struct execution_control_state *,
9efe17a3 3841 frame_info_ptr);
611c83ae 3842
bdc36728 3843static void end_stepping_range (struct execution_control_state *ecs);
22bcd14b 3844static void stop_waiting (struct execution_control_state *ecs);
d4f3574e 3845static void keep_going (struct execution_control_state *ecs);
94c57d6a 3846static void process_event_stop_test (struct execution_control_state *ecs);
c4464ade 3847static bool switch_back_to_stepped_thread (struct execution_control_state *ecs);
104c1213 3848
252fbfc8
PA
3849/* This function is attached as a "thread_stop_requested" observer.
3850 Cleanup local state that assumed the PTID was to be resumed, and
3851 report the stop to the frontend. */
3852
2c0b251b 3853static void
252fbfc8
PA
3854infrun_thread_stop_requested (ptid_t ptid)
3855{
5b6d1e4f
PA
3856 process_stratum_target *curr_target = current_inferior ()->process_target ();
3857
c65d6b55
PA
3858 /* PTID was requested to stop. If the thread was already stopped,
3859 but the user/frontend doesn't know about that yet (e.g., the
3860 thread had been temporarily paused for some step-over), set up
3861 for reporting the stop now. */
5b6d1e4f 3862 for (thread_info *tp : all_threads (curr_target, ptid))
08036331
PA
3863 {
3864 if (tp->state != THREAD_RUNNING)
3865 continue;
611841bb 3866 if (tp->executing ())
08036331 3867 continue;
c65d6b55 3868
08036331
PA
3869 /* Remove matching threads from the step-over queue, so
3870 start_step_over doesn't try to resume them
3871 automatically. */
3872 if (thread_is_in_step_over_chain (tp))
28d5518b 3873 global_thread_step_over_chain_remove (tp);
c65d6b55 3874
08036331
PA
3875 /* If the thread is stopped, but the user/frontend doesn't
3876 know about that yet, queue a pending event, as if the
3877 thread had just stopped now. Unless the thread already had
3878 a pending event. */
1edb66d8 3879 if (!tp->has_pending_waitstatus ())
08036331 3880 {
1edb66d8 3881 target_waitstatus ws;
183be222 3882 ws.set_stopped (GDB_SIGNAL_0);
1edb66d8 3883 tp->set_pending_waitstatus (ws);
08036331 3884 }
c65d6b55 3885
08036331
PA
3886 /* Clear the inline-frame state, since we're re-processing the
3887 stop. */
5b6d1e4f 3888 clear_inline_frame_state (tp);
c65d6b55 3889
08036331
PA
3890 /* If this thread was paused because some other thread was
3891 doing an inline-step over, let that finish first. Once
3892 that happens, we'll restart all threads and consume pending
3893 stop events then. */
3894 if (step_over_info_valid_p ())
3895 continue;
3896
3897 /* Otherwise we can process the (new) pending event now. Set
3898 it so this pending event is considered by
3899 do_target_wait. */
7846f3aa 3900 tp->set_resumed (true);
08036331 3901 }
252fbfc8
PA
3902}
3903
0cbcdb96
PA
3904/* Delete the step resume, single-step and longjmp/exception resume
3905 breakpoints of TP. */
4e1c45ea 3906
0cbcdb96
PA
3907static void
3908delete_thread_infrun_breakpoints (struct thread_info *tp)
4e1c45ea 3909{
0cbcdb96
PA
3910 delete_step_resume_breakpoint (tp);
3911 delete_exception_resume_breakpoint (tp);
34b7e8a6 3912 delete_single_step_breakpoints (tp);
4e1c45ea
PA
3913}
3914
0cbcdb96
PA
3915/* If the target still has execution, call FUNC for each thread that
3916 just stopped. In all-stop, that's all the non-exited threads; in
3917 non-stop, that's the current thread, only. */
3918
3919typedef void (*for_each_just_stopped_thread_callback_func)
3920 (struct thread_info *tp);
4e1c45ea
PA
3921
3922static void
0cbcdb96 3923for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
4e1c45ea 3924{
55f6301a 3925 if (!target_has_execution () || inferior_ptid == null_ptid)
4e1c45ea
PA
3926 return;
3927
fbea99ea 3928 if (target_is_non_stop_p ())
4e1c45ea 3929 {
0cbcdb96
PA
3930 /* If in non-stop mode, only the current thread stopped. */
3931 func (inferior_thread ());
4e1c45ea
PA
3932 }
3933 else
0cbcdb96 3934 {
0cbcdb96 3935 /* In all-stop mode, all threads have stopped. */
08036331
PA
3936 for (thread_info *tp : all_non_exited_threads ())
3937 func (tp);
0cbcdb96
PA
3938 }
3939}
3940
3941/* Delete the step resume and longjmp/exception resume breakpoints of
3942 the threads that just stopped. */
3943
3944static void
3945delete_just_stopped_threads_infrun_breakpoints (void)
3946{
3947 for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
34b7e8a6
PA
3948}
3949
3950/* Delete the single-step breakpoints of the threads that just
3951 stopped. */
7c16b83e 3952
34b7e8a6
PA
3953static void
3954delete_just_stopped_threads_single_step_breakpoints (void)
3955{
3956 for_each_just_stopped_thread (delete_single_step_breakpoints);
4e1c45ea
PA
3957}
3958
221e1a37 3959/* See infrun.h. */
223698f8 3960
221e1a37 3961void
223698f8 3962print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
c272a98c 3963 const struct target_waitstatus &ws)
223698f8 3964{
17e971f7
SM
3965 infrun_debug_printf ("target_wait (%s [%s], status) =",
3966 waiton_ptid.to_string ().c_str (),
e71daf80 3967 target_pid_to_str (waiton_ptid).c_str ());
17e971f7
SM
3968 infrun_debug_printf (" %s [%s],",
3969 result_ptid.to_string ().c_str (),
e71daf80 3970 target_pid_to_str (result_ptid).c_str ());
c272a98c 3971 infrun_debug_printf (" %s", ws.to_string ().c_str ());
223698f8
DE
3972}
3973
372316f1
PA
3974/* Select a thread at random, out of those which are resumed and have
3975 had events. */
3976
3977static struct thread_info *
5b6d1e4f 3978random_pending_event_thread (inferior *inf, ptid_t waiton_ptid)
372316f1 3979{
71a23490
SM
3980 process_stratum_target *proc_target = inf->process_target ();
3981 thread_info *thread
3982 = proc_target->random_resumed_with_pending_wait_status (inf, waiton_ptid);
08036331 3983
71a23490 3984 if (thread == nullptr)
08036331 3985 {
71a23490
SM
3986 infrun_debug_printf ("None found.");
3987 return nullptr;
3988 }
372316f1 3989
0fab7955 3990 infrun_debug_printf ("Found %s.", thread->ptid.to_string ().c_str ());
71a23490
SM
3991 gdb_assert (thread->resumed ());
3992 gdb_assert (thread->has_pending_waitstatus ());
372316f1 3993
71a23490 3994 return thread;
372316f1
PA
3995}
3996
3997/* Wrapper for target_wait that first checks whether threads have
3998 pending statuses to report before actually asking the target for
5b6d1e4f
PA
3999 more events. INF is the inferior we're using to call target_wait
4000 on. */
372316f1
PA
4001
4002static ptid_t
5b6d1e4f 4003do_target_wait_1 (inferior *inf, ptid_t ptid,
b60cea74 4004 target_waitstatus *status, target_wait_flags options)
372316f1 4005{
372316f1
PA
4006 struct thread_info *tp;
4007
24ed6739
AB
4008 /* We know that we are looking for an event in the target of inferior
4009 INF, but we don't know which thread the event might come from. As
4010 such we want to make sure that INFERIOR_PTID is reset so that none of
4011 the wait code relies on it - doing so is always a mistake. */
4012 switch_to_inferior_no_thread (inf);
4013
372316f1
PA
4014 /* First check if there is a resumed thread with a wait status
4015 pending. */
d7e15655 4016 if (ptid == minus_one_ptid || ptid.is_pid ())
372316f1 4017 {
5b6d1e4f 4018 tp = random_pending_event_thread (inf, ptid);
372316f1
PA
4019 }
4020 else
4021 {
1eb8556f 4022 infrun_debug_printf ("Waiting for specific thread %s.",
0fab7955 4023 ptid.to_string ().c_str ());
372316f1
PA
4024
4025 /* We have a specific thread to check. */
3c8af02f 4026 tp = inf->find_thread (ptid);
03acd4d8 4027 gdb_assert (tp != nullptr);
1edb66d8 4028 if (!tp->has_pending_waitstatus ())
03acd4d8 4029 tp = nullptr;
372316f1
PA
4030 }
4031
03acd4d8 4032 if (tp != nullptr
1edb66d8
SM
4033 && (tp->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
4034 || tp->stop_reason () == TARGET_STOPPED_BY_HW_BREAKPOINT))
372316f1 4035 {
00431a78 4036 struct regcache *regcache = get_thread_regcache (tp);
ac7936df 4037 struct gdbarch *gdbarch = regcache->arch ();
372316f1
PA
4038 CORE_ADDR pc;
4039 int discard = 0;
4040
4041 pc = regcache_read_pc (regcache);
4042
1edb66d8 4043 if (pc != tp->stop_pc ())
372316f1 4044 {
1eb8556f 4045 infrun_debug_printf ("PC of %s changed. was=%s, now=%s",
0fab7955 4046 tp->ptid.to_string ().c_str (),
1edb66d8 4047 paddress (gdbarch, tp->stop_pc ()),
1eb8556f 4048 paddress (gdbarch, pc));
372316f1
PA
4049 discard = 1;
4050 }
f9582a22 4051 else if (!breakpoint_inserted_here_p (tp->inf->aspace.get (), pc))
372316f1 4052 {
1eb8556f 4053 infrun_debug_printf ("previous breakpoint of %s, at %s gone",
0fab7955 4054 tp->ptid.to_string ().c_str (),
1eb8556f 4055 paddress (gdbarch, pc));
372316f1
PA
4056
4057 discard = 1;
4058 }
4059
4060 if (discard)
4061 {
1eb8556f 4062 infrun_debug_printf ("pending event of %s cancelled.",
0fab7955 4063 tp->ptid.to_string ().c_str ());
372316f1 4064
1edb66d8
SM
4065 tp->clear_pending_waitstatus ();
4066 target_waitstatus ws;
183be222 4067 ws.set_spurious ();
1edb66d8
SM
4068 tp->set_pending_waitstatus (ws);
4069 tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
372316f1
PA
4070 }
4071 }
4072
03acd4d8 4073 if (tp != nullptr)
372316f1 4074 {
1eb8556f 4075 infrun_debug_printf ("Using pending wait status %s for %s.",
7dca2ea7 4076 tp->pending_waitstatus ().to_string ().c_str (),
0fab7955 4077 tp->ptid.to_string ().c_str ());
372316f1
PA
4078
4079 /* Now that we've selected our final event LWP, un-adjust its PC
4080 if it was a software breakpoint (and the target doesn't
4081 always adjust the PC itself). */
1edb66d8 4082 if (tp->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
372316f1
PA
4083 && !target_supports_stopped_by_sw_breakpoint ())
4084 {
4085 struct regcache *regcache;
4086 struct gdbarch *gdbarch;
4087 int decr_pc;
4088
00431a78 4089 regcache = get_thread_regcache (tp);
ac7936df 4090 gdbarch = regcache->arch ();
372316f1
PA
4091
4092 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
4093 if (decr_pc != 0)
4094 {
4095 CORE_ADDR pc;
4096
4097 pc = regcache_read_pc (regcache);
4098 regcache_write_pc (regcache, pc + decr_pc);
4099 }
4100 }
4101
1edb66d8
SM
4102 tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
4103 *status = tp->pending_waitstatus ();
4104 tp->clear_pending_waitstatus ();
372316f1
PA
4105
4106 /* Wake up the event loop again, until all pending events are
4107 processed. */
4108 if (target_is_async_p ())
4109 mark_async_event_handler (infrun_async_inferior_event_token);
4110 return tp->ptid;
4111 }
4112
4113 /* But if we don't find one, we'll have to wait. */
4114
d3a07122
SM
4115 /* We can't ask a non-async target to do a non-blocking wait, so this will be
4116 a blocking wait. */
71247709 4117 if (!target_can_async_p ())
d3a07122
SM
4118 options &= ~TARGET_WNOHANG;
4119
fb85cece 4120 return target_wait (ptid, status, options);
372316f1
PA
4121}
4122
5b6d1e4f
PA
4123/* Wrapper for target_wait that first checks whether threads have
4124 pending statuses to report before actually asking the target for
b3e3a4c1 4125 more events. Polls for events from all inferiors/targets. */
5b6d1e4f
PA
4126
4127static bool
ac0d67ed 4128do_target_wait (execution_control_state *ecs, target_wait_flags options)
5b6d1e4f
PA
4129{
4130 int num_inferiors = 0;
4131 int random_selector;
4132
b3e3a4c1
SM
4133 /* For fairness, we pick the first inferior/target to poll at random
4134 out of all inferiors that may report events, and then continue
4135 polling the rest of the inferior list starting from that one in a
4136 circular fashion until the whole list is polled once. */
5b6d1e4f 4137
ac0d67ed 4138 auto inferior_matches = [] (inferior *inf)
5b6d1e4f 4139 {
ac0d67ed 4140 return inf->process_target () != nullptr;
5b6d1e4f
PA
4141 };
4142
b3e3a4c1 4143 /* First see how many matching inferiors we have. */
5b6d1e4f
PA
4144 for (inferior *inf : all_inferiors ())
4145 if (inferior_matches (inf))
4146 num_inferiors++;
4147
4148 if (num_inferiors == 0)
4149 {
183be222 4150 ecs->ws.set_ignore ();
5b6d1e4f
PA
4151 return false;
4152 }
4153
b3e3a4c1 4154 /* Now randomly pick an inferior out of those that matched. */
5b6d1e4f
PA
4155 random_selector = (int)
4156 ((num_inferiors * (double) rand ()) / (RAND_MAX + 1.0));
4157
1eb8556f
SM
4158 if (num_inferiors > 1)
4159 infrun_debug_printf ("Found %d inferiors, starting at #%d",
4160 num_inferiors, random_selector);
5b6d1e4f 4161
b3e3a4c1 4162 /* Select the Nth inferior that matched. */
5b6d1e4f
PA
4163
4164 inferior *selected = nullptr;
4165
4166 for (inferior *inf : all_inferiors ())
4167 if (inferior_matches (inf))
4168 if (random_selector-- == 0)
4169 {
4170 selected = inf;
4171 break;
4172 }
4173
b3e3a4c1 4174 /* Now poll for events out of each of the matching inferior's
5b6d1e4f
PA
4175 targets, starting from the selected one. */
4176
4177 auto do_wait = [&] (inferior *inf)
4178 {
ac0d67ed 4179 ecs->ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs->ws, options);
5b6d1e4f 4180 ecs->target = inf->process_target ();
183be222 4181 return (ecs->ws.kind () != TARGET_WAITKIND_IGNORE);
5b6d1e4f
PA
4182 };
4183
b3e3a4c1
SM
4184 /* Needed in 'all-stop + target-non-stop' mode, because we end up
4185 here spuriously after the target is all stopped and we've already
5b6d1e4f
PA
4186 reported the stop to the user, polling for events. */
4187 scoped_restore_current_thread restore_thread;
4188
08bdefb5
PA
4189 intrusive_list_iterator<inferior> start
4190 = inferior_list.iterator_to (*selected);
4191
4192 for (intrusive_list_iterator<inferior> it = start;
4193 it != inferior_list.end ();
4194 ++it)
4195 {
4196 inferior *inf = &*it;
4197
4198 if (inferior_matches (inf) && do_wait (inf))
5b6d1e4f 4199 return true;
08bdefb5 4200 }
5b6d1e4f 4201
08bdefb5
PA
4202 for (intrusive_list_iterator<inferior> it = inferior_list.begin ();
4203 it != start;
4204 ++it)
4205 {
4206 inferior *inf = &*it;
4207
4208 if (inferior_matches (inf) && do_wait (inf))
5b6d1e4f 4209 return true;
08bdefb5 4210 }
5b6d1e4f 4211
183be222 4212 ecs->ws.set_ignore ();
5b6d1e4f
PA
4213 return false;
4214}
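/* Worked example of the polling order above (illustrative): with
   matching inferiors I1, I2 and I3, and the random selector landing
   on I2, the loops poll I2, then I3, then wrap around to I1,
   returning as soon as any of them reports an event other than
   TARGET_WAITKIND_IGNORE.  */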
4215
8ff53139
PA
4216/* An event reported by wait_one. */
4217
4218struct wait_one_event
4219{
4220 /* The target the event came out of. */
4221 process_stratum_target *target;
4222
4223 /* The PTID the event was for. */
4224 ptid_t ptid;
4225
4226 /* The waitstatus. */
4227 target_waitstatus ws;
4228};
4229
4230static bool handle_one (const wait_one_event &event);
21d48304 4231static int finish_step_over (struct execution_control_state *ecs);
8ff53139 4232
24291992
PA
4233/* Prepare and stabilize the inferior for detaching it. E.g.,
4234 detaching while a thread is displaced stepping is a recipe for
4235 crashing it, as nothing would readjust the PC out of the scratch
4236 pad. */
4237
4238void
4239prepare_for_detach (void)
4240{
4241 struct inferior *inf = current_inferior ();
f2907e49 4242 ptid_t pid_ptid = ptid_t (inf->pid);
8ff53139 4243 scoped_restore_current_thread restore_thread;
24291992 4244
9bcb1f16 4245 scoped_restore restore_detaching = make_scoped_restore (&inf->detaching, true);
24291992 4246
8ff53139
PA
4247 /* Remove all threads of INF from the global step-over chain. We
4248 want to stop any ongoing step-over, not start any new one. */
8b6a69b2
SM
4249 thread_step_over_list_safe_range range
4250 = make_thread_step_over_list_safe_range (global_thread_step_over_list);
4251
4252 for (thread_info *tp : range)
4253 if (tp->inf == inf)
4254 {
4255 infrun_debug_printf ("removing thread %s from global step over chain",
0fab7955 4256 tp->ptid.to_string ().c_str ());
8ff53139 4257 global_thread_step_over_chain_remove (tp);
8b6a69b2 4258 }
24291992 4259
ac7d717c
PA
4260 /* If we were already in the middle of an inline step-over, and the
4261 thread stepping belongs to the inferior we're detaching, we need
4262 to restart the threads of other inferiors. */
4263 if (step_over_info.thread != -1)
4264 {
4265 infrun_debug_printf ("inline step-over in-process while detaching");
4266
4267 thread_info *thr = find_thread_global_id (step_over_info.thread);
4268 if (thr->inf == inf)
4269 {
4270 /* Since we removed threads of INF from the step-over chain,
4271 we know this won't start a step-over for INF. */
4272 clear_step_over_info ();
4273
4274 if (target_is_non_stop_p ())
4275 {
4276 /* Start a new step-over in another thread if there's
4277 one that needs it. */
4278 start_step_over ();
4279
4280 /* Restart all other threads (except the
4281 previously-stepping thread, since that one is still
4282 running). */
4283 if (!step_over_info_valid_p ())
4284 restart_threads (thr);
4285 }
4286 }
4287 }
4288
8ff53139
PA
4289 if (displaced_step_in_progress (inf))
4290 {
4291 infrun_debug_printf ("displaced-stepping in-process while detaching");
24291992 4292
8ff53139 4293 /* Stop threads currently displaced stepping, aborting it. */
24291992 4294
8ff53139
PA
4295 for (thread_info *thr : inf->non_exited_threads ())
4296 {
4297 if (thr->displaced_step_state.in_progress ())
4298 {
611841bb 4299 if (thr->executing ())
8ff53139
PA
4300 {
4301 if (!thr->stop_requested)
4302 {
4303 target_stop (thr->ptid);
4304 thr->stop_requested = true;
4305 }
4306 }
4307 else
7846f3aa 4308 thr->set_resumed (false);
8ff53139
PA
4309 }
4310 }
24291992 4311
8ff53139
PA
4312 while (displaced_step_in_progress (inf))
4313 {
4314 wait_one_event event;
24291992 4315
8ff53139
PA
4316 event.target = inf->process_target ();
4317 event.ptid = do_target_wait_1 (inf, pid_ptid, &event.ws, 0);
24291992 4318
8ff53139 4319 if (debug_infrun)
c272a98c 4320 print_target_wait_results (pid_ptid, event.ptid, event.ws);
24291992 4321
8ff53139
PA
4322 handle_one (event);
4323 }
24291992 4324
8ff53139
PA
4325 /* It's OK to leave some of the threads of INF stopped, since
4326 they'll be detached shortly. */
24291992 4327 }
24291992
PA
4328}
4329
e0c01ce6
PA
4330/* If all-stop, but there exists a non-stop target, stop all threads
4331 now that we're presenting the stop to the user. */
4332
4333static void
4334stop_all_threads_if_all_stop_mode ()
4335{
4336 if (!non_stop && exists_non_stop_target ())
4337 stop_all_threads ("presenting stop to user in all-stop");
4338}
4339
cd0fc7c3 4340/* Wait for control to return from inferior to debugger.
ae123ec6 4341
cd0fc7c3
SS
4342 If inferior gets a signal, we may decide to start it up again
4343 instead of returning. That is why there is a loop in this function.
4344 When this function actually returns it means the inferior
4345 should be left stopped and GDB should read more commands. */
4346
5b6d1e4f
PA
4347static void
4348wait_for_inferior (inferior *inf)
cd0fc7c3 4349{
1eb8556f 4350 infrun_debug_printf ("wait_for_inferior ()");
527159b7 4351
4c41382a 4352 SCOPE_EXIT { delete_just_stopped_threads_infrun_breakpoints (); };
cd0fc7c3 4353
e6f5c25b
PA
4354 /* If an error happens while handling the event, propagate GDB's
4355 knowledge of the executing state to the frontend/user running
4356 state. */
5b6d1e4f
PA
4357 scoped_finish_thread_state finish_state
4358 (inf->process_target (), minus_one_ptid);
e6f5c25b 4359
c906108c
SS
4360 while (1)
4361 {
aa563d16 4362 execution_control_state ecs;
29f49a6a 4363
ec9499be 4364 overlay_cache_invalid = 1;
ec9499be 4365
f15cb84a
YQ
4366 /* Flush target cache before starting to handle each event.
4367 Target was running and cache could be stale. This is just a
4368 heuristic. Running threads may modify target memory, but we
4369 don't get any event. */
41336620 4370 target_dcache_invalidate (current_program_space->aspace);
f15cb84a 4371
aa563d16
TT
4372 ecs.ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs.ws, 0);
4373 ecs.target = inf->process_target ();
c906108c 4374
f00150c9 4375 if (debug_infrun)
aa563d16 4376 print_target_wait_results (minus_one_ptid, ecs.ptid, ecs.ws);
f00150c9 4377
cd0fc7c3 4378 /* Now figure out what to do with the result of the wait. */
aa563d16 4379 handle_inferior_event (&ecs);
c906108c 4380
aa563d16 4381 if (!ecs.wait_some_more)
cd0fc7c3
SS
4382 break;
4383 }
4e1c45ea 4384
e0c01ce6
PA
4385 stop_all_threads_if_all_stop_mode ();
4386
e6f5c25b 4387 /* No error, don't finish the state yet. */
731f534f 4388 finish_state.release ();
cd0fc7c3 4389}
c906108c 4390
d3d4baed
PA
4391/* Cleanup that reinstalls the readline callback handler, if the
4392 target is running in the background. If while handling the target
4393 event something triggered a secondary prompt, like e.g., a
4394 pagination prompt, we'll have removed the callback handler (see
4395 gdb_readline_wrapper_line). Need to do this as we go back to the
4396 event loop, ready to process further input. Note this has no
4397 effect if the handler hasn't actually been removed, because calling
4398 rl_callback_handler_install resets the line buffer, thus losing
4399 input. */
4400
4401static void
d238133d 4402reinstall_readline_callback_handler_cleanup ()
d3d4baed 4403{
3b12939d
PA
4404 struct ui *ui = current_ui;
4405
4406 if (!ui->async)
6c400b59
PA
4407 {
4408 /* We're not going back to the top level event loop yet. Don't
4409 install the readline callback, as it'd prep the terminal,
4410 readline-style (raw, noecho) (e.g., --batch). We'll install
4411 it the next time the prompt is displayed, when we're ready
4412 for input. */
4413 return;
4414 }
4415
3b12939d 4416 if (ui->command_editing && ui->prompt_state != PROMPT_BLOCKED)
d3d4baed
PA
4417 gdb_rl_callback_handler_reinstall ();
4418}
4419
243a9253 4420/* Clean up the FSMs of threads that are now stopped. In non-stop,
7730e5c6
PA
4421 that's just the event thread. In all-stop, that's all threads. In
4422 all-stop, threads that had a pending exit no longer have a reason
4423 to be around, as their FSMs/commands are canceled, so we delete
4424 them. This avoids "info threads" listing such threads as if they
4425 were alive (and failing to read their registers), the user being
4426 able to select and resume them (and that failing), etc. */
243a9253
PA
4427
4428static void
4429clean_up_just_stopped_threads_fsms (struct execution_control_state *ecs)
4430{
22517040
SM
4431 /* The first clean_up call below assumes the event thread is the current
4432 one. */
4433 if (ecs->event_thread != nullptr)
4434 gdb_assert (ecs->event_thread == inferior_thread ());
4435
573269a8
LS
4436 if (ecs->event_thread != nullptr
4437 && ecs->event_thread->thread_fsm () != nullptr)
4438 ecs->event_thread->thread_fsm ()->clean_up (ecs->event_thread);
243a9253
PA
4439
4440 if (!non_stop)
4441 {
22517040
SM
4442 scoped_restore_current_thread restore_thread;
4443
7730e5c6 4444 for (thread_info *thr : all_threads_safe ())
dda83cd7 4445 {
7730e5c6 4446 if (thr->state == THREAD_EXITED)
243a9253 4447 continue;
7730e5c6 4448
243a9253
PA
4449 if (thr == ecs->event_thread)
4450 continue;
4451
7730e5c6
PA
4452 if (thr->thread_fsm () != nullptr)
4453 {
4454 switch_to_thread (thr);
4455 thr->thread_fsm ()->clean_up (thr);
4456 }
4457
4458 /* As we are cancelling the command/FSM of this thread,
4459 whatever was the reason we needed to report a thread
4460 exited event to the user, that reason is gone. Delete
4461 the thread, so that the user doesn't see it in the thread
4462 list, the next proceed doesn't try to resume it, etc. */
4463 if (thr->has_pending_waitstatus ()
4464 && (thr->pending_waitstatus ().kind ()
4465 == TARGET_WAITKIND_THREAD_EXITED))
4466 delete_thread (thr);
243a9253 4467 }
243a9253
PA
4468 }
4469}
4470
3b12939d
PA
4471/* Helper for all_uis_check_sync_execution_done that works on the
4472 current UI. */
4473
4474static void
4475check_curr_ui_sync_execution_done (void)
4476{
4477 struct ui *ui = current_ui;
4478
4479 if (ui->prompt_state == PROMPT_NEEDED
4480 && ui->async
4481 && !gdb_in_secondary_prompt_p (ui))
4482 {
223ffa71 4483 target_terminal::ours ();
c3d321de 4484 top_level_interpreter ()->on_sync_execution_done ();
8f7f9b3a 4485 ui->register_file_handler ();
3b12939d
PA
4486 }
4487}
4488
4489/* See infrun.h. */
4490
4491void
4492all_uis_check_sync_execution_done (void)
4493{
0e454242 4494 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
4495 {
4496 check_curr_ui_sync_execution_done ();
4497 }
4498}
4499
a8836c93
PA
4500/* See infrun.h. */
4501
4502void
4503all_uis_on_sync_execution_starting (void)
4504{
0e454242 4505 SWITCH_THRU_ALL_UIS ()
a8836c93
PA
4506 {
4507 if (current_ui->prompt_state == PROMPT_NEEDED)
4508 async_disable_stdin ();
4509 }
4510}
4511
0ace6ace
PA
4512/* A quit_handler callback installed while we're handling inferior
4513 events. */
4514
4515static void
4516infrun_quit_handler ()
4517{
4518 if (target_terminal::is_ours ())
4519 {
4520 /* Do nothing.
4521
4522 default_quit_handler would throw a quit in this case, but if
4523 we're handling an event while we have the terminal, it means
4524 the target is running a background execution command, and
4525 thus when users press Ctrl-C, they're wanting to interrupt
4526 whatever command they were executing in the command line.
4527 E.g.:
4528
4529 (gdb) c&
4530 (gdb) foo bar whatever<ctrl-c>
4531
4532 That Ctrl-C should clear the input line, not interrupt event
4533 handling if it happens that the user types Ctrl-C at just the
4534 "wrong" time!
4535
4536 It's as-if background event handling was handled by a
4537 separate background thread.
4538
4539 To be clear, the Ctrl-C is not lost -- it will be processed
4540 by the next QUIT call once we're out of fetch_inferior_event
4541 again. */
4542 }
4543 else
4544 {
4545 if (check_quit_flag ())
4546 target_pass_ctrlc ();
4547 }
4548}
4549
1777feb0 4550/* Asynchronous version of wait_for_inferior. It is called by the
43ff13b4 4551 event loop whenever a change of state is detected on the file
1777feb0
MS
4552 descriptor corresponding to the target. It can be called more than
4553 once to complete a single execution command. In such cases we
4554 simply wait for further events to arrive. If it is the last time
a474d7c2
PA
4555 that this function is called for a single execution command, then
4556 report to the user that the inferior has stopped, and do the
1777feb0 4557 necessary cleanups. */
43ff13b4
JM
4558
4559void
b1a35af2 4560fetch_inferior_event ()
43ff13b4 4561{
3ec3145c
SM
4562 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
4563
aa563d16 4564 execution_control_state ecs;
0f641c01 4565 int cmd_done = 0;
43ff13b4 4566
c61db772
PA
4567 /* Events are always processed with the main UI as current UI. This
4568 way, warnings, debug output, etc. are always consistently sent to
4569 the main console. */
4b6749b9 4570 scoped_restore save_ui = make_scoped_restore (&current_ui, main_ui);
c61db772 4571
b78b3a29
TBA
4572 /* Temporarily disable pagination. Otherwise, the user would be
4573 given an option to press 'q' to quit, which would cause an early
4574 exit and could leave GDB in a half-baked state. */
4575 scoped_restore save_pagination
4576 = make_scoped_restore (&pagination_enabled, false);
4577
0ace6ace
PA
4578 /* Install a quit handler that does nothing if we have the terminal
4579 (meaning the target is running a background execution command),
4580 so that Ctrl-C never interrupts GDB before the event is fully
4581 handled. */
4582 scoped_restore restore_quit_handler
4583 = make_scoped_restore (&quit_handler, infrun_quit_handler);
4584
141cd158
PA
4585 /* Make sure a SIGINT does not interrupt an extension language while
4586 we're handling an event. That could interrupt a Python unwinder
4587 or a Python observer or some such. A Ctrl-C should either be
4588 forwarded to the inferior if the inferior has the terminal, or,
4589 if GDB has the terminal, should interrupt the command the user is
4590 typing in the CLI. */
4591 scoped_disable_cooperative_sigint_handling restore_coop_sigint;
4592
d3d4baed 4593 /* End up with readline processing input, if necessary. */
d238133d
TT
4594 {
4595 SCOPE_EXIT { reinstall_readline_callback_handler_cleanup (); };
4596
4597 /* We're handling a live event, so make sure we're doing live
4598 debugging. If we're looking at traceframes while the target is
4599 running, we're going to need to get back to that mode after
4600 handling the event. */
6b09f134 4601 std::optional<scoped_restore_current_traceframe> maybe_restore_traceframe;
d238133d
TT
4602 if (non_stop)
4603 {
4604 maybe_restore_traceframe.emplace ();
4605 set_current_traceframe (-1);
4606 }
43ff13b4 4607
873657b9
PA
4608 /* The user/frontend should not notice a thread switch due to
4609 internal events. Make sure we revert to the user selected
4610 thread and frame after handling the event and running any
4611 breakpoint commands. */
4612 scoped_restore_current_thread restore_thread;
d238133d
TT
4613
4614 overlay_cache_invalid = 1;
4615 /* Flush target cache before starting to handle each event. Target
4616 was running and cache could be stale. This is just a heuristic.
4617 Running threads may modify target memory, but we don't get any
4618 event. */
41336620 4619 target_dcache_invalidate (current_program_space->aspace);
d238133d
TT
4620
4621 scoped_restore save_exec_dir
4622 = make_scoped_restore (&execution_direction,
4623 target_execution_direction ());
4624
1192f124
SM
4625 /* Allow targets to pause their resumed threads while we handle
4626 the event. */
4627 scoped_disable_commit_resumed disable_commit_resumed ("handling event");
4628
aa563d16 4629 if (!do_target_wait (&ecs, TARGET_WNOHANG))
1192f124
SM
4630 {
4631 infrun_debug_printf ("do_target_wait returned no event");
4632 disable_commit_resumed.reset_and_commit ();
4633 return;
4634 }
5b6d1e4f 4635
aa563d16 4636 gdb_assert (ecs.ws.kind () != TARGET_WAITKIND_IGNORE);
5b6d1e4f 4637
9145fd43
SM
4638 /* Switch to the inferior that generated the event, so we can do
4639 target calls. If the event was not associated with a ptid, just switch to the event's target. */
4640 if (ecs.ptid != null_ptid
4641 && ecs.ptid != minus_one_ptid)
4642 switch_to_inferior_no_thread (find_inferior_ptid (ecs.target, ecs.ptid));
4643 else
4644 switch_to_target_no_thread (ecs.target);
d238133d
TT
4645
4646 if (debug_infrun)
aa563d16 4647 print_target_wait_results (minus_one_ptid, ecs.ptid, ecs.ws);
d238133d
TT
4648
4649 /* If an error happens while handling the event, propagate GDB's
4650 knowledge of the executing state to the frontend/user running
4651 state. */
aa563d16
TT
4652 ptid_t finish_ptid = !target_is_non_stop_p () ? minus_one_ptid : ecs.ptid;
4653 scoped_finish_thread_state finish_state (ecs.target, finish_ptid);
d238133d 4654
979a0d13 4655 /* These get executed before scoped_restore_current_thread above, so they
d238133d
TT
4656 still apply to the thread which has thrown the exception. */
4657 auto defer_bpstat_clear
4658 = make_scope_exit (bpstat_clear_actions);
4659 auto defer_delete_threads
4660 = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints);
4661
b1c0ab20
AB
4662 int stop_id = get_stop_id ();
4663
d238133d 4664 /* Now figure out what to do with the result of the wait. */
aa563d16 4665 handle_inferior_event (&ecs);
d238133d 4666
aa563d16 4667 if (!ecs.wait_some_more)
d238133d 4668 {
aa563d16 4669 struct inferior *inf = find_inferior_ptid (ecs.target, ecs.ptid);
758cb810 4670 bool should_stop = true;
aa563d16 4671 struct thread_info *thr = ecs.event_thread;
d6b48e9c 4672
d238133d 4673 delete_just_stopped_threads_infrun_breakpoints ();
f107f563 4674
573269a8
LS
4675 if (thr != nullptr && thr->thread_fsm () != nullptr)
4676 should_stop = thr->thread_fsm ()->should_stop (thr);
243a9253 4677
d238133d
TT
4678 if (!should_stop)
4679 {
aa563d16 4680 keep_going (&ecs);
d238133d
TT
4681 }
4682 else
4683 {
46e3ed7f 4684 bool should_notify_stop = true;
8dd08de7 4685 bool proceeded = false;
1840d81a 4686
e0c01ce6
PA
4687 stop_all_threads_if_all_stop_mode ();
4688
aa563d16 4689 clean_up_just_stopped_threads_fsms (&ecs);
243a9253 4690
b1c0ab20
AB
4691 if (stop_id != get_stop_id ())
4692 {
4693 /* If the stop-id has changed then a stop has already been
4694 presented to the user in handle_inferior_event, this is
4695 likely a failed inferior call. As the stop has already
4696 been announced then we should not notify again.
4697
4698 Also, if the prompt state is not PROMPT_NEEDED then GDB
4699 will not be ready for user input after this function. */
4700 should_notify_stop = false;
4701 gdb_assert (current_ui->prompt_state == PROMPT_NEEDED);
4702 }
4703 else if (thr != nullptr && thr->thread_fsm () != nullptr)
573269a8
LS
4704 should_notify_stop
4705 = thr->thread_fsm ()->should_notify_stop ();
388a7084 4706
d238133d
TT
4707 if (should_notify_stop)
4708 {
4709 /* We may not find an inferior if this was a process exit. */
03acd4d8 4710 if (inf == nullptr || inf->control.stop_soon == NO_STOP_QUIETLY)
d238133d
TT
4711 proceeded = normal_stop ();
4712 }
243a9253 4713
d238133d
TT
4714 if (!proceeded)
4715 {
b1a35af2 4716 inferior_event_handler (INF_EXEC_COMPLETE);
d238133d
TT
4717 cmd_done = 1;
4718 }
873657b9
PA
4719
4720 /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
4721 previously selected thread is gone. We have two
4722 choices - switch to no thread selected, or restore the
4723 previously selected thread (now exited). We chose the
4724 later, just because that's what GDB used to do. After
4725 this, "info threads" says "The current thread <Thread
4726 ID 2> has terminated." instead of "No thread
4727 selected.". */
4728 if (!non_stop
4729 && cmd_done
aa563d16 4730 && ecs.ws.kind () != TARGET_WAITKIND_NO_RESUMED)
873657b9 4731 restore_thread.dont_restore ();
d238133d
TT
4732 }
4733 }
4f8d22e3 4734
d238133d
TT
4735 defer_delete_threads.release ();
4736 defer_bpstat_clear.release ();
29f49a6a 4737
d238133d
TT
4738 /* No error, don't finish the thread states yet. */
4739 finish_state.release ();
731f534f 4740
1192f124
SM
4741 disable_commit_resumed.reset_and_commit ();
4742
d238133d
TT
4743 /* This scope is used to ensure that readline callbacks are
4744 reinstalled here. */
4745 }
4f8d22e3 4746
152a1749
SM
4747 /* Handling this event might have caused some inferiors to become prunable.
4748 For example, the exit of an inferior that was automatically added. Try
4749 to get rid of them. Keeping those around slows down things linearly.
4750
4751 Note that this never removes the current inferior. Therefore, call this
4752 after RESTORE_THREAD went out of scope, in case the event inferior (which was
4753 temporarily made the current inferior) is meant to be deleted.
4754
4755 Call this before all_uis_check_sync_execution_done, so that notifications about
4756 removed inferiors appear before the prompt. */
4757 prune_inferiors ();
4758
3b12939d
PA
4759 /* If a UI was in sync execution mode, and now isn't, restore its
4760 prompt (a synchronous execution command has finished, and we're
4761 ready for input). */
4762 all_uis_check_sync_execution_done ();
0f641c01
PA
4763
4764 if (cmd_done
0f641c01 4765 && exec_done_display_p
00431a78
PA
4766 && (inferior_ptid == null_ptid
4767 || inferior_thread ()->state != THREAD_RUNNING))
6cb06a8c 4768 gdb_printf (_("completed.\n"));
43ff13b4
JM
4769}
4770
29734269
SM
4771/* See infrun.h. */
4772
edb3359d 4773void
bd2b40ac 4774set_step_info (thread_info *tp, frame_info_ptr frame,
29734269 4775 struct symtab_and_line sal)
edb3359d 4776{
29734269
SM
4777 /* This can be removed once this function no longer implicitly relies on the
4778 inferior_ptid value. */
4779 gdb_assert (inferior_ptid == tp->ptid);
edb3359d 4780
16c381f0
JK
4781 tp->control.step_frame_id = get_frame_id (frame);
4782 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
edb3359d
DJ
4783
4784 tp->current_symtab = sal.symtab;
4785 tp->current_line = sal.line;
c8353d68
AB
4786
4787 infrun_debug_printf
4788 ("symtab = %s, line = %d, step_frame_id = %s, step_stack_frame_id = %s",
b7e07722
PA
4789 tp->current_symtab != nullptr ? tp->current_symtab->filename : "<null>",
4790 tp->current_line,
c8353d68
AB
4791 tp->control.step_frame_id.to_string ().c_str (),
4792 tp->control.step_stack_frame_id.to_string ().c_str ());
edb3359d
DJ
4793}
4794
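
A minimal illustrative sketch (editorial, not part of infrun.c) of how a step-like command might use set_step_info to record where a step started; record_step_start_sketch is a hypothetical helper added only for illustration, and it assumes the stepping thread is already the current thread, as set_step_info requires.

/* Hypothetical helper, for illustration only.  */
static void
record_step_start_sketch ()
{
  thread_info *tp = inferior_thread ();
  frame_info_ptr frame = get_current_frame ();

  /* Remember the frame and source line the user stepped from, so that
     later events can decide whether the step has finished.  */
  set_step_info (tp, frame, find_frame_sal (frame));
}
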
0d1e5fa7
PA
4795/* Clear context switchable stepping state. */
4796
4797void
4e1c45ea 4798init_thread_stepping_state (struct thread_info *tss)
0d1e5fa7 4799{
7f5ef605 4800 tss->stepped_breakpoint = 0;
0d1e5fa7 4801 tss->stepping_over_breakpoint = 0;
963f9c80 4802 tss->stepping_over_watchpoint = 0;
0d1e5fa7 4803 tss->step_after_step_resume_breakpoint = 0;
cd0fc7c3
SS
4804}
4805
ab1ddbcf 4806/* See infrun.h. */
c32c64b7 4807
6efcd9a8 4808void
5b6d1e4f 4809set_last_target_status (process_stratum_target *target, ptid_t ptid,
183be222 4810 const target_waitstatus &status)
c32c64b7 4811{
5b6d1e4f 4812 target_last_proc_target = target;
c32c64b7
DE
4813 target_last_wait_ptid = ptid;
4814 target_last_waitstatus = status;
4815}
4816
ab1ddbcf 4817/* See infrun.h. */
e02bc4cc
DS
4818
4819void
5b6d1e4f
PA
4820get_last_target_status (process_stratum_target **target, ptid_t *ptid,
4821 target_waitstatus *status)
e02bc4cc 4822{
5b6d1e4f
PA
4823 if (target != nullptr)
4824 *target = target_last_proc_target;
ab1ddbcf
PA
4825 if (ptid != nullptr)
4826 *ptid = target_last_wait_ptid;
4827 if (status != nullptr)
4828 *status = target_last_waitstatus;
e02bc4cc
DS
4829}
4830
ab1ddbcf
PA
4831/* See infrun.h. */
4832
ac264b3b
MS
4833void
4834nullify_last_target_wait_ptid (void)
4835{
5b6d1e4f 4836 target_last_proc_target = nullptr;
ac264b3b 4837 target_last_wait_ptid = minus_one_ptid;
ab1ddbcf 4838 target_last_waitstatus = {};
ac264b3b
MS
4839}
4840
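
An illustrative sketch (editorial, not part of infrun.c) of the intended store/query pattern for the three helpers above; last_status_usage_sketch is a hypothetical function, only the three calls inside it reflect the interfaces as declared above.

/* Hypothetical usage sketch, for illustration only.  */
static void
last_status_usage_sketch (process_stratum_target *target, ptid_t ptid,
                          const target_waitstatus &ws)
{
  /* Right after an event (PTID, WS) is reported by TARGET, record it.  */
  set_last_target_status (target, ptid, ws);

  /* Later consumers (e.g. code presenting the stop) read it back.  */
  process_stratum_target *last_target;
  ptid_t last_ptid;
  target_waitstatus last_ws;
  get_last_target_status (&last_target, &last_ptid, &last_ws);

  /* Once the recorded event can no longer be relied on, discard it.  */
  nullify_last_target_wait_ptid ();
}
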
dcf4fbde 4841/* Switch thread contexts. */
dd80620e
MS
4842
4843static void
00431a78 4844context_switch (execution_control_state *ecs)
dd80620e 4845{
1eb8556f 4846 if (ecs->ptid != inferior_ptid
5b6d1e4f
PA
4847 && (inferior_ptid == null_ptid
4848 || ecs->event_thread != inferior_thread ()))
fd48f117 4849 {
1eb8556f 4850 infrun_debug_printf ("Switching context from %s to %s",
0fab7955
SM
4851 inferior_ptid.to_string ().c_str (),
4852 ecs->ptid.to_string ().c_str ());
fd48f117
DJ
4853 }
4854
00431a78 4855 switch_to_thread (ecs->event_thread);
dd80620e
MS
4856}
4857
d8dd4d5f
PA
4858/* If the target can't tell whether we've hit breakpoints
4859 (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
4860 check whether that could have been caused by a breakpoint. If so,
4861 adjust the PC, per gdbarch_decr_pc_after_break. */
4862
4fa8626c 4863static void
d8dd4d5f 4864adjust_pc_after_break (struct thread_info *thread,
c272a98c 4865 const target_waitstatus &ws)
4fa8626c 4866{
24a73cce
UW
4867 struct regcache *regcache;
4868 struct gdbarch *gdbarch;
118e6252 4869 CORE_ADDR breakpoint_pc, decr_pc;
4fa8626c 4870
4fa8626c
DJ
4871 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
4872 we aren't, just return.
9709f61c
DJ
4873
4874 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
b798847d
UW
4875 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
4876 implemented by software breakpoints should be handled through the normal
4877 breakpoint layer.
8fb3e588 4878
4fa8626c
DJ
4879 NOTE drow/2004-01-31: On some targets, breakpoints may generate
4880 different signals (SIGILL or SIGEMT for instance), but it is less
4881 clear where the PC is pointing afterwards. It may not match
b798847d
UW
4882 gdbarch_decr_pc_after_break. I don't know any specific target that
4883 generates these signals at breakpoints (the code has been in GDB since at
4884 least 1992) so I can not guess how to handle them here.
8fb3e588 4885
e6cf7916
UW
4886 In earlier versions of GDB, a target with
4887 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
b798847d
UW
4888 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
4889 target with both of these set in GDB history, and it seems unlikely to be
4890 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
4fa8626c 4891
c272a98c 4892 if (ws.kind () != TARGET_WAITKIND_STOPPED)
4fa8626c
DJ
4893 return;
4894
c272a98c 4895 if (ws.sig () != GDB_SIGNAL_TRAP)
4fa8626c
DJ
4896 return;
4897
4058b839
PA
4898 /* In reverse execution, when a breakpoint is hit, the instruction
4899 under it has already been de-executed. The reported PC always
4900 points at the breakpoint address, so adjusting it further would
4901 be wrong. E.g., consider this case on a decr_pc_after_break == 1
4902 architecture:
4903
4904 B1 0x08000000 : INSN1
4905 B2 0x08000001 : INSN2
4906 0x08000002 : INSN3
4907 PC -> 0x08000003 : INSN4
4908
4909 Say you're stopped at 0x08000003 as above. Reverse continuing
4910 from that point should hit B2 as below. Reading the PC when the
4911 SIGTRAP is reported should read 0x08000001 and INSN2 should have
4912 been de-executed already.
4913
4914 B1 0x08000000 : INSN1
4915 B2 PC -> 0x08000001 : INSN2
4916 0x08000002 : INSN3
4917 0x08000003 : INSN4
4918
4919 We can't apply the same logic as for forward execution, because
4920 we would wrongly adjust the PC to 0x08000000, since there's a
4921 breakpoint at PC - 1. We'd then report a hit on B1, although
4922 INSN1 hadn't been de-executed yet. Doing nothing is the correct
4923 behaviour. */
4924 if (execution_direction == EXEC_REVERSE)
4925 return;
4926
1cf4d951
PA
4927 /* If the target can tell whether the thread hit a SW breakpoint,
4928 trust it. Targets that can tell also adjust the PC
4929 themselves. */
4930 if (target_supports_stopped_by_sw_breakpoint ())
4931 return;
4932
4933 /* Note that relying on whether a breakpoint is planted in memory to
4934 determine this can fail. E.g., the breakpoint could have been
4935 removed since. Or the thread could have been told to step an
4936 instruction the size of a breakpoint instruction, and only
4937 _after_ was a breakpoint inserted at its address. */
4938
24a73cce
UW
4939 /* If this target does not decrement the PC after breakpoints, then
4940 we have nothing to do. */
00431a78 4941 regcache = get_thread_regcache (thread);
ac7936df 4942 gdbarch = regcache->arch ();
118e6252 4943
527a273a 4944 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
118e6252 4945 if (decr_pc == 0)
24a73cce
UW
4946 return;
4947
f9582a22 4948 const address_space *aspace = thread->inf->aspace.get ();
6c95b8df 4949
8aad930b
AC
4950 /* Find the location where (if we've hit a breakpoint) the
4951 breakpoint would be. */
118e6252 4952 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
8aad930b 4953
1cf4d951
PA
4954 /* If the target can't tell whether a software breakpoint triggered,
4955 fall back to figuring it out based on breakpoints we think were
4956 inserted in the target, and on whether the thread was stepped or
4957 continued. */
4958
1c5cfe86
PA
4959 /* Check whether there actually is a software breakpoint inserted at
4960 that location.
4961
4962 If in non-stop mode, a race condition is possible where we've
4963 removed a breakpoint, but stop events for that breakpoint were
4964 already queued and arrive later. To suppress those spurious
4965 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
1cf4d951
PA
4966 and retire them after a number of stop events are reported. Note
4967 this is a heuristic and can thus get confused. The real fix is
4968 to get the "stopped by SW BP and needs adjustment" info out of
4969 the target/kernel (and thus never reach here; see above). */
6c95b8df 4970 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
fbea99ea
PA
4971 || (target_is_non_stop_p ()
4972 && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
8aad930b 4973 {
6b09f134 4974 std::optional<scoped_restore_tmpl<int>> restore_operation_disable;
abbb1732 4975
8213266a 4976 if (record_full_is_used ())
07036511
TT
4977 restore_operation_disable.emplace
4978 (record_full_gdb_operation_disable_set ());
96429cc8 4979
1c0fdd0e
UW
4980 /* When using hardware single-step, a SIGTRAP is reported for both
4981 a completed single-step and a software breakpoint. Need to
4982 differentiate between the two, as the latter needs adjusting
4983 but the former does not.
4984
4985 The SIGTRAP can be due to a completed hardware single-step only if
4986 - we didn't insert software single-step breakpoints
1c0fdd0e
UW
4987 - this thread is currently being stepped
4988
4989 If any of these conditions does not hold, we must have stopped due
4990 to hitting a software breakpoint, and have to back up to the
4991 breakpoint address.
4992
4993 As a special case, we could have hardware single-stepped a
4994 software breakpoint. In this case (prev_pc == breakpoint_pc),
4995 we also need to back up to the breakpoint address. */
4996
d8dd4d5f
PA
4997 if (thread_has_single_step_breakpoints_set (thread)
4998 || !currently_stepping (thread)
4999 || (thread->stepped_breakpoint
5000 && thread->prev_pc == breakpoint_pc))
515630c5 5001 regcache_write_pc (regcache, breakpoint_pc);
8aad930b 5002 }
4fa8626c
DJ
5003}
5004
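
A short worked example (editorial, not part of infrun.c) of the adjustment performed above, assuming an x86-like target where the breakpoint instruction is one byte long and gdbarch_decr_pc_after_break is 1.

/* Breakpoint planted at 0x400000; the trap reports PC == 0x400001.

     breakpoint_pc = regcache_read_pc (regcache) - decr_pc
                   = 0x400001 - 1
                   = 0x400000

   Since a software breakpoint is indeed inserted at 0x400000, the PC
   is rewritten back to 0x400000 so the stop is attributed to the
   breakpoint rather than to the following instruction.  */
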
c4464ade 5005static bool
bd2b40ac 5006stepped_in_from (frame_info_ptr frame, struct frame_id step_frame_id)
edb3359d
DJ
5007{
5008 for (frame = get_prev_frame (frame);
03acd4d8 5009 frame != nullptr;
edb3359d
DJ
5010 frame = get_prev_frame (frame))
5011 {
a0cbd650 5012 if (get_frame_id (frame) == step_frame_id)
c4464ade
SM
5013 return true;
5014
edb3359d
DJ
5015 if (get_frame_type (frame) != INLINE_FRAME)
5016 break;
5017 }
5018
c4464ade 5019 return false;
edb3359d
DJ
5020}
5021
4a4c04f1
BE
5022/* Look for an inline frame that is marked for skip.
5023 If PREV_FRAME is TRUE start at the previous frame,
5024 otherwise start at the current frame. Stop at the
5025 first non-inline frame, or at the frame where the
5026 step started. */
5027
5028static bool
5029inline_frame_is_marked_for_skip (bool prev_frame, struct thread_info *tp)
5030{
bd2b40ac 5031 frame_info_ptr frame = get_current_frame ();
4a4c04f1
BE
5032
5033 if (prev_frame)
5034 frame = get_prev_frame (frame);
5035
03acd4d8 5036 for (; frame != nullptr; frame = get_prev_frame (frame))
4a4c04f1 5037 {
03acd4d8 5038 const char *fn = nullptr;
4a4c04f1
BE
5039 symtab_and_line sal;
5040 struct symbol *sym;
5041
a0cbd650 5042 if (get_frame_id (frame) == tp->control.step_frame_id)
4a4c04f1
BE
5043 break;
5044 if (get_frame_type (frame) != INLINE_FRAME)
5045 break;
5046
5047 sal = find_frame_sal (frame);
5048 sym = get_frame_function (frame);
5049
03acd4d8 5050 if (sym != nullptr)
4a4c04f1
BE
5051 fn = sym->print_name ();
5052
5053 if (sal.line != 0
5054 && function_name_is_marked_for_skip (fn, sal))
5055 return true;
5056 }
5057
5058 return false;
5059}
5060
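
An illustrative session (editorial, not part of infrun.c) of the behavior the function above enables; the function name inlined_helper is hypothetical.

/* With a function that the compiler inlines into its callers:

     (gdb) skip function inlined_helper
     (gdb) step

   inline_frame_is_marked_for_skip notices the marked inline frame, so
   the step does not appear to enter inlined_helper.  */
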
c65d6b55
PA
5061/* If the event thread has the stop requested flag set, pretend it
5062 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
5063 target_stop). */
5064
5065static bool
5066handle_stop_requested (struct execution_control_state *ecs)
5067{
5068 if (ecs->event_thread->stop_requested)
5069 {
183be222 5070 ecs->ws.set_stopped (GDB_SIGNAL_0);
c65d6b55
PA
5071 handle_signal_stop (ecs);
5072 return true;
5073 }
5074 return false;
5075}
5076
a96d9b2e 5077/* Auxiliary function that handles syscall entry/return events.
c4464ade
SM
5078 It returns true if the inferior should keep going (and GDB
5079 should ignore the event), or false if the event deserves to be
a96d9b2e 5080 processed. */
ca2163eb 5081
c4464ade 5082static bool
ca2163eb 5083handle_syscall_event (struct execution_control_state *ecs)
a96d9b2e 5084{
ca2163eb 5085 struct regcache *regcache;
ca2163eb
PA
5086 int syscall_number;
5087
00431a78 5088 context_switch (ecs);
ca2163eb 5089
00431a78 5090 regcache = get_thread_regcache (ecs->event_thread);
183be222 5091 syscall_number = ecs->ws.syscall_number ();
1edb66d8 5092 ecs->event_thread->set_stop_pc (regcache_read_pc (regcache));
ca2163eb 5093
f087eb27 5094 if (catch_syscall_enabled ()
9fe3819e 5095 && catching_syscall_number (syscall_number))
a96d9b2e 5096 {
1eb8556f 5097 infrun_debug_printf ("syscall number=%d", syscall_number);
a96d9b2e 5098
16c381f0 5099 ecs->event_thread->control.stop_bpstat
f9582a22 5100 = bpstat_stop_status_nowatch (ecs->event_thread->inf->aspace.get (),
d37e0847
PA
5101 ecs->event_thread->stop_pc (),
5102 ecs->event_thread, ecs->ws);
ab04a2af 5103
c65d6b55 5104 if (handle_stop_requested (ecs))
c4464ade 5105 return false;
c65d6b55 5106
ce12b012 5107 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
ca2163eb
PA
5108 {
5109 /* Catchpoint hit. */
c4464ade 5110 return false;
ca2163eb 5111 }
a96d9b2e 5112 }
ca2163eb 5113
c65d6b55 5114 if (handle_stop_requested (ecs))
c4464ade 5115 return false;
c65d6b55 5116
ca2163eb 5117 /* If no catchpoint triggered for this, then keep going. */
ca2163eb 5118 keep_going (ecs);
c4464ade
SM
5119
5120 return true;
a96d9b2e
SDJ
5121}
5122
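
An illustrative session (editorial, not part of infrun.c) that exercises the path above; "write" is just an example syscall name.

/* (gdb) catch syscall write
   (gdb) continue

   On entry to or return from the write syscall, the target reports a
   syscall event; handle_syscall_event finds the matching catchpoint,
   returns false, and the stop is presented to the user.  Without a
   matching catchpoint the event is swallowed and the inferior simply
   keeps going.  */
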
7e324e48
GB
5123/* Lazily fill in the execution_control_state's stop_func_* fields. */
5124
5125static void
5126fill_in_stop_func (struct gdbarch *gdbarch,
5127 struct execution_control_state *ecs)
5128{
5129 if (!ecs->stop_func_filled_in)
5130 {
98a617f8 5131 const block *block;
fe830662 5132 const general_symbol_info *gsi;
98a617f8 5133
7e324e48
GB
5134 /* Don't care about return value; stop_func_start and stop_func_name
5135 will both be 0 if it doesn't work. */
1edb66d8 5136 find_pc_partial_function_sym (ecs->event_thread->stop_pc (),
fe830662
TT
5137 &gsi,
5138 &ecs->stop_func_start,
5139 &ecs->stop_func_end,
5140 &block);
5141 ecs->stop_func_name = gsi == nullptr ? nullptr : gsi->print_name ();
98a617f8
KB
5142
5143 /* The call to find_pc_partial_function, above, will set
5144 stop_func_start and stop_func_end to the start and end
5145 of the range containing the stop pc. If this range
5146 contains the entry pc for the block (which is always the
5147 case for contiguous blocks), advance stop_func_start past
5148 the function's start offset and entrypoint. Note that
5149 stop_func_start is NOT advanced when in a range of a
5150 non-contiguous block that does not contain the entry pc. */
5151 if (block != nullptr
6395b628
SM
5152 && ecs->stop_func_start <= block->entry_pc ()
5153 && block->entry_pc () < ecs->stop_func_end)
98a617f8
KB
5154 {
5155 ecs->stop_func_start
5156 += gdbarch_deprecated_function_start_offset (gdbarch);
5157
2a8339b7
CL
5158 /* PowerPC functions have a Local Entry Point (LEP) and a Global
5159 Entry Point (GEP). There is only one Entry Point (GEP = LEP) for
5160 other architectures. */
5161 ecs->stop_func_alt_start = ecs->stop_func_start;
5162
98a617f8
KB
5163 if (gdbarch_skip_entrypoint_p (gdbarch))
5164 ecs->stop_func_start
5165 = gdbarch_skip_entrypoint (gdbarch, ecs->stop_func_start);
5166 }
591a12a1 5167
7e324e48
GB
5168 ecs->stop_func_filled_in = 1;
5169 }
5170}
5171
4f5d7f63 5172
00431a78 5173/* Return the STOP_SOON field of the inferior pointed at by ECS. */
4f5d7f63
PA
5174
5175static enum stop_kind
00431a78 5176get_inferior_stop_soon (execution_control_state *ecs)
4f5d7f63 5177{
5b6d1e4f 5178 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
4f5d7f63 5179
03acd4d8 5180 gdb_assert (inf != nullptr);
4f5d7f63
PA
5181 return inf->control.stop_soon;
5182}
5183
5b6d1e4f
PA
5184/* Poll for one event out of the current target. Store the resulting
5185 waitstatus in WS, and return the event ptid. Does not block. */
372316f1
PA
5186
5187static ptid_t
5b6d1e4f 5188poll_one_curr_target (struct target_waitstatus *ws)
372316f1
PA
5189{
5190 ptid_t event_ptid;
372316f1
PA
5191
5192 overlay_cache_invalid = 1;
5193
5194 /* Flush target cache before starting to handle each event.
5195 Target was running and cache could be stale. This is just a
5196 heuristic. Running threads may modify target memory, but we
5197 don't get any event. */
41336620 5198 target_dcache_invalidate (current_program_space->aspace);
372316f1 5199
fb85cece 5200 event_ptid = target_wait (minus_one_ptid, ws, TARGET_WNOHANG);
372316f1
PA
5201
5202 if (debug_infrun)
c272a98c 5203 print_target_wait_results (minus_one_ptid, event_ptid, *ws);
372316f1
PA
5204
5205 return event_ptid;
5206}
5207
5b6d1e4f
PA
5208/* Wait for one event out of any target. */
5209
5210static wait_one_event
5211wait_one ()
5212{
5213 while (1)
5214 {
5215 for (inferior *inf : all_inferiors ())
5216 {
5217 process_stratum_target *target = inf->process_target ();
03acd4d8 5218 if (target == nullptr
5b6d1e4f
PA
5219 || !target->is_async_p ()
5220 || !target->threads_executing)
5221 continue;
5222
5223 switch_to_inferior_no_thread (inf);
5224
5225 wait_one_event event;
5226 event.target = target;
5227 event.ptid = poll_one_curr_target (&event.ws);
5228
183be222 5229 if (event.ws.kind () == TARGET_WAITKIND_NO_RESUMED)
5b6d1e4f
PA
5230 {
5231 /* If nothing is resumed, remove the target from the
5232 event loop. */
4a570176 5233 target_async (false);
5b6d1e4f 5234 }
183be222 5235 else if (event.ws.kind () != TARGET_WAITKIND_IGNORE)
5b6d1e4f
PA
5236 return event;
5237 }
5238
5239 /* Block waiting for some event. */
5240
5241 fd_set readfds;
5242 int nfds = 0;
5243
5244 FD_ZERO (&readfds);
5245
5246 for (inferior *inf : all_inferiors ())
5247 {
5248 process_stratum_target *target = inf->process_target ();
03acd4d8 5249 if (target == nullptr
5b6d1e4f
PA
5250 || !target->is_async_p ()
5251 || !target->threads_executing)
5252 continue;
5253
5254 int fd = target->async_wait_fd ();
5255 FD_SET (fd, &readfds);
5256 if (nfds <= fd)
5257 nfds = fd + 1;
5258 }
5259
5260 if (nfds == 0)
5261 {
5262 /* No waitable targets left. All must be stopped. */
d828dbed
PA
5263 infrun_debug_printf ("no waitable targets left");
5264
183be222
SM
5265 target_waitstatus ws;
5266 ws.set_no_resumed ();
03acd4d8 5267 return {nullptr, minus_one_ptid, std::move (ws)};
5b6d1e4f
PA
5268 }
5269
5270 QUIT;
5271
03acd4d8 5272 int numfds = interruptible_select (nfds, &readfds, 0, nullptr, 0);
5b6d1e4f
PA
5273 if (numfds < 0)
5274 {
5275 if (errno == EINTR)
5276 continue;
5277 else
5278 perror_with_name ("interruptible_select");
5279 }
5280 }
5281}
5282
372316f1
PA
5283/* Save the thread's event and stop reason to process it later. */
5284
5285static void
c272a98c 5286save_waitstatus (struct thread_info *tp, const target_waitstatus &ws)
372316f1 5287{
96bbe3ef 5288 infrun_debug_printf ("saving status %s for %s",
c272a98c 5289 ws.to_string ().c_str (),
96bbe3ef 5290 tp->ptid.to_string ().c_str ());
372316f1
PA
5291
5292 /* Record for later. */
c272a98c 5293 tp->set_pending_waitstatus (ws);
372316f1 5294
c272a98c
SM
5295 if (ws.kind () == TARGET_WAITKIND_STOPPED
5296 && ws.sig () == GDB_SIGNAL_TRAP)
372316f1 5297 {
89ba430c 5298 struct regcache *regcache = get_thread_regcache (tp);
f9582a22 5299 const address_space *aspace = tp->inf->aspace.get ();
372316f1
PA
5300 CORE_ADDR pc = regcache_read_pc (regcache);
5301
c272a98c 5302 adjust_pc_after_break (tp, tp->pending_waitstatus ());
372316f1 5303
18493a00
PA
5304 scoped_restore_current_thread restore_thread;
5305 switch_to_thread (tp);
5306
5307 if (target_stopped_by_watchpoint ())
1edb66d8 5308 tp->set_stop_reason (TARGET_STOPPED_BY_WATCHPOINT);
372316f1 5309 else if (target_supports_stopped_by_sw_breakpoint ()
18493a00 5310 && target_stopped_by_sw_breakpoint ())
1edb66d8 5311 tp->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT);
372316f1 5312 else if (target_supports_stopped_by_hw_breakpoint ()
18493a00 5313 && target_stopped_by_hw_breakpoint ())
1edb66d8 5314 tp->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT);
372316f1 5315 else if (!target_supports_stopped_by_hw_breakpoint ()
1edb66d8
SM
5316 && hardware_breakpoint_inserted_here_p (aspace, pc))
5317 tp->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT);
372316f1 5318 else if (!target_supports_stopped_by_sw_breakpoint ()
1edb66d8
SM
5319 && software_breakpoint_inserted_here_p (aspace, pc))
5320 tp->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT);
372316f1
PA
5321 else if (!thread_has_single_step_breakpoints_set (tp)
5322 && currently_stepping (tp))
1edb66d8 5323 tp->set_stop_reason (TARGET_STOPPED_BY_SINGLE_STEP);
372316f1
PA
5324 }
5325}
5326
293b3ebc
TBA
5327/* Mark the non-executing threads accordingly. In all-stop, all
5328 threads of all processes are stopped when we get any event
5329 reported. In non-stop mode, only the event thread stops. */
5330
5331static void
5332mark_non_executing_threads (process_stratum_target *target,
5333 ptid_t event_ptid,
183be222 5334 const target_waitstatus &ws)
293b3ebc
TBA
5335{
5336 ptid_t mark_ptid;
5337
5338 if (!target_is_non_stop_p ())
5339 mark_ptid = minus_one_ptid;
183be222
SM
5340 else if (ws.kind () == TARGET_WAITKIND_SIGNALLED
5341 || ws.kind () == TARGET_WAITKIND_EXITED)
293b3ebc
TBA
5342 {
5343 /* If we're handling a process exit in non-stop mode, even
5344 though threads haven't been deleted yet, one would think
5345 that there is nothing to do, as threads of the dead process
5346 will be soon deleted, and threads of any other process were
5347 left running. However, on some targets, threads survive a
5348 process exit event. E.g., for the "checkpoint" command,
5349 when the current checkpoint/fork exits, linux-fork.c
5350 automatically switches to another fork from within
5351 target_mourn_inferior, by associating the same
5352 inferior/thread to another fork. We haven't mourned yet at
5353 this point, but we must mark any threads left in the
5354 process as not-executing so that finish_thread_state marks
5355 them stopped (in the user's perspective) if/when we present
5356 the stop to the user. */
5357 mark_ptid = ptid_t (event_ptid.pid ());
5358 }
5359 else
5360 mark_ptid = event_ptid;
5361
5362 set_executing (target, mark_ptid, false);
5363
5364 /* Likewise the resumed flag. */
5365 set_resumed (target, mark_ptid, false);
5366}
5367
d758e62c
PA
5368/* Handle one event after stopping threads. If the eventing thread
5369 reports back any interesting event, we leave it pending. If the
5370 eventing thread was in the middle of a displaced step, we
8ff53139
PA
5371 cancel/finish it, and unless the thread's inferior is being
5372 detached, put the thread back in the step-over chain. Returns true
5373 if there are no resumed threads left in the target (thus there's no
5374 point in waiting further), false otherwise. */
d758e62c
PA
5375
5376static bool
5377handle_one (const wait_one_event &event)
5378{
5379 infrun_debug_printf
7dca2ea7 5380 ("%s %s", event.ws.to_string ().c_str (),
0fab7955 5381 event.ptid.to_string ().c_str ());
d758e62c 5382
183be222 5383 if (event.ws.kind () == TARGET_WAITKIND_NO_RESUMED)
d758e62c
PA
5384 {
5385 /* All resumed threads exited. */
5386 return true;
5387 }
183be222
SM
5388 else if (event.ws.kind () == TARGET_WAITKIND_THREAD_EXITED
5389 || event.ws.kind () == TARGET_WAITKIND_EXITED
5390 || event.ws.kind () == TARGET_WAITKIND_SIGNALLED)
d758e62c
PA
5391 {
5392 /* One thread/process exited/signalled. */
5393
5394 thread_info *t = nullptr;
5395
5396 /* The target may have reported just a pid. If so, try
5397 the first non-exited thread. */
5398 if (event.ptid.is_pid ())
5399 {
5400 int pid = event.ptid.pid ();
5401 inferior *inf = find_inferior_pid (event.target, pid);
5402 for (thread_info *tp : inf->non_exited_threads ())
5403 {
5404 t = tp;
5405 break;
5406 }
5407
5408 /* If there is no available thread, the event would
5409 have to be appended to a per-inferior event list,
5410 which does not exist (and if it did, we'd have
5411 to adjust run control command to be able to
5412 resume such an inferior). We assert here instead
5413 of going into an infinite loop. */
5414 gdb_assert (t != nullptr);
5415
5416 infrun_debug_printf
0fab7955 5417 ("using %s", t->ptid.to_string ().c_str ());
d758e62c
PA
5418 }
5419 else
5420 {
9213a6d7 5421 t = event.target->find_thread (event.ptid);
d758e62c
PA
5422 /* Check if this is the first time we see this thread.
5423 Don't bother adding if it individually exited. */
5424 if (t == nullptr
183be222 5425 && event.ws.kind () != TARGET_WAITKIND_THREAD_EXITED)
d758e62c
PA
5426 t = add_thread (event.target, event.ptid);
5427 }
5428
5429 if (t != nullptr)
5430 {
5431 /* Set the threads as non-executing to avoid
5432 another stop attempt on them. */
5433 switch_to_thread_no_regs (t);
5434 mark_non_executing_threads (event.target, event.ptid,
5435 event.ws);
c272a98c 5436 save_waitstatus (t, event.ws);
d758e62c 5437 t->stop_requested = false;
21d48304
PA
5438
5439 if (event.ws.kind () == TARGET_WAITKIND_THREAD_EXITED)
5440 {
5441 if (displaced_step_finish (t, event.ws)
5442 != DISPLACED_STEP_FINISH_STATUS_OK)
5443 {
5444 gdb_assert_not_reached ("displaced_step_finish on "
5445 "exited thread failed");
5446 }
5447 }
d758e62c
PA
5448 }
5449 }
5450 else
5451 {
9213a6d7 5452 thread_info *t = event.target->find_thread (event.ptid);
03acd4d8 5453 if (t == nullptr)
d758e62c
PA
5454 t = add_thread (event.target, event.ptid);
5455
5456 t->stop_requested = 0;
611841bb 5457 t->set_executing (false);
7846f3aa 5458 t->set_resumed (false);
d758e62c
PA
5459 t->control.may_range_step = 0;
5460
5461 /* This may be the first time we see the inferior report
5462 a stop. */
3db13541 5463 if (t->inf->needs_setup)
d758e62c
PA
5464 {
5465 switch_to_thread_no_regs (t);
5466 setup_inferior (0);
5467 }
5468
183be222
SM
5469 if (event.ws.kind () == TARGET_WAITKIND_STOPPED
5470 && event.ws.sig () == GDB_SIGNAL_0)
d758e62c
PA
5471 {
5472 /* We caught the event that we intended to catch, so
1edb66d8 5473 there's no event to save as pending. */
d758e62c 5474
58c01087 5475 if (displaced_step_finish (t, event.ws)
d758e62c
PA
5476 == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
5477 {
5478 /* Add it back to the step-over queue. */
5479 infrun_debug_printf
5480 ("displaced-step of %s canceled",
0fab7955 5481 t->ptid.to_string ().c_str ());
d758e62c
PA
5482
5483 t->control.trap_expected = 0;
8ff53139
PA
5484 if (!t->inf->detaching)
5485 global_thread_step_over_chain_enqueue (t);
d758e62c
PA
5486 }
5487 }
5488 else
5489 {
d758e62c
PA
5490 struct regcache *regcache;
5491
5492 infrun_debug_printf
96bbe3ef 5493 ("target_wait %s, saving status for %s",
7dca2ea7 5494 event.ws.to_string ().c_str (),
96bbe3ef 5495 t->ptid.to_string ().c_str ());
d758e62c
PA
5496
5497 /* Record for later. */
c272a98c 5498 save_waitstatus (t, event.ws);
d758e62c 5499
58c01087 5500 if (displaced_step_finish (t, event.ws)
d758e62c
PA
5501 == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
5502 {
5503 /* Add it back to the step-over queue. */
5504 t->control.trap_expected = 0;
8ff53139
PA
5505 if (!t->inf->detaching)
5506 global_thread_step_over_chain_enqueue (t);
d758e62c
PA
5507 }
5508
5509 regcache = get_thread_regcache (t);
1edb66d8 5510 t->set_stop_pc (regcache_read_pc (regcache));
d758e62c
PA
5511
5512 infrun_debug_printf ("saved stop_pc=%s for %s "
5513 "(currently_stepping=%d)",
99d9c3b9
SM
5514 paddress (current_inferior ()->arch (),
5515 t->stop_pc ()),
0fab7955 5516 t->ptid.to_string ().c_str (),
d758e62c
PA
5517 currently_stepping (t));
5518 }
5519 }
5520
5521 return false;
5522}
5523
d828dbed
PA
5524/* Helper for stop_all_threads. wait_one waits for events until it
5525 sees a TARGET_WAITKIND_NO_RESUMED event. When it sees one, it
5526 disables target_async for the target to stop waiting for events
5527 from it. TARGET_WAITKIND_NO_RESUMED can be delayed though,
5528 consider, debugging against gdbserver:
5529
5530 #1 - Threads 1-5 are running, and thread 1 hits a breakpoint.
5531
5532 #2 - gdb processes the breakpoint hit for thread 1, stops all
5533 threads, and steps thread 1 over the breakpoint. While
5534 stopping threads, some other threads reported interesting
5535 events, which were left pending in the thread's objects
5536 (infrun's queue).
5537
5538 #3 - Thread 1 exits (it stepped an exit syscall), and gdbserver
5539 reports the thread exit for thread 1. The event ends up in
5540 remote's stop reply queue.
5541
5542 #4 - That was the last resumed thread, so gdbserver reports
5543 no-resumed, and that event also ends up in remote's stop
5544 reply queue, queued after the thread exit from #3.
5545
5546 #5 - gdb processes the thread exit event, which finishes the
5547 step-over, and so gdb restarts all threads (threads with
5548 pending events are left marked resumed, but aren't set
5549 executing). The no-resumed event is still left pending in
5550 the remote stop reply queue.
5551
5552 #6 - Since there are now resumed threads with pending breakpoint
5553 hits, gdb picks one at random to process next.
5554
5555 #7 - gdb picks the breakpoint hit for thread 2 this time, and that
5556 breakpoint also needs to be stepped over, so gdb stops all
5557 threads again.
5558
5559 #8 - stop_all_threads counts the number of expected stops and calls
5560 wait_one once for each.
5561
5562 #9 - The first wait_one call collects the no-resumed event from #4
5563 above.
5564
5565 #10 - Seeing the no-resumed event, wait_one disables target async
5566 for the remote target, to stop waiting for events from it.
5567 wait_one from here on always returns no-resumed directly
5568 without reaching the target.
5569
5570 #11 - stop_all_threads still hasn't seen all the stops it expects,
5571 so it does another pass.
5572
5573 #12 - Since the remote target is not async (disabled in #10),
5574 wait_one doesn't wait on it, so it won't see the expected
5575 stops, and instead returns no-resumed directly.
5576
5577 #13 - stop_all_threads still hasn't seen all the stops, so it
5578 does another pass. goto #12, looping forever.
5579
5580 To handle this, we explicitly (re-)enable target async on all
5581 targets that can async every time stop_all_threads goes to wait for
5582 the expected stops. */
5583
5584static void
5585reenable_target_async ()
5586{
5587 for (inferior *inf : all_inferiors ())
5588 {
5589 process_stratum_target *target = inf->process_target ();
5590 if (target != nullptr
5591 && target->threads_executing
5592 && target->can_async_p ()
5593 && !target->is_async_p ())
5594 {
5595 switch_to_inferior_no_thread (inf);
5596 target_async (1);
5597 }
5598 }
5599}
5600
6efcd9a8 5601/* See infrun.h. */
372316f1 5602
6efcd9a8 5603void
148cf134 5604stop_all_threads (const char *reason, inferior *inf)
372316f1
PA
5605{
5606 /* We may need multiple passes to discover all threads. */
5607 int pass;
5608 int iterations = 0;
372316f1 5609
53cccef1 5610 gdb_assert (exists_non_stop_target ());
372316f1 5611
148cf134
SM
5612 INFRUN_SCOPED_DEBUG_START_END ("reason=%s, inf=%d", reason,
5613 inf != nullptr ? inf->num : -1);
372316f1 5614
1f9d9e32
AB
5615 infrun_debug_show_threads ("non-exited threads",
5616 all_non_exited_threads ());
5617
00431a78 5618 scoped_restore_current_thread restore_thread;
372316f1 5619
148cf134 5620 /* Enable thread events on relevant targets. */
6ad82919
TBA
5621 for (auto *target : all_non_exited_process_targets ())
5622 {
148cf134
SM
5623 if (inf != nullptr && inf->process_target () != target)
5624 continue;
5625
6ad82919
TBA
5626 switch_to_target_no_thread (target);
5627 target_thread_events (true);
5628 }
5629
5630 SCOPE_EXIT
5631 {
148cf134 5632 /* Disable thread events on relevant targets. */
6ad82919
TBA
5633 for (auto *target : all_non_exited_process_targets ())
5634 {
148cf134
SM
5635 if (inf != nullptr && inf->process_target () != target)
5636 continue;
5637
6ad82919
TBA
5638 switch_to_target_no_thread (target);
5639 target_thread_events (false);
5640 }
5641
17417fb0 5642 /* Use debug_prefixed_printf directly to get a meaningful function
dda83cd7 5643 name. */
6ad82919 5644 if (debug_infrun)
17417fb0 5645 debug_prefixed_printf ("infrun", "stop_all_threads", "done");
6ad82919 5646 };
65706a29 5647
372316f1
PA
5648 /* Request threads to stop, and then wait for the stops. Because
5649 threads we already know about can spawn more threads while we're
5650 trying to stop them, and we only learn about new threads when we
5651 update the thread list, do this in a loop, and keep iterating
5652 until two passes find no threads that need to be stopped. */
5653 for (pass = 0; pass < 2; pass++, iterations++)
5654 {
1eb8556f 5655 infrun_debug_printf ("pass=%d, iterations=%d", pass, iterations);
372316f1
PA
5656 while (1)
5657 {
29d6859f 5658 int waits_needed = 0;
372316f1 5659
a05575d3
TBA
5660 for (auto *target : all_non_exited_process_targets ())
5661 {
148cf134
SM
5662 if (inf != nullptr && inf->process_target () != target)
5663 continue;
5664
a05575d3
TBA
5665 switch_to_target_no_thread (target);
5666 update_thread_list ();
5667 }
372316f1
PA
5668
5669 /* Go through all threads looking for threads that we need
5670 to tell the target to stop. */
08036331 5671 for (thread_info *t : all_non_exited_threads ())
372316f1 5672 {
148cf134
SM
5673 if (inf != nullptr && t->inf != inf)
5674 continue;
5675
53cccef1
TBA
5676 /* For a single-target setting with an all-stop target,
5677 we would not even arrive here. For a multi-target
5678 setting, until GDB is able to handle a mixture of
5679 all-stop and non-stop targets, simply skip all-stop
5680 targets' threads. This should be fine due to the
5681 protection of 'check_multi_target_resumption'. */
5682
5683 switch_to_thread_no_regs (t);
5684 if (!target_is_non_stop_p ())
5685 continue;
5686
611841bb 5687 if (t->executing ())
372316f1
PA
5688 {
5689 /* If already stopping, don't request a stop again.
5690 We just haven't seen the notification yet. */
5691 if (!t->stop_requested)
5692 {
1eb8556f 5693 infrun_debug_printf (" %s executing, need stop",
0fab7955 5694 t->ptid.to_string ().c_str ());
372316f1
PA
5695 target_stop (t->ptid);
5696 t->stop_requested = 1;
5697 }
5698 else
5699 {
1eb8556f 5700 infrun_debug_printf (" %s executing, already stopping",
0fab7955 5701 t->ptid.to_string ().c_str ());
372316f1
PA
5702 }
5703
5704 if (t->stop_requested)
29d6859f 5705 waits_needed++;
372316f1
PA
5706 }
5707 else
5708 {
1eb8556f 5709 infrun_debug_printf (" %s not executing",
0fab7955 5710 t->ptid.to_string ().c_str ());
372316f1
PA
5711
5712 /* The thread may be not executing, but still be
5713 resumed with a pending status to process. */
7846f3aa 5714 t->set_resumed (false);
372316f1
PA
5715 }
5716 }
5717
29d6859f 5718 if (waits_needed == 0)
372316f1
PA
5719 break;
5720
5721 /* If we find new threads on the second iteration, restart
5722 over. We want to see two iterations in a row with all
5723 threads stopped. */
5724 if (pass > 0)
5725 pass = -1;
5726
d828dbed
PA
5727 reenable_target_async ();
5728
29d6859f 5729 for (int i = 0; i < waits_needed; i++)
c29705b7 5730 {
29d6859f 5731 wait_one_event event = wait_one ();
d758e62c
PA
5732 if (handle_one (event))
5733 break;
372316f1
PA
5734 }
5735 }
5736 }
372316f1
PA
5737}
5738
21d48304
PA
5739/* Handle a TARGET_WAITKIND_NO_RESUMED event. Return true if we
5740 handled the event and should continue waiting. Return false if we
5741 should stop and report the event to the user. */
f4836ba9 5742
c4464ade 5743static bool
f4836ba9
PA
5744handle_no_resumed (struct execution_control_state *ecs)
5745{
3b12939d 5746 if (target_can_async_p ())
f4836ba9 5747 {
c4464ade 5748 bool any_sync = false;
f4836ba9 5749
2dab0c7b 5750 for (ui *ui : all_uis ())
3b12939d
PA
5751 {
5752 if (ui->prompt_state == PROMPT_BLOCKED)
5753 {
c4464ade 5754 any_sync = true;
3b12939d
PA
5755 break;
5756 }
5757 }
5758 if (!any_sync)
5759 {
5760 /* There were no unwaited-for children left in the target, but,
5761 we're not synchronously waiting for events either. Just
5762 ignore. */
5763
1eb8556f 5764 infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED (ignoring: bg)");
3b12939d 5765 prepare_to_wait (ecs);
c4464ade 5766 return true;
3b12939d 5767 }
f4836ba9
PA
5768 }
5769
5770 /* Otherwise, if we were running a synchronous execution command, we
5771 may need to cancel it and give the user back the terminal.
5772
5773 In non-stop mode, the target can't tell whether we've already
5774 consumed previous stop events, so it can end up sending us a
5775 no-resumed event like so:
5776
5777 #0 - thread 1 is left stopped
5778
5779 #1 - thread 2 is resumed and hits breakpoint
dda83cd7 5780 -> TARGET_WAITKIND_STOPPED
f4836ba9
PA
5781
5782 #2 - thread 3 is resumed and exits
dda83cd7 5783 this is the last resumed thread, so
f4836ba9
PA
5784 -> TARGET_WAITKIND_NO_RESUMED
5785
5786 #3 - gdb processes stop for thread 2 and decides to re-resume
dda83cd7 5787 it.
f4836ba9
PA
5788
5789 #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
dda83cd7 5790 thread 2 is now resumed, so the event should be ignored.
f4836ba9
PA
5791
5792 IOW, if the stop for thread 2 doesn't end a foreground command,
5793 then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
5794 event. But it could be that the event meant that thread 2 itself
5795 (or whatever other thread was the last resumed thread) exited.
5796
5797 To address this we refresh the thread list and check whether we
5798 have resumed threads _now_. In the example above, this removes
5799 thread 3 from the thread list. If thread 2 was re-resumed, we
5800 ignore this event. If we find no thread resumed, then we cancel
7d3badc6
PA
5801 the synchronous command and show "no unwaited-for children left" to the
5802 user. */
f4836ba9 5803
d6cc5d98 5804 inferior *curr_inf = current_inferior ();
7d3badc6 5805
d6cc5d98 5806 scoped_restore_current_thread restore_thread;
1e864019 5807 update_thread_list ();
d6cc5d98
PA
5808
5809 /* If:
5810
5811 - the current target has no thread executing, and
5812 - the current inferior is native, and
5813 - the current inferior is the one which has the terminal, and
5814 - we did nothing,
5815
5816 then a Ctrl-C from this point on would remain stuck in the
5817 kernel, until a thread resumes and dequeues it. That would
5818 result in the GDB CLI not reacting to Ctrl-C, not able to
5819 interrupt the program. To address this, if the current inferior
5820 no longer has any thread executing, we give the terminal to some
5821 other inferior that has at least one thread executing. */
5822 bool swap_terminal = true;
5823
5824 /* Whether to ignore this TARGET_WAITKIND_NO_RESUMED event, or
5825 whether to report it to the user. */
5826 bool ignore_event = false;
7d3badc6
PA
5827
5828 for (thread_info *thread : all_non_exited_threads ())
f4836ba9 5829 {
611841bb 5830 if (swap_terminal && thread->executing ())
d6cc5d98
PA
5831 {
5832 if (thread->inf != curr_inf)
5833 {
5834 target_terminal::ours ();
5835
5836 switch_to_thread (thread);
5837 target_terminal::inferior ();
5838 }
5839 swap_terminal = false;
5840 }
5841
4d772ea2 5842 if (!ignore_event && thread->resumed ())
f4836ba9 5843 {
7d3badc6
PA
5844 /* Either there were no unwaited-for children left in the
5845 target at some point, but there are now, or some target
5846 other than the eventing one has unwaited-for children
5847 left. Just ignore. */
1eb8556f
SM
5848 infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED "
5849 "(ignoring: found resumed)");
d6cc5d98
PA
5850
5851 ignore_event = true;
f4836ba9 5852 }
d6cc5d98
PA
5853
5854 if (ignore_event && !swap_terminal)
5855 break;
5856 }
5857
5858 if (ignore_event)
5859 {
5860 switch_to_inferior_no_thread (curr_inf);
5861 prepare_to_wait (ecs);
c4464ade 5862 return true;
f4836ba9
PA
5863 }
5864
5865 /* Go ahead and report the event. */
c4464ade 5866 return false;
f4836ba9
PA
5867}
5868
21d48304
PA
5869/* Handle a TARGET_WAITKIND_THREAD_EXITED event. Return true if we
5870 handled the event and should continue waiting. Return false if we
5871 should stop and report the event to the user. */
5872
5873static bool
5874handle_thread_exited (execution_control_state *ecs)
5875{
5876 context_switch (ecs);
5877
5878 /* Clear these so we don't re-start the thread stepping over a
5879 breakpoint/watchpoint. */
5880 ecs->event_thread->stepping_over_breakpoint = 0;
5881 ecs->event_thread->stepping_over_watchpoint = 0;
5882
9488c327
PA
5883 /* If the thread had an FSM, then abort the command. But only after
5884 finishing the step over, as in non-stop mode, aborting this
5885 thread's command should not interfere with other threads. We
5886 must check this before finish_step over, however, which may
5887 update the thread list and delete the event thread. */
5888 bool abort_cmd = (ecs->event_thread->thread_fsm () != nullptr);
5889
45fd40cf
PA
5890 /* Mark the thread exited right now, because finish_step_over may
5891 update the thread list and that may delete the thread silently
5892 (depending on target), while we always want to emit the "[Thread
5893 ... exited]" notification. Don't actually delete the thread yet,
5894 because we need to pass its pointer down to finish_step_over. */
5895 set_thread_exited (ecs->event_thread);
5896
21d48304
PA
5897 /* Maybe the thread was doing a step-over, if so release
5898 resources and start any further pending step-overs.
5899
5900 If we are on a non-stop target and the thread was doing an
5901 in-line step, this also restarts the other threads. */
5902 int ret = finish_step_over (ecs);
5903
5904 /* finish_step_over returns true if it moves ecs' wait status
5905 back into the thread, so that we go handle another pending
5906 event before this one. But we know it never does that if
5907 the event thread has exited. */
5908 gdb_assert (ret == 0);
5909
9488c327
PA
5910 if (abort_cmd)
5911 {
d0b59149
PA
5912 /* We're stopping for the thread exit event. Switch to the
5913 event thread again, as finish_step_over may have switched
5914 threads. */
5915 switch_to_thread (ecs->event_thread);
9488c327
PA
5916 ecs->event_thread = nullptr;
5917 return false;
5918 }
5919
21d48304
PA
5920 /* If finish_step_over started a new in-line step-over, don't
5921 try to restart anything else. */
5922 if (step_over_info_valid_p ())
5923 {
5924 delete_thread (ecs->event_thread);
5925 return true;
5926 }
5927
5928 /* Maybe we are on an all-stop target and we got this event
5929 while doing a step-like command on another thread. If so,
5930 go back to doing that. If this thread was stepping,
5931 switch_back_to_stepped_thread will consider that the thread
5932 was interrupted mid-step and will try to keep stepping it. We
5933 don't want that; the thread is gone. So clear the proceed
5934 status so it doesn't do that. */
5935 clear_proceed_status_thread (ecs->event_thread);
5936 if (switch_back_to_stepped_thread (ecs))
5937 {
5938 delete_thread (ecs->event_thread);
5939 return true;
5940 }
5941
5942 inferior *inf = ecs->event_thread->inf;
5943 bool slock_applies = schedlock_applies (ecs->event_thread);
5944
5945 delete_thread (ecs->event_thread);
5946 ecs->event_thread = nullptr;
5947
5948 /* Continue handling the event as if we had gotten a
5949 TARGET_WAITKIND_NO_RESUMED. */
5950 auto handle_as_no_resumed = [ecs] ()
5951 {
5952 /* handle_no_resumed doesn't really look at the event kind, but
5953 normal_stop does. */
5954 ecs->ws.set_no_resumed ();
5955 ecs->event_thread = nullptr;
5956 ecs->ptid = minus_one_ptid;
5957
5958 /* Re-record the last target status. */
5959 set_last_target_status (ecs->target, ecs->ptid, ecs->ws);
5960
5961 return handle_no_resumed (ecs);
5962 };
5963
5964 /* If we are on an all-stop target, the target has stopped all
5965 threads to report the event. We don't actually want to
5966 stop, so restart the threads. */
5967 if (!target_is_non_stop_p ())
5968 {
5969 if (slock_applies)
5970 {
5971 /* Since the target is !non-stop, then everything is stopped
5972 at this point, and we can't assume we'll get further
5973 events until we resume the target again. Handle this
5974 event as if it were a TARGET_WAITKIND_NO_RESUMED. Note
5975 this refreshes the thread list and checks whether there
5976 are other resumed threads before deciding whether to
5977 print "no-unwaited-for left". This is important because
5978 the user could have done:
5979
5980 (gdb) set scheduler-locking on
5981 (gdb) thread 1
5982 (gdb) c&
5983 (gdb) thread 2
5984 (gdb) c
5985
5986 ... and only one of the threads exited. */
5987 return handle_as_no_resumed ();
5988 }
5989 else
5990 {
5991 /* Switch to the first non-exited thread we can find, and
5992 resume. */
5993 auto range = inf->non_exited_threads ();
5994 if (range.begin () == range.end ())
5995 {
5996 /* Looks like the target reported a
5997 TARGET_WAITKIND_THREAD_EXITED for its last known
5998 thread. */
5999 return handle_as_no_resumed ();
6000 }
6001 thread_info *non_exited_thread = *range.begin ();
6002 switch_to_thread (non_exited_thread);
6003 insert_breakpoints ();
6004 resume (GDB_SIGNAL_0);
6005 }
6006 }
6007
6008 prepare_to_wait (ecs);
6009 return true;
6010}
6011
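/* Informal summary of the paths through handle_thread_exited (not
   exhaustive): the exit event first finishes any step-over the thread
   owned; then either (a) the thread had a thread_fsm, so the command
   it was running is aborted and we stop, (b) another thread's
   step-over or interrupted step is resumed, or (c) the exited thread
   was effectively the last interesting one (scheduler-locking applied,
   or no non-exited thread remains), in which case the event is
   re-handled as if it were TARGET_WAITKIND_NO_RESUMED.  */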
05ba8510
PA
6012/* Given an execution control state that has been freshly filled in by
6013 an event from the inferior, figure out what it means and take
6014 appropriate action.
6015
6016 The alternatives are:
6017
22bcd14b 6018 1) stop_waiting and return; to really stop and return to the
05ba8510
PA
6019 debugger.
6020
6021 2) keep_going and return; to wait for the next event (set
6022 ecs->event_thread->stepping_over_breakpoint to 1 to single step
6023 once). */
c906108c 6024
ec9499be 6025static void
595915c1 6026handle_inferior_event (struct execution_control_state *ecs)
cd0fc7c3 6027{
595915c1
TT
6028 /* Make sure that all temporary struct value objects that were
6029 created during the handling of the event get deleted at the
6030 end. */
6031 scoped_value_mark free_values;
6032
7dca2ea7 6033 infrun_debug_printf ("%s", ecs->ws.to_string ().c_str ());
c29705b7 6034
183be222 6035 if (ecs->ws.kind () == TARGET_WAITKIND_IGNORE)
28736962
PA
6036 {
6037 /* We had an event in the inferior, but we are not interested in
6038 handling it at this level. The lower layers have already
6039 done what needs to be done, if anything.
6040
6041 One of the possible circumstances for this is when the
6042 inferior produces output for the console. The inferior has
6043 not stopped, and we are ignoring the event. Another possible
6044 circumstance is any event which the lower level knows will be
6045 reported multiple times without an intervening resume. */
28736962
PA
6046 prepare_to_wait (ecs);
6047 return;
6048 }
6049
183be222 6050 if (ecs->ws.kind () == TARGET_WAITKIND_NO_RESUMED
f4836ba9
PA
6051 && handle_no_resumed (ecs))
6052 return;
0e5bf2a8 6053
5b6d1e4f
PA
6054 /* Cache the last target/ptid/waitstatus. */
6055 set_last_target_status (ecs->target, ecs->ptid, ecs->ws);
e02bc4cc 6056
ca005067 6057 /* Always clear state belonging to the previous time we stopped. */
aa7d318d 6058 stop_stack_dummy = STOP_NONE;
ca005067 6059
183be222 6060 if (ecs->ws.kind () == TARGET_WAITKIND_NO_RESUMED)
0e5bf2a8
PA
6061 {
6062 /* No unwaited-for children left. IOW, all resumed children
6063 have exited. */
22bcd14b 6064 stop_waiting (ecs);
0e5bf2a8
PA
6065 return;
6066 }
6067
183be222
SM
6068 if (ecs->ws.kind () != TARGET_WAITKIND_EXITED
6069 && ecs->ws.kind () != TARGET_WAITKIND_SIGNALLED)
359f5fe6 6070 {
9213a6d7 6071 ecs->event_thread = ecs->target->find_thread (ecs->ptid);
359f5fe6 6072 /* If it's a new thread, add it to the thread database. */
03acd4d8 6073 if (ecs->event_thread == nullptr)
5b6d1e4f 6074 ecs->event_thread = add_thread (ecs->target, ecs->ptid);
c1e36e3e
PA
6075
6076 /* Disable range stepping. If the next step request could use a
6077 range, this will end up being re-enabled then. */
6078 ecs->event_thread->control.may_range_step = 0;
359f5fe6 6079 }
88ed393a
JK
6080
6081 /* Dependent on valid ECS->EVENT_THREAD. */
c272a98c 6082 adjust_pc_after_break (ecs->event_thread, ecs->ws);
88ed393a
JK
6083
6084 /* Dependent on the current PC value modified by adjust_pc_after_break. */
6085 reinit_frame_cache ();
6086
28736962
PA
6087 breakpoint_retire_moribund ();
6088
2b009048
DJ
6089 /* First, distinguish signals caused by the debugger from signals
6090 that have to do with the program's own actions. Note that
6091 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
6092 on the operating system version. Here we detect when a SIGILL or
6093 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
6094 something similar for SIGSEGV, since a SIGSEGV will be generated
6095 when we're trying to execute a breakpoint instruction on a
6096 non-executable stack. This happens for call dummy breakpoints
6097 for architectures like SPARC that place call dummies on the
6098 stack. */
183be222
SM
6099 if (ecs->ws.kind () == TARGET_WAITKIND_STOPPED
6100 && (ecs->ws.sig () == GDB_SIGNAL_ILL
6101 || ecs->ws.sig () == GDB_SIGNAL_SEGV
6102 || ecs->ws.sig () == GDB_SIGNAL_EMT))
2b009048 6103 {
00431a78 6104 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
de0a0249 6105
f9582a22 6106 if (breakpoint_inserted_here_p (ecs->event_thread->inf->aspace.get (),
de0a0249
UW
6107 regcache_read_pc (regcache)))
6108 {
1eb8556f 6109 infrun_debug_printf ("Treating signal as SIGTRAP");
183be222 6110 ecs->ws.set_stopped (GDB_SIGNAL_TRAP);
de0a0249 6111 }
2b009048
DJ
6112 }
6113
293b3ebc 6114 mark_non_executing_threads (ecs->target, ecs->ptid, ecs->ws);
8c90c137 6115
183be222 6116 switch (ecs->ws.kind ())
488f131b
JB
6117 {
6118 case TARGET_WAITKIND_LOADED:
72d383bb
SM
6119 {
6120 context_switch (ecs);
6121 /* Ignore gracefully during startup of the inferior, as it might
6122 be the shell which has just loaded some objects; otherwise,
6123 add the symbols for the newly loaded objects. Also ignore at
6124 the beginning of an attach or remote session; we will query
6125 the full list of libraries once the connection is
6126 established. */
6127
6128 stop_kind stop_soon = get_inferior_stop_soon (ecs);
6129 if (stop_soon == NO_STOP_QUIETLY)
6130 {
6131 struct regcache *regcache;
edcc5120 6132
72d383bb 6133 regcache = get_thread_regcache (ecs->event_thread);
edcc5120 6134
72d383bb 6135 handle_solib_event ();
ab04a2af 6136
9279eb5c 6137 ecs->event_thread->set_stop_pc (regcache_read_pc (regcache));
f9582a22 6138 address_space *aspace = ecs->event_thread->inf->aspace.get ();
72d383bb 6139 ecs->event_thread->control.stop_bpstat
f9582a22 6140 = bpstat_stop_status_nowatch (aspace,
d37e0847
PA
6141 ecs->event_thread->stop_pc (),
6142 ecs->event_thread, ecs->ws);
c65d6b55 6143
72d383bb 6144 if (handle_stop_requested (ecs))
94c57d6a 6145 return;
488f131b 6146
72d383bb
SM
6147 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
6148 {
6149 /* A catchpoint triggered. */
6150 process_event_stop_test (ecs);
6151 return;
6152 }
55409f9d 6153
72d383bb
SM
6154 /* If requested, stop when the dynamic linker notifies
6155 gdb of events. This allows the user to get control
6156 and place breakpoints in initializer routines for
6157 dynamically loaded objects (among other things). */
1edb66d8 6158 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
72d383bb
SM
6159 if (stop_on_solib_events)
6160 {
6161 /* Make sure we print "Stopped due to solib-event" in
6162 normal_stop. */
6163 stop_print_frame = true;
b0f4b84b 6164
72d383bb
SM
6165 stop_waiting (ecs);
6166 return;
6167 }
6168 }
b0f4b84b 6169
72d383bb
SM
6170 /* If we are skipping through a shell, or through shared library
6171 loading that we aren't interested in, resume the program. If
6172 we're running the program normally, also resume. */
6173 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
6174 {
6175 /* Loading of shared libraries might have changed breakpoint
6176 addresses. Make sure new breakpoints are inserted. */
6177 if (stop_soon == NO_STOP_QUIETLY)
6178 insert_breakpoints ();
6179 resume (GDB_SIGNAL_0);
6180 prepare_to_wait (ecs);
6181 return;
6182 }
5c09a2c5 6183
72d383bb
SM
6184 /* But stop if we're attaching or setting up a remote
6185 connection. */
6186 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
6187 || stop_soon == STOP_QUIETLY_REMOTE)
6188 {
6189 infrun_debug_printf ("quietly stopped");
6190 stop_waiting (ecs);
6191 return;
6192 }
6193
f34652de 6194 internal_error (_("unhandled stop_soon: %d"), (int) stop_soon);
72d383bb 6195 }
c5aa993b 6196
488f131b 6197 case TARGET_WAITKIND_SPURIOUS:
c65d6b55
PA
6198 if (handle_stop_requested (ecs))
6199 return;
00431a78 6200 context_switch (ecs);
64ce06e4 6201 resume (GDB_SIGNAL_0);
488f131b
JB
6202 prepare_to_wait (ecs);
6203 return;
c5aa993b 6204
65706a29 6205 case TARGET_WAITKIND_THREAD_CREATED:
c65d6b55
PA
6206 if (handle_stop_requested (ecs))
6207 return;
00431a78 6208 context_switch (ecs);
65706a29
PA
6209 if (!switch_back_to_stepped_thread (ecs))
6210 keep_going (ecs);
6211 return;
6212
21d48304
PA
6213 case TARGET_WAITKIND_THREAD_EXITED:
6214 if (handle_thread_exited (ecs))
6215 return;
6216 stop_waiting (ecs);
6217 break;
6218
488f131b 6219 case TARGET_WAITKIND_EXITED:
940c3c06 6220 case TARGET_WAITKIND_SIGNALLED:
18493a00
PA
6221 {
6222 /* Depending on the system, ecs->ptid may point to a thread or
6223 to a process. On some targets, target_mourn_inferior may
6224 need to have access to the just-exited thread. That is the
6225 case for GNU/Linux's "checkpoint" support, for example.
6226 Call the switch_to_xxx routine as appropriate. */
9213a6d7 6227 thread_info *thr = ecs->target->find_thread (ecs->ptid);
18493a00
PA
6228 if (thr != nullptr)
6229 switch_to_thread (thr);
6230 else
6231 {
6232 inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
6233 switch_to_inferior_no_thread (inf);
6234 }
6235 }
6c95b8df 6236 handle_vfork_child_exec_or_exit (0);
223ffa71 6237 target_terminal::ours (); /* Must do this before mourn anyway. */
488f131b 6238
0c557179
SDJ
6239 /* Clear any previous state of convenience variables. */
6240 clear_exit_convenience_vars ();
6241
183be222 6242 if (ecs->ws.kind () == TARGET_WAITKIND_EXITED)
940c3c06
PA
6243 {
6244 /* Record the exit code in the convenience variable $_exitcode, so
6245 that the user can inspect this again later. */
6246 set_internalvar_integer (lookup_internalvar ("_exitcode"),
183be222 6247 (LONGEST) ecs->ws.exit_status ());
940c3c06
PA
6248
6249 /* Also record this in the inferior itself. */
30220b46 6250 current_inferior ()->has_exit_code = true;
183be222 6251 current_inferior ()->exit_code = (LONGEST) ecs->ws.exit_status ();
8cf64490 6252
98eb56a4 6253 /* Support the --return-child-result option. */
183be222 6254 return_child_result_value = ecs->ws.exit_status ();
98eb56a4 6255
bf64d1d5 6256 interps_notify_exited (ecs->ws.exit_status ());
940c3c06
PA
6257 }
6258 else
0c557179 6259 {
27b1f19f 6260 struct gdbarch *gdbarch = current_inferior ()->arch ();
0c557179
SDJ
6261
6262 if (gdbarch_gdb_signal_to_target_p (gdbarch))
6263 {
6264 /* Set the value of the internal variable $_exitsignal,
6265 which holds the signal uncaught by the inferior. */
6266 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
6267 gdbarch_gdb_signal_to_target (gdbarch,
183be222 6268 ecs->ws.sig ()));
0c557179
SDJ
6269 }
6270 else
6271 {
6272 /* We don't have access to the target's method used for
6273 converting between signal numbers (GDB's internal
6274 representation <-> target's representation).
6275 Therefore, we cannot do a good job of displaying this
6276 information to the user. It's better to just warn
6277 her about it (if infrun debugging is enabled), and
6278 give up. */
1eb8556f
SM
6279 infrun_debug_printf ("Cannot fill $_exitsignal with the correct "
6280 "signal number.");
0c557179
SDJ
6281 }
6282
d6bd2ef5 6283 interps_notify_signal_exited (ecs->ws.sig ());
0c557179 6284 }
8cf64490 6285
488f131b 6286 gdb_flush (gdb_stdout);
bc1e6c81 6287 target_mourn_inferior (inferior_ptid);
c4464ade 6288 stop_print_frame = false;
22bcd14b 6289 stop_waiting (ecs);
488f131b 6290 return;
c5aa993b 6291
488f131b 6292 case TARGET_WAITKIND_FORKED:
deb3b17b 6293 case TARGET_WAITKIND_VFORKED:
0d36baa9
PA
6294 case TARGET_WAITKIND_THREAD_CLONED:
6295
6296 displaced_step_finish (ecs->event_thread, ecs->ws);
6297
6298 /* Start a new step-over in another thread if there's one that
6299 needs it. */
6300 start_step_over ();
e2d96639 6301
00431a78 6302 context_switch (ecs);
5a2901d9 6303
b242c3c2
PA
6304 /* Immediately detach breakpoints from the child before there's
6305 any chance of letting the user delete breakpoints from the
6306 breakpoint lists. If we don't do this early, it's easy to
6307 leave leftover traps in the child, viz.: "break foo; catch
6308 fork; c; <fork>; del; c; <child calls foo>". We only follow
6309 the fork on the last `continue', and by that time the
6310 breakpoint at "foo" is long gone from the breakpoint table.
6311 If we vforked, then we don't need to unpatch here, since both
6312 parent and child are sharing the same memory pages; we'll
6313 need to unpatch at follow/detach time instead to be certain
6314 that new breakpoints added between catchpoint hit time and
6315 vfork follow are detached. */
0d36baa9 6316 if (ecs->ws.kind () == TARGET_WAITKIND_FORKED)
b242c3c2 6317 {
b242c3c2
PA
6318 /* This won't actually modify the breakpoint list, but will
6319 physically remove the breakpoints from the child. */
183be222 6320 detach_breakpoints (ecs->ws.child_ptid ());
b242c3c2
PA
6321 }
6322
34b7e8a6 6323 delete_just_stopped_threads_single_step_breakpoints ();
d03285ec 6324
e58b0e63
PA
6325 /* In case the event is caught by a catchpoint, remember that
6326 the event is to be followed at the next resume of the thread,
6327 and not immediately. */
6328 ecs->event_thread->pending_follow = ecs->ws;
6329
1edb66d8
SM
6330 ecs->event_thread->set_stop_pc
6331 (regcache_read_pc (get_thread_regcache (ecs->event_thread)));
675bf4cb 6332
16c381f0 6333 ecs->event_thread->control.stop_bpstat
f9582a22 6334 = bpstat_stop_status_nowatch (ecs->event_thread->inf->aspace.get (),
d37e0847
PA
6335 ecs->event_thread->stop_pc (),
6336 ecs->event_thread, ecs->ws);
675bf4cb 6337
c65d6b55
PA
6338 if (handle_stop_requested (ecs))
6339 return;
6340
ce12b012
PA
6341 /* If no catchpoint triggered for this, then keep going. Note
6342 that we're interested in knowing the bpstat actually causes a
6343 stop, not just if it may explain the signal. Software
6344 watchpoints, for example, always appear in the bpstat. */
6345 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
04e68871 6346 {
5ab2fbf1 6347 bool follow_child
0d36baa9
PA
6348 = (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED
6349 && follow_fork_mode_string == follow_fork_mode_child);
e58b0e63 6350
1edb66d8 6351 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
e58b0e63 6352
5b6d1e4f
PA
6353 process_stratum_target *targ
6354 = ecs->event_thread->inf->process_target ();
6355
0d36baa9
PA
6356 bool should_resume;
6357 if (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED)
6358 should_resume = follow_fork ();
6359 else
6360 {
6361 should_resume = true;
6362 inferior *inf = ecs->event_thread->inf;
6363 inf->top_target ()->follow_clone (ecs->ws.child_ptid ());
6364 ecs->event_thread->pending_follow.set_spurious ();
6365 }
e58b0e63 6366
5b6d1e4f
PA
6367 /* Note that one of these may be an invalid pointer,
6368 depending on detach_fork. */
00431a78 6369 thread_info *parent = ecs->event_thread;
9213a6d7 6370 thread_info *child = targ->find_thread (ecs->ws.child_ptid ());
6c95b8df 6371
a2077e25
PA
6372 /* At this point, the parent is marked running, and the
6373 child is marked stopped. */
6374
6375 /* If not resuming the parent, mark it stopped. */
0d36baa9
PA
6376 if (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED
6377 && follow_child && !detach_fork && !non_stop && !sched_multi)
00431a78 6378 parent->set_running (false);
a2077e25
PA
6379
6380 /* If resuming the child, mark it running. */
7ac958f2
PA
6381 if ((ecs->ws.kind () == TARGET_WAITKIND_THREAD_CLONED
6382 && !schedlock_applies (ecs->event_thread))
6383 || (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED
6384 && (follow_child
6385 || (!detach_fork && (non_stop || sched_multi)))))
00431a78 6386 child->set_running (true);
a2077e25 6387
6c95b8df 6388 /* In non-stop mode, also resume the other branch. */
0d36baa9 6389 if ((ecs->ws.kind () == TARGET_WAITKIND_THREAD_CLONED
7ac958f2
PA
6390 && target_is_non_stop_p ()
6391 && !schedlock_applies (ecs->event_thread))
6392 || (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED
6393 && (!detach_fork && (non_stop
6394 || (sched_multi
6395 && target_is_non_stop_p ())))))
6c95b8df
PA
6396 {
6397 if (follow_child)
6398 switch_to_thread (parent);
6399 else
6400 switch_to_thread (child);
6401
6402 ecs->event_thread = inferior_thread ();
6403 ecs->ptid = inferior_ptid;
6404 keep_going (ecs);
6405 }
6406
6407 if (follow_child)
6408 switch_to_thread (child);
6409 else
6410 switch_to_thread (parent);
6411
e58b0e63
PA
6412 ecs->event_thread = inferior_thread ();
6413 ecs->ptid = inferior_ptid;
6414
6415 if (should_resume)
27f9f649
SM
6416 {
6417 /* Never call switch_back_to_stepped_thread if we are waiting for
287de656 6418 vfork-done (waiting for an external vfork child to exec or
27f9f649
SM
6419 exit). We will resume only the vforking thread for the purpose
6420 of collecting the vfork-done event, and we will restart any
6421 step once the critical shared address space window is done. */
6422 if ((!follow_child
6423 && detach_fork
6424 && parent->inf->thread_waiting_for_vfork_done != nullptr)
6425 || !switch_back_to_stepped_thread (ecs))
6426 keep_going (ecs);
6427 }
e58b0e63 6428 else
22bcd14b 6429 stop_waiting (ecs);
04e68871
DJ
6430 return;
6431 }
94c57d6a
PA
6432 process_event_stop_test (ecs);
6433 return;
488f131b 6434
6c95b8df
PA
6435 case TARGET_WAITKIND_VFORK_DONE:
6436 /* Done with the shared memory region. Re-insert breakpoints in
6437 the parent, and keep going. */
6438
00431a78 6439 context_switch (ecs);
6c95b8df 6440
d8bbae6e
SM
6441 handle_vfork_done (ecs->event_thread);
6442 gdb_assert (inferior_thread () == ecs->event_thread);
c65d6b55
PA
6443
6444 if (handle_stop_requested (ecs))
6445 return;
6446
27f9f649
SM
6447 if (!switch_back_to_stepped_thread (ecs))
6448 {
6449 gdb_assert (inferior_thread () == ecs->event_thread);
6450 /* This also takes care of reinserting breakpoints in the
6451 previously locked inferior. */
6452 keep_going (ecs);
6453 }
6c95b8df
PA
6454 return;
6455
488f131b 6456 case TARGET_WAITKIND_EXECD:
488f131b 6457
cbd2b4e3
PA
6458 /* Note we can't read registers yet (the stop_pc), because we
6459 don't yet know the inferior's post-exec architecture.
6460 'stop_pc' is explicitly read below instead. */
00431a78 6461 switch_to_thread_no_regs (ecs->event_thread);
5a2901d9 6462
6c95b8df
PA
6463 /* Do whatever is necessary to the parent branch of the vfork. */
6464 handle_vfork_child_exec_or_exit (1);
6465
795e548f 6466 /* This causes the eventpoints and symbol table to be reset.
dda83cd7
SM
6467 Must do this now, before trying to determine whether to
6468 stop. */
183be222 6469 follow_exec (inferior_ptid, ecs->ws.execd_pathname ());
795e548f 6470
17d8546e
DB
6471 /* In follow_exec we may have deleted the original thread and
6472 created a new one. Make sure that the event thread is the
6473 execd thread for that case (this is a nop otherwise). */
6474 ecs->event_thread = inferior_thread ();
6475
1edb66d8
SM
6476 ecs->event_thread->set_stop_pc
6477 (regcache_read_pc (get_thread_regcache (ecs->event_thread)));
ecdc3a72 6478
16c381f0 6479 ecs->event_thread->control.stop_bpstat
f9582a22 6480 = bpstat_stop_status_nowatch (ecs->event_thread->inf->aspace.get (),
d37e0847
PA
6481 ecs->event_thread->stop_pc (),
6482 ecs->event_thread, ecs->ws);
795e548f 6483
c65d6b55
PA
6484 if (handle_stop_requested (ecs))
6485 return;
6486
04e68871 6487 /* If no catchpoint triggered for this, then keep going. */
ce12b012 6488 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
04e68871 6489 {
1edb66d8 6490 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
04e68871
DJ
6491 keep_going (ecs);
6492 return;
6493 }
94c57d6a
PA
6494 process_event_stop_test (ecs);
6495 return;
488f131b 6496
b4dc5ffa 6497 /* Be careful not to try to gather much state about a thread
dda83cd7 6498 that's in a syscall. It's frequently a losing proposition. */
488f131b 6499 case TARGET_WAITKIND_SYSCALL_ENTRY:
1777feb0 6500 /* Getting the current syscall number. */
94c57d6a
PA
6501 if (handle_syscall_event (ecs) == 0)
6502 process_event_stop_test (ecs);
6503 return;
c906108c 6504
488f131b 6505 /* Before examining the threads further, step this thread to
dda83cd7
SM
6506 get it entirely out of the syscall. (We get notice of the
6507 event when the thread is just on the verge of exiting a
6508 syscall. Stepping one instruction seems to get it back
6509 into user code.) */
488f131b 6510 case TARGET_WAITKIND_SYSCALL_RETURN:
94c57d6a
PA
6511 if (handle_syscall_event (ecs) == 0)
6512 process_event_stop_test (ecs);
6513 return;
c906108c 6514
488f131b 6515 case TARGET_WAITKIND_STOPPED:
4f5d7f63
PA
6516 handle_signal_stop (ecs);
6517 return;
c906108c 6518
b2175913
MS
6519 case TARGET_WAITKIND_NO_HISTORY:
6520 /* Reverse execution: target ran out of history info. */
eab402df 6521
d1988021 6522 /* Switch to the stopped thread. */
00431a78 6523 context_switch (ecs);
1eb8556f 6524 infrun_debug_printf ("stopped");
d1988021 6525
34b7e8a6 6526 delete_just_stopped_threads_single_step_breakpoints ();
1edb66d8
SM
6527 ecs->event_thread->set_stop_pc
6528 (regcache_read_pc (get_thread_regcache (inferior_thread ())));
c65d6b55
PA
6529
6530 if (handle_stop_requested (ecs))
6531 return;
6532
2e5dbfab 6533 interps_notify_no_history ();
22bcd14b 6534 stop_waiting (ecs);
b2175913 6535 return;
488f131b 6536 }
4f5d7f63
PA
6537}
6538
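/* Rough map of handle_inferior_event's dispatch above, for orientation
   only:

     IGNORE                         -> keep waiting
     NO_RESUMED                     -> maybe ignore, otherwise stop
     LOADED                         -> handle_solib_event, maybe stop
     SPURIOUS, THREAD_CREATED       -> resume / keep going
     THREAD_EXITED                  -> handle_thread_exited
     EXITED, SIGNALLED              -> record exit code/signal, mourn, stop
     FORKED, VFORKED, THREAD_CLONED -> follow-fork/clone handling
     VFORK_DONE                     -> reinsert breakpoints, keep going
     EXECD                          -> follow_exec, then catchpoint tests
     SYSCALL_ENTRY, SYSCALL_RETURN  -> handle_syscall_event
     STOPPED                        -> handle_signal_stop
     NO_HISTORY                     -> stop (reverse execution ran out)  */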
372316f1 6539/* Restart threads back to what they were trying to do back when we
148cf134
SM
6540 paused them (because of an in-line step-over or vfork, for example).
6541 The EVENT_THREAD thread is ignored (not restarted).
6542
6543 If INF is non-nullptr, only resume threads from INF. */
4d9d9d04
PA
6544
6545static void
148cf134 6546restart_threads (struct thread_info *event_thread, inferior *inf)
372316f1 6547{
148cf134
SM
6548 INFRUN_SCOPED_DEBUG_START_END ("event_thread=%s, inf=%d",
6549 event_thread->ptid.to_string ().c_str (),
6550 inf != nullptr ? inf->num : -1);
6551
2b718529
LS
6552 gdb_assert (!step_over_info_valid_p ());
6553
372316f1
PA
6554 /* In case the instruction just stepped spawned a new thread. */
6555 update_thread_list ();
6556
08036331 6557 for (thread_info *tp : all_non_exited_threads ())
372316f1 6558 {
148cf134
SM
6559 if (inf != nullptr && tp->inf != inf)
6560 continue;
6561
ac7d717c
PA
6562 if (tp->inf->detaching)
6563 {
6564 infrun_debug_printf ("restart threads: [%s] inferior detaching",
0fab7955 6565 tp->ptid.to_string ().c_str ());
ac7d717c
PA
6566 continue;
6567 }
6568
f3f8ece4
PA
6569 switch_to_thread_no_regs (tp);
6570
372316f1
PA
6571 if (tp == event_thread)
6572 {
1eb8556f 6573 infrun_debug_printf ("restart threads: [%s] is event thread",
0fab7955 6574 tp->ptid.to_string ().c_str ());
372316f1
PA
6575 continue;
6576 }
6577
6578 if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
6579 {
1eb8556f 6580 infrun_debug_printf ("restart threads: [%s] not meant to be running",
0fab7955 6581 tp->ptid.to_string ().c_str ());
372316f1
PA
6582 continue;
6583 }
6584
7846f3aa 6585 if (tp->resumed ())
372316f1 6586 {
1eb8556f 6587 infrun_debug_printf ("restart threads: [%s] resumed",
0fab7955 6588 tp->ptid.to_string ().c_str ());
611841bb 6589 gdb_assert (tp->executing () || tp->has_pending_waitstatus ());
372316f1
PA
6590 continue;
6591 }
6592
6593 if (thread_is_in_step_over_chain (tp))
6594 {
1eb8556f 6595 infrun_debug_printf ("restart threads: [%s] needs step-over",
0fab7955 6596 tp->ptid.to_string ().c_str ());
7846f3aa 6597 gdb_assert (!tp->resumed ());
372316f1
PA
6598 continue;
6599 }
6600
6601
1edb66d8 6602 if (tp->has_pending_waitstatus ())
372316f1 6603 {
1eb8556f 6604 infrun_debug_printf ("restart threads: [%s] has pending status",
0fab7955 6605 tp->ptid.to_string ().c_str ());
7846f3aa 6606 tp->set_resumed (true);
372316f1
PA
6607 continue;
6608 }
6609
c65d6b55
PA
6610 gdb_assert (!tp->stop_requested);
6611
372316f1
PA
6612 /* If some thread needs to start a step-over at this point, it
6613 should still be in the step-over queue, and thus skipped
6614 above. */
6615 if (thread_still_needs_step_over (tp))
6616 {
f34652de 6617 internal_error ("thread [%s] needs a step-over, but not in "
372316f1 6618 "step-over queue\n",
0fab7955 6619 tp->ptid.to_string ().c_str ());
372316f1
PA
6620 }
6621
6622 if (currently_stepping (tp))
6623 {
1eb8556f 6624 infrun_debug_printf ("restart threads: [%s] was stepping",
0fab7955 6625 tp->ptid.to_string ().c_str ());
372316f1
PA
6626 keep_going_stepped_thread (tp);
6627 }
6628 else
6629 {
1eb8556f 6630 infrun_debug_printf ("restart threads: [%s] continuing",
0fab7955 6631 tp->ptid.to_string ().c_str ());
aa563d16 6632 execution_control_state ecs (tp);
00431a78 6633 switch_to_thread (tp);
aa563d16 6634 keep_going_pass_signal (&ecs);
372316f1
PA
6635 }
6636 }
6637}
6638
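/* Informal note on restart_threads above (not exhaustive): it skips
   the event thread, threads of detaching inferiors, threads not meant
   to be running, threads already resumed, threads queued for their own
   step-over, and threads with a pending waitstatus (those are only
   marked resumed so do_target_wait can pick their event up later);
   every remaining thread is either kept stepping or continued.  */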
6639/* Callback for iterate_over_threads. Find a resumed thread that has
6640 a pending waitstatus. */
6641
6642static int
6643resumed_thread_with_pending_status (struct thread_info *tp,
6644 void *arg)
6645{
1edb66d8 6646 return tp->resumed () && tp->has_pending_waitstatus ();
372316f1
PA
6647}
6648
6649/* Called when we get an event that may finish an in-line or
6650 out-of-line (displaced stepping) step-over started previously.
6651 Return true if the event is processed and we should go back to the
6652 event loop; false if the caller should continue processing the
6653 event. */
6654
6655static int
4d9d9d04
PA
6656finish_step_over (struct execution_control_state *ecs)
6657{
58c01087 6658 displaced_step_finish (ecs->event_thread, ecs->ws);
4d9d9d04 6659
c4464ade 6660 bool had_step_over_info = step_over_info_valid_p ();
372316f1
PA
6661
6662 if (had_step_over_info)
4d9d9d04
PA
6663 {
6664 /* If we're stepping over a breakpoint with all threads locked,
6665 then only the thread that was stepped should be reporting
6666 back an event. */
6667 gdb_assert (ecs->event_thread->control.trap_expected);
6668
21d48304 6669 update_thread_events_after_step_over (ecs->event_thread, ecs->ws);
65c459ab 6670
c65d6b55 6671 clear_step_over_info ();
4d9d9d04
PA
6672 }
6673
fbea99ea 6674 if (!target_is_non_stop_p ())
372316f1 6675 return 0;
4d9d9d04
PA
6676
6677 /* Start a new step-over in another thread if there's one that
6678 needs it. */
6679 start_step_over ();
372316f1
PA
6680
6681 /* If we were stepping over a breakpoint before, and haven't started
6682 a new in-line step-over sequence, then restart all other threads
6683 (except the event thread). We can't do this in all-stop, as then
6684 e.g., we wouldn't be able to issue any other remote packet until
6685 these other threads stop. */
6686 if (had_step_over_info && !step_over_info_valid_p ())
6687 {
6688 struct thread_info *pending;
6689
6690 /* If we only have threads with pending statuses, the restart
6691 below won't restart any thread and so nothing re-inserts the
6692 breakpoint we just stepped over. But we need it inserted
6693 when we later process the pending events, otherwise if
6694 another thread has a pending event for this breakpoint too,
6695 we'd discard its event (because the breakpoint that
6696 originally caused the event was no longer inserted). */
00431a78 6697 context_switch (ecs);
372316f1
PA
6698 insert_breakpoints ();
6699
6700 restart_threads (ecs->event_thread);
6701
6702 /* If we have events pending, go through handle_inferior_event
6703 again, picking up a pending event at random. This avoids
6704 thread starvation. */
6705
6706 /* But not if we just stepped over a watchpoint in order to let
6707 the instruction execute so we can evaluate its expression.
6708 The set of watchpoints that triggered is recorded in the
6709 breakpoint objects themselves (see bp->watchpoint_triggered).
6710 If we processed another event first, that other event could
6711 clobber this info. */
6712 if (ecs->event_thread->stepping_over_watchpoint)
6713 return 0;
6714
21d48304
PA
6715 /* The code below is meant to avoid one thread hogging the event
6716 loop by doing constant in-line step overs. If the stepping
6717 thread exited, there's no risk for this to happen, so we can
6718 safely let our caller process the event immediately. */
6719 if (ecs->ws.kind () == TARGET_WAITKIND_THREAD_EXITED)
6720 return 0;
6721
372316f1 6722 pending = iterate_over_threads (resumed_thread_with_pending_status,
03acd4d8
CL
6723 nullptr);
6724 if (pending != nullptr)
372316f1
PA
6725 {
6726 struct thread_info *tp = ecs->event_thread;
6727 struct regcache *regcache;
6728
1eb8556f
SM
6729 infrun_debug_printf ("found resumed threads with "
6730 "pending events, saving status");
372316f1
PA
6731
6732 gdb_assert (pending != tp);
6733
6734 /* Record the event thread's event for later. */
c272a98c 6735 save_waitstatus (tp, ecs->ws);
372316f1
PA
6736 /* This was cleared early, by handle_inferior_event. Set it
6737 so this pending event is considered by
6738 do_target_wait. */
7846f3aa 6739 tp->set_resumed (true);
372316f1 6740
611841bb 6741 gdb_assert (!tp->executing ());
372316f1 6742
00431a78 6743 regcache = get_thread_regcache (tp);
1edb66d8 6744 tp->set_stop_pc (regcache_read_pc (regcache));
372316f1 6745
1eb8556f
SM
6746 infrun_debug_printf ("saved stop_pc=%s for %s "
6747 "(currently_stepping=%d)",
99d9c3b9
SM
6748 paddress (current_inferior ()->arch (),
6749 tp->stop_pc ()),
0fab7955 6750 tp->ptid.to_string ().c_str (),
1eb8556f 6751 currently_stepping (tp));
372316f1
PA
6752
6753 /* This in-line step-over finished; clear this so we won't
6754 start a new one. This is what handle_signal_stop would
6755 do, if we returned false. */
6756 tp->stepping_over_breakpoint = 0;
6757
6758 /* Wake up the event loop again. */
6759 mark_async_event_handler (infrun_async_inferior_event_token);
6760
6761 prepare_to_wait (ecs);
6762 return 1;
6763 }
6764 }
6765
6766 return 0;
4d9d9d04
PA
6767}
6768
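/* Informal note on the pending-status handling in finish_step_over
   above: on a non-stop target, if some other resumed thread already
   has an event queued after the restart, the event thread's own status
   is saved and its resumed flag set, so that do_target_wait later
   picks one of the pending events at random instead of letting a
   single thread hog the event loop with back-to-back in-line
   step-overs.  */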
3f75a984
SM
6769/* See infrun.h. */
6770
6771void
6772notify_signal_received (gdb_signal sig)
6773{
6774 interps_notify_signal_received (sig);
6775 gdb::observers::signal_received.notify (sig);
6776}
6777
87829267
SM
6778/* See infrun.h. */
6779
6780void
6781notify_normal_stop (bpstat *bs, int print_frame)
6782{
6783 interps_notify_normal_stop (bs, print_frame);
6784 gdb::observers::normal_stop.notify (bs, print_frame);
6785}
6786
77cd03e2
SM
6787/* See infrun.h. */
6788
6789void
notify_user_selected_context_changed (user_selected_what selection)
6790{
6791 interps_notify_user_selected_context_changed (selection);
6792 gdb::observers::user_selected_context_changed.notify (selection);
6793}
6794
4f5d7f63
PA
6795/* Come here when the program has stopped with a signal. */
6796
6797static void
6798handle_signal_stop (struct execution_control_state *ecs)
6799{
bd2b40ac 6800 frame_info_ptr frame;
4f5d7f63
PA
6801 struct gdbarch *gdbarch;
6802 int stopped_by_watchpoint;
6803 enum stop_kind stop_soon;
6804 int random_signal;
c906108c 6805
183be222 6806 gdb_assert (ecs->ws.kind () == TARGET_WAITKIND_STOPPED);
f0407826 6807
183be222 6808 ecs->event_thread->set_stop_signal (ecs->ws.sig ());
c65d6b55 6809
f0407826
DE
6810 /* Do we need to clean up the state of a thread that has
6811 completed a displaced single-step? (Doing so usually affects
6812 the PC, so do it here, before we set stop_pc.) */
372316f1
PA
6813 if (finish_step_over (ecs))
6814 return;
f0407826
DE
6815
6816 /* If we either finished a single-step or hit a breakpoint, but
6817 the user wanted this thread to be stopped, pretend we got a
6818 SIG0 (generic unsignaled stop). */
6819 if (ecs->event_thread->stop_requested
1edb66d8
SM
6820 && ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
6821 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
237fc4c9 6822
1edb66d8
SM
6823 ecs->event_thread->set_stop_pc
6824 (regcache_read_pc (get_thread_regcache (ecs->event_thread)));
488f131b 6825
2ab76a18
PA
6826 context_switch (ecs);
6827
6828 if (deprecated_context_hook)
6829 deprecated_context_hook (ecs->event_thread->global_num);
6830
527159b7 6831 if (debug_infrun)
237fc4c9 6832 {
00431a78 6833 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
b926417a 6834 struct gdbarch *reg_gdbarch = regcache->arch ();
7f82dfc7 6835
1edb66d8
SM
6836 infrun_debug_printf
6837 ("stop_pc=%s", paddress (reg_gdbarch, ecs->event_thread->stop_pc ()));
d92524f1 6838 if (target_stopped_by_watchpoint ())
237fc4c9 6839 {
dda83cd7 6840 CORE_ADDR addr;
abbb1732 6841
1eb8556f 6842 infrun_debug_printf ("stopped by watchpoint");
237fc4c9 6843
328d42d8
SM
6844 if (target_stopped_data_address (current_inferior ()->top_target (),
6845 &addr))
1eb8556f 6846 infrun_debug_printf ("stopped data address=%s",
dda83cd7
SM
6847 paddress (reg_gdbarch, addr));
6848 else
1eb8556f 6849 infrun_debug_printf ("(no data address available)");
237fc4c9
PA
6850 }
6851 }
527159b7 6852
36fa8042
PA
6853 /* This is originated from start_remote(), start_inferior() and
6854 shared libraries hook functions. */
00431a78 6855 stop_soon = get_inferior_stop_soon (ecs);
36fa8042
PA
6856 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
6857 {
1eb8556f 6858 infrun_debug_printf ("quietly stopped");
c4464ade 6859 stop_print_frame = true;
22bcd14b 6860 stop_waiting (ecs);
36fa8042
PA
6861 return;
6862 }
6863
36fa8042
PA
6864 /* This originates from attach_command(). We need to overwrite
6865 the stop_signal here, because some kernels don't ignore a
6866 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
6867 See more comments in inferior.h. On the other hand, if we
6868 get a non-SIGSTOP, report it to the user - assume the backend
6869 will handle the SIGSTOP if it should show up later.
6870
6871 Also consider that the attach is complete when we see a
6872 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
6873 target extended-remote report it instead of a SIGSTOP
6874 (e.g. gdbserver). We already rely on SIGTRAP being our
6875 signal, so this is no exception.
6876
6877 Also consider that the attach is complete when we see a
6878 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
6879 the target to stop all threads of the inferior, in case the
6880 low level attach operation doesn't stop them implicitly. If
6881 they weren't stopped implicitly, then the stub will report a
6882 GDB_SIGNAL_0, meaning: stopped for no particular reason
6883 other than GDB's request. */
6884 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
1edb66d8
SM
6885 && (ecs->event_thread->stop_signal () == GDB_SIGNAL_STOP
6886 || ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6887 || ecs->event_thread->stop_signal () == GDB_SIGNAL_0))
36fa8042 6888 {
c4464ade 6889 stop_print_frame = true;
22bcd14b 6890 stop_waiting (ecs);
1edb66d8 6891 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
36fa8042
PA
6892 return;
6893 }
6894
568d6575
UW
6895 /* At this point, get hold of the now-current thread's frame. */
6896 frame = get_current_frame ();
6897 gdbarch = get_frame_arch (frame);
6898
2adfaa28 6899 /* Pull the single step breakpoints out of the target. */
1edb66d8 6900 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
488f131b 6901 {
af48d08f 6902 struct regcache *regcache;
af48d08f 6903 CORE_ADDR pc;
2adfaa28 6904
00431a78 6905 regcache = get_thread_regcache (ecs->event_thread);
f9582a22 6906 const address_space *aspace = ecs->event_thread->inf->aspace.get ();
8b86c959 6907
af48d08f 6908 pc = regcache_read_pc (regcache);
34b7e8a6 6909
af48d08f
PA
6910 /* However, before doing so, if this single-step breakpoint was
6911 actually for another thread, set this thread up for moving
6912 past it. */
6913 if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
6914 aspace, pc))
6915 {
6916 if (single_step_breakpoint_inserted_here_p (aspace, pc))
2adfaa28 6917 {
1eb8556f
SM
6918 infrun_debug_printf ("[%s] hit another thread's single-step "
6919 "breakpoint",
0fab7955 6920 ecs->ptid.to_string ().c_str ());
af48d08f
PA
6921 ecs->hit_singlestep_breakpoint = 1;
6922 }
6923 }
6924 else
6925 {
1eb8556f 6926 infrun_debug_printf ("[%s] hit its single-step breakpoint",
0fab7955 6927 ecs->ptid.to_string ().c_str ());
2adfaa28 6928 }
488f131b 6929 }
af48d08f 6930 delete_just_stopped_threads_single_step_breakpoints ();
c906108c 6931
1edb66d8 6932 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
963f9c80
PA
6933 && ecs->event_thread->control.trap_expected
6934 && ecs->event_thread->stepping_over_watchpoint)
d983da9c
DJ
6935 stopped_by_watchpoint = 0;
6936 else
c272a98c 6937 stopped_by_watchpoint = watchpoints_triggered (ecs->ws);
d983da9c
DJ
6938
6939 /* If necessary, step over this watchpoint. We'll be back to display
6940 it in a moment. */
6941 if (stopped_by_watchpoint
9aed480c 6942 && (target_have_steppable_watchpoint ()
568d6575 6943 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
488f131b 6944 {
488f131b 6945 /* At this point, we are stopped at an instruction which has
dda83cd7
SM
6946 attempted to write to a piece of memory under control of
6947 a watchpoint. The instruction hasn't actually executed
6948 yet. If we were to evaluate the watchpoint expression
6949 now, we would get the old value, and therefore no change
6950 would seem to have occurred.
6951
6952 In order to make watchpoints work `right', we really need
6953 to complete the memory write, and then evaluate the
6954 watchpoint expression. We do this by single-stepping the
d983da9c
DJ
6955 target.
6956
7f89fd65 6957 It may not be necessary to disable the watchpoint to step over
d983da9c
DJ
6958 it. For example, the PA can (with some kernel cooperation)
6959 single step over a watchpoint without disabling the watchpoint.
6960
6961 It is far more common to need to disable a watchpoint to step
6962 the inferior over it. If we have non-steppable watchpoints,
6963 we must disable the current watchpoint; it's simplest to
963f9c80
PA
6964 disable all watchpoints.
6965
6966 Any breakpoint at PC must also be stepped over -- if there's
6967 one, it will have already triggered before the watchpoint
6968 triggered, and we either already reported it to the user, or
6969 it didn't cause a stop and we called keep_going. In either
6970 case, if there was a breakpoint at PC, we must be trying to
6971 step past it. */
6972 ecs->event_thread->stepping_over_watchpoint = 1;
6973 keep_going (ecs);
488f131b
JB
6974 return;
6975 }
6976
4e1c45ea 6977 ecs->event_thread->stepping_over_breakpoint = 0;
963f9c80 6978 ecs->event_thread->stepping_over_watchpoint = 0;
16c381f0
JK
6979 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
6980 ecs->event_thread->control.stop_step = 0;
c4464ade 6981 stop_print_frame = true;
488f131b 6982 stopped_by_random_signal = 0;
313f3b21 6983 bpstat *stop_chain = nullptr;
488f131b 6984
edb3359d
DJ
6985 /* Hide inlined functions starting here, unless we just performed stepi or
6986 nexti. After stepi and nexti, always show the innermost frame (not any
6987 inline function call sites). */
16c381f0 6988 if (ecs->event_thread->control.step_range_end != 1)
0574c78f 6989 {
f9582a22 6990 const address_space *aspace = ecs->event_thread->inf->aspace.get ();
0574c78f
GB
6991
6992 /* skip_inline_frames is expensive, so we avoid it if we can
6993 determine that the address is one where functions cannot have
6994 been inlined. This improves performance with inferiors that
6995 load a lot of shared libraries, because the solib event
6996 breakpoint is defined as the address of a function (i.e. not
6997 inline). Note that we have to check the previous PC as well
6998 as the current one to catch cases when we have just
6999 single-stepped off a breakpoint prior to reinstating it.
7000 Note that we're assuming that the code we single-step to is
7001 not inline, but that's not definitive: there's nothing
7002 preventing the event breakpoint function from containing
7003 inlined code, and the single-step ending up there. If the
7004 user had set a breakpoint on that inlined code, the missing
7005 skip_inline_frames call would break things. Fortunately
7006 that's an extremely unlikely scenario. */
f2ffa92b 7007 if (!pc_at_non_inline_function (aspace,
1edb66d8 7008 ecs->event_thread->stop_pc (),
c272a98c 7009 ecs->ws)
1edb66d8 7010 && !(ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
a210c238
MR
7011 && ecs->event_thread->control.trap_expected
7012 && pc_at_non_inline_function (aspace,
7013 ecs->event_thread->prev_pc,
c272a98c 7014 ecs->ws)))
1c5a993e 7015 {
f2ffa92b 7016 stop_chain = build_bpstat_chain (aspace,
1edb66d8 7017 ecs->event_thread->stop_pc (),
c272a98c 7018 ecs->ws);
00431a78 7019 skip_inline_frames (ecs->event_thread, stop_chain);
1c5a993e
MR
7020
7021 /* Re-fetch current thread's frame in case that invalidated
7022 the frame cache. */
7023 frame = get_current_frame ();
7024 gdbarch = get_frame_arch (frame);
7025 }
0574c78f 7026 }
edb3359d 7027
1edb66d8 7028 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
16c381f0 7029 && ecs->event_thread->control.trap_expected
568d6575 7030 && gdbarch_single_step_through_delay_p (gdbarch)
4e1c45ea 7031 && currently_stepping (ecs->event_thread))
3352ef37 7032 {
b50d7442 7033 /* We're trying to step off a breakpoint. Turns out that we're
3352ef37 7034 also on an instruction that needs to be stepped multiple
1777feb0 7035 times before it has fully executed. E.g., architectures
3352ef37
AC
7036 with a delay slot. It needs to be stepped twice, once for
7037 the instruction and once for the delay slot. */
7038 int step_through_delay
568d6575 7039 = gdbarch_single_step_through_delay (gdbarch, frame);
abbb1732 7040
1eb8556f
SM
7041 if (step_through_delay)
7042 infrun_debug_printf ("step through delay");
7043
16c381f0
JK
7044 if (ecs->event_thread->control.step_range_end == 0
7045 && step_through_delay)
3352ef37
AC
7046 {
7047 /* The user issued a continue when stopped at a breakpoint.
7048 Set up for another trap and get out of here. */
dda83cd7
SM
7049 ecs->event_thread->stepping_over_breakpoint = 1;
7050 keep_going (ecs);
7051 return;
3352ef37
AC
7052 }
7053 else if (step_through_delay)
7054 {
7055 /* The user issued a step when stopped at a breakpoint.
7056 Maybe we should stop, maybe we should not - the delay
7057 slot *might* correspond to a line of source. In any
ca67fcb8
VP
7058 case, don't decide that here, just set
7059 ecs->stepping_over_breakpoint, making sure we
7060 single-step again before breakpoints are re-inserted. */
4e1c45ea 7061 ecs->event_thread->stepping_over_breakpoint = 1;
3352ef37
AC
7062 }
7063 }
7064
ab04a2af
TT
7065 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
7066 handles this event. */
7067 ecs->event_thread->control.stop_bpstat
f9582a22 7068 = bpstat_stop_status (ecs->event_thread->inf->aspace.get (),
1edb66d8 7069 ecs->event_thread->stop_pc (),
c272a98c 7070 ecs->event_thread, ecs->ws, stop_chain);
db82e815 7071
ab04a2af
TT
7072 /* Following in case break condition called a
7073 function. */
c4464ade 7074 stop_print_frame = true;
73dd234f 7075
ab04a2af
TT
7076 /* This is where we handle "moribund" watchpoints. Unlike
7077 software breakpoints traps, hardware watchpoint traps are
7078 always distinguishable from random traps. If no high-level
7079 watchpoint is associated with the reported stop data address
7080 anymore, then the bpstat does not explain the signal ---
7081 simply make sure to ignore it if `stopped_by_watchpoint' is
7082 set. */
7083
1edb66d8 7084 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
47591c29 7085 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
427cd150 7086 GDB_SIGNAL_TRAP)
ab04a2af 7087 && stopped_by_watchpoint)
1eb8556f
SM
7088 {
7089 infrun_debug_printf ("no user watchpoint explains watchpoint SIGTRAP, "
7090 "ignoring");
7091 }
73dd234f 7092
bac7d97b 7093 /* NOTE: cagney/2003-03-29: These checks for a random signal
ab04a2af
TT
7094 at one stage in the past included checks for an inferior
7095 function call's call dummy's return breakpoint. The original
7096 comment, that went with the test, read:
03cebad2 7097
ab04a2af
TT
7098 ``End of a stack dummy. Some systems (e.g. Sony news) give
7099 another signal besides SIGTRAP, so check here as well as
7100 above.''
73dd234f 7101
ab04a2af
TT
7102 If someone ever tries to get call dummys on a
7103 non-executable stack to work (where the target would stop
7104 with something like a SIGSEGV), then those tests might need
7105 to be re-instated. Given, however, that the tests were only
7106 enabled when momentary breakpoints were not being used, I
7107 suspect that it won't be the case.
488f131b 7108
ab04a2af
TT
7109 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
7110 be necessary for call dummies on a non-executable stack on
7111 SPARC. */
488f131b 7112
bac7d97b 7113 /* See if the breakpoints module can explain the signal. */
47591c29
PA
7114 random_signal
7115 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
1edb66d8 7116 ecs->event_thread->stop_signal ());
bac7d97b 7117
1cf4d951
PA
7118 /* Maybe this was a trap for a software breakpoint that has since
7119 been removed. */
7120 if (random_signal && target_stopped_by_sw_breakpoint ())
7121 {
5133a315 7122 if (gdbarch_program_breakpoint_here_p (gdbarch,
1edb66d8 7123 ecs->event_thread->stop_pc ()))
1cf4d951
PA
7124 {
7125 struct regcache *regcache;
7126 int decr_pc;
7127
7128 /* Re-adjust PC to what the program would see if GDB was not
7129 debugging it. */
00431a78 7130 regcache = get_thread_regcache (ecs->event_thread);
527a273a 7131 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
1cf4d951
PA
7132 if (decr_pc != 0)
7133 {
6b09f134 7134 std::optional<scoped_restore_tmpl<int>>
07036511 7135 restore_operation_disable;
1cf4d951
PA
7136
7137 if (record_full_is_used ())
07036511
TT
7138 restore_operation_disable.emplace
7139 (record_full_gdb_operation_disable_set ());
1cf4d951 7140
f2ffa92b 7141 regcache_write_pc (regcache,
1edb66d8 7142 ecs->event_thread->stop_pc () + decr_pc);
1cf4d951
PA
7143 }
7144 }
7145 else
7146 {
7147 /* A delayed software breakpoint event. Ignore the trap. */
1eb8556f 7148 infrun_debug_printf ("delayed software breakpoint trap, ignoring");
1cf4d951
PA
7149 random_signal = 0;
7150 }
7151 }
7152
7153 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
7154 has since been removed. */
7155 if (random_signal && target_stopped_by_hw_breakpoint ())
7156 {
7157 /* A delayed hardware breakpoint event. Ignore the trap. */
1eb8556f
SM
7158 infrun_debug_printf ("delayed hardware breakpoint/watchpoint "
7159 "trap, ignoring");
1cf4d951
PA
7160 random_signal = 0;
7161 }
7162
bac7d97b
PA
7163 /* If not, perhaps stepping/nexting can. */
7164 if (random_signal)
1edb66d8 7165 random_signal = !(ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
bac7d97b 7166 && currently_stepping (ecs->event_thread));
ab04a2af 7167
2adfaa28
PA
7168 /* Perhaps the thread hit a single-step breakpoint of _another_
7169 thread. Single-step breakpoints are transparent to the
7170 breakpoints module. */
7171 if (random_signal)
7172 random_signal = !ecs->hit_singlestep_breakpoint;
7173
bac7d97b
PA
7174 /* No? Perhaps we got a moribund watchpoint. */
7175 if (random_signal)
7176 random_signal = !stopped_by_watchpoint;
ab04a2af 7177
c65d6b55
PA
7178 /* Always stop if the user explicitly requested this thread to
7179 remain stopped. */
7180 if (ecs->event_thread->stop_requested)
7181 {
7182 random_signal = 1;
1eb8556f 7183 infrun_debug_printf ("user-requested stop");
c65d6b55
PA
7184 }
7185
488f131b
JB
7186 /* For the program's own signals, act according to
7187 the signal handling tables. */
7188
ce12b012 7189 if (random_signal)
488f131b
JB
7190 {
7191 /* Signal not for debugging purposes. */
1edb66d8 7192 enum gdb_signal stop_signal = ecs->event_thread->stop_signal ();
488f131b 7193
1eb8556f
SM
7194 infrun_debug_printf ("random signal (%s)",
7195 gdb_signal_to_symbol_string (stop_signal));
527159b7 7196
488f131b
JB
7197 stopped_by_random_signal = 1;
7198
252fbfc8
PA
7199 /* Always stop on signals if we're either just gaining control
7200 of the program, or the user explicitly requested this thread
7201 to remain stopped. */
d6b48e9c 7202 if (stop_soon != NO_STOP_QUIETLY
252fbfc8 7203 || ecs->event_thread->stop_requested
1edb66d8 7204 || signal_stop_state (ecs->event_thread->stop_signal ()))
488f131b 7205 {
22bcd14b 7206 stop_waiting (ecs);
488f131b
JB
7207 return;
7208 }
b57bacec
PA
7209
7210 /* Notify observers the signal has "handle print" set. Note we
7211 returned early above if stopping; normal_stop handles the
7212 printing in that case. */
1edb66d8 7213 if (signal_print[ecs->event_thread->stop_signal ()])
b57bacec
PA
7214 {
7215 /* The signal table tells us to print about this signal. */
223ffa71 7216 target_terminal::ours_for_output ();
3f75a984 7217 notify_signal_received (ecs->event_thread->stop_signal ());
223ffa71 7218 target_terminal::inferior ();
b57bacec 7219 }
488f131b
JB
7220
7221 /* Clear the signal if it should not be passed. */
1edb66d8
SM
7222 if (signal_program[ecs->event_thread->stop_signal ()] == 0)
7223 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
488f131b 7224
1edb66d8 7225 if (ecs->event_thread->prev_pc == ecs->event_thread->stop_pc ()
16c381f0 7226 && ecs->event_thread->control.trap_expected
03acd4d8 7227 && ecs->event_thread->control.step_resume_breakpoint == nullptr)
68f53502
AC
7228 {
7229 /* We were just starting a new sequence, attempting to
7230 single-step off of a breakpoint and expecting a SIGTRAP.
237fc4c9 7231 Instead this signal arrives. This signal will take us out
68f53502
AC
7232 of the stepping range so GDB needs to remember to, when
7233 the signal handler returns, resume stepping off that
7234 breakpoint. */
7235 /* To simplify things, "continue" is forced to use the same
7236 code paths as single-step - set a breakpoint at the
7237 signal return address and then, once hit, step off that
7238 breakpoint. */
1eb8556f 7239 infrun_debug_printf ("signal arrived while stepping over breakpoint");
d3169d93 7240
2c03e5be 7241 insert_hp_step_resume_breakpoint_at_frame (frame);
4e1c45ea 7242 ecs->event_thread->step_after_step_resume_breakpoint = 1;
2455069d
UW
7243 /* Reset trap_expected to ensure breakpoints are re-inserted. */
7244 ecs->event_thread->control.trap_expected = 0;
d137e6dc
PA
7245
7246 /* If we were nexting/stepping some other thread, switch to
7247 it, so that we don't continue it, losing control. */
7248 if (!switch_back_to_stepped_thread (ecs))
7249 keep_going (ecs);
9d799f85 7250 return;
68f53502 7251 }
9d799f85 7252
1edb66d8
SM
7253 if (ecs->event_thread->stop_signal () != GDB_SIGNAL_0
7254 && (pc_in_thread_step_range (ecs->event_thread->stop_pc (),
f2ffa92b 7255 ecs->event_thread)
e5f8a7cc 7256 || ecs->event_thread->control.step_range_end == 1)
a0cbd650
TT
7257 && (get_stack_frame_id (frame)
7258 == ecs->event_thread->control.step_stack_frame_id)
03acd4d8 7259 && ecs->event_thread->control.step_resume_breakpoint == nullptr)
d303a6c7
AC
7260 {
7261 /* The inferior is about to take a signal that will take it
7262 out of the single step range. Set a breakpoint at the
7263 current PC (which is presumably where the signal handler
7264 will eventually return) and then allow the inferior to
7265 run free.
7266
7267 Note that this is only needed for a signal delivered
7268 while in the single-step range. Nested signals aren't a
7269 problem as they eventually all return. */
1eb8556f 7270 infrun_debug_printf ("signal may take us out of single-step range");
237fc4c9 7271
372316f1 7272 clear_step_over_info ();
2c03e5be 7273 insert_hp_step_resume_breakpoint_at_frame (frame);
e5f8a7cc 7274 ecs->event_thread->step_after_step_resume_breakpoint = 1;
2455069d
UW
7275 /* Reset trap_expected to ensure breakpoints are re-inserted. */
7276 ecs->event_thread->control.trap_expected = 0;
9d799f85
AC
7277 keep_going (ecs);
7278 return;
d303a6c7 7279 }
9d799f85 7280
85102364 7281 /* Note: step_resume_breakpoint may be non-NULL. This occurs
9d799f85
AC
7282 when either there's a nested signal, or when there's a
7283 pending signal enabled just as the signal handler returns
7284 (leaving the inferior at the step-resume-breakpoint without
7285 actually executing it). Either way continue until the
7286 breakpoint is really hit. */
c447ac0b
PA
7287
7288 if (!switch_back_to_stepped_thread (ecs))
7289 {
1eb8556f 7290 infrun_debug_printf ("random signal, keep going");
c447ac0b
PA
7291
7292 keep_going (ecs);
7293 }
7294 return;
488f131b 7295 }
94c57d6a
PA
7296
7297 process_event_stop_test (ecs);
7298}
7299
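/* Informal summary of the "random signal" decision in
   handle_signal_stop above: a stop is treated as the program's own
   signal only if nothing else explains it -- not the bpstat
   (breakpoints, watchpoints, catchpoints), not a delayed software or
   hardware breakpoint event, not the fact that the thread was
   stepping, not another thread's single-step breakpoint, and not a
   moribund watchpoint.  An explicit stop request from the user also
   forces a stop.  Random signals are then passed or suppressed
   according to the signal tables (see the "handle" command).  */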
fe6356de
CL
7300/* Return the address for the beginning of the line. */
7301
7302CORE_ADDR
7303update_line_range_start (CORE_ADDR pc, struct execution_control_state *ecs)
7304{
7305 /* The line table may have multiple entries for the same source code line.
7306 Given the PC, check the line table and return the PC that corresponds
7307 to the line table entry for the source line that PC is in. */
7308 CORE_ADDR start_line_pc = ecs->event_thread->control.step_range_start;
7309 std::optional<CORE_ADDR> real_range_start;
7310
7311 /* Call find_line_range_start to get the smallest address in the
7312 linetable for multiple Line X entries in the line table. */
7313 real_range_start = find_line_range_start (pc);
7314
7315 if (real_range_start.has_value ())
7316 start_line_pc = *real_range_start;
7317
7318 return start_line_pc;
7319}
7320
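/* Hypothetical illustration of update_line_range_start: if the line
   table contains two entries for the same source line, say

       Line 42 -> 0x1000
       Line 42 -> 0x1010

   and PC is 0x1010, find_line_range_start is expected to return
   0x1000, so the stepping range for line 42 starts at the lowest
   address recorded for that line rather than at the entry that happens
   to contain PC.  */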
94c57d6a
PA
7321/* Come here when we've got some debug event / signal we can explain
7322 (IOW, not a random signal), and test whether it should cause a
7323 stop, or whether we should resume the inferior (transparently).
7324 E.g., could be a breakpoint whose condition evaluates false; we
7325 could be still stepping within the line; etc. */
7326
7327static void
7328process_event_stop_test (struct execution_control_state *ecs)
7329{
7330 struct symtab_and_line stop_pc_sal;
bd2b40ac 7331 frame_info_ptr frame;
94c57d6a 7332 struct gdbarch *gdbarch;
cdaa5b73
PA
7333 CORE_ADDR jmp_buf_pc;
7334 struct bpstat_what what;
94c57d6a 7335
cdaa5b73 7336 /* Handle cases caused by hitting a breakpoint. */
611c83ae 7337
cdaa5b73
PA
7338 frame = get_current_frame ();
7339 gdbarch = get_frame_arch (frame);
fcf3daef 7340
cdaa5b73 7341 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
611c83ae 7342
cdaa5b73
PA
7343 if (what.call_dummy)
7344 {
7345 stop_stack_dummy = what.call_dummy;
7346 }
186c406b 7347
243a9253
PA
7348 /* A few breakpoint types have callbacks associated (e.g.,
7349 bp_jit_event). Run them now. */
7350 bpstat_run_callbacks (ecs->event_thread->control.stop_bpstat);
7351
cdaa5b73
PA
7352 /* If we hit an internal event that triggers symbol changes, the
7353 current frame will be invalidated within bpstat_what (e.g., if we
7354 hit an internal solib event). Re-fetch it. */
7355 frame = get_current_frame ();
7356 gdbarch = get_frame_arch (frame);
e2e4d78b 7357
bf2813af
GL
7358 /* Shorthand to make if statements smaller. */
7359 struct frame_id original_frame_id
7360 = ecs->event_thread->control.step_frame_id;
7361 struct frame_id curr_frame_id = get_frame_id (get_current_frame ());
7362
cdaa5b73
PA
7363 switch (what.main_action)
7364 {
7365 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
7366 /* If we hit the breakpoint at longjmp while stepping, we
7367 install a momentary breakpoint at the target of the
7368 jmp_buf. */
186c406b 7369
1eb8556f 7370 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME");
186c406b 7371
cdaa5b73 7372 ecs->event_thread->stepping_over_breakpoint = 1;
611c83ae 7373
cdaa5b73
PA
7374 if (what.is_longjmp)
7375 {
7376 struct value *arg_value;
7377
7378 /* If we set the longjmp breakpoint via a SystemTap probe,
7379 then use it to extract the arguments. The destination PC
7380 is the third argument to the probe. */
7381 arg_value = probe_safe_evaluate_at_pc (frame, 2);
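	  /* Probe argument indices are zero-based, so index 2 selects the
	     third argument mentioned above.  */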
7382 if (arg_value)
8fa0c4f8
AA
7383 {
7384 jmp_buf_pc = value_as_address (arg_value);
7385 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
7386 }
cdaa5b73
PA
7387 else if (!gdbarch_get_longjmp_target_p (gdbarch)
7388 || !gdbarch_get_longjmp_target (gdbarch,
7389 frame, &jmp_buf_pc))
e2e4d78b 7390 {
1eb8556f
SM
7391 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME "
7392 "(!gdbarch_get_longjmp_target)");
cdaa5b73
PA
7393 keep_going (ecs);
7394 return;
e2e4d78b 7395 }
e2e4d78b 7396
cdaa5b73
PA
7397 /* Insert a breakpoint at resume address. */
7398 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
7399 }
7400 else
7401 check_exception_resume (ecs, frame);
7402 keep_going (ecs);
7403 return;
e81a37f7 7404
cdaa5b73
PA
7405 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
7406 {
bd2b40ac 7407 frame_info_ptr init_frame;
e81a37f7 7408
cdaa5b73 7409 /* There are several cases to consider.
c906108c 7410
cdaa5b73
PA
7411 1. The initiating frame no longer exists. In this case we
7412 must stop, because the exception or longjmp has gone too
7413 far.
2c03e5be 7414
cdaa5b73
PA
7415 2. The initiating frame exists, and is the same as the
7416 current frame. We stop, because the exception or longjmp
7417 has been caught.
2c03e5be 7418
cdaa5b73
PA
7419 3. The initiating frame exists and is different from the
7420 current frame. This means the exception or longjmp has
7421 been caught beneath the initiating frame, so keep going.
c906108c 7422
cdaa5b73
PA
7423 4. longjmp breakpoint has been placed just to protect
7424 against stale dummy frames and user is not interested in
7425 stopping around longjmps. */
c5aa993b 7426
1eb8556f 7427 infrun_debug_printf ("BPSTAT_WHAT_CLEAR_LONGJMP_RESUME");
c5aa993b 7428
cdaa5b73 7429 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
03acd4d8 7430 != nullptr);
cdaa5b73 7431 delete_exception_resume_breakpoint (ecs->event_thread);
c5aa993b 7432
cdaa5b73
PA
7433 if (what.is_longjmp)
7434 {
b67a2c6f 7435 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
c5aa993b 7436
cdaa5b73 7437 if (!frame_id_p (ecs->event_thread->initiating_frame))
e5ef252a 7438 {
cdaa5b73
PA
7439 /* Case 4. */
7440 keep_going (ecs);
7441 return;
e5ef252a 7442 }
cdaa5b73 7443 }
c5aa993b 7444
cdaa5b73 7445 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
527159b7 7446
cdaa5b73
PA
7447 if (init_frame)
7448 {
fb84fbf8 7449 if (curr_frame_id == ecs->event_thread->initiating_frame)
cdaa5b73
PA
7450 {
7451 /* Case 2. Fall through. */
7452 }
7453 else
7454 {
7455 /* Case 3. */
7456 keep_going (ecs);
7457 return;
7458 }
68f53502 7459 }
488f131b 7460
cdaa5b73
PA
7461 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
7462 exists. */
7463 delete_step_resume_breakpoint (ecs->event_thread);
e5ef252a 7464
bdc36728 7465 end_stepping_range (ecs);
cdaa5b73
PA
7466 }
7467 return;
e5ef252a 7468
cdaa5b73 7469 case BPSTAT_WHAT_SINGLE:
1eb8556f 7470 infrun_debug_printf ("BPSTAT_WHAT_SINGLE");
cdaa5b73
PA
7471 ecs->event_thread->stepping_over_breakpoint = 1;
7472 /* Still need to check other stuff, at least the case where we
7473 are stepping and step out of the right range. */
7474 break;
e5ef252a 7475
cdaa5b73 7476 case BPSTAT_WHAT_STEP_RESUME:
1eb8556f 7477 infrun_debug_printf ("BPSTAT_WHAT_STEP_RESUME");
b22548dd 7478
b986eec5
CL
7479 delete_step_resume_breakpoint (ecs->event_thread);
7480 if (ecs->event_thread->control.proceed_to_finish
7481 && execution_direction == EXEC_REVERSE)
cdaa5b73
PA
7482 {
7483 struct thread_info *tp = ecs->event_thread;
b22548dd 7484
b986eec5
CL
7485 /* We are finishing a function in reverse, and just hit the
7486 step-resume breakpoint at the start address of the
7487 function, and we're almost there -- just need to back up
7488 by one more single-step, which should take us back to the
7489 function call. */
7490 tp->control.step_range_start = tp->control.step_range_end = 1;
7491 keep_going (ecs);
7492 return;
7493 }
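      /* The [1, 1] step range set just above is the same convention
	 "stepi" uses: a step_range_end of 1 is treated further down in
	 this function as a request to stop after a single instruction.  */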
7494 fill_in_stop_func (gdbarch, ecs);
7495 if (ecs->event_thread->stop_pc () == ecs->stop_func_start
7496 && execution_direction == EXEC_REVERSE)
7497 {
7498 /* We are stepping over a function call in reverse, and just
7499 hit the step-resume breakpoint at the start address of
7500 the function. Go back to single-stepping, which should
7501 take us back to the function call. */
7502 ecs->event_thread->stepping_over_breakpoint = 1;
cdaa5b73
PA
7503 keep_going (ecs);
7504 return;
7505 }
7506 break;
e5ef252a 7507
cdaa5b73 7508 case BPSTAT_WHAT_STOP_NOISY:
1eb8556f 7509 infrun_debug_printf ("BPSTAT_WHAT_STOP_NOISY");
c4464ade 7510 stop_print_frame = true;
e5ef252a 7511
33bf4c5c 7512 /* Assume the thread stopped for a breakpoint. We'll still check
99619bea
PA
7513 whether a/the breakpoint is there when the thread is next
7514 resumed. */
7515 ecs->event_thread->stepping_over_breakpoint = 1;
e5ef252a 7516
22bcd14b 7517 stop_waiting (ecs);
cdaa5b73 7518 return;
e5ef252a 7519
cdaa5b73 7520 case BPSTAT_WHAT_STOP_SILENT:
1eb8556f 7521 infrun_debug_printf ("BPSTAT_WHAT_STOP_SILENT");
c4464ade 7522 stop_print_frame = false;
e5ef252a 7523
33bf4c5c 7524 /* Assume the thread stopped for a breakpoint. We'll still check
99619bea
PA
7525 whether a/the breakpoint is there when the thread is next
7526 resumed. */
7527 ecs->event_thread->stepping_over_breakpoint = 1;
22bcd14b 7528 stop_waiting (ecs);
cdaa5b73
PA
7529 return;
7530
7531 case BPSTAT_WHAT_HP_STEP_RESUME:
1eb8556f 7532 infrun_debug_printf ("BPSTAT_WHAT_HP_STEP_RESUME");
cdaa5b73
PA
7533
7534 delete_step_resume_breakpoint (ecs->event_thread);
7535 if (ecs->event_thread->step_after_step_resume_breakpoint)
7536 {
7537 /* Back when the step-resume breakpoint was inserted, we
7538 were trying to single-step off a breakpoint. Go back to
7539 doing that. */
7540 ecs->event_thread->step_after_step_resume_breakpoint = 0;
7541 ecs->event_thread->stepping_over_breakpoint = 1;
7542 keep_going (ecs);
7543 return;
e5ef252a 7544 }
cdaa5b73
PA
7545 break;
7546
7547 case BPSTAT_WHAT_KEEP_CHECKING:
7548 break;
e5ef252a 7549 }
c906108c 7550
af48d08f
PA
7551 /* If we stepped a permanent breakpoint and we had a high priority
7552 step-resume breakpoint for the address we stepped, but we didn't
7553 hit it, then we must have stepped into the signal handler. The
7554 step-resume was only necessary to catch the case of _not_
7555 stepping into the handler, so delete it, and fall through to
7556 checking whether the step finished. */
7557 if (ecs->event_thread->stepped_breakpoint)
7558 {
7559 struct breakpoint *sr_bp
7560 = ecs->event_thread->control.step_resume_breakpoint;
7561
03acd4d8 7562 if (sr_bp != nullptr
f5951b9f 7563 && sr_bp->first_loc ().permanent
af48d08f 7564 && sr_bp->type == bp_hp_step_resume
f5951b9f 7565 && sr_bp->first_loc ().address == ecs->event_thread->prev_pc)
af48d08f 7566 {
1eb8556f 7567 infrun_debug_printf ("stepped permanent breakpoint, stopped in handler");
af48d08f
PA
7568 delete_step_resume_breakpoint (ecs->event_thread);
7569 ecs->event_thread->step_after_step_resume_breakpoint = 0;
7570 }
7571 }
7572
cdaa5b73
PA
7573 /* We come here if we hit a breakpoint but should not stop for it.
7574 Possibly we also were stepping and should stop for that. So fall
7575 through and test for stepping. But, if not stepping, do not
7576 stop. */
c906108c 7577
a7212384
UW
7578 /* In all-stop mode, if we're currently stepping but have stopped in
7579 some other thread, we need to switch back to the stepped thread. */
c447ac0b
PA
7580 if (switch_back_to_stepped_thread (ecs))
7581 return;
776f04fa 7582
8358c15c 7583 if (ecs->event_thread->control.step_resume_breakpoint)
488f131b 7584 {
1eb8556f 7585 infrun_debug_printf ("step-resume breakpoint is inserted");
527159b7 7586
488f131b 7587 /* Having a step-resume breakpoint overrides anything
dda83cd7
SM
7588 else having to do with stepping commands until
7589 that breakpoint is reached. */
488f131b
JB
7590 keep_going (ecs);
7591 return;
7592 }
c5aa993b 7593
16c381f0 7594 if (ecs->event_thread->control.step_range_end == 0)
488f131b 7595 {
1eb8556f 7596 infrun_debug_printf ("no stepping, continue");
488f131b 7597 /* Likewise if we aren't even stepping. */
488f131b
JB
7598 keep_going (ecs);
7599 return;
7600 }
c5aa993b 7601
4b7703ad
JB
7602 /* Re-fetch current thread's frame in case the code above caused
7603 the frame cache to be re-initialized, making our FRAME variable
7604 a dangling pointer. */
7605 frame = get_current_frame ();
628fe4e4 7606 gdbarch = get_frame_arch (frame);
7e324e48 7607 fill_in_stop_func (gdbarch, ecs);
4b7703ad 7608
488f131b 7609 /* If stepping through a line, keep going if still within it.
c906108c 7610
488f131b
JB
7611 Note that step_range_end is the address of the first instruction
7612 beyond the step range, and NOT the address of the last instruction
31410e84
MS
7613 within it!
7614
7615 Note also that during reverse execution, we may be stepping
7616 through a function epilogue and therefore must detect when
7617 the current-frame changes in the middle of a line. */
7618
1edb66d8 7619 if (pc_in_thread_step_range (ecs->event_thread->stop_pc (),
f2ffa92b 7620 ecs->event_thread)
31410e84 7621 && (execution_direction != EXEC_REVERSE
fb84fbf8 7622 || curr_frame_id == original_frame_id))
488f131b 7623 {
1eb8556f
SM
7624 infrun_debug_printf
7625 ("stepping inside range [%s-%s]",
7626 paddress (gdbarch, ecs->event_thread->control.step_range_start),
7627 paddress (gdbarch, ecs->event_thread->control.step_range_end));
b2175913 7628
c1e36e3e
PA
7629 /* Tentatively re-enable range stepping; `resume' disables it if
7630 necessary (e.g., if we're stepping over a breakpoint or we
7631 have software watchpoints). */
7632 ecs->event_thread->control.may_range_step = 1;
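      /* (Range stepping lets a target that supports it, e.g. a remote
	 stub, run through the whole [start, end) range on its own
	 instead of reporting a stop after every single instruction.)  */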
7633
b2175913
MS
7634 /* When stepping backward, stop at beginning of line range
7635 (unless it's the function entry point, in which case
7636 keep going back to the call point). */
1edb66d8 7637 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
16c381f0 7638 if (stop_pc == ecs->event_thread->control.step_range_start
15d2b36c 7639 && stop_pc != ecs->stop_func_start
b2175913 7640 && execution_direction == EXEC_REVERSE)
bdc36728 7641 end_stepping_range (ecs);
b2175913
MS
7642 else
7643 keep_going (ecs);
7644
488f131b
JB
7645 return;
7646 }
c5aa993b 7647
488f131b 7648 /* We stepped out of the stepping range. */
c906108c 7649
488f131b 7650 /* If we are stepping at the source level and entered the runtime
388a8562
MS
7651 loader dynamic symbol resolution code...
7652
7653 EXEC_FORWARD: we keep on single stepping until we exit the run
7654 time loader code and reach the callee's address.
7655
7656 EXEC_REVERSE: we've already executed the callee (backward), and
7657 the runtime loader code is handled just like any other
7658 undebuggable function call. Now we need only keep stepping
7659 backward through the trampoline code, and that's handled further
7660 down, so there is nothing for us to do here. */
7661
7662 if (execution_direction != EXEC_REVERSE
16c381f0 7663 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
be6276e0 7664 && in_solib_dynsym_resolve_code (ecs->event_thread->stop_pc ())
bafcc335
LS
7665 && (ecs->event_thread->control.step_start_function == nullptr
7666 || !in_solib_dynsym_resolve_code (
7667 ecs->event_thread->control.step_start_function->value_block ()
7668 ->entry_pc ())))
488f131b 7669 {
4c8c40e6 7670 CORE_ADDR pc_after_resolver =
1edb66d8 7671 gdbarch_skip_solib_resolver (gdbarch, ecs->event_thread->stop_pc ());
c906108c 7672
1eb8556f 7673 infrun_debug_printf ("stepped into dynsym resolve code");
527159b7 7674
488f131b
JB
7675 if (pc_after_resolver)
7676 {
7677 /* Set up a step-resume breakpoint at the address
7678 indicated by SKIP_SOLIB_RESOLVER. */
51abb421 7679 symtab_and_line sr_sal;
488f131b 7680 sr_sal.pc = pc_after_resolver;
6c95b8df 7681 sr_sal.pspace = get_frame_program_space (frame);
488f131b 7682
a6d9a66e
UW
7683 insert_step_resume_breakpoint_at_sal (gdbarch,
7684 sr_sal, null_frame_id);
c5aa993b 7685 }
c906108c 7686
488f131b
JB
7687 keep_going (ecs);
7688 return;
7689 }
c906108c 7690
1d509aa6
MM
7691 /* Step through an indirect branch thunk. */
7692 if (ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
f2ffa92b 7693 && gdbarch_in_indirect_branch_thunk (gdbarch,
1edb66d8 7694 ecs->event_thread->stop_pc ()))
1d509aa6 7695 {
1eb8556f 7696 infrun_debug_printf ("stepped into indirect branch thunk");
1d509aa6
MM
7697 keep_going (ecs);
7698 return;
7699 }
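  /* (Such thunks are small compiler-generated stubs, e.g. the
     retpoline-style indirect-branch thunks some compilers emit, so we
     simply keep stepping through them rather than stopping inside.)  */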
7700
16c381f0
JK
7701 if (ecs->event_thread->control.step_range_end != 1
7702 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7703 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
568d6575 7704 && get_frame_type (frame) == SIGTRAMP_FRAME)
488f131b 7705 {
1eb8556f 7706 infrun_debug_printf ("stepped into signal trampoline");
42edda50 7707 /* The inferior, while doing a "step" or "next", has ended up in
dda83cd7
SM
7708 a signal trampoline (either by a signal being delivered or by
7709 the signal handler returning). Just single-step until the
7710 inferior leaves the trampoline (either by calling the handler
7711 or returning). */
488f131b
JB
7712 keep_going (ecs);
7713 return;
7714 }
c906108c 7715
14132e89
MR
7716 /* If we're in the return path from a shared library trampoline,
7717 we want to proceed through the trampoline when stepping. */
7718 /* macro/2012-04-25: This needs to come before the subroutine
7719 call check below as on some targets return trampolines look
7720 like subroutine calls (MIPS16 return thunks). */
7721 if (gdbarch_in_solib_return_trampoline (gdbarch,
1edb66d8 7722 ecs->event_thread->stop_pc (),
f2ffa92b 7723 ecs->stop_func_name)
14132e89
MR
7724 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
7725 {
7726 /* Determine where this trampoline returns. */
1edb66d8 7727 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
f2ffa92b
PA
7728 CORE_ADDR real_stop_pc
7729 = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
14132e89 7730
1eb8556f 7731 infrun_debug_printf ("stepped into solib return tramp");
14132e89
MR
7732
7733 /* Only proceed through if we know where it's going. */
7734 if (real_stop_pc)
7735 {
7736 /* And put the step-breakpoint there and go until there. */
51abb421 7737 symtab_and_line sr_sal;
14132e89
MR
7738 sr_sal.pc = real_stop_pc;
7739 sr_sal.section = find_pc_overlay (sr_sal.pc);
7740 sr_sal.pspace = get_frame_program_space (frame);
7741
7742 /* Do not specify what the fp should be when we stop since
7743 on some machines the prologue is where the new fp value
7744 is established. */
7745 insert_step_resume_breakpoint_at_sal (gdbarch,
7746 sr_sal, null_frame_id);
7747
7748 /* Restart without fiddling with the step ranges or
7749 other state. */
7750 keep_going (ecs);
7751 return;
7752 }
7753 }
7754
c17eaafe
DJ
7755 /* Check for subroutine calls. The check for the current frame
7756 equalling the step ID is not necessary - the check of the
7757 previous frame's ID is sufficient - but it is a common case and
7758 cheaper than checking the previous frame's ID.
14e60db5 7759
a0cbd650 7760 NOTE: frame_id::operator== will never report two invalid frame IDs as
14e60db5
DJ
7761 being equal, so to get into this block, both the current and
7762 previous frame must have valid frame IDs. */
005ca36a
JB
7763 /* The outer_frame_id check is a heuristic to detect stepping
7764 through startup code. If we step over an instruction which
7765 sets the stack pointer from an invalid value to a valid value,
7766 we may detect that as a subroutine call from the mythical
7767 "outermost" function. This could be fixed by marking
7768 outermost frames as !stack_p,code_p,special_p. Then the
7769 initial outermost frame, before sp was valid, would
a0cbd650 7770 have code_addr == &_start. See the comment in frame_id::operator==
005ca36a 7771 for more. */
1bd70cb9
CL
7772
7773 /* We want "nexti" to step into, not over, signal handlers invoked
7774 by the kernel, therefore this subroutine check should not trigger
7775 for a signal handler invocation. On most platforms, this is already
7776 not the case, as the kernel puts a signal trampoline frame onto the
7777 stack to handle proper return after the handler, and therefore at this
7778 point, the current frame is a grandchild of the step frame, not a
7779 child. However, on some platforms, the kernel actually uses a
7780 trampoline to handle *invocation* of the handler. In that case,
7781 when executing the first instruction of the trampoline, this check
7782 would erroneously detect the trampoline invocation as a subroutine
7783 call. Fix this by checking for SIGTRAMP_FRAME. */
a0cbd650
TT
7784 if ((get_stack_frame_id (frame)
7785 != ecs->event_thread->control.step_stack_frame_id)
1bd70cb9 7786 && get_frame_type (frame) != SIGTRAMP_FRAME
a0cbd650
TT
7787 && ((frame_unwind_caller_id (get_current_frame ())
7788 == ecs->event_thread->control.step_stack_frame_id)
7789 && ((ecs->event_thread->control.step_stack_frame_id
7790 != outer_frame_id)
885eeb5b 7791 || (ecs->event_thread->control.step_start_function
1edb66d8 7792 != find_pc_function (ecs->event_thread->stop_pc ())))))
488f131b 7793 {
1edb66d8 7794 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
95918acb 7795 CORE_ADDR real_stop_pc;
8fb3e588 7796
1eb8556f 7797 infrun_debug_printf ("stepped into subroutine");
527159b7 7798
b7a084be 7799 if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
95918acb
AC
7800 {
7801 /* I presume that step_over_calls is only 0 when we're
7802 supposed to be stepping at the assembly language level
7803 ("stepi"). Just stop. */
388a8562 7804 /* And this works the same backward as frontward. MVS */
bdc36728 7805 end_stepping_range (ecs);
95918acb
AC
7806 return;
7807 }
8fb3e588 7808
388a8562
MS
7809 /* Reverse stepping through solib trampolines. */
7810
7811 if (execution_direction == EXEC_REVERSE
16c381f0 7812 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
388a8562
MS
7813 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
7814 || (ecs->stop_func_start == 0
7815 && in_solib_dynsym_resolve_code (stop_pc))))
7816 {
7817 /* Any solib trampoline code can be handled in reverse
7818 by simply continuing to single-step. We have already
7819 executed the solib function (backwards), and a few
7820 steps will take us back through the trampoline to the
7821 caller. */
7822 keep_going (ecs);
7823 return;
7824 }
7825
16c381f0 7826 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
8567c30f 7827 {
b2175913
MS
7828 /* We're doing a "next".
7829
7830 Normal (forward) execution: set a breakpoint at the
7831 callee's return address (the address at which the caller
7832 will resume).
7833
 7834	 Reverse (backward) execution: set the step-resume
7835 breakpoint at the start of the function that we just
7836 stepped into (backwards), and continue to there. When we
6130d0b7 7837 get there, we'll need to single-step back to the caller. */
b2175913
MS
7838
7839 if (execution_direction == EXEC_REVERSE)
7840 {
acf9414f
JK
7841 /* If we're already at the start of the function, we've either
7842 just stepped backward into a single instruction function,
7843 or stepped back out of a signal handler to the first instruction
7844 of the function. Just keep going, which will single-step back
7845 to the caller. */
58c48e72 7846 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
acf9414f 7847 {
acf9414f 7848 /* Normal function call return (static or dynamic). */
51abb421 7849 symtab_and_line sr_sal;
acf9414f
JK
7850 sr_sal.pc = ecs->stop_func_start;
7851 sr_sal.pspace = get_frame_program_space (frame);
7852 insert_step_resume_breakpoint_at_sal (gdbarch,
1f3e37e0 7853 sr_sal, get_stack_frame_id (frame));
acf9414f 7854 }
b2175913
MS
7855 }
7856 else
568d6575 7857 insert_step_resume_breakpoint_at_caller (frame);
b2175913 7858
8567c30f
AC
7859 keep_going (ecs);
7860 return;
7861 }
a53c66de 7862
95918acb 7863 /* If we are in a function call trampoline (a stub between the
dda83cd7
SM
7864 calling routine and the real function), locate the real
7865 function. That's what tells us (a) whether we want to step
7866 into it at all, and (b) what prologue we want to run to the
7867 end of, if we do step into it. */
568d6575 7868 real_stop_pc = skip_language_trampoline (frame, stop_pc);
95918acb 7869 if (real_stop_pc == 0)
568d6575 7870 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
95918acb
AC
7871 if (real_stop_pc != 0)
7872 ecs->stop_func_start = real_stop_pc;
8fb3e588 7873
db5f024e 7874 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
1b2bfbb9 7875 {
51abb421 7876 symtab_and_line sr_sal;
1b2bfbb9 7877 sr_sal.pc = ecs->stop_func_start;
6c95b8df 7878 sr_sal.pspace = get_frame_program_space (frame);
1b2bfbb9 7879
a6d9a66e
UW
7880 insert_step_resume_breakpoint_at_sal (gdbarch,
7881 sr_sal, null_frame_id);
8fb3e588
AC
7882 keep_going (ecs);
7883 return;
1b2bfbb9
RC
7884 }
7885
95918acb 7886 /* If we have line number information for the function we are
1bfeeb0f
JL
7887 thinking of stepping into and the function isn't on the skip
7888 list, step into it.
95918acb 7889
dda83cd7
SM
7890 If there are several symtabs at that PC (e.g. with include
7891 files), just want to know whether *any* of them have line
7892 numbers. find_pc_line handles this. */
95918acb
AC
7893 {
7894 struct symtab_and_line tmp_sal;
8fb3e588 7895
95918acb 7896 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
2b914b52 7897 if (tmp_sal.line != 0
85817405 7898 && !function_name_is_marked_for_skip (ecs->stop_func_name,
4a4c04f1
BE
7899 tmp_sal)
7900 && !inline_frame_is_marked_for_skip (true, ecs->event_thread))
95918acb 7901 {
b2175913 7902 if (execution_direction == EXEC_REVERSE)
568d6575 7903 handle_step_into_function_backward (gdbarch, ecs);
b2175913 7904 else
568d6575 7905 handle_step_into_function (gdbarch, ecs);
95918acb
AC
7906 return;
7907 }
7908 }
7909
7910 /* If we have no line number and the step-stop-if-no-debug is
dda83cd7
SM
7911 set, we stop the step so that the user has a chance to switch
7912 in assembly mode. */
16c381f0 7913 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
078130d0 7914 && step_stop_if_no_debug)
95918acb 7915 {
bdc36728 7916 end_stepping_range (ecs);
95918acb
AC
7917 return;
7918 }
7919
b2175913
MS
7920 if (execution_direction == EXEC_REVERSE)
7921 {
acf9414f
JK
7922 /* If we're already at the start of the function, we've either just
7923 stepped backward into a single instruction function without line
7924 number info, or stepped back out of a signal handler to the first
7925 instruction of the function without line number info. Just keep
7926 going, which will single-step back to the caller. */
7927 if (ecs->stop_func_start != stop_pc)
7928 {
7929 /* Set a breakpoint at callee's start address.
7930 From there we can step once and be back in the caller. */
51abb421 7931 symtab_and_line sr_sal;
acf9414f
JK
7932 sr_sal.pc = ecs->stop_func_start;
7933 sr_sal.pspace = get_frame_program_space (frame);
7934 insert_step_resume_breakpoint_at_sal (gdbarch,
7935 sr_sal, null_frame_id);
7936 }
b2175913
MS
7937 }
7938 else
7939 /* Set a breakpoint at callee's return address (the address
7940 at which the caller will resume). */
568d6575 7941 insert_step_resume_breakpoint_at_caller (frame);
b2175913 7942
95918acb 7943 keep_going (ecs);
488f131b 7944 return;
488f131b 7945 }
c906108c 7946
fdd654f3
MS
7947 /* Reverse stepping through solib trampolines. */
7948
7949 if (execution_direction == EXEC_REVERSE
16c381f0 7950 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
fdd654f3 7951 {
1edb66d8 7952 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
f2ffa92b 7953
fdd654f3
MS
7954 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
7955 || (ecs->stop_func_start == 0
7956 && in_solib_dynsym_resolve_code (stop_pc)))
7957 {
7958 /* Any solib trampoline code can be handled in reverse
7959 by simply continuing to single-step. We have already
7960 executed the solib function (backwards), and a few
7961 steps will take us back through the trampoline to the
7962 caller. */
7963 keep_going (ecs);
7964 return;
7965 }
7966 else if (in_solib_dynsym_resolve_code (stop_pc))
7967 {
7968 /* Stepped backward into the solib dynsym resolver.
7969 Set a breakpoint at its start and continue, then
7970 one more step will take us out. */
51abb421 7971 symtab_and_line sr_sal;
fdd654f3 7972 sr_sal.pc = ecs->stop_func_start;
9d1807c3 7973 sr_sal.pspace = get_frame_program_space (frame);
fdd654f3
MS
7974 insert_step_resume_breakpoint_at_sal (gdbarch,
7975 sr_sal, null_frame_id);
7976 keep_going (ecs);
7977 return;
7978 }
7979 }
7980
8c95582d
AB
7981 /* This always returns the sal for the inner-most frame when we are in a
7982 stack of inlined frames, even if GDB actually believes that it is in a
7983 more outer frame. This is checked for below by calls to
7984 inline_skipped_frames. */
1edb66d8 7985 stop_pc_sal = find_pc_line (ecs->event_thread->stop_pc (), 0);
7ed0fe66 7986
1b2bfbb9
RC
7987 /* NOTE: tausq/2004-05-24: This if block used to be done before all
7988 the trampoline processing logic, however, there are some trampolines
7989 that have no names, so we should do trampoline handling first. */
16c381f0 7990 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
03acd4d8 7991 && ecs->stop_func_name == nullptr
2afb61aa 7992 && stop_pc_sal.line == 0)
1b2bfbb9 7993 {
1eb8556f 7994 infrun_debug_printf ("stepped into undebuggable function");
527159b7 7995
1b2bfbb9 7996 /* The inferior just stepped into, or returned to, an
dda83cd7
SM
7997 undebuggable function (where there is no debugging information
7998 and no line number corresponding to the address where the
7999 inferior stopped). Since we want to skip this kind of code,
8000 we keep going until the inferior returns from this
8001 function - unless the user has asked us not to (via
8002 set step-mode) or we no longer know how to get back
8003 to the call site. */
14e60db5 8004 if (step_stop_if_no_debug
c7ce8faa 8005 || !frame_id_p (frame_unwind_caller_id (frame)))
1b2bfbb9
RC
8006 {
8007 /* If we have no line number and the step-stop-if-no-debug
8008 is set, we stop the step so that the user has a chance to
8009 switch in assembly mode. */
bdc36728 8010 end_stepping_range (ecs);
1b2bfbb9
RC
8011 return;
8012 }
8013 else
8014 {
8015 /* Set a breakpoint at callee's return address (the address
8016 at which the caller will resume). */
568d6575 8017 insert_step_resume_breakpoint_at_caller (frame);
1b2bfbb9
RC
8018 keep_going (ecs);
8019 return;
8020 }
8021 }
8022
2a8339b7
CL
8023 if (execution_direction == EXEC_REVERSE
8024 && ecs->event_thread->control.proceed_to_finish
8025 && ecs->event_thread->stop_pc () >= ecs->stop_func_alt_start
8026 && ecs->event_thread->stop_pc () < ecs->stop_func_start)
8027 {
8028 /* We are executing the reverse-finish command.
 8029	 The system may support multiple entry points for the function we are
3bfdcabb 8030	 finishing in reverse.  If we are between the entry points, single-step
2a8339b7
CL
 8031	 back to the alternate entry point.  If we are at the alternate entry
 8032	 point, we just need to back up by one more single-step, which
 8033	 should take us back to the function call.  */
8034 ecs->event_thread->control.step_range_start
8035 = ecs->event_thread->control.step_range_end = 1;
8036 keep_going (ecs);
8037 return;
8038
8039 }
8040
16c381f0 8041 if (ecs->event_thread->control.step_range_end == 1)
1b2bfbb9
RC
8042 {
8043 /* It is stepi or nexti. We always want to stop stepping after
dda83cd7 8044 one instruction. */
1eb8556f 8045 infrun_debug_printf ("stepi/nexti");
bdc36728 8046 end_stepping_range (ecs);
1b2bfbb9
RC
8047 return;
8048 }
8049
2afb61aa 8050 if (stop_pc_sal.line == 0)
488f131b
JB
8051 {
8052 /* We have no line number information. That means to stop
dda83cd7
SM
8053 stepping (does this always happen right after one instruction,
8054 when we do "s" in a function with no line numbers,
8055 or can this happen as a result of a return or longjmp?). */
1eb8556f 8056      infrun_debug_printf ("no line number info");
bdc36728 8057 end_stepping_range (ecs);
488f131b
JB
8058 return;
8059 }
c906108c 8060
edb3359d
DJ
8061 /* Look for "calls" to inlined functions, part one. If the inline
8062 frame machinery detected some skipped call sites, we have entered
8063 a new inline function. */
8064
fb84fbf8 8065 if ((curr_frame_id == original_frame_id)
00431a78 8066 && inline_skipped_frames (ecs->event_thread))
edb3359d 8067 {
1eb8556f 8068 infrun_debug_printf ("stepped into inlined function");
edb3359d 8069
51abb421 8070 symtab_and_line call_sal = find_frame_sal (get_current_frame ());
edb3359d 8071
16c381f0 8072 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
edb3359d
DJ
8073 {
8074 /* For "step", we're going to stop. But if the call site
8075 for this inlined function is on the same source line as
8076 we were previously stepping, go down into the function
8077 first. Otherwise stop at the call site. */
8078
8079 if (call_sal.line == ecs->event_thread->current_line
8080 && call_sal.symtab == ecs->event_thread->current_symtab)
4a4c04f1
BE
8081 {
8082 step_into_inline_frame (ecs->event_thread);
8083 if (inline_frame_is_marked_for_skip (false, ecs->event_thread))
8084 {
8085 keep_going (ecs);
8086 return;
8087 }
8088 }
edb3359d 8089
bdc36728 8090 end_stepping_range (ecs);
edb3359d
DJ
8091 return;
8092 }
8093 else
8094 {
8095 /* For "next", we should stop at the call site if it is on a
8096 different source line. Otherwise continue through the
8097 inlined function. */
8098 if (call_sal.line == ecs->event_thread->current_line
8099 && call_sal.symtab == ecs->event_thread->current_symtab)
8100 keep_going (ecs);
8101 else
bdc36728 8102 end_stepping_range (ecs);
edb3359d
DJ
8103 return;
8104 }
8105 }
8106
8107 /* Look for "calls" to inlined functions, part two. If we are still
8108 in the same real function we were stepping through, but we have
8109 to go further up to find the exact frame ID, we are stepping
8110 through a more inlined call beyond its call site. */
8111
8112 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
fb84fbf8
GL
8113 && (curr_frame_id != original_frame_id)
8114 && stepped_in_from (get_current_frame (), original_frame_id))
edb3359d 8115 {
1eb8556f 8116 infrun_debug_printf ("stepping through inlined function");
edb3359d 8117
4a4c04f1
BE
8118 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL
8119 || inline_frame_is_marked_for_skip (false, ecs->event_thread))
edb3359d
DJ
8120 keep_going (ecs);
8121 else
bdc36728 8122 end_stepping_range (ecs);
edb3359d
DJ
8123 return;
8124 }
8125
8c95582d 8126 bool refresh_step_info = true;
1edb66d8 8127 if ((ecs->event_thread->stop_pc () == stop_pc_sal.pc)
4e1c45ea 8128 && (ecs->event_thread->current_line != stop_pc_sal.line
24b21115 8129 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
488f131b 8130 {
ebde6f2d
TV
8131 /* We are at a different line. */
8132
8c95582d
AB
8133 if (stop_pc_sal.is_stmt)
8134 {
fe6356de
CL
8135 if (execution_direction == EXEC_REVERSE)
8136 {
8137 /* We are stepping backwards make sure we have reached the
8138 beginning of the line. */
8139 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
8140 CORE_ADDR start_line_pc
8141 = update_line_range_start (stop_pc, ecs);
8142
8143 if (stop_pc != start_line_pc)
8144 {
8145 /* Have not reached the beginning of the source code line.
8146 Set a step range. Execution should stop in any function
8147 calls we execute back into before reaching the beginning
8148 of the line. */
8149 ecs->event_thread->control.step_range_start
8150 = start_line_pc;
8151 ecs->event_thread->control.step_range_end = stop_pc;
8152 set_step_info (ecs->event_thread, frame, stop_pc_sal);
8153 keep_going (ecs);
8154 return;
8155 }
8156 }
8157
ebde6f2d
TV
8158 /* We are at the start of a statement.
8159
8160 So stop. Note that we don't stop if we step into the middle of a
8161 statement. That is said to make things like for (;;) statements
8162 work better. */
1eb8556f 8163 infrun_debug_printf ("stepped to a different line");
8c95582d
AB
8164 end_stepping_range (ecs);
8165 return;
8166 }
fb84fbf8 8167 else if (curr_frame_id == original_frame_id)
8c95582d 8168 {
ebde6f2d
TV
8169 /* We are not at the start of a statement, and we have not changed
8170 frame.
8171
8172 We ignore this line table entry, and continue stepping forward,
8c95582d
AB
8173 looking for a better place to stop. */
8174 refresh_step_info = false;
1eb8556f
SM
8175 infrun_debug_printf ("stepped to a different line, but "
8176 "it's not the start of a statement");
8c95582d 8177 }
ebde6f2d
TV
8178 else
8179 {
8180 /* We are not the start of a statement, and we have changed frame.
8181
8182 We ignore this line table entry, and continue stepping forward,
8183 looking for a better place to stop. Keep refresh_step_info at
8184 true to note that the frame has changed, but ignore the line
8185 number to make sure we don't ignore a subsequent entry with the
8186 same line number. */
8187 stop_pc_sal.line = 0;
8188 infrun_debug_printf ("stepped to a different frame, but "
8189 "it's not the start of a statement");
8190 }
488f131b 8191 }
bf2813af
GL
8192 else if (execution_direction == EXEC_REVERSE
8193 && curr_frame_id != original_frame_id
8194 && original_frame_id.code_addr_p && curr_frame_id.code_addr_p
8195 && original_frame_id.code_addr == curr_frame_id.code_addr)
8196 {
8197 /* If we enter here, we're leaving a recursive function call. In this
8198 situation, we shouldn't refresh the step information, because if we
8199 do, we'll lose the frame_id of when we started stepping, and this
8200 will make GDB not know we need to print frame information. */
8201 refresh_step_info = false;
8202 infrun_debug_printf ("reverse stepping, left a recursive call, don't "
8203 "update step info so we remember we left a frame");
8204 }
c906108c 8205
488f131b 8206 /* We aren't done stepping.
c906108c 8207
488f131b
JB
8208 Optimize by setting the stepping range to the line.
8209 (We might not be in the original line, but if we entered a
8210 new line in mid-statement, we continue stepping. This makes
8c95582d
AB
8211 things like for(;;) statements work better.)
8212
8213 If we entered a SAL that indicates a non-statement line table entry,
8214 then we update the stepping range, but we don't update the step info,
8215 which includes things like the line number we are stepping away from.
8216 This means we will stop when we find a line table entry that is marked
8217 as is-statement, even if it matches the non-statement one we just
8218 stepped into. */
c906108c 8219
16c381f0
JK
8220 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
8221 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
c1e36e3e 8222 ecs->event_thread->control.may_range_step = 1;
c8353d68
AB
8223 infrun_debug_printf
8224 ("updated step range, start = %s, end = %s, may_range_step = %d",
8225 paddress (gdbarch, ecs->event_thread->control.step_range_start),
8226 paddress (gdbarch, ecs->event_thread->control.step_range_end),
8227 ecs->event_thread->control.may_range_step);
8c95582d
AB
8228 if (refresh_step_info)
8229 set_step_info (ecs->event_thread, frame, stop_pc_sal);
488f131b 8230
1eb8556f 8231 infrun_debug_printf ("keep going");
fe6356de
CL
8232
8233 if (execution_direction == EXEC_REVERSE)
8234 {
8235 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
8236
8237 /* Make sure the stop_pc is set to the beginning of the line. */
8238 if (stop_pc != ecs->event_thread->control.step_range_start)
8239 ecs->event_thread->control.step_range_start
8240 = update_line_range_start (stop_pc, ecs);
8241 }
8242
488f131b 8243 keep_going (ecs);
104c1213
JM
8244}
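
/* To summarize, process_event_stop_test works through the possible
   reasons to stop roughly in this order: explicit bpstat actions
   (longjmp/exception resume, step-resume, noisy or silent stops), an
   active step-resume breakpoint, the single-step range, runtime-loader
   and trampoline code, subroutine calls, inline frames, and finally
   whether the new PC sits at the start of a different source line.  */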
8245
408f6686
PA
8246static bool restart_stepped_thread (process_stratum_target *resume_target,
8247 ptid_t resume_ptid);
8248
c447ac0b
PA
8249/* In all-stop mode, if we're currently stepping but have stopped in
8250 some other thread, we may need to switch back to the stepped
 8251   thread.  Returns true if we set the inferior running, false if we left
8252 it stopped (and the event needs further processing). */
8253
c4464ade 8254static bool
c447ac0b
PA
8255switch_back_to_stepped_thread (struct execution_control_state *ecs)
8256{
fbea99ea 8257 if (!target_is_non_stop_p ())
c447ac0b 8258 {
99619bea
PA
8259 /* If any thread is blocked on some internal breakpoint, and we
8260 simply need to step over that breakpoint to get it going
8261 again, do that first. */
8262
8263 /* However, if we see an event for the stepping thread, then we
8264 know all other threads have been moved past their breakpoints
8265 already. Let the caller check whether the step is finished,
8266 etc., before deciding to move it past a breakpoint. */
8267 if (ecs->event_thread->control.step_range_end != 0)
c4464ade 8268 return false;
99619bea
PA
8269
8270 /* Check if the current thread is blocked on an incomplete
8271 step-over, interrupted by a random signal. */
8272 if (ecs->event_thread->control.trap_expected
1edb66d8 8273 && ecs->event_thread->stop_signal () != GDB_SIGNAL_TRAP)
c447ac0b 8274 {
1eb8556f
SM
8275 infrun_debug_printf
8276 ("need to finish step-over of [%s]",
0fab7955 8277 ecs->event_thread->ptid.to_string ().c_str ());
99619bea 8278 keep_going (ecs);
c4464ade 8279 return true;
99619bea 8280 }
2adfaa28 8281
99619bea
PA
8282 /* Check if the current thread is blocked by a single-step
8283 breakpoint of another thread. */
8284 if (ecs->hit_singlestep_breakpoint)
8285 {
1eb8556f 8286 infrun_debug_printf ("need to step [%s] over single-step breakpoint",
0fab7955 8287 ecs->ptid.to_string ().c_str ());
99619bea 8288 keep_going (ecs);
c4464ade 8289 return true;
99619bea
PA
8290 }
8291
4d9d9d04
PA
8292 /* If this thread needs yet another step-over (e.g., stepping
8293 through a delay slot), do it first before moving on to
8294 another thread. */
8295 if (thread_still_needs_step_over (ecs->event_thread))
8296 {
1eb8556f
SM
8297 infrun_debug_printf
8298 ("thread [%s] still needs step-over",
0fab7955 8299 ecs->event_thread->ptid.to_string ().c_str ());
4d9d9d04 8300 keep_going (ecs);
c4464ade 8301 return true;
4d9d9d04 8302 }
70509625 8303
483805cf
PA
8304 /* If scheduler locking applies even if not stepping, there's no
8305 need to walk over threads. Above we've checked whether the
8306 current thread is stepping. If some other thread not the
8307 event thread is stepping, then it must be that scheduler
8308 locking is not in effect. */
856e7dd6 8309 if (schedlock_applies (ecs->event_thread))
c4464ade 8310 return false;
483805cf 8311
4d9d9d04
PA
8312 /* Otherwise, we no longer expect a trap in the current thread.
8313 Clear the trap_expected flag before switching back -- this is
8314 what keep_going does as well, if we call it. */
8315 ecs->event_thread->control.trap_expected = 0;
8316
8317 /* Likewise, clear the signal if it should not be passed. */
1edb66d8
SM
8318 if (!signal_program[ecs->event_thread->stop_signal ()])
8319 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
4d9d9d04 8320
408f6686 8321 if (restart_stepped_thread (ecs->target, ecs->ptid))
4d9d9d04
PA
8322 {
8323 prepare_to_wait (ecs);
c4464ade 8324 return true;
4d9d9d04
PA
8325 }
8326
408f6686
PA
8327 switch_to_thread (ecs->event_thread);
8328 }
4d9d9d04 8329
408f6686
PA
8330 return false;
8331}
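
/* Note that in non-stop mode the function above does nothing and
   returns false: its whole body is guarded by !target_is_non_stop_p (),
   since in non-stop mode each thread is resumed and reported
   independently and there is no single "stepped thread" to go back to.  */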
f3f8ece4 8332
408f6686
PA
8333/* Look for the thread that was stepping, and resume it.
8334 RESUME_TARGET / RESUME_PTID indicate the set of threads the caller
8335 is resuming. Return true if a thread was started, false
8336 otherwise. */
483805cf 8337
408f6686
PA
8338static bool
8339restart_stepped_thread (process_stratum_target *resume_target,
8340 ptid_t resume_ptid)
8341{
8342 /* Do all pending step-overs before actually proceeding with
8343 step/next/etc. */
8344 if (start_step_over ())
8345 return true;
483805cf 8346
408f6686
PA
8347 for (thread_info *tp : all_threads_safe ())
8348 {
8349 if (tp->state == THREAD_EXITED)
8350 continue;
8351
1edb66d8 8352 if (tp->has_pending_waitstatus ())
408f6686 8353 continue;
483805cf 8354
408f6686
PA
8355 /* Ignore threads of processes the caller is not
8356 resuming. */
8357 if (!sched_multi
8358 && (tp->inf->process_target () != resume_target
8359 || tp->inf->pid != resume_ptid.pid ()))
8360 continue;
483805cf 8361
408f6686
PA
8362 if (tp->control.trap_expected)
8363 {
8364 infrun_debug_printf ("switching back to stepped thread (step-over)");
483805cf 8365
408f6686
PA
8366 if (keep_going_stepped_thread (tp))
8367 return true;
99619bea 8368 }
408f6686
PA
8369 }
8370
8371 for (thread_info *tp : all_threads_safe ())
8372 {
8373 if (tp->state == THREAD_EXITED)
8374 continue;
8375
1edb66d8 8376 if (tp->has_pending_waitstatus ())
408f6686 8377 continue;
99619bea 8378
408f6686
PA
8379 /* Ignore threads of processes the caller is not
8380 resuming. */
8381 if (!sched_multi
8382 && (tp->inf->process_target () != resume_target
8383 || tp->inf->pid != resume_ptid.pid ()))
8384 continue;
8385
8386 /* Did we find the stepping thread? */
8387 if (tp->control.step_range_end)
99619bea 8388 {
408f6686 8389 infrun_debug_printf ("switching back to stepped thread (stepping)");
c447ac0b 8390
408f6686
PA
8391 if (keep_going_stepped_thread (tp))
8392 return true;
2ac7589c
PA
8393 }
8394 }
2adfaa28 8395
c4464ade 8396 return false;
2ac7589c 8397}
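
/* The two loops above deliberately give priority to finishing an
   interrupted step-over (trap_expected) before switching back to the
   thread that was doing a source-level step (step_range_end set).  */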
2adfaa28 8398
408f6686
PA
8399/* See infrun.h. */
8400
8401void
8402restart_after_all_stop_detach (process_stratum_target *proc_target)
8403{
8404 /* Note we don't check target_is_non_stop_p() here, because the
8405 current inferior may no longer have a process_stratum target
8406 pushed, as we just detached. */
8407
 8408  /* See if we have a THREAD_RUNNING thread that needs to be
 8409     re-resumed.  If we have any thread that is already executing,
 8410     then we don't need to resume the target -- it has already been
8411 resumed. With the remote target (in all-stop), it's even
8412 impossible to issue another resumption if the target is already
8413 resumed, until the target reports a stop. */
8414 for (thread_info *thr : all_threads (proc_target))
8415 {
8416 if (thr->state != THREAD_RUNNING)
8417 continue;
8418
8419 /* If we have any thread that is already executing, then we
 8420	 don't need to resume the target -- it has already been
8421 resumed. */
611841bb 8422 if (thr->executing ())
408f6686
PA
8423 return;
8424
8425 /* If we have a pending event to process, skip resuming the
8426 target and go straight to processing it. */
1edb66d8 8427 if (thr->resumed () && thr->has_pending_waitstatus ())
408f6686
PA
8428 return;
8429 }
8430
8431 /* Alright, we need to re-resume the target. If a thread was
8432 stepping, we need to restart it stepping. */
8433 if (restart_stepped_thread (proc_target, minus_one_ptid))
8434 return;
8435
8436 /* Otherwise, find the first THREAD_RUNNING thread and resume
8437 it. */
8438 for (thread_info *thr : all_threads (proc_target))
8439 {
8440 if (thr->state != THREAD_RUNNING)
8441 continue;
8442
aa563d16 8443 execution_control_state ecs (thr);
408f6686
PA
8444 switch_to_thread (thr);
8445 keep_going (&ecs);
8446 return;
8447 }
8448}
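
/* In short: after detaching in all-stop mode the remaining threads may
   have been left stopped, so the function above restarts the stepping
   thread if there was one, and otherwise re-resumes the first runnable
   thread it finds.  */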
8449
2ac7589c
PA
8450/* Set a previously stepped thread back to stepping. Returns true on
8451 success, false if the resume is not possible (e.g., the thread
8452 vanished). */
8453
c4464ade 8454static bool
2ac7589c
PA
8455keep_going_stepped_thread (struct thread_info *tp)
8456{
bd2b40ac 8457 frame_info_ptr frame;
2adfaa28 8458
2ac7589c
PA
8459 /* If the stepping thread exited, then don't try to switch back and
8460 resume it, which could fail in several different ways depending
8461 on the target. Instead, just keep going.
2adfaa28 8462
2ac7589c
PA
8463 We can find a stepping dead thread in the thread list in two
8464 cases:
2adfaa28 8465
2ac7589c
PA
8466 - The target supports thread exit events, and when the target
8467 tries to delete the thread from the thread list, inferior_ptid
 8468     pointed at the exiting thread.  In such a case, calling
8469 delete_thread does not really remove the thread from the list;
8470 instead, the thread is left listed, with 'exited' state.
64ce06e4 8471
2ac7589c
PA
8472 - The target's debug interface does not support thread exit
8473 events, and so we have no idea whatsoever if the previously
8474 stepping thread is still alive. For that reason, we need to
8475 synchronously query the target now. */
2adfaa28 8476
00431a78 8477 if (tp->state == THREAD_EXITED || !target_thread_alive (tp->ptid))
2ac7589c 8478 {
1eb8556f
SM
8479 infrun_debug_printf ("not resuming previously stepped thread, it has "
8480 "vanished");
2ac7589c 8481
00431a78 8482 delete_thread (tp);
c4464ade 8483 return false;
c447ac0b 8484 }
2ac7589c 8485
1eb8556f 8486 infrun_debug_printf ("resuming previously stepped thread");
2ac7589c 8487
aa563d16 8488 execution_control_state ecs (tp);
00431a78 8489 switch_to_thread (tp);
2ac7589c 8490
1edb66d8 8491 tp->set_stop_pc (regcache_read_pc (get_thread_regcache (tp)));
2ac7589c 8492 frame = get_current_frame ();
2ac7589c
PA
8493
8494 /* If the PC of the thread we were trying to single-step has
8495 changed, then that thread has trapped or been signaled, but the
8496 event has not been reported to GDB yet. Re-poll the target
8497 looking for this particular thread's event (i.e. temporarily
8498 enable schedlock) by:
8499
8500 - setting a break at the current PC
8501 - resuming that particular thread, only (by setting trap
8502 expected)
8503
 8504     This prevents us from continuously moving the single-step breakpoint
8505 forward, one instruction at a time, overstepping. */
8506
1edb66d8 8507 if (tp->stop_pc () != tp->prev_pc)
2ac7589c
PA
8508 {
8509 ptid_t resume_ptid;
8510
1eb8556f 8511 infrun_debug_printf ("expected thread advanced also (%s -> %s)",
99d9c3b9
SM
8512 paddress (current_inferior ()->arch (), tp->prev_pc),
8513 paddress (current_inferior ()->arch (),
8514 tp->stop_pc ()));
2ac7589c
PA
8515
8516 /* Clear the info of the previous step-over, as it's no longer
8517 valid (if the thread was trying to step over a breakpoint, it
8518 has already succeeded). It's what keep_going would do too,
8519 if we called it. Do this before trying to insert the sss
8520 breakpoint, otherwise if we were previously trying to step
8521 over this exact address in another thread, the breakpoint is
8522 skipped. */
8523 clear_step_over_info ();
8524 tp->control.trap_expected = 0;
8525
8526 insert_single_step_breakpoint (get_frame_arch (frame),
8527 get_frame_address_space (frame),
1edb66d8 8528 tp->stop_pc ());
2ac7589c 8529
7846f3aa 8530 tp->set_resumed (true);
fbea99ea 8531 resume_ptid = internal_resume_ptid (tp->control.stepping_command);
c4464ade 8532 do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
2ac7589c
PA
8533 }
8534 else
8535 {
1eb8556f 8536 infrun_debug_printf ("expected thread still hasn't advanced");
2ac7589c 8537
aa563d16 8538 keep_going_pass_signal (&ecs);
2ac7589c 8539 }
c4464ade
SM
8540
8541 return true;
c447ac0b
PA
8542}
8543
8b061563
PA
8544/* Is thread TP in the middle of (software or hardware)
8545 single-stepping? (Note the result of this function must never be
8546 passed directly as target_resume's STEP parameter.) */
104c1213 8547
c4464ade 8548static bool
b3444185 8549currently_stepping (struct thread_info *tp)
a7212384 8550{
8358c15c 8551 return ((tp->control.step_range_end
03acd4d8 8552 && tp->control.step_resume_breakpoint == nullptr)
8358c15c 8553 || tp->control.trap_expected
af48d08f 8554 || tp->stepped_breakpoint
8358c15c 8555 || bpstat_should_step ());
a7212384
UW
8556}
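
/* (The warning in the comment above presumably exists because this
   predicate is also true for threads stepping over a breakpoint or
   relying on software watchpoints, cases where the actual resume may
   not use a hardware single-step; the resume code decides that
   separately.)  */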
8557
b2175913
MS
8558/* Inferior has stepped into a subroutine call with source code that
 8559   we should not step over.  Step to the first line of code in
8560 it. */
c2c6d25f
JM
8561
8562static void
568d6575
UW
8563handle_step_into_function (struct gdbarch *gdbarch,
8564 struct execution_control_state *ecs)
c2c6d25f 8565{
7e324e48
GB
8566 fill_in_stop_func (gdbarch, ecs);
8567
f2ffa92b 8568 compunit_symtab *cust
1edb66d8 8569 = find_pc_compunit_symtab (ecs->event_thread->stop_pc ());
03acd4d8 8570 if (cust != nullptr && cust->language () != language_asm)
46a62268
YQ
8571 ecs->stop_func_start
8572 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
c2c6d25f 8573
51abb421 8574 symtab_and_line stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
c2c6d25f
JM
8575 /* Use the step_resume_break to step until the end of the prologue,
8576 even if that involves jumps (as it seems to on the vax under
8577 4.2). */
8578 /* If the prologue ends in the middle of a source line, continue to
8579 the end of that source line (if it is still within the function).
8580 Otherwise, just go to end of prologue. */
2afb61aa
PA
8581 if (stop_func_sal.end
8582 && stop_func_sal.pc != ecs->stop_func_start
8583 && stop_func_sal.end < ecs->stop_func_end)
8584 ecs->stop_func_start = stop_func_sal.end;
c2c6d25f 8585
2dbd5e30
KB
8586 /* Architectures which require breakpoint adjustment might not be able
8587 to place a breakpoint at the computed address. If so, the test
8588 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
8589 ecs->stop_func_start to an address at which a breakpoint may be
8590 legitimately placed.
8fb3e588 8591
2dbd5e30
KB
8592 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
8593 made, GDB will enter an infinite loop when stepping through
8594 optimized code consisting of VLIW instructions which contain
8595 subinstructions corresponding to different source lines. On
8596 FR-V, it's not permitted to place a breakpoint on any but the
8597 first subinstruction of a VLIW instruction. When a breakpoint is
8598 set, GDB will adjust the breakpoint address to the beginning of
8599 the VLIW instruction. Thus, we need to make the corresponding
8600 adjustment here when computing the stop address. */
8fb3e588 8601
568d6575 8602 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
2dbd5e30
KB
8603 {
8604 ecs->stop_func_start
568d6575 8605 = gdbarch_adjust_breakpoint_address (gdbarch,
8fb3e588 8606 ecs->stop_func_start);
2dbd5e30
KB
8607 }
8608
1edb66d8 8609 if (ecs->stop_func_start == ecs->event_thread->stop_pc ())
c2c6d25f
JM
8610 {
8611 /* We are already there: stop now. */
bdc36728 8612 end_stepping_range (ecs);
c2c6d25f
JM
8613 return;
8614 }
8615 else
8616 {
8617 /* Put the step-breakpoint there and go until there. */
51abb421 8618 symtab_and_line sr_sal;
c2c6d25f
JM
8619 sr_sal.pc = ecs->stop_func_start;
8620 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
6c95b8df 8621 sr_sal.pspace = get_frame_program_space (get_current_frame ());
44cbf7b5 8622
c2c6d25f 8623 /* Do not specify what the fp should be when we stop since on
dda83cd7
SM
8624 some machines the prologue is where the new fp value is
8625 established. */
a6d9a66e 8626 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
c2c6d25f
JM
8627
8628 /* And make sure stepping stops right away then. */
16c381f0 8629 ecs->event_thread->control.step_range_end
dda83cd7 8630 = ecs->event_thread->control.step_range_start;
c2c6d25f
JM
8631 }
8632 keep_going (ecs);
8633}
d4f3574e 8634
b2175913
MS
8635/* Inferior has stepped backward into a subroutine call with source
 8636   code that we should not step over.  Step to the beginning of the
8637 last line of code in it. */
8638
8639static void
568d6575
UW
8640handle_step_into_function_backward (struct gdbarch *gdbarch,
8641 struct execution_control_state *ecs)
b2175913 8642{
43f3e411 8643 struct compunit_symtab *cust;
167e4384 8644 struct symtab_and_line stop_func_sal;
b2175913 8645
7e324e48
GB
8646 fill_in_stop_func (gdbarch, ecs);
8647
1edb66d8 8648 cust = find_pc_compunit_symtab (ecs->event_thread->stop_pc ());
03acd4d8 8649 if (cust != nullptr && cust->language () != language_asm)
46a62268
YQ
8650 ecs->stop_func_start
8651 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
b2175913 8652
1edb66d8 8653 stop_func_sal = find_pc_line (ecs->event_thread->stop_pc (), 0);
b2175913
MS
8654
8655 /* OK, we're just going to keep stepping here. */
1edb66d8 8656 if (stop_func_sal.pc == ecs->event_thread->stop_pc ())
b2175913
MS
8657 {
8658 /* We're there already. Just stop stepping now. */
bdc36728 8659 end_stepping_range (ecs);
b2175913
MS
8660 }
8661 else
8662 {
8663 /* Else just reset the step range and keep going.
8664 No step-resume breakpoint, they don't work for
8665 epilogues, which can have multiple entry paths. */
16c381f0
JK
8666 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
8667 ecs->event_thread->control.step_range_end = stop_func_sal.end;
b2175913
MS
8668 keep_going (ecs);
8669 }
8670 return;
8671}
8672
d3169d93 8673/* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
44cbf7b5
AC
 8674   This is used both for stepping into or over functions and for
   skipping over code.  */
8675
8676static void
2c03e5be
PA
8677insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
8678 struct symtab_and_line sr_sal,
8679 struct frame_id sr_id,
8680 enum bptype sr_type)
44cbf7b5 8681{
611c83ae
PA
8682 /* There should never be more than one step-resume or longjmp-resume
8683 breakpoint per thread, so we should never be setting a new
44cbf7b5 8684 step_resume_breakpoint when one is already active. */
03acd4d8 8685 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == nullptr);
2c03e5be 8686 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
d3169d93 8687
1eb8556f
SM
8688 infrun_debug_printf ("inserting step-resume breakpoint at %s",
8689 paddress (gdbarch, sr_sal.pc));
d3169d93 8690
8358c15c 8691 inferior_thread ()->control.step_resume_breakpoint
454dafbd 8692 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type).release ();
2c03e5be
PA
8693}
8694
9da8c2a0 8695void
2c03e5be
PA
8696insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
8697 struct symtab_and_line sr_sal,
8698 struct frame_id sr_id)
8699{
8700 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
8701 sr_sal, sr_id,
8702 bp_step_resume);
44cbf7b5 8703}
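
/* A typical call, as used by the stepping code earlier in this file
   (sketch only):

     symtab_and_line sr_sal;
     sr_sal.pc = pc_after_resolver;
     sr_sal.pspace = get_frame_program_space (frame);
     insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);

   Passing null_frame_id means the breakpoint is not tied to one
   particular frame.  */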
7ce450bd 8704
2c03e5be
PA
8705/* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
8706 This is used to skip a potential signal handler.
7ce450bd 8707
14e60db5
DJ
8708 This is called with the interrupted function's frame. The signal
8709 handler, when it returns, will resume the interrupted function at
8710 RETURN_FRAME.pc. */
d303a6c7
AC
8711
8712static void
bd2b40ac 8713insert_hp_step_resume_breakpoint_at_frame (frame_info_ptr return_frame)
d303a6c7 8714{
03acd4d8 8715 gdb_assert (return_frame != nullptr);
d303a6c7 8716
51abb421
PA
8717 struct gdbarch *gdbarch = get_frame_arch (return_frame);
8718
8719 symtab_and_line sr_sal;
568d6575 8720 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
d303a6c7 8721 sr_sal.section = find_pc_overlay (sr_sal.pc);
6c95b8df 8722 sr_sal.pspace = get_frame_program_space (return_frame);
d303a6c7 8723
2c03e5be
PA
8724 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
8725 get_stack_frame_id (return_frame),
8726 bp_hp_step_resume);
d303a6c7
AC
8727}
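
/* This is the breakpoint planted by the signal-handling code earlier in
   this file ("signal may take us out of single-step range"): the
   interrupted PC is where the handler will eventually return, so a
   high-priority step-resume breakpoint there lets stepping pick up
   where it left off.  */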
8728
2c03e5be
PA
8729/* Insert a "step-resume breakpoint" at the previous frame's PC. This
8730 is used to skip a function after stepping into it (for "next" or if
8731 the called function has no debugging information).
14e60db5
DJ
8732
8733 The current function has almost always been reached by single
8734 stepping a call or return instruction. NEXT_FRAME belongs to the
8735 current function, and the breakpoint will be set at the caller's
8736 resume address.
8737
8738 This is a separate function rather than reusing
2c03e5be 8739 insert_hp_step_resume_breakpoint_at_frame in order to avoid
14e60db5 8740 get_prev_frame, which may stop prematurely (see the implementation
c7ce8faa 8741 of frame_unwind_caller_id for an example). */
14e60db5
DJ
8742
8743static void
bd2b40ac 8744insert_step_resume_breakpoint_at_caller (frame_info_ptr next_frame)
14e60db5 8745{
14e60db5
DJ
8746 /* We shouldn't have gotten here if we don't know where the call site
8747 is. */
c7ce8faa 8748 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
14e60db5 8749
51abb421 8750 struct gdbarch *gdbarch = frame_unwind_caller_arch (next_frame);
14e60db5 8751
51abb421 8752 symtab_and_line sr_sal;
c7ce8faa
DJ
8753 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
8754 frame_unwind_caller_pc (next_frame));
14e60db5 8755 sr_sal.section = find_pc_overlay (sr_sal.pc);
6c95b8df 8756 sr_sal.pspace = frame_unwind_program_space (next_frame);
14e60db5 8757
a6d9a66e 8758 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
c7ce8faa 8759 frame_unwind_caller_id (next_frame));
14e60db5
DJ
8760}
8761
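/* For orientation: a sketch of the typical caller-side pattern for the
   helper above.  When a "step"/"next" lands in a function that should be
   skipped (for example one without debug info), the stepping code plants
   the breakpoint at the caller's resume address and lets the thread run.
   This is only an illustration of the flow (compare the stepping logic
   elsewhere in this file), not a new code path.  */
#if 0
static void
example_skip_undebuggable_function (struct execution_control_state *ecs)
{
  /* The thread just stepped into a function we do not want to stay in.
     Resume it and stop again once it returns to the caller.  */
  insert_step_resume_breakpoint_at_caller (get_current_frame ());
  keep_going (ecs);
}
#endif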
611c83ae
PA
8762/* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
8763 new breakpoint at the target of a jmp_buf. The handling of
8764 longjmp-resume uses the same mechanisms used for handling
8765 "step-resume" breakpoints. */
8766
8767static void
a6d9a66e 8768insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
611c83ae 8769{
e81a37f7
TT
8770 /* There should never be more than one longjmp-resume breakpoint per
8771 thread, so we should never be setting a new
611c83ae 8772 longjmp_resume_breakpoint when one is already active. */
03acd4d8 8773 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == nullptr);
611c83ae 8774
1eb8556f
SM
8775 infrun_debug_printf ("inserting longjmp-resume breakpoint at %s",
8776 paddress (gdbarch, pc));
611c83ae 8777
e81a37f7 8778 inferior_thread ()->control.exception_resume_breakpoint =
454dafbd 8779 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume).release ();
611c83ae
PA
8780}
8781
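/* For orientation: a sketch of how the breakpoint above is typically
   planted.  The target PC is extracted from the jmp_buf by the
   architecture hook; gdbarch_get_longjmp_target and the surrounding
   condition are assumptions of this sketch, not code copied from the
   longjmp handling itself.  */
#if 0
static void
example_handle_longjmp (struct gdbarch *gdbarch, frame_info_ptr frame)
{
  CORE_ADDR jmp_buf_pc;

  /* Ask the architecture where this longjmp will land, then arrange to
     stop there.  */
  if (gdbarch_get_longjmp_target_p (gdbarch)
      && gdbarch_get_longjmp_target (gdbarch, frame, &jmp_buf_pc))
    insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
}
#endif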
186c406b
TT
8782/* Insert an exception resume breakpoint. TP is the thread throwing
8783 the exception. The block B is the block of the unwinder debug hook
8784 function. FRAME is the frame corresponding to the call to this
8785 function. SYM is the symbol of the function argument holding the
8786 target PC of the exception. */
8787
8788static void
8789insert_exception_resume_breakpoint (struct thread_info *tp,
3977b71f 8790 const struct block *b,
bd2b40ac 8791 frame_info_ptr frame,
186c406b
TT
8792 struct symbol *sym)
8793{
a70b8144 8794 try
186c406b 8795 {
63e43d3a 8796 struct block_symbol vsym;
186c406b
TT
8797 struct value *value;
8798 CORE_ADDR handler;
8799 struct breakpoint *bp;
8800
987012b8 8801 vsym = lookup_symbol_search_name (sym->search_name (),
de63c46b 8802 b, VAR_DOMAIN);
63e43d3a 8803 value = read_var_value (vsym.symbol, vsym.block, frame);
186c406b 8804 /* If the value was optimized out, revert to the old behavior. */
d00664db 8805 if (! value->optimized_out ())
186c406b
TT
8806 {
8807 handler = value_as_address (value);
8808
1eb8556f
SM
8809 infrun_debug_printf ("exception resume at %lx",
8810 (unsigned long) handler);
186c406b 8811
60a13bbc
AB
8812 /* set_momentary_breakpoint_at_pc creates a thread-specific
8813 breakpoint for the current inferior thread. */
8814 gdb_assert (tp == inferior_thread ());
186c406b 8815 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
454dafbd
TT
8816 handler,
8817 bp_exception_resume).release ();
c70a6932
JK
8818
8819 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
03acd4d8 8820 frame = nullptr;
c70a6932 8821
60a13bbc 8822 tp->control.exception_resume_breakpoint = bp;
186c406b
TT
8823 }
8824 }
230d2906 8825 catch (const gdb_exception_error &e)
492d29ea
PA
8826 {
8827 /* We want to ignore errors here. */
8828 }
186c406b
TT
8829}
8830
28106bc2
SDJ
8831/* A helper for check_exception_resume that sets an
8832 exception-breakpoint based on a SystemTap probe. */
8833
8834static void
8835insert_exception_resume_from_probe (struct thread_info *tp,
729662a5 8836 const struct bound_probe *probe,
bd2b40ac 8837 frame_info_ptr frame)
28106bc2
SDJ
8838{
8839 struct value *arg_value;
8840 CORE_ADDR handler;
8841 struct breakpoint *bp;
8842
8843 arg_value = probe_safe_evaluate_at_pc (frame, 1);
8844 if (!arg_value)
8845 return;
8846
8847 handler = value_as_address (arg_value);
8848
1eb8556f
SM
8849 infrun_debug_printf ("exception resume at %s",
8850 paddress (probe->objfile->arch (), handler));
28106bc2 8851
60a13bbc
AB
8852 /* set_momentary_breakpoint_at_pc creates a thread-specific breakpoint
8853 for the current inferior thread. */
8854 gdb_assert (tp == inferior_thread ());
28106bc2 8855 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
454dafbd 8856 handler, bp_exception_resume).release ();
60a13bbc 8857 tp->control.exception_resume_breakpoint = bp;
28106bc2
SDJ
8858}
8859
186c406b
TT
8860/* This is called when an exception has been intercepted. Check to
8861 see whether the exception's destination is of interest, and if so,
8862 set an exception resume breakpoint there. */
8863
8864static void
8865check_exception_resume (struct execution_control_state *ecs,
bd2b40ac 8866 frame_info_ptr frame)
186c406b 8867{
729662a5 8868 struct bound_probe probe;
28106bc2
SDJ
8869 struct symbol *func;
8870
8871 /* First see if this exception unwinding breakpoint was set via a
8872 SystemTap probe point. If so, the probe has two arguments: the
8873 CFA and the HANDLER. We ignore the CFA, extract the handler, and
8874 set a breakpoint there. */
6bac7473 8875 probe = find_probe_by_pc (get_frame_pc (frame));
935676c9 8876 if (probe.prob)
28106bc2 8877 {
729662a5 8878 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
28106bc2
SDJ
8879 return;
8880 }
8881
8882 func = get_frame_function (frame);
8883 if (!func)
8884 return;
186c406b 8885
a70b8144 8886 try
186c406b 8887 {
3977b71f 8888 const struct block *b;
186c406b
TT
8889 int argno = 0;
8890
8891 /* The exception breakpoint is a thread-specific breakpoint on
8892 the unwinder's debug hook, declared as:
8893
8894 void _Unwind_DebugHook (void *cfa, void *handler);
8895
8896 The CFA argument indicates the frame to which control is
8897 about to be transferred. HANDLER is the destination PC.
8898
8899 We ignore the CFA and set a temporary breakpoint at HANDLER.
8900 This is not extremely efficient but it avoids issues in gdb
8901 with computing the DWARF CFA, and it also works even in weird
8902 cases such as throwing an exception from inside a signal
8903 handler. */
8904
4aeddc50 8905 b = func->value_block ();
548a89df 8906 for (struct symbol *sym : block_iterator_range (b))
186c406b 8907 {
d9743061 8908 if (!sym->is_argument ())
186c406b
TT
8909 continue;
8910
8911 if (argno == 0)
8912 ++argno;
8913 else
8914 {
8915 insert_exception_resume_breakpoint (ecs->event_thread,
8916 b, frame, sym);
8917 break;
8918 }
8919 }
8920 }
230d2906 8921 catch (const gdb_exception_error &e)
492d29ea
PA
8922 {
8923 }
186c406b
TT
8924}
8925
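/* For orientation only: the libgcc side of the mechanism described above
   is, as far as this sketch assumes, an empty, never-inlined function that
   the unwinder calls right before transferring control, so that GDB has
   something to put a breakpoint or probe on.  Only the signature comes
   from the comment above; the body here is a simplified assumption, not a
   copy of libgcc:

     void
     _Unwind_DebugHook (void *cfa, void *handler)
     {
       /* Intentionally empty: exists solely as a debugger hook.  */
     }
*/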
104c1213 8926static void
22bcd14b 8927stop_waiting (struct execution_control_state *ecs)
104c1213 8928{
1eb8556f 8929 infrun_debug_printf ("stop_waiting");
527159b7 8930
cd0fc7c3
SS
8931 /* Let callers know we don't want to wait for the inferior anymore. */
8932 ecs->wait_some_more = 0;
8933}
8934
4d9d9d04
PA
8935/* Like keep_going, but passes the signal to the inferior, even if the
8936 signal is set to nopass. */
d4f3574e
SS
8937
8938static void
4d9d9d04 8939keep_going_pass_signal (struct execution_control_state *ecs)
d4f3574e 8940{
d7e15655 8941 gdb_assert (ecs->event_thread->ptid == inferior_ptid);
7846f3aa 8942 gdb_assert (!ecs->event_thread->resumed ());
4d9d9d04 8943
d4f3574e 8944 /* Save the pc before execution, to compare with pc after stop. */
fb14de7b 8945 ecs->event_thread->prev_pc
fc75c28b 8946 = regcache_read_pc_protected (get_thread_regcache (ecs->event_thread));
d4f3574e 8947
4d9d9d04 8948 if (ecs->event_thread->control.trap_expected)
d4f3574e 8949 {
4d9d9d04
PA
8950 struct thread_info *tp = ecs->event_thread;
8951
1eb8556f
SM
8952 infrun_debug_printf ("%s has trap_expected set, "
8953 "resuming to collect trap",
0fab7955 8954 tp->ptid.to_string ().c_str ());
4d9d9d04 8955
a9ba6bae
PA
8956 /* We haven't yet gotten our trap, and either: intercepted a
8957 non-signal event (e.g., a fork); or took a signal which we
8958 are supposed to pass through to the inferior. Simply
8959 continue. */
1edb66d8 8960 resume (ecs->event_thread->stop_signal ());
d4f3574e 8961 }
372316f1
PA
8962 else if (step_over_info_valid_p ())
8963 {
8964 /* Another thread is stepping over a breakpoint in-line. If
8965 this thread needs a step-over too, queue the request. In
8966 either case, this resume must be deferred for later. */
8967 struct thread_info *tp = ecs->event_thread;
8968
8969 if (ecs->hit_singlestep_breakpoint
8970 || thread_still_needs_step_over (tp))
8971 {
1eb8556f
SM
8972 infrun_debug_printf ("step-over already in progress: "
8973 "step-over for %s deferred",
0fab7955 8974 tp->ptid.to_string ().c_str ());
28d5518b 8975 global_thread_step_over_chain_enqueue (tp);
372316f1
PA
8976 }
8977 else
0fab7955
SM
8978 infrun_debug_printf ("step-over in progress: resume of %s deferred",
8979 tp->ptid.to_string ().c_str ());
372316f1 8980 }
d4f3574e
SS
8981 else
8982 {
9c742269 8983 regcache *regcache = get_thread_regcache (ecs->event_thread);
963f9c80
PA
8984 int remove_bp;
8985 int remove_wps;
8d297bbf 8986 step_over_what step_what;
31e77af2 8987
d4f3574e 8988 /* Either the trap was not expected, but we are continuing
a9ba6bae
PA
8989 anyway (if we got a signal, the user asked it be passed to
8990 the child)
8991 -- or --
8992 We got our expected trap, but decided we should resume from
8993 it.
d4f3574e 8994
a9ba6bae 8995 We're going to run this baby now!
d4f3574e 8996
c36b740a
VP
8997 Note that insert_breakpoints won't try to re-insert
8998 already inserted breakpoints. Therefore, we don't
8999 care if breakpoints were already inserted, or not. */
a9ba6bae 9000
31e77af2
PA
9001 /* If we need to step over a breakpoint, and we're not using
9002 displaced stepping to do so, insert all breakpoints
9003 (watchpoints, etc.) but the one we're stepping over, step one
9004 instruction, and then re-insert the breakpoint when that step
9005 is finished. */
963f9c80 9006
6c4cfb24
PA
9007 step_what = thread_still_needs_step_over (ecs->event_thread);
9008
963f9c80 9009 remove_bp = (ecs->hit_singlestep_breakpoint
6c4cfb24
PA
9010 || (step_what & STEP_OVER_BREAKPOINT));
9011 remove_wps = (step_what & STEP_OVER_WATCHPOINT);
963f9c80 9012
cb71640d
PA
9013 /* We can't use displaced stepping if we need to step past a
9014 watchpoint. The instruction copied to the scratch pad would
9015 still trigger the watchpoint. */
9016 if (remove_bp
3fc8eb30 9017 && (remove_wps || !use_displaced_stepping (ecs->event_thread)))
45e8c884 9018 {
f9582a22 9019 set_step_over_info (ecs->event_thread->inf->aspace.get (),
21edc42f
YQ
9020 regcache_read_pc (regcache), remove_wps,
9021 ecs->event_thread->global_num);
45e8c884 9022 }
963f9c80 9023 else if (remove_wps)
03acd4d8 9024 set_step_over_info (nullptr, 0, remove_wps, -1);
372316f1
PA
9025
9026 /* If we now need to do an in-line step-over, we need to stop
9027 all other threads. Note this must be done before
9028 insert_breakpoints below, because that removes the breakpoint
9029 we're about to step over, otherwise other threads could miss
9030 it. */
fbea99ea 9031 if (step_over_info_valid_p () && target_is_non_stop_p ())
4f5539f0 9032 stop_all_threads ("starting in-line step-over");
abbb1732 9033
31e77af2 9034 /* Stop stepping if inserting breakpoints fails. */
a70b8144 9035 try
31e77af2
PA
9036 {
9037 insert_breakpoints ();
9038 }
230d2906 9039 catch (const gdb_exception_error &e)
31e77af2
PA
9040 {
9041 exception_print (gdb_stderr, e);
22bcd14b 9042 stop_waiting (ecs);
bdf2a94a 9043 clear_step_over_info ();
31e77af2 9044 return;
d4f3574e
SS
9045 }
9046
963f9c80 9047 ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);
d4f3574e 9048
1edb66d8 9049 resume (ecs->event_thread->stop_signal ());
d4f3574e
SS
9050 }
9051
488f131b 9052 prepare_to_wait (ecs);
d4f3574e
SS
9053}
9054
4d9d9d04
PA
9055/* Called when we should continue running the inferior, because the
9056 current event doesn't cause a user visible stop. This does the
9057 resuming part; waiting for the next event is done elsewhere. */
9058
9059static void
9060keep_going (struct execution_control_state *ecs)
9061{
9062 if (ecs->event_thread->control.trap_expected
1edb66d8 9063 && ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
4d9d9d04
PA
9064 ecs->event_thread->control.trap_expected = 0;
9065
1edb66d8
SM
9066 if (!signal_program[ecs->event_thread->stop_signal ()])
9067 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
4d9d9d04
PA
9068 keep_going_pass_signal (ecs);
9069}
9070
104c1213
JM
9071/* This function normally comes after a resume, before
9072 handle_inferior_event exits. It takes care of any last bits of
9073 housekeeping, and sets the all-important wait_some_more flag. */
cd0fc7c3 9074
104c1213
JM
9075static void
9076prepare_to_wait (struct execution_control_state *ecs)
cd0fc7c3 9077{
1eb8556f 9078 infrun_debug_printf ("prepare_to_wait");
104c1213 9079
104c1213 9080 ecs->wait_some_more = 1;
0b333c5e 9081
42bd97a6
PA
9082 /* If the target can't async, emulate it by marking the infrun event
9083 handler such that as soon as we get back to the event-loop, we
9084 immediately end up in fetch_inferior_event again calling
9085 target_wait. */
9086 if (!target_can_async_p ())
0b333c5e 9087 mark_infrun_async_event_handler ();
c906108c 9088}
11cf8741 9089
fd664c91 9090/* We are done with the step range of a step/next/si/ni command.
b57bacec 9091 Called once for each n of a "step n" operation. */
fd664c91
PA
9092
9093static void
bdc36728 9094end_stepping_range (struct execution_control_state *ecs)
fd664c91 9095{
bdc36728 9096 ecs->event_thread->control.stop_step = 1;
bdc36728 9097 stop_waiting (ecs);
fd664c91
PA
9098}
9099
33d62d64
JK
9100/* Several print_*_reason functions to print why the inferior has stopped.
9101 We always print something when the inferior exits, or receives a signal.
9102 The rest of the cases are dealt with later on in normal_stop and
9103 print_it_typical. Ideally there should be a call to one of these
 9104 print_*_reason functions from handle_inferior_event each time
22bcd14b 9105 stop_waiting is called.
33d62d64 9106
fd664c91
PA
9107 Note that we don't call these directly, instead we delegate that to
9108 the interpreters, through observers. Interpreters then call these
9109 with whatever uiout is right. */
33d62d64 9110
fd664c91
PA
9111void
9112print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
11cf8741 9113{
33d62d64 9114 annotate_signalled ();
112e8700
SM
9115 if (uiout->is_mi_like_p ())
9116 uiout->field_string
9117 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
9118 uiout->text ("\nProgram terminated with signal ");
33d62d64 9119 annotate_signal_name ();
112e8700 9120 uiout->field_string ("signal-name",
2ea28649 9121 gdb_signal_to_name (siggnal));
33d62d64 9122 annotate_signal_name_end ();
112e8700 9123 uiout->text (", ");
33d62d64 9124 annotate_signal_string ();
112e8700 9125 uiout->field_string ("signal-meaning",
2ea28649 9126 gdb_signal_to_string (siggnal));
33d62d64 9127 annotate_signal_string_end ();
112e8700
SM
9128 uiout->text (".\n");
9129 uiout->text ("The program no longer exists.\n");
33d62d64
JK
9130}
9131
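/* Example of the CLI output produced above (SIGSEGV chosen arbitrarily):

     Program terminated with signal SIGSEGV, Segmentation fault.
     The program no longer exists.

   MI consumers additionally receive a "reason" field ("exited-signalled")
   ahead of the same text.  */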
fd664c91
PA
9132void
9133print_exited_reason (struct ui_out *uiout, int exitstatus)
33d62d64 9134{
fda326dd 9135 struct inferior *inf = current_inferior ();
a068643d 9136 std::string pidstr = target_pid_to_str (ptid_t (inf->pid));
fda326dd 9137
33d62d64
JK
9138 annotate_exited (exitstatus);
9139 if (exitstatus)
9140 {
112e8700
SM
9141 if (uiout->is_mi_like_p ())
9142 uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED));
6a831f06
PA
9143 std::string exit_code_str
9144 = string_printf ("0%o", (unsigned int) exitstatus);
9145 uiout->message ("[Inferior %s (%s) exited with code %pF]\n",
9146 plongest (inf->num), pidstr.c_str (),
9147 string_field ("exit-code", exit_code_str.c_str ()));
33d62d64
JK
9148 }
9149 else
11cf8741 9150 {
112e8700
SM
9151 if (uiout->is_mi_like_p ())
9152 uiout->field_string
9153 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
6a831f06
PA
9154 uiout->message ("[Inferior %s (%s) exited normally]\n",
9155 plongest (inf->num), pidstr.c_str ());
33d62d64 9156 }
33d62d64
JK
9157}
9158
fd664c91
PA
9159void
9160print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
33d62d64 9161{
f303dbd6
PA
9162 struct thread_info *thr = inferior_thread ();
9163
bb079752
AB
9164 infrun_debug_printf ("signal = %s", gdb_signal_to_string (siggnal));
9165
33d62d64
JK
9166 annotate_signal ();
9167
112e8700 9168 if (uiout->is_mi_like_p ())
f303dbd6
PA
9169 ;
9170 else if (show_thread_that_caused_stop ())
33d62d64 9171 {
112e8700 9172 uiout->text ("\nThread ");
33eca680 9173 uiout->field_string ("thread-id", print_thread_id (thr));
f303dbd6 9174
25558938 9175 const char *name = thread_name (thr);
03acd4d8 9176 if (name != nullptr)
f303dbd6 9177 {
112e8700 9178 uiout->text (" \"");
33eca680 9179 uiout->field_string ("name", name);
112e8700 9180 uiout->text ("\"");
f303dbd6 9181 }
33d62d64 9182 }
f303dbd6 9183 else
112e8700 9184 uiout->text ("\nProgram");
f303dbd6 9185
112e8700
SM
9186 if (siggnal == GDB_SIGNAL_0 && !uiout->is_mi_like_p ())
9187 uiout->text (" stopped");
33d62d64
JK
9188 else
9189 {
112e8700 9190 uiout->text (" received signal ");
8b93c638 9191 annotate_signal_name ();
112e8700
SM
9192 if (uiout->is_mi_like_p ())
9193 uiout->field_string
9194 ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
9195 uiout->field_string ("signal-name", gdb_signal_to_name (siggnal));
8b93c638 9196 annotate_signal_name_end ();
112e8700 9197 uiout->text (", ");
8b93c638 9198 annotate_signal_string ();
112e8700 9199 uiout->field_string ("signal-meaning", gdb_signal_to_string (siggnal));
012b3a21 9200
9c742269 9201 regcache *regcache = get_thread_regcache (thr);
272bb05c
JB
9202 struct gdbarch *gdbarch = regcache->arch ();
9203 if (gdbarch_report_signal_info_p (gdbarch))
9204 gdbarch_report_signal_info (gdbarch, uiout, siggnal);
9205
8b93c638 9206 annotate_signal_string_end ();
33d62d64 9207 }
112e8700 9208 uiout->text (".\n");
33d62d64 9209}
252fbfc8 9210
fd664c91
PA
9211void
9212print_no_history_reason (struct ui_out *uiout)
33d62d64 9213{
37f54063
BL
9214 if (uiout->is_mi_like_p ())
9215 uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_NO_HISTORY));
9216 else
9217 uiout->text ("\nNo more reverse-execution history.\n");
11cf8741 9218}
43ff13b4 9219
0c7e1a46
PA
9220/* Print current location without a level number, if we have changed
9221 functions or hit a breakpoint. Print source line if we have one.
9222 bpstat_print contains the logic deciding in detail what to print,
9223 based on the event(s) that just occurred. */
9224
243a9253 9225static void
c272a98c 9226print_stop_location (const target_waitstatus &ws)
0c7e1a46
PA
9227{
9228 int bpstat_ret;
f486487f 9229 enum print_what source_flag;
0c7e1a46
PA
9230 int do_frame_printing = 1;
9231 struct thread_info *tp = inferior_thread ();
9232
c272a98c 9233 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws.kind ());
0c7e1a46
PA
9234 switch (bpstat_ret)
9235 {
9236 case PRINT_UNKNOWN:
9237 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
9238 should) carry around the function and does (or should) use
9239 that when doing a frame comparison. */
9240 if (tp->control.stop_step
a0cbd650
TT
9241 && (tp->control.step_frame_id
9242 == get_frame_id (get_current_frame ()))
f2ffa92b 9243 && (tp->control.step_start_function
1edb66d8 9244 == find_pc_function (tp->stop_pc ())))
0c7e1a46
PA
9245 {
9246 /* Finished step, just print source line. */
9247 source_flag = SRC_LINE;
9248 }
9249 else
9250 {
9251 /* Print location and source line. */
9252 source_flag = SRC_AND_LOC;
9253 }
9254 break;
9255 case PRINT_SRC_AND_LOC:
9256 /* Print location and source line. */
9257 source_flag = SRC_AND_LOC;
9258 break;
9259 case PRINT_SRC_ONLY:
9260 source_flag = SRC_LINE;
9261 break;
9262 case PRINT_NOTHING:
9263 /* Something bogus. */
9264 source_flag = SRC_LINE;
9265 do_frame_printing = 0;
9266 break;
9267 default:
f34652de 9268 internal_error (_("Unknown value."));
0c7e1a46
PA
9269 }
9270
9271 /* The behavior of this routine with respect to the source
9272 flag is:
9273 SRC_LINE: Print only source line
9274 LOCATION: Print only location
9275 SRC_AND_LOC: Print location and source line. */
9276 if (do_frame_printing)
03acd4d8 9277 print_stack_frame (get_selected_frame (nullptr), 0, source_flag, 1);
243a9253
PA
9278}
9279
243a9253
PA
9280/* See infrun.h. */
9281
9282void
4c7d57e7 9283print_stop_event (struct ui_out *uiout, bool displays)
243a9253 9284{
243a9253 9285 struct target_waitstatus last;
243a9253
PA
9286 struct thread_info *tp;
9287
5b6d1e4f 9288 get_last_target_status (nullptr, nullptr, &last);
243a9253 9289
67ad9399
TT
9290 {
9291 scoped_restore save_uiout = make_scoped_restore (&current_uiout, uiout);
0c7e1a46 9292
c272a98c 9293 print_stop_location (last);
243a9253 9294
67ad9399 9295 /* Display the auto-display expressions. */
4c7d57e7
TT
9296 if (displays)
9297 do_displays ();
67ad9399 9298 }
243a9253
PA
9299
9300 tp = inferior_thread ();
573269a8
LS
9301 if (tp->thread_fsm () != nullptr
9302 && tp->thread_fsm ()->finished_p ())
243a9253
PA
9303 {
9304 struct return_value_info *rv;
9305
573269a8
LS
9306 rv = tp->thread_fsm ()->return_value ();
9307 if (rv != nullptr)
243a9253
PA
9308 print_return_value (uiout, rv);
9309 }
0c7e1a46
PA
9310}
9311
388a7084
PA
9312/* See infrun.h. */
9313
9314void
9315maybe_remove_breakpoints (void)
9316{
55f6301a 9317 if (!breakpoints_should_be_inserted_now () && target_has_execution ())
388a7084
PA
9318 {
9319 if (remove_breakpoints ())
9320 {
223ffa71 9321 target_terminal::ours_for_output ();
6cb06a8c
TT
9322 gdb_printf (_("Cannot remove breakpoints because "
9323 "program is no longer writable.\nFurther "
9324 "execution is probably impossible.\n"));
388a7084
PA
9325 }
9326 }
9327}
9328
4c2f2a79
PA
9329/* The execution context that just caused a normal stop. */
9330
9331struct stop_context
9332{
2d844eaf 9333 stop_context ();
2d844eaf
TT
9334
9335 DISABLE_COPY_AND_ASSIGN (stop_context);
9336
9337 bool changed () const;
9338
4c2f2a79
PA
9339 /* The stop ID. */
9340 ULONGEST stop_id;
c906108c 9341
4c2f2a79 9342 /* The event PTID. */
c906108c 9343
4c2f2a79
PA
9344 ptid_t ptid;
9345
 9346 /* If stopped for a thread event, this is the thread that caused the
9347 stop. */
d634cd0b 9348 thread_info_ref thread;
4c2f2a79
PA
9349
9350 /* The inferior that caused the stop. */
9351 int inf_num;
9352};
9353
2d844eaf 9354/* Initializes a new stop context. If stopped for a thread event, this
4c2f2a79
PA
9355 takes a strong reference to the thread. */
9356
2d844eaf 9357stop_context::stop_context ()
4c2f2a79 9358{
2d844eaf
TT
9359 stop_id = get_stop_id ();
9360 ptid = inferior_ptid;
9361 inf_num = current_inferior ()->num;
4c2f2a79 9362
d7e15655 9363 if (inferior_ptid != null_ptid)
4c2f2a79
PA
9364 {
9365 /* Take a strong reference so that the thread can't be deleted
9366 yet. */
d634cd0b 9367 thread = thread_info_ref::new_reference (inferior_thread ());
4c2f2a79 9368 }
4c2f2a79
PA
9369}
9370
9371/* Return true if the current context no longer matches the saved stop
9372 context. */
9373
2d844eaf
TT
9374bool
9375stop_context::changed () const
9376{
9377 if (ptid != inferior_ptid)
9378 return true;
9379 if (inf_num != current_inferior ()->num)
9380 return true;
03acd4d8 9381 if (thread != nullptr && thread->state != THREAD_STOPPED)
2d844eaf
TT
9382 return true;
9383 if (get_stop_id () != stop_id)
9384 return true;
9385 return false;
4c2f2a79
PA
9386}
9387
9388/* See infrun.h. */
9389
8dd08de7
AB
9390bool
9391normal_stop ()
c906108c 9392{
73b65bb0 9393 struct target_waitstatus last;
73b65bb0 9394
5b6d1e4f 9395 get_last_target_status (nullptr, nullptr, &last);
73b65bb0 9396
4c2f2a79
PA
9397 new_stop_id ();
9398
29f49a6a
PA
9399 /* If an exception is thrown from this point on, make sure to
9400 propagate GDB's knowledge of the executing state to the
9401 frontend/user running state. A QUIT is an easy exception to see
9402 here, so do this before any filtered output. */
731f534f 9403
5b6d1e4f 9404 ptid_t finish_ptid = null_ptid;
731f534f 9405
c35b1492 9406 if (!non_stop)
5b6d1e4f 9407 finish_ptid = minus_one_ptid;
183be222
SM
9408 else if (last.kind () == TARGET_WAITKIND_SIGNALLED
9409 || last.kind () == TARGET_WAITKIND_EXITED)
e1316e60
PA
9410 {
9411 /* On some targets, we may still have live threads in the
9412 inferior when we get a process exit event. E.g., for
9413 "checkpoint", when the current checkpoint/fork exits,
9414 linux-fork.c automatically switches to another fork from
9415 within target_mourn_inferior. */
731f534f 9416 if (inferior_ptid != null_ptid)
5b6d1e4f 9417 finish_ptid = ptid_t (inferior_ptid.pid ());
e1316e60 9418 }
9488c327
PA
9419 else if (last.kind () != TARGET_WAITKIND_NO_RESUMED
9420 && last.kind () != TARGET_WAITKIND_THREAD_EXITED)
5b6d1e4f
PA
9421 finish_ptid = inferior_ptid;
9422
6b09f134 9423 std::optional<scoped_finish_thread_state> maybe_finish_thread_state;
5b6d1e4f
PA
9424 if (finish_ptid != null_ptid)
9425 {
9426 maybe_finish_thread_state.emplace
9427 (user_visible_resume_target (finish_ptid), finish_ptid);
9428 }
29f49a6a 9429
b57bacec
PA
9430 /* As we're presenting a stop, and potentially removing breakpoints,
9431 update the thread list so we can tell whether there are threads
9432 running on the target. With target remote, for example, we can
9433 only learn about new threads when we explicitly update the thread
9434 list. Do this before notifying the interpreters about signal
9435 stops, end of stepping ranges, etc., so that the "new thread"
9436 output is emitted before e.g., "Program received signal FOO",
9437 instead of after. */
9438 update_thread_list ();
9439
183be222 9440 if (last.kind () == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
3f75a984 9441 notify_signal_received (inferior_thread ()->stop_signal ());
b57bacec 9442
c906108c
SS
9443 /* As with the notification of thread events, we want to delay
9444 notifying the user that we've switched thread context until
9445 the inferior actually stops.
9446
73b65bb0
DJ
9447 There's no point in saying anything if the inferior has exited.
9448 Note that SIGNALLED here means "exited with a signal", not
b65dc60b
PA
9449 "received a signal".
9450
9451 Also skip saying anything in non-stop mode. In that mode, as we
9452 don't want GDB to switch threads behind the user's back, to avoid
9453 races where the user is typing a command to apply to thread x,
9454 but GDB switches to thread y before the user finishes entering
9455 the command, fetch_inferior_event installs a cleanup to restore
9456 the current thread back to the thread the user had selected right
9457 after this event is handled, so we're not really switching, only
9458 informing of a stop. */
a81871f7 9459 if (!non_stop)
c906108c 9460 {
a81871f7
PA
9461 if ((last.kind () != TARGET_WAITKIND_SIGNALLED
9462 && last.kind () != TARGET_WAITKIND_EXITED
9488c327
PA
9463 && last.kind () != TARGET_WAITKIND_NO_RESUMED
9464 && last.kind () != TARGET_WAITKIND_THREAD_EXITED)
a81871f7
PA
9465 && target_has_execution ()
9466 && previous_thread != inferior_thread ())
3b12939d 9467 {
a81871f7
PA
9468 SWITCH_THRU_ALL_UIS ()
9469 {
9470 target_terminal::ours_for_output ();
9471 gdb_printf (_("[Switching to %s]\n"),
9472 target_pid_to_str (inferior_ptid).c_str ());
9473 annotate_thread_changed ();
9474 }
3b12939d 9475 }
a81871f7
PA
9476
9477 update_previous_thread ();
c906108c 9478 }
c906108c 9479
9488c327
PA
9480 if (last.kind () == TARGET_WAITKIND_NO_RESUMED
9481 || last.kind () == TARGET_WAITKIND_THREAD_EXITED)
0e5bf2a8 9482 {
21d48304
PA
9483 stop_print_frame = false;
9484
0e454242 9485 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
9486 if (current_ui->prompt_state == PROMPT_BLOCKED)
9487 {
223ffa71 9488 target_terminal::ours_for_output ();
9488c327
PA
9489 if (last.kind () == TARGET_WAITKIND_NO_RESUMED)
9490 gdb_printf (_("No unwaited-for children left.\n"));
9491 else if (last.kind () == TARGET_WAITKIND_THREAD_EXITED)
9492 gdb_printf (_("Command aborted, thread exited.\n"));
9493 else
9494 gdb_assert_not_reached ("unhandled");
3b12939d 9495 }
0e5bf2a8
PA
9496 }
9497
b57bacec 9498 /* Note: this depends on the update_thread_list call above. */
388a7084 9499 maybe_remove_breakpoints ();
c906108c 9500
c906108c
SS
9501 /* If an auto-display called a function and that got a signal,
9502 delete that auto-display to avoid an infinite recursion. */
9503
9504 if (stopped_by_random_signal)
9505 disable_current_display ();
9506
0e454242 9507 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
9508 {
9509 async_enable_stdin ();
9510 }
c906108c 9511
388a7084 9512 /* Let the user/frontend see the threads as stopped. */
731f534f 9513 maybe_finish_thread_state.reset ();
388a7084
PA
9514
9515 /* Select innermost stack frame - i.e., current frame is frame 0,
9516 and current location is based on that. Handle the case where the
9517 dummy call is returning after being stopped. E.g. the dummy call
9518 previously hit a breakpoint. (If the dummy call returns
9519 normally, we won't reach here.) Do this before the stop hook is
9520 run, so that it doesn't get to see the temporary dummy frame,
9521 which is not where we'll present the stop. */
9522 if (has_stack_frames ())
9523 {
9524 if (stop_stack_dummy == STOP_STACK_DUMMY)
9525 {
9526 /* Pop the empty frame that contains the stack dummy. This
9527 also restores inferior state prior to the call (struct
9528 infcall_suspend_state). */
bd2b40ac 9529 frame_info_ptr frame = get_current_frame ();
388a7084
PA
9530
9531 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
9532 frame_pop (frame);
9533 /* frame_pop calls reinit_frame_cache as the last thing it
9534 does which means there's now no selected frame. */
9535 }
9536
9537 select_frame (get_current_frame ());
9538
9539 /* Set the current source location. */
9540 set_current_sal_from_frame (get_current_frame ());
9541 }
dd7e2d2b
PA
9542
9543 /* Look up the hook_stop and run it (CLI internally handles problem
9544 of stop_command's pre-hook not existing). */
49a82d50 9545 stop_context saved_context;
4c2f2a79 9546
49a82d50
TT
9547 try
9548 {
9549 execute_cmd_pre_hook (stop_command);
4c2f2a79 9550 }
b1ffd112 9551 catch (const gdb_exception_error &ex)
49a82d50
TT
9552 {
9553 exception_fprintf (gdb_stderr, ex,
9554 "Error while running hook_stop:\n");
9555 }
9556
9557 /* If the stop hook resumes the target, then there's no point in
9558 trying to notify about the previous stop; its context is
9559 gone. Likewise if the command switches thread or inferior --
9560 the observers would print a stop for the wrong
9561 thread/inferior. */
9562 if (saved_context.changed ())
8dd08de7 9563 return true;
dd7e2d2b 9564
388a7084
PA
9565 /* Notify observers about the stop. This is where the interpreters
9566 print the stop event. */
87829267
SM
9567 notify_normal_stop ((inferior_ptid != null_ptid
9568 ? inferior_thread ()->control.stop_bpstat
9569 : nullptr),
9570 stop_print_frame);
243a9253
PA
9571 annotate_stopped ();
9572
55f6301a 9573 if (target_has_execution ())
48844aa6 9574 {
183be222
SM
9575 if (last.kind () != TARGET_WAITKIND_SIGNALLED
9576 && last.kind () != TARGET_WAITKIND_EXITED
9488c327
PA
9577 && last.kind () != TARGET_WAITKIND_NO_RESUMED
9578 && last.kind () != TARGET_WAITKIND_THREAD_EXITED)
48844aa6
PA
9579 /* Delete the breakpoint we stopped at, if it wants to be deleted.
9580 Delete any breakpoint that is to be deleted at the next stop. */
16c381f0 9581 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
94cc34af 9582 }
6c95b8df 9583
8dd08de7 9584 return false;
c906108c 9585}
c906108c 9586\f
c5aa993b 9587int
96baa820 9588signal_stop_state (int signo)
c906108c 9589{
d6b48e9c 9590 return signal_stop[signo];
c906108c
SS
9591}
9592
c5aa993b 9593int
96baa820 9594signal_print_state (int signo)
c906108c
SS
9595{
9596 return signal_print[signo];
9597}
9598
c5aa993b 9599int
96baa820 9600signal_pass_state (int signo)
c906108c
SS
9601{
9602 return signal_program[signo];
9603}
9604
2455069d
UW
9605static void
9606signal_cache_update (int signo)
9607{
9608 if (signo == -1)
9609 {
a493e3e2 9610 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
2455069d
UW
9611 signal_cache_update (signo);
9612
9613 return;
9614 }
9615
9616 signal_pass[signo] = (signal_stop[signo] == 0
9617 && signal_print[signo] == 0
ab04a2af
TT
9618 && signal_program[signo] == 1
9619 && signal_catch[signo] == 0);
2455069d
UW
9620}
9621
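/* For orientation: the effect of "handle SIGUSR1 nostop noprint pass"
   expressed through the update helpers defined below.  Illustration only;
   the real plumbing goes through handle_command further down.  */
#if 0
static void
example_pass_sigusr1_quietly (void)
{
  int signo = (int) GDB_SIGNAL_USR1;

  signal_stop_update (signo, 0);	/* nostop  */
  signal_print_update (signo, 0);	/* noprint  */
  signal_pass_update (signo, 1);	/* pass  */

  /* Each helper re-runs signal_cache_update, so signal_pass[signo] is now
     1 (assuming signal_catch[signo] is still 0) and target_pass_signals
     can let the inferior receive SIGUSR1 without reporting a stop.  */
}
#endif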
488f131b 9622int
7bda5e4a 9623signal_stop_update (int signo, int state)
d4f3574e
SS
9624{
9625 int ret = signal_stop[signo];
abbb1732 9626
d4f3574e 9627 signal_stop[signo] = state;
2455069d 9628 signal_cache_update (signo);
d4f3574e
SS
9629 return ret;
9630}
9631
488f131b 9632int
7bda5e4a 9633signal_print_update (int signo, int state)
d4f3574e
SS
9634{
9635 int ret = signal_print[signo];
abbb1732 9636
d4f3574e 9637 signal_print[signo] = state;
2455069d 9638 signal_cache_update (signo);
d4f3574e
SS
9639 return ret;
9640}
9641
488f131b 9642int
7bda5e4a 9643signal_pass_update (int signo, int state)
d4f3574e
SS
9644{
9645 int ret = signal_program[signo];
abbb1732 9646
d4f3574e 9647 signal_program[signo] = state;
2455069d 9648 signal_cache_update (signo);
d4f3574e
SS
9649 return ret;
9650}
9651
ab04a2af
TT
9652/* Update the global 'signal_catch' from INFO and notify the
9653 target. */
9654
9655void
9656signal_catch_update (const unsigned int *info)
9657{
9658 int i;
9659
9660 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
9661 signal_catch[i] = info[i] > 0;
9662 signal_cache_update (-1);
adc6a863 9663 target_pass_signals (signal_pass);
ab04a2af
TT
9664}
9665
c906108c 9666static void
96baa820 9667sig_print_header (void)
c906108c 9668{
6cb06a8c
TT
9669 gdb_printf (_("Signal Stop\tPrint\tPass "
9670 "to program\tDescription\n"));
c906108c
SS
9671}
9672
9673static void
2ea28649 9674sig_print_info (enum gdb_signal oursig)
c906108c 9675{
2ea28649 9676 const char *name = gdb_signal_to_name (oursig);
c906108c 9677 int name_padding = 13 - strlen (name);
96baa820 9678
c906108c
SS
9679 if (name_padding <= 0)
9680 name_padding = 0;
9681
6cb06a8c
TT
9682 gdb_printf ("%s", name);
9683 gdb_printf ("%*.*s ", name_padding, name_padding, " ");
9684 gdb_printf ("%s\t", signal_stop[oursig] ? "Yes" : "No");
9685 gdb_printf ("%s\t", signal_print[oursig] ? "Yes" : "No");
9686 gdb_printf ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
9687 gdb_printf ("%s\n", gdb_signal_to_string (oursig));
c906108c
SS
9688}
9689
9690/* Specify how various signals in the inferior should be handled. */
9691
9692static void
0b39b52e 9693handle_command (const char *args, int from_tty)
c906108c 9694{
c906108c 9695 int digits, wordlen;
b926417a 9696 int sigfirst, siglast;
2ea28649 9697 enum gdb_signal oursig;
c906108c 9698 int allsigs;
c906108c 9699
03acd4d8 9700 if (args == nullptr)
c906108c 9701 {
e2e0b3e5 9702 error_no_arg (_("signal to handle"));
c906108c
SS
9703 }
9704
1777feb0 9705 /* Allocate and zero an array of flags for which signals to handle. */
c906108c 9706
adc6a863
PA
9707 const size_t nsigs = GDB_SIGNAL_LAST;
9708 unsigned char sigs[nsigs] {};
c906108c 9709
1777feb0 9710 /* Break the command line up into args. */
c906108c 9711
773a1edc 9712 gdb_argv built_argv (args);
c906108c
SS
9713
9714 /* Walk through the args, looking for signal oursigs, signal names, and
9715 actions. Signal numbers and signal names may be interspersed with
9716 actions, with the actions being performed for all signals cumulatively
1777feb0 9717 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
c906108c 9718
773a1edc 9719 for (char *arg : built_argv)
c906108c 9720 {
773a1edc
TT
9721 wordlen = strlen (arg);
9722 for (digits = 0; isdigit (arg[digits]); digits++)
c906108c
SS
9723 {;
9724 }
9725 allsigs = 0;
9726 sigfirst = siglast = -1;
9727
773a1edc 9728 if (wordlen >= 1 && !strncmp (arg, "all", wordlen))
c906108c
SS
9729 {
9730 /* Apply action to all signals except those used by the
1777feb0 9731 debugger. Silently skip those. */
c906108c
SS
9732 allsigs = 1;
9733 sigfirst = 0;
9734 siglast = nsigs - 1;
9735 }
773a1edc 9736 else if (wordlen >= 1 && !strncmp (arg, "stop", wordlen))
c906108c
SS
9737 {
9738 SET_SIGS (nsigs, sigs, signal_stop);
9739 SET_SIGS (nsigs, sigs, signal_print);
9740 }
773a1edc 9741 else if (wordlen >= 1 && !strncmp (arg, "ignore", wordlen))
c906108c
SS
9742 {
9743 UNSET_SIGS (nsigs, sigs, signal_program);
9744 }
773a1edc 9745 else if (wordlen >= 2 && !strncmp (arg, "print", wordlen))
c906108c
SS
9746 {
9747 SET_SIGS (nsigs, sigs, signal_print);
9748 }
773a1edc 9749 else if (wordlen >= 2 && !strncmp (arg, "pass", wordlen))
c906108c
SS
9750 {
9751 SET_SIGS (nsigs, sigs, signal_program);
9752 }
773a1edc 9753 else if (wordlen >= 3 && !strncmp (arg, "nostop", wordlen))
c906108c
SS
9754 {
9755 UNSET_SIGS (nsigs, sigs, signal_stop);
9756 }
773a1edc 9757 else if (wordlen >= 3 && !strncmp (arg, "noignore", wordlen))
c906108c
SS
9758 {
9759 SET_SIGS (nsigs, sigs, signal_program);
9760 }
773a1edc 9761 else if (wordlen >= 4 && !strncmp (arg, "noprint", wordlen))
c906108c
SS
9762 {
9763 UNSET_SIGS (nsigs, sigs, signal_print);
9764 UNSET_SIGS (nsigs, sigs, signal_stop);
9765 }
773a1edc 9766 else if (wordlen >= 4 && !strncmp (arg, "nopass", wordlen))
c906108c
SS
9767 {
9768 UNSET_SIGS (nsigs, sigs, signal_program);
9769 }
9770 else if (digits > 0)
9771 {
9772 /* It is numeric. The numeric signal refers to our own
9773 internal signal numbering from target.h, not to host/target
9774 signal number. This is a feature; users really should be
9775 using symbolic names anyway, and the common ones like
9776 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
9777
9778 sigfirst = siglast = (int)
773a1edc
TT
9779 gdb_signal_from_command (atoi (arg));
9780 if (arg[digits] == '-')
c906108c
SS
9781 {
9782 siglast = (int)
773a1edc 9783 gdb_signal_from_command (atoi (arg + digits + 1));
c906108c
SS
9784 }
9785 if (sigfirst > siglast)
9786 {
1777feb0 9787 /* Bet he didn't figure we'd think of this case... */
b926417a 9788 std::swap (sigfirst, siglast);
c906108c
SS
9789 }
9790 }
9791 else
9792 {
773a1edc 9793 oursig = gdb_signal_from_name (arg);
a493e3e2 9794 if (oursig != GDB_SIGNAL_UNKNOWN)
c906108c
SS
9795 {
9796 sigfirst = siglast = (int) oursig;
9797 }
9798 else
9799 {
9800 /* Not a number and not a recognized flag word => complain. */
773a1edc 9801 error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg);
c906108c
SS
9802 }
9803 }
9804
9805 /* If any signal numbers or symbol names were found, set flags for
dda83cd7 9806 which signals to apply actions to. */
c906108c 9807
b926417a 9808 for (int signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
c906108c 9809 {
2ea28649 9810 switch ((enum gdb_signal) signum)
c906108c 9811 {
a493e3e2
PA
9812 case GDB_SIGNAL_TRAP:
9813 case GDB_SIGNAL_INT:
c906108c
SS
9814 if (!allsigs && !sigs[signum])
9815 {
9e2f0ad4 9816 if (query (_("%s is used by the debugger.\n\
3e43a32a 9817Are you sure you want to change it? "),
2ea28649 9818 gdb_signal_to_name ((enum gdb_signal) signum)))
c906108c
SS
9819 {
9820 sigs[signum] = 1;
9821 }
9822 else
6cb06a8c 9823 gdb_printf (_("Not confirmed, unchanged.\n"));
c906108c
SS
9824 }
9825 break;
a493e3e2
PA
9826 case GDB_SIGNAL_0:
9827 case GDB_SIGNAL_DEFAULT:
9828 case GDB_SIGNAL_UNKNOWN:
c906108c
SS
9829 /* Make sure that "all" doesn't print these. */
9830 break;
9831 default:
9832 sigs[signum] = 1;
9833 break;
9834 }
9835 }
c906108c
SS
9836 }
9837
b926417a 9838 for (int signum = 0; signum < nsigs; signum++)
3a031f65
PA
9839 if (sigs[signum])
9840 {
2455069d 9841 signal_cache_update (-1);
adc6a863
PA
9842 target_pass_signals (signal_pass);
9843 target_program_signals (signal_program);
c906108c 9844
3a031f65
PA
9845 if (from_tty)
9846 {
9847 /* Show the results. */
9848 sig_print_header ();
9849 for (; signum < nsigs; signum++)
9850 if (sigs[signum])
aead7601 9851 sig_print_info ((enum gdb_signal) signum);
3a031f65
PA
9852 }
9853
9854 break;
9855 }
c906108c
SS
9856}
9857
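/* Examples of the syntax accepted above:

     (gdb) handle SIGPIPE nostop noprint pass
     (gdb) handle SIGINT stop print      <- prompts, since GDB uses SIGINT
     (gdb) handle 5-10 nostop            <- numeric ranges use GDB's own
                                            1-15 numbering, not host signals
     (gdb) handle all print

   Signal names, numbers and action keywords may be interspersed; the
   accumulated actions apply to every signal mentioned on the line.  */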
de0bea00
MF
9858/* Complete the "handle" command. */
9859
eb3ff9a5 9860static void
de0bea00 9861handle_completer (struct cmd_list_element *ignore,
eb3ff9a5 9862 completion_tracker &tracker,
6f937416 9863 const char *text, const char *word)
de0bea00 9864{
de0bea00
MF
9865 static const char * const keywords[] =
9866 {
9867 "all",
9868 "stop",
9869 "ignore",
9870 "print",
9871 "pass",
9872 "nostop",
9873 "noignore",
9874 "noprint",
9875 "nopass",
03acd4d8 9876 nullptr,
de0bea00
MF
9877 };
9878
eb3ff9a5
PA
9879 signal_completer (ignore, tracker, text, word);
9880 complete_on_enum (tracker, keywords, word, word);
de0bea00
MF
9881}
9882
2ea28649
PA
9883enum gdb_signal
9884gdb_signal_from_command (int num)
ed01b82c
PA
9885{
9886 if (num >= 1 && num <= 15)
2ea28649 9887 return (enum gdb_signal) num;
ed01b82c
PA
9888 error (_("Only signals 1-15 are valid as numeric signals.\n\
9889Use \"info signals\" for a list of symbolic signals."));
9890}
9891
c906108c
SS
9892/* Print current contents of the tables set by the handle command.
9893 It is possible we should just be printing signals actually used
9894 by the current target (but for things to work right when switching
9895 targets, all signals should be in the signal tables). */
9896
9897static void
1d12d88f 9898info_signals_command (const char *signum_exp, int from_tty)
c906108c 9899{
2ea28649 9900 enum gdb_signal oursig;
abbb1732 9901
c906108c
SS
9902 sig_print_header ();
9903
9904 if (signum_exp)
9905 {
9906 /* First see if this is a symbol name. */
2ea28649 9907 oursig = gdb_signal_from_name (signum_exp);
a493e3e2 9908 if (oursig == GDB_SIGNAL_UNKNOWN)
c906108c
SS
9909 {
9910 /* No, try numeric. */
9911 oursig =
2ea28649 9912 gdb_signal_from_command (parse_and_eval_long (signum_exp));
c906108c
SS
9913 }
9914 sig_print_info (oursig);
9915 return;
9916 }
9917
6cb06a8c 9918 gdb_printf ("\n");
c906108c 9919 /* These ugly casts brought to you by the native VAX compiler. */
a493e3e2
PA
9920 for (oursig = GDB_SIGNAL_FIRST;
9921 (int) oursig < (int) GDB_SIGNAL_LAST;
2ea28649 9922 oursig = (enum gdb_signal) ((int) oursig + 1))
c906108c
SS
9923 {
9924 QUIT;
9925
a493e3e2
PA
9926 if (oursig != GDB_SIGNAL_UNKNOWN
9927 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
c906108c
SS
9928 sig_print_info (oursig);
9929 }
9930
6cb06a8c
TT
9931 gdb_printf (_("\nUse the \"handle\" command "
9932 "to change these tables.\n"));
c906108c 9933}
4aa995e1
PA
9934
9935/* The $_siginfo convenience variable is a bit special. We don't know
9936 for sure the type of the value until we actually have a chance to
7a9dd1b2 9937 fetch the data. The type can change depending on gdbarch, so it is
4aa995e1
PA
 9938 also dependent on which thread you have selected. Two mechanisms
 together make this work:
9939
9940 1. making $_siginfo be an internalvar that creates a new value on
9941 access.
9942
9943 2. making the value of $_siginfo be an lval_computed value. */
9944
9945/* This function implements the lval_computed support for reading a
9946 $_siginfo value. */
9947
9948static void
9949siginfo_value_read (struct value *v)
9950{
9951 LONGEST transferred;
9952
a911d87a
PA
9953 /* If we can access registers, so can we access $_siginfo. Likewise
9954 vice versa. */
9955 validate_registers_access ();
c709acd1 9956
4aa995e1 9957 transferred =
328d42d8
SM
9958 target_read (current_inferior ()->top_target (),
9959 TARGET_OBJECT_SIGNAL_INFO,
03acd4d8 9960 nullptr,
bbe912ba 9961 v->contents_all_raw ().data (),
76675c4d 9962 v->offset (),
d0c97917 9963 v->type ()->length ());
4aa995e1 9964
d0c97917 9965 if (transferred != v->type ()->length ())
4aa995e1
PA
9966 error (_("Unable to read siginfo"));
9967}
9968
9969/* This function implements the lval_computed support for writing a
9970 $_siginfo value. */
9971
9972static void
9973siginfo_value_write (struct value *v, struct value *fromval)
9974{
9975 LONGEST transferred;
9976
a911d87a
PA
9977 /* If we can access registers, so can we access $_siginfo. Likewise
9978 vice versa. */
9979 validate_registers_access ();
c709acd1 9980
328d42d8 9981 transferred = target_write (current_inferior ()->top_target (),
4aa995e1 9982 TARGET_OBJECT_SIGNAL_INFO,
03acd4d8 9983 nullptr,
bbe912ba 9984 fromval->contents_all_raw ().data (),
76675c4d 9985 v->offset (),
d0c97917 9986 fromval->type ()->length ());
4aa995e1 9987
d0c97917 9988 if (transferred != fromval->type ()->length ())
4aa995e1
PA
9989 error (_("Unable to write siginfo"));
9990}
9991
c8f2448a 9992static const struct lval_funcs siginfo_value_funcs =
4aa995e1
PA
9993 {
9994 siginfo_value_read,
9995 siginfo_value_write
9996 };
9997
9998/* Return a new value with the correct type for the siginfo object of
78267919
UW
9999 the current thread using architecture GDBARCH. Return a void value
10000 if there's no object available. */
4aa995e1 10001
2c0b251b 10002static struct value *
22d2b532
SDJ
10003siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
10004 void *ignore)
4aa995e1 10005{
841de120 10006 if (target_has_stack ()
d7e15655 10007 && inferior_ptid != null_ptid
78267919 10008 && gdbarch_get_siginfo_type_p (gdbarch))
4aa995e1 10009 {
78267919 10010 struct type *type = gdbarch_get_siginfo_type (gdbarch);
abbb1732 10011
b64e2602 10012 return value::allocate_computed (type, &siginfo_value_funcs, nullptr);
4aa995e1
PA
10013 }
10014
317c3ed9 10015 return value::allocate (builtin_type (gdbarch)->builtin_void);
4aa995e1
PA
10016}
10017
c906108c 10018\f
16c381f0
JK
10019/* infcall_suspend_state contains state about the program itself like its
10020 registers and any signal it received when it last stopped.
10021 This state must be restored regardless of how the inferior function call
10022 ends (either successfully, or after it hits a breakpoint or signal)
10023 if the program is to properly continue where it left off. */
10024
6bf78e29 10025class infcall_suspend_state
7a292a7a 10026{
6bf78e29
AB
10027public:
10028 /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
10029 once the inferior function call has finished. */
10030 infcall_suspend_state (struct gdbarch *gdbarch,
dda83cd7
SM
10031 const struct thread_info *tp,
10032 struct regcache *regcache)
1edb66d8 10033 : m_registers (new readonly_detached_regcache (*regcache))
6bf78e29 10034 {
1edb66d8
SM
10035 tp->save_suspend_to (m_thread_suspend);
10036
6bf78e29
AB
10037 gdb::unique_xmalloc_ptr<gdb_byte> siginfo_data;
10038
10039 if (gdbarch_get_siginfo_type_p (gdbarch))
10040 {
dda83cd7 10041 struct type *type = gdbarch_get_siginfo_type (gdbarch);
df86565b 10042 size_t len = type->length ();
6bf78e29 10043
dda83cd7 10044 siginfo_data.reset ((gdb_byte *) xmalloc (len));
6bf78e29 10045
328d42d8 10046 if (target_read (current_inferior ()->top_target (),
03acd4d8 10047 TARGET_OBJECT_SIGNAL_INFO, nullptr,
dda83cd7
SM
10048 siginfo_data.get (), 0, len) != len)
10049 {
10050 /* Errors ignored. */
10051 siginfo_data.reset (nullptr);
10052 }
6bf78e29
AB
10053 }
10054
10055 if (siginfo_data)
10056 {
dda83cd7
SM
10057 m_siginfo_gdbarch = gdbarch;
10058 m_siginfo_data = std::move (siginfo_data);
6bf78e29
AB
10059 }
10060 }
10061
10062 /* Return a pointer to the stored register state. */
16c381f0 10063
6bf78e29
AB
10064 readonly_detached_regcache *registers () const
10065 {
10066 return m_registers.get ();
10067 }
10068
10069 /* Restores the stored state into GDBARCH, TP, and REGCACHE. */
10070
10071 void restore (struct gdbarch *gdbarch,
dda83cd7
SM
10072 struct thread_info *tp,
10073 struct regcache *regcache) const
6bf78e29 10074 {
1edb66d8 10075 tp->restore_suspend_from (m_thread_suspend);
6bf78e29
AB
10076
10077 if (m_siginfo_gdbarch == gdbarch)
10078 {
dda83cd7 10079 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6bf78e29 10080
dda83cd7 10081 /* Errors ignored. */
328d42d8 10082 target_write (current_inferior ()->top_target (),
03acd4d8 10083 TARGET_OBJECT_SIGNAL_INFO, nullptr,
df86565b 10084 m_siginfo_data.get (), 0, type->length ());
6bf78e29
AB
10085 }
10086
10087 /* The inferior can be gone if the user types "print exit(0)"
10088 (and perhaps other times). */
55f6301a 10089 if (target_has_execution ())
6bf78e29
AB
10090 /* NB: The register write goes through to the target. */
10091 regcache->restore (registers ());
10092 }
10093
10094private:
10095 /* How the current thread stopped before the inferior function call was
10096 executed. */
10097 struct thread_suspend_state m_thread_suspend;
10098
10099 /* The registers before the inferior function call was executed. */
10100 std::unique_ptr<readonly_detached_regcache> m_registers;
1736ad11 10101
35515841 10102 /* Format of SIGINFO_DATA or NULL if it is not present. */
6bf78e29 10103 struct gdbarch *m_siginfo_gdbarch = nullptr;
1736ad11
JK
10104
10105 /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
df86565b 10106 gdbarch_get_siginfo_type ()->length (). For a different gdbarch the
1736ad11 10107 content would be invalid. */
6bf78e29 10108 gdb::unique_xmalloc_ptr<gdb_byte> m_siginfo_data;
b89667eb
DE
10109};
10110
cb524840
TT
10111infcall_suspend_state_up
10112save_infcall_suspend_state ()
b89667eb 10113{
b89667eb 10114 struct thread_info *tp = inferior_thread ();
9c742269 10115 regcache *regcache = get_thread_regcache (tp);
ac7936df 10116 struct gdbarch *gdbarch = regcache->arch ();
1736ad11 10117
6bf78e29
AB
10118 infcall_suspend_state_up inf_state
10119 (new struct infcall_suspend_state (gdbarch, tp, regcache));
1736ad11 10120
6bf78e29
AB
10121 /* Having saved the current state, adjust the thread state, discarding
10122 any stop signal information. The stop signal is not useful when
10123 starting an inferior function call, and run_inferior_call will not use
10124 the signal due to its `proceed' call with GDB_SIGNAL_0. */
1edb66d8 10125 tp->set_stop_signal (GDB_SIGNAL_0);
35515841 10126
b89667eb
DE
10127 return inf_state;
10128}
10129
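/* For orientation: the save/restore/discard pattern these helpers support
   around an inferior function call.  Real callers live in infcall.c; the
   condition below is a placeholder marking where each branch would be
   taken, not the actual policy.  */
#if 0
static void
example_wrap_inferior_call (void)
{
  infcall_suspend_state_up saved = save_infcall_suspend_state ();

  /* ... push a dummy frame and let the inferior run the call ...  */

  if (/* the caller's registers and stop state should come back */ 1)
    restore_infcall_suspend_state (saved.release ());
  else
    discard_infcall_suspend_state (saved.release ());
}
#endif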
10130/* Restore inferior session state to INF_STATE. */
10131
10132void
16c381f0 10133restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
b89667eb
DE
10134{
10135 struct thread_info *tp = inferior_thread ();
9c742269 10136 regcache *regcache = get_thread_regcache (inferior_thread ());
ac7936df 10137 struct gdbarch *gdbarch = regcache->arch ();
b89667eb 10138
6bf78e29 10139 inf_state->restore (gdbarch, tp, regcache);
16c381f0 10140 discard_infcall_suspend_state (inf_state);
b89667eb
DE
10141}
10142
b89667eb 10143void
16c381f0 10144discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
b89667eb 10145{
dd848631 10146 delete inf_state;
b89667eb
DE
10147}
10148
daf6667d 10149readonly_detached_regcache *
16c381f0 10150get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
b89667eb 10151{
6bf78e29 10152 return inf_state->registers ();
b89667eb
DE
10153}
10154
16c381f0
JK
10155/* infcall_control_state contains state regarding gdb's control of the
10156 inferior itself like stepping control. It also contains session state like
10157 the user's currently selected frame. */
b89667eb 10158
16c381f0 10159struct infcall_control_state
b89667eb 10160{
16c381f0
JK
10161 struct thread_control_state thread_control;
10162 struct inferior_control_state inferior_control;
d82142e2
JK
10163
10164 /* Other fields: */
ee841dd8
TT
10165 enum stop_stack_kind stop_stack_dummy = STOP_NONE;
10166 int stopped_by_random_signal = 0;
7a292a7a 10167
79952e69
PA
10168 /* ID and level of the selected frame when the inferior function
10169 call was made. */
ee841dd8 10170 struct frame_id selected_frame_id {};
79952e69 10171 int selected_frame_level = -1;
7a292a7a
SS
10172};
10173
c906108c 10174/* Save all of the information associated with the inferior<==>gdb
b89667eb 10175 connection. */
c906108c 10176
cb524840
TT
10177infcall_control_state_up
10178save_infcall_control_state ()
c906108c 10179{
cb524840 10180 infcall_control_state_up inf_status (new struct infcall_control_state);
4e1c45ea 10181 struct thread_info *tp = inferior_thread ();
d6b48e9c 10182 struct inferior *inf = current_inferior ();
7a292a7a 10183
16c381f0
JK
10184 inf_status->thread_control = tp->control;
10185 inf_status->inferior_control = inf->control;
d82142e2 10186
03acd4d8
CL
10187 tp->control.step_resume_breakpoint = nullptr;
10188 tp->control.exception_resume_breakpoint = nullptr;
8358c15c 10189
16c381f0
JK
10190 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
10191 chain. If caller's caller is walking the chain, they'll be happier if we
10192 hand them back the original chain when restore_infcall_control_state is
10193 called. */
10194 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
d82142e2
JK
10195
10196 /* Other fields: */
10197 inf_status->stop_stack_dummy = stop_stack_dummy;
10198 inf_status->stopped_by_random_signal = stopped_by_random_signal;
c5aa993b 10199
79952e69
PA
10200 save_selected_frame (&inf_status->selected_frame_id,
10201 &inf_status->selected_frame_level);
b89667eb 10202
7a292a7a 10203 return inf_status;
c906108c
SS
10204}
10205
b89667eb
DE
10206/* Restore inferior session state to INF_STATUS. */
10207
c906108c 10208void
16c381f0 10209restore_infcall_control_state (struct infcall_control_state *inf_status)
c906108c 10210{
4e1c45ea 10211 struct thread_info *tp = inferior_thread ();
d6b48e9c 10212 struct inferior *inf = current_inferior ();
4e1c45ea 10213
8358c15c
JK
10214 if (tp->control.step_resume_breakpoint)
10215 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
10216
5b79abe7
TT
10217 if (tp->control.exception_resume_breakpoint)
10218 tp->control.exception_resume_breakpoint->disposition
10219 = disp_del_at_next_stop;
10220
d82142e2 10221 /* Handle the bpstat_copy of the chain. */
16c381f0 10222 bpstat_clear (&tp->control.stop_bpstat);
d82142e2 10223
16c381f0
JK
10224 tp->control = inf_status->thread_control;
10225 inf->control = inf_status->inferior_control;
d82142e2
JK
10226
10227 /* Other fields: */
10228 stop_stack_dummy = inf_status->stop_stack_dummy;
10229 stopped_by_random_signal = inf_status->stopped_by_random_signal;
c906108c 10230
841de120 10231 if (target_has_stack ())
c906108c 10232 {
79952e69
PA
10233 restore_selected_frame (inf_status->selected_frame_id,
10234 inf_status->selected_frame_level);
c906108c 10235 }
c906108c 10236
ee841dd8 10237 delete inf_status;
7a292a7a 10238}
c906108c
SS
10239
10240void
16c381f0 10241discard_infcall_control_state (struct infcall_control_state *inf_status)
7a292a7a 10242{
8358c15c
JK
10243 if (inf_status->thread_control.step_resume_breakpoint)
10244 inf_status->thread_control.step_resume_breakpoint->disposition
10245 = disp_del_at_next_stop;
10246
5b79abe7
TT
10247 if (inf_status->thread_control.exception_resume_breakpoint)
10248 inf_status->thread_control.exception_resume_breakpoint->disposition
10249 = disp_del_at_next_stop;
10250
1777feb0 10251 /* See save_infcall_control_state for info on stop_bpstat. */
16c381f0 10252 bpstat_clear (&inf_status->thread_control.stop_bpstat);
8358c15c 10253
ee841dd8 10254 delete inf_status;
7a292a7a 10255}
b89667eb 10256\f
7f89fd65 10257/* See infrun.h. */
0c557179
SDJ
10258
10259void
10260clear_exit_convenience_vars (void)
10261{
10262 clear_internalvar (lookup_internalvar ("_exitsignal"));
10263 clear_internalvar (lookup_internalvar ("_exitcode"));
10264}
c5aa993b 10265\f
488f131b 10266
b2175913
MS
10267/* User interface for reverse debugging:
10268 Set exec-direction / show exec-direction commands
10269 (returns error unless target implements to_set_exec_direction method). */
10270
enum exec_direction_kind execution_direction = EXEC_FORWARD;
static const char exec_forward[] = "forward";
static const char exec_reverse[] = "reverse";
static const char *exec_direction = exec_forward;
static const char *const exec_direction_names[] = {
  exec_forward,
  exec_reverse,
  nullptr
};

static void
set_exec_direction_func (const char *args, int from_tty,
			 struct cmd_list_element *cmd)
{
  if (target_can_execute_reverse ())
    {
      if (!strcmp (exec_direction, exec_forward))
	execution_direction = EXEC_FORWARD;
      else if (!strcmp (exec_direction, exec_reverse))
	execution_direction = EXEC_REVERSE;
    }
  else
    {
      exec_direction = exec_forward;
      error (_("Target does not support this operation."));
    }
}

static void
show_exec_direction_func (struct ui_file *out, int from_tty,
			  struct cmd_list_element *cmd, const char *value)
{
  switch (execution_direction) {
  case EXEC_FORWARD:
    gdb_printf (out, _("Forward.\n"));
    break;
  case EXEC_REVERSE:
    gdb_printf (out, _("Reverse.\n"));
    break;
  default:
    internal_error (_("bogus execution_direction value: %d"),
		    (int) execution_direction);
  }
}

static void
show_schedule_multiple (struct ui_file *file, int from_tty,
			struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Resuming the execution of threads "
		      "of all processes is %s.\n"), value);
}

/* Implementation of `siginfo' variable.  */

static const struct internalvar_funcs siginfo_funcs =
{
  siginfo_make_value,
  nullptr,
};

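/* The lazily-created convenience variable backed by these functions is
   registered as "_siginfo" in _initialize_infrun below, so the user can
   inspect the current thread's signal information with, e.g.,
   "print $_siginfo".  */
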
/* Callback for infrun's target events source.  This is marked when a
   thread has a pending status to process.  */

static void
infrun_async_inferior_event_handler (gdb_client_data data)
{
  clear_async_event_handler (infrun_async_inferior_event_token);
  inferior_event_handler (INF_REG_EVENT);
}

#if GDB_SELF_TEST
namespace selftests
{

/* Verify that when two threads with the same ptid exist (from two different
   targets) and one of them changes ptid, we only update inferior_ptid if
   it is appropriate.  */

static void
infrun_thread_ptid_changed ()
{
  gdbarch *arch = current_inferior ()->arch ();

  /* The thread which inferior_ptid represents changes ptid.  */
  {
    scoped_restore_current_pspace_and_thread restore;

    scoped_mock_context<test_target_ops> target1 (arch);
    scoped_mock_context<test_target_ops> target2 (arch);

    ptid_t old_ptid (111, 222);
    ptid_t new_ptid (111, 333);

    target1.mock_inferior.pid = old_ptid.pid ();
    target1.mock_thread.ptid = old_ptid;
    target1.mock_inferior.ptid_thread_map.clear ();
    target1.mock_inferior.ptid_thread_map[old_ptid] = &target1.mock_thread;

    target2.mock_inferior.pid = old_ptid.pid ();
    target2.mock_thread.ptid = old_ptid;
    target2.mock_inferior.ptid_thread_map.clear ();
    target2.mock_inferior.ptid_thread_map[old_ptid] = &target2.mock_thread;

    auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
    set_current_inferior (&target1.mock_inferior);

    thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);

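    /* The current inferior belongs to target1, whose thread is the one
       changing ptid, so inferior_ptid is expected to follow along.  */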
    gdb_assert (inferior_ptid == new_ptid);
  }

  /* A thread with the same ptid as inferior_ptid, but from another target,
     changes ptid.  */
  {
    scoped_restore_current_pspace_and_thread restore;

    scoped_mock_context<test_target_ops> target1 (arch);
    scoped_mock_context<test_target_ops> target2 (arch);

    ptid_t old_ptid (111, 222);
    ptid_t new_ptid (111, 333);

    target1.mock_inferior.pid = old_ptid.pid ();
    target1.mock_thread.ptid = old_ptid;
    target1.mock_inferior.ptid_thread_map.clear ();
    target1.mock_inferior.ptid_thread_map[old_ptid] = &target1.mock_thread;

    target2.mock_inferior.pid = old_ptid.pid ();
    target2.mock_thread.ptid = old_ptid;
    target2.mock_inferior.ptid_thread_map.clear ();
    target2.mock_inferior.ptid_thread_map[old_ptid] = &target2.mock_thread;

    auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
    set_current_inferior (&target2.mock_inferior);

    thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);

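    /* Here the current inferior belongs to target2, so the ptid change
       on target1's thread must leave inferior_ptid untouched.  */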
    gdb_assert (inferior_ptid == old_ptid);
  }
}

} /* namespace selftests */

#endif /* GDB_SELF_TEST */

void _initialize_infrun ();
void
_initialize_infrun ()
{
  struct cmd_list_element *c;

  /* Register extra event sources in the event loop.  */
  infrun_async_inferior_event_token
    = create_async_event_handler (infrun_async_inferior_event_handler, nullptr,
				  "infrun");

  cmd_list_element *info_signals_cmd
    = add_info ("signals", info_signals_command, _("\
What debugger does when program gets various signals.\n\
Specify a signal as argument to print info on that signal only."));
  add_info_alias ("handle", info_signals_cmd, 0);

  c = add_com ("handle", class_run, handle_command, _("\
Specify how to handle signals.\n\
Usage: handle SIGNAL [ACTIONS]\n\
Args are signals and actions to apply to those signals.\n\
If no actions are specified, the current settings for the specified signals\n\
will be displayed instead.\n\
\n\
Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
from 1-15 are allowed for compatibility with old versions of GDB.\n\
Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
The special arg \"all\" is recognized to mean all signals except those\n\
used by the debugger, typically SIGTRAP and SIGINT.\n\
\n\
Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
Stop means reenter debugger if this signal happens (implies print).\n\
Print means print a message if this signal happens.\n\
Pass means let program see this signal; otherwise program doesn't know.\n\
Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
Pass and Stop may be combined.\n\
\n\
Multiple signals may be specified.  Signal numbers and signal names\n\
may be interspersed with actions, with the actions being performed for\n\
all signals cumulatively specified."));
  set_cmd_completer (c, handle_completer);

  stop_command = add_cmd ("stop", class_obscure,
			  not_just_help_class_command, _("\
There is no `stop' command, but you can set a hook on `stop'.\n\
This allows you to set a list of commands to be run each time execution\n\
of the program stops."), &cmdlist);

  add_setshow_boolean_cmd
    ("infrun", class_maintenance, &debug_infrun,
     _("Set inferior debugging."),
     _("Show inferior debugging."),
     _("When non-zero, inferior specific debugging is enabled."),
     nullptr, show_debug_infrun, &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("non-stop", no_class,
			   &non_stop_1, _("\
Set whether gdb controls the inferior in non-stop mode."), _("\
Show whether gdb controls the inferior in non-stop mode."), _("\
When debugging a multi-threaded program and this setting is\n\
off (the default, also called all-stop mode), when one thread stops\n\
(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
all other threads in the program while you interact with the thread of\n\
interest.  When you continue or step a thread, you can allow the other\n\
threads to run, or have them remain stopped, but while you inspect any\n\
thread's state, all threads stop.\n\
\n\
In non-stop mode, when one thread stops, other threads can continue\n\
to run freely.  You'll be able to step each thread independently,\n\
leave it stopped or free to run as needed."),
			   set_non_stop,
			   show_non_stop,
			   &setlist,
			   &showlist);

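  /* Default signal handling: stop on, print, and pass every signal to
     the program, and install no signal catchpoints.  The exceptions
     below refine these defaults.  */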
  for (size_t i = 0; i < GDB_SIGNAL_LAST; i++)
    {
      signal_stop[i] = 1;
      signal_print[i] = 1;
      signal_program[i] = 1;
      signal_catch[i] = 0;
    }

  /* Signals caused by debugger's own actions should not be given to
     the program afterwards.

     Do not deliver GDB_SIGNAL_TRAP by default, except when the user
     explicitly specifies that it should be delivered to the target
     program.  Typically, that would occur when a user is debugging a
     target monitor on a simulator: the target monitor sets a
     breakpoint; the simulator encounters this breakpoint and halts
     the simulation handing control to GDB; GDB, noting that the stop
     address doesn't map to any known breakpoint, returns control back
     to the simulator; the simulator then delivers the hardware
     equivalent of a GDB_SIGNAL_TRAP to the program being
     debugged.  */
  signal_program[GDB_SIGNAL_TRAP] = 0;
  signal_program[GDB_SIGNAL_INT] = 0;

  /* Signals that are not errors should not normally enter the debugger.  */
  signal_stop[GDB_SIGNAL_ALRM] = 0;
  signal_print[GDB_SIGNAL_ALRM] = 0;
  signal_stop[GDB_SIGNAL_VTALRM] = 0;
  signal_print[GDB_SIGNAL_VTALRM] = 0;
  signal_stop[GDB_SIGNAL_PROF] = 0;
  signal_print[GDB_SIGNAL_PROF] = 0;
  signal_stop[GDB_SIGNAL_CHLD] = 0;
  signal_print[GDB_SIGNAL_CHLD] = 0;
  signal_stop[GDB_SIGNAL_IO] = 0;
  signal_print[GDB_SIGNAL_IO] = 0;
  signal_stop[GDB_SIGNAL_POLL] = 0;
  signal_print[GDB_SIGNAL_POLL] = 0;
  signal_stop[GDB_SIGNAL_URG] = 0;
  signal_print[GDB_SIGNAL_URG] = 0;
  signal_stop[GDB_SIGNAL_WINCH] = 0;
  signal_print[GDB_SIGNAL_WINCH] = 0;
  signal_stop[GDB_SIGNAL_PRIO] = 0;
  signal_print[GDB_SIGNAL_PRIO] = 0;

  /* These signals are used internally by user-level thread
     implementations.  (See signal(5) on Solaris.)  Like the above
     signals, a healthy program receives and handles them as part of
     its normal operation.  */
  signal_stop[GDB_SIGNAL_LWP] = 0;
  signal_print[GDB_SIGNAL_LWP] = 0;
  signal_stop[GDB_SIGNAL_WAITING] = 0;
  signal_print[GDB_SIGNAL_WAITING] = 0;
  signal_stop[GDB_SIGNAL_CANCEL] = 0;
  signal_print[GDB_SIGNAL_CANCEL] = 0;
  signal_stop[GDB_SIGNAL_LIBRT] = 0;
  signal_print[GDB_SIGNAL_LIBRT] = 0;

  /* Update cached state.  */
  signal_cache_update (-1);

  add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
			    &stop_on_solib_events, _("\
Set stopping for shared library events."), _("\
Show stopping for shared library events."), _("\
If nonzero, gdb will give control to the user when the dynamic linker\n\
notifies gdb of shared library events.  The most common event of interest\n\
to the user would be loading/unloading of a new library."),
			    set_stop_on_solib_events,
			    show_stop_on_solib_events,
			    &setlist, &showlist);

  add_setshow_enum_cmd ("follow-fork-mode", class_run,
			follow_fork_mode_kind_names,
			&follow_fork_mode_string, _("\
Set debugger response to a program call of fork or vfork."), _("\
Show debugger response to a program call of fork or vfork."), _("\
A fork or vfork creates a new process.  follow-fork-mode can be:\n\
  parent  - the original process is debugged after a fork\n\
  child   - the new process is debugged after a fork\n\
The unfollowed process will continue to run.\n\
By default, the debugger will follow the parent process."),
			nullptr,
			show_follow_fork_mode_string,
			&setlist, &showlist);

  add_setshow_enum_cmd ("follow-exec-mode", class_run,
			follow_exec_mode_names,
			&follow_exec_mode_string, _("\
Set debugger response to a program call of exec."), _("\
Show debugger response to a program call of exec."), _("\
An exec call replaces the program image of a process.\n\
\n\
follow-exec-mode can be:\n\
\n\
  new - the debugger creates a new inferior and rebinds the process\n\
to this new inferior.  The program the process was running before\n\
the exec call can be restarted afterwards by restarting the original\n\
inferior.\n\
\n\
  same - the debugger keeps the process bound to the same inferior.\n\
The new executable image replaces the previous executable loaded in\n\
the inferior.  Restarting the inferior after the exec call restarts\n\
the executable the process was running after the exec call.\n\
\n\
By default, the debugger will use the same inferior."),
			nullptr,
			show_follow_exec_mode_string,
			&setlist, &showlist);

  add_setshow_enum_cmd ("scheduler-locking", class_run,
			scheduler_enums, &scheduler_mode, _("\
Set mode for locking scheduler during execution."), _("\
Show mode for locking scheduler during execution."), _("\
off    == no locking (threads may preempt at any time)\n\
on     == full locking (no thread except the current thread may run)\n\
          This applies to both normal execution and replay mode.\n\
step   == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
          In this mode, other threads may run during other commands.\n\
          This applies to both normal execution and replay mode.\n\
replay == scheduler locked in replay mode and unlocked during normal execution."),
			set_schedlock_func,	/* traps on target vector */
			show_scheduler_mode,
			&setlist, &showlist);

  add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
Set mode for resuming threads of all processes."), _("\
Show mode for resuming threads of all processes."), _("\
When on, execution commands (such as 'continue' or 'next') resume all\n\
threads of all processes.  When off (which is the default), execution\n\
commands only resume the threads of the current process.  The set of\n\
threads that are resumed is further refined by the scheduler-locking\n\
mode (see help set scheduler-locking)."),
			   nullptr,
			   show_schedule_multiple,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
Set mode of the step operation."), _("\
Show mode of the step operation."), _("\
When set, doing a step over a function without debug line information\n\
will stop at the first instruction of that function.  Otherwise, the\n\
function is skipped and the step command stops at a different source line."),
			   nullptr,
			   show_step_stop_if_no_debug,
			   &setlist, &showlist);

  add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
				&can_use_displaced_stepping, _("\
Set debugger's willingness to use displaced stepping."), _("\
Show debugger's willingness to use displaced stepping."), _("\
If on, gdb will use displaced stepping to step over breakpoints if it is\n\
supported by the target architecture.  If off, gdb will not use displaced\n\
stepping to step over breakpoints, even if such is supported by the target\n\
architecture.  If auto (which is the default), gdb will use displaced stepping\n\
if the target architecture supports it and non-stop mode is active, but will not\n\
use it in all-stop mode (see help set non-stop)."),
				nullptr,
				show_can_use_displaced_stepping,
				&setlist, &showlist);

  add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
			&exec_direction, _("Set direction of execution.\n\
Options are 'forward' or 'reverse'."),
			_("Show direction of execution (forward/reverse)."),
			_("Tells gdb whether to execute forward or backward."),
			set_exec_direction_func, show_exec_direction_func,
			&setlist, &showlist);

  /* Set/show detach-on-fork: user-settable mode.  */

  add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
Set whether gdb will detach the child of a fork."), _("\
Show whether gdb will detach the child of a fork."), _("\
Tells gdb whether to detach the child of a fork."),
			   nullptr, nullptr, &setlist, &showlist);

  /* Set/show disable address space randomization mode.  */

  add_setshow_boolean_cmd ("disable-randomization", class_support,
			   &disable_randomization, _("\
Set disabling of debuggee's virtual address space randomization."), _("\
Show disabling of debuggee's virtual address space randomization."), _("\
When this mode is on (which is the default), randomization of the virtual\n\
address space is disabled.  Standalone programs run with the randomization\n\
enabled by default on some platforms."),
			   &set_disable_randomization,
			   &show_disable_randomization,
			   &setlist, &showlist);

  /* ptid initializations */
  inferior_ptid = null_ptid;
  target_last_wait_ptid = minus_one_ptid;

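  /* Attach infrun's observers, so that it is notified when a thread's
     ptid changes, when a thread stop is requested, and when an inferior
     exits or execs.  */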
  gdb::observers::thread_ptid_changed.attach (infrun_thread_ptid_changed,
					      "infrun");
  gdb::observers::thread_stop_requested.attach (infrun_thread_stop_requested,
						"infrun");
  gdb::observers::inferior_exit.attach (infrun_inferior_exit, "infrun");
  gdb::observers::inferior_execd.attach (infrun_inferior_execd, "infrun");

  /* Explicitly create without lookup, since that tries to create a
     value with a void typed value, and when we get here, gdbarch
     isn't initialized yet.  At this point, we're quite sure there
     isn't another convenience variable of the same name.  */
  create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, nullptr);

  add_setshow_boolean_cmd ("observer", no_class,
			   &observer_mode_1, _("\
Set whether gdb controls the inferior in observer mode."), _("\
Show whether gdb controls the inferior in observer mode."), _("\
In observer mode, GDB can get data from the inferior, but not\n\
affect its execution.  Registers and memory may not be changed,\n\
breakpoints may not be set, and the program cannot be interrupted\n\
or signalled."),
			   set_observer_mode,
			   show_observer_mode,
			   &setlist,
			   &showlist);

#if GDB_SELF_TEST
  selftests::register_test ("infrun_thread_ptid_changed",
			    selftests::infrun_thread_ptid_changed);
#endif
}