/* Target-struct-independent code to start (run) and stop an inferior
   process.

   Copyright (C) 1986-2021 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "displaced-stepping.h"
#include "infrun.h"
#include <ctype.h>
#include "symtab.h"
#include "frame.h"
#include "inferior.h"
#include "breakpoint.h"
#include "gdbcore.h"
#include "gdbcmd.h"
#include "target.h"
#include "target-connection.h"
#include "gdbthread.h"
#include "annotate.h"
#include "symfile.h"
#include "top.h"
#include "inf-loop.h"
#include "regcache.h"
#include "value.h"
#include "observable.h"
#include "language.h"
#include "solib.h"
#include "main.h"
#include "block.h"
#include "mi/mi-common.h"
#include "event-top.h"
#include "record.h"
#include "record-full.h"
#include "inline-frame.h"
#include "jit.h"
#include "tracepoint.h"
#include "skip.h"
#include "probe.h"
#include "objfiles.h"
#include "completer.h"
#include "target-descriptions.h"
#include "target-dcache.h"
#include "terminal.h"
#include "solist.h"
#include "gdbsupport/event-loop.h"
#include "thread-fsm.h"
#include "gdbsupport/enum-flags.h"
#include "progspace-and-thread.h"
#include "gdbsupport/gdb_optional.h"
#include "arch-utils.h"
#include "gdbsupport/scope-exit.h"
#include "gdbsupport/forward-scope-exit.h"
#include "gdbsupport/gdb_select.h"
#include <unordered_map>
#include "async-event.h"
#include "gdbsupport/selftest.h"
#include "scoped-mock-context.h"
#include "test-target.h"
#include "gdbsupport/common-debug.h"

/* Prototypes for local functions */

static void sig_print_info (enum gdb_signal);

static void sig_print_header (void);

static void follow_inferior_reset_breakpoints (void);

static bool currently_stepping (struct thread_info *tp);

static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);

static void insert_step_resume_breakpoint_at_caller (struct frame_info *);

static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);

static bool maybe_software_singlestep (struct gdbarch *gdbarch);

static void resume (gdb_signal sig);

static void wait_for_inferior (inferior *inf);

/* Asynchronous signal handler registered as event loop source for
   when we have pending events ready to be passed to the core.  */
static struct async_event_handler *infrun_async_inferior_event_token;

/* Stores whether infrun_async was previously enabled or disabled.
   Starts off as -1, indicating "never enabled/disabled".  */
static int infrun_is_async = -1;

/* See infrun.h.  */

void
infrun_async (int enable)
{
  if (infrun_is_async != enable)
    {
      infrun_is_async = enable;

      infrun_debug_printf ("enable=%d", enable);

      if (enable)
        mark_async_event_handler (infrun_async_inferior_event_token);
      else
        clear_async_event_handler (infrun_async_inferior_event_token);
    }
}

/* See infrun.h.  */

void
mark_infrun_async_event_handler (void)
{
  mark_async_event_handler (infrun_async_inferior_event_token);
}

/* When set, stop the 'step' command if we enter a function which has
   no line number information.  The normal behavior is that we step
   over such functions.  */
bool step_stop_if_no_debug = false;
static void
show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
}

/* proceed and normal_stop use this to notify the user when the
   inferior stopped in a different thread than it had been running
   in.  */

static ptid_t previous_inferior_ptid;

/* If set (default for legacy reasons), when following a fork, GDB
   will detach from one of the fork branches, child or parent.
   Exactly which branch is detached depends on the 'set
   follow-fork-mode' setting.  */

static bool detach_fork = true;

bool debug_infrun = false;
static void
show_debug_infrun (struct ui_file *file, int from_tty,
                   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
}

/* Support for disabling address space randomization.  */

bool disable_randomization = true;

static void
show_disable_randomization (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  if (target_supports_disable_randomization ())
    fprintf_filtered (file,
                      _("Disabling randomization of debuggee's "
                        "virtual address space is %s.\n"),
                      value);
  else
    fputs_filtered (_("Disabling randomization of debuggee's "
                      "virtual address space is unsupported on\n"
                      "this platform.\n"), file);
}

static void
set_disable_randomization (const char *args, int from_tty,
                           struct cmd_list_element *c)
{
  if (!target_supports_disable_randomization ())
    error (_("Disabling randomization of debuggee's "
             "virtual address space is unsupported on\n"
             "this platform."));
}

/* User interface for non-stop mode.  */

bool non_stop = false;
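/* Staging variable for the "set non-stop" command: set_non_stop
   validates the new value here before copying it into NON_STOP.  */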
static bool non_stop_1 = false;

static void
set_non_stop (const char *args, int from_tty,
              struct cmd_list_element *c)
{
  if (target_has_execution ())
    {
      non_stop_1 = non_stop;
      error (_("Cannot change this setting while the inferior is running."));
    }

  non_stop = non_stop_1;
}

static void
show_non_stop (struct ui_file *file, int from_tty,
               struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
                    _("Controlling the inferior in non-stop mode is %s.\n"),
                    value);
}

/* "Observer mode" is somewhat like a more extreme version of
   non-stop, in which all GDB operations that might affect the
   target's execution have been disabled.  */

static bool observer_mode = false;
static bool observer_mode_1 = false;

static void
set_observer_mode (const char *args, int from_tty,
                   struct cmd_list_element *c)
{
  if (target_has_execution ())
    {
      observer_mode_1 = observer_mode;
      error (_("Cannot change this setting while the inferior is running."));
    }

  observer_mode = observer_mode_1;

  may_write_registers = !observer_mode;
  may_write_memory = !observer_mode;
  may_insert_breakpoints = !observer_mode;
  may_insert_tracepoints = !observer_mode;
  /* We can insert fast tracepoints in or out of observer mode,
     but enable them if we're going into this mode.  */
  if (observer_mode)
    may_insert_fast_tracepoints = true;
  may_stop = !observer_mode;
  update_target_permissions ();

  /* Going *into* observer mode we must force non-stop, then
     going out we leave it that way.  */
  if (observer_mode)
    {
      pagination_enabled = 0;
      non_stop = non_stop_1 = true;
    }

  if (from_tty)
    printf_filtered (_("Observer mode is now %s.\n"),
                     (observer_mode ? "on" : "off"));
}

static void
show_observer_mode (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Observer mode is %s.\n"), value);
}

/* This updates the value of observer mode based on changes in
   permissions.  Note that we are deliberately ignoring the values of
   may-write-registers and may-write-memory, since the user may have
   reason to enable these during a session, for instance to turn on a
   debugging-related global.  */

void
update_observer_mode (void)
{
  bool newval = (!may_insert_breakpoints
                 && !may_insert_tracepoints
                 && may_insert_fast_tracepoints
                 && !may_stop
                 && non_stop);

  /* Let the user know if things change.  */
  if (newval != observer_mode)
    printf_filtered (_("Observer mode is now %s.\n"),
                     (newval ? "on" : "off"));

  observer_mode = observer_mode_1 = newval;
}

/* Tables of how to react to signals; the user sets them.  */

static unsigned char signal_stop[GDB_SIGNAL_LAST];
static unsigned char signal_print[GDB_SIGNAL_LAST];
static unsigned char signal_program[GDB_SIGNAL_LAST];

/* Table of signals that are registered with "catch signal".  A
   non-zero entry indicates that the signal is caught by some "catch
   signal" command.  */
static unsigned char signal_catch[GDB_SIGNAL_LAST];

/* Table of signals that the target may silently handle.
   This is automatically determined from the flags above,
   and simply cached here.  */
static unsigned char signal_pass[GDB_SIGNAL_LAST];

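/* Helpers for the signal tables above: for each of the first NSIGS
   signals that is set in SIGS, set (SET_SIGS) or clear (UNSET_SIGS)
   the corresponding entry in FLAGS.  */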
#define SET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
        (flags)[signum] = 1; \
  } while (0)

#define UNSET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
        (flags)[signum] = 0; \
  } while (0)

/* Update the target's copy of SIGNAL_PROGRAM.  The sole purpose of
   this function is to avoid exporting `signal_program'.  */

void
update_signals_program_target (void)
{
  target_program_signals (signal_program);
}

/* Value to pass to target_resume() to cause all threads to resume.  */

#define RESUME_ALL minus_one_ptid

/* Command list pointer for the "stop" placeholder.  */

static struct cmd_list_element *stop_command;

/* Nonzero if we want to give control to the user when we're notified
   of shared library events by the dynamic linker.  */
int stop_on_solib_events;

/* Enable or disable optional shared library event breakpoints
   as appropriate when the above flag is changed.  */

static void
set_stop_on_solib_events (const char *args,
                          int from_tty, struct cmd_list_element *c)
{
  update_solib_breakpoints ();
}

static void
show_stop_on_solib_events (struct ui_file *file, int from_tty,
                           struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
                    value);
}

/* True after stop if current stack frame should be printed.  */

static bool stop_print_frame;

/* This is a cached copy of the target/ptid/waitstatus of the last
   event returned by target_wait()/deprecated_target_wait_hook().
   This information is returned by get_last_target_status().  */
static process_stratum_target *target_last_proc_target;
static ptid_t target_last_wait_ptid;
static struct target_waitstatus target_last_waitstatus;

void init_thread_stepping_state (struct thread_info *tss);

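/* Enum strings for "set|show follow-fork-mode".  */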
static const char follow_fork_mode_child[] = "child";
static const char follow_fork_mode_parent[] = "parent";

static const char *const follow_fork_mode_kind_names[] = {
  follow_fork_mode_child,
  follow_fork_mode_parent,
  NULL
};

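/* The current "set follow-fork-mode" setting; one of the strings
   above.  Defaults to following the parent.  */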
static const char *follow_fork_mode_string = follow_fork_mode_parent;
static void
show_follow_fork_mode_string (struct ui_file *file, int from_tty,
                              struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
                    _("Debugger response to a program "
                      "call of fork or vfork is \"%s\".\n"),
                    value);
}


/* Handle changes to the inferior list based on the type of fork,
   which process is being followed, and whether the other process
   should be detached.  On entry inferior_ptid must be the ptid of
   the fork parent.  At return inferior_ptid is the ptid of the
   followed inferior.  */

static bool
follow_fork_inferior (bool follow_child, bool detach_fork)
{
  int has_vforked;
  ptid_t parent_ptid, child_ptid;

  has_vforked = (inferior_thread ()->pending_follow.kind
                 == TARGET_WAITKIND_VFORKED);
  parent_ptid = inferior_ptid;
  child_ptid = inferior_thread ()->pending_follow.value.related_pid;

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && current_ui->prompt_state == PROMPT_BLOCKED
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
         child execs or exits.  If we don't let the child run, then
         the parent stays blocked.  If we're telling the parent to run
         in the foreground, the user will not be able to ctrl-c to get
         back the terminal, effectively hanging the debug session.  */
      fprintf_filtered (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      return true;
    }

  thread_info *child_thr = nullptr;

  if (!follow_child)
    {
      /* Detach new forked process?  */
      if (detach_fork)
        {
          /* Before detaching from the child, remove all breakpoints
             from it.  If we forked, then this has already been taken
             care of by infrun.c.  If we vforked however, any
             breakpoint inserted in the parent is visible in the
             child, even those added while stopped in a vfork
             catchpoint.  This will remove the breakpoints from the
             parent also, but they'll be reinserted below.  */
          if (has_vforked)
            {
              /* Keep breakpoints list in sync.  */
              remove_breakpoints_inf (current_inferior ());
            }

          if (print_inferior_events)
            {
              /* Ensure that we have a process ptid.  */
              ptid_t process_ptid = ptid_t (child_ptid.pid ());

              target_terminal::ours_for_output ();
              fprintf_filtered (gdb_stdlog,
                                _("[Detaching after %s from child %s]\n"),
                                has_vforked ? "vfork" : "fork",
                                target_pid_to_str (process_ptid).c_str ());
            }
        }
      else
        {
          struct inferior *parent_inf, *child_inf;

          /* Add process to GDB's tables.  */
          child_inf = add_inferior (child_ptid.pid ());

          parent_inf = current_inferior ();
          child_inf->attach_flag = parent_inf->attach_flag;
          copy_terminal_info (child_inf, parent_inf);
          child_inf->gdbarch = parent_inf->gdbarch;
          copy_inferior_target_desc_info (child_inf, parent_inf);

          scoped_restore_current_pspace_and_thread restore_pspace_thread;

          set_current_inferior (child_inf);
          switch_to_no_thread ();
          child_inf->symfile_flags = SYMFILE_NO_READ;
          child_inf->push_target (parent_inf->process_target ());
          child_thr = add_thread_silent (child_inf->process_target (),
                                         child_ptid);

          /* If this is a vfork child, then the address-space is
             shared with the parent.  */
          if (has_vforked)
            {
              child_inf->pspace = parent_inf->pspace;
              child_inf->aspace = parent_inf->aspace;

              exec_on_vfork ();

              /* The parent will be frozen until the child is done
                 with the shared region.  Keep track of the
                 parent.  */
              child_inf->vfork_parent = parent_inf;
              child_inf->pending_detach = 0;
              parent_inf->vfork_child = child_inf;
              parent_inf->pending_detach = 0;

              /* Now that the inferiors and program spaces are all
                 wired up, we can switch to the child thread (which
                 switches inferior and program space too).  */
              switch_to_thread (child_thr);
            }
          else
            {
              child_inf->aspace = new_address_space ();
              child_inf->pspace = new program_space (child_inf->aspace);
              child_inf->removable = 1;
              set_current_program_space (child_inf->pspace);
              clone_program_space (child_inf->pspace, parent_inf->pspace);

              /* solib_create_inferior_hook relies on the current
                 thread.  */
              switch_to_thread (child_thr);
            }
        }

      if (has_vforked)
        {
          struct inferior *parent_inf;

          parent_inf = current_inferior ();

          /* If we detached from the child, then we have to be careful
             to not insert breakpoints in the parent until the child
             is done with the shared memory region.  However, if we're
             staying attached to the child, then we can and should
             insert breakpoints, so that we can debug it.  A
             subsequent child exec or exit is enough to know when the
             child stops using the parent's address space.  */
          parent_inf->waiting_for_vfork_done = detach_fork;
          parent_inf->pspace->breakpoints_not_allowed = detach_fork;
        }
    }
  else
    {
      /* Follow the child.  */
      struct inferior *parent_inf, *child_inf;
      struct program_space *parent_pspace;

      if (print_inferior_events)
        {
          std::string parent_pid = target_pid_to_str (parent_ptid);
          std::string child_pid = target_pid_to_str (child_ptid);

          target_terminal::ours_for_output ();
          fprintf_filtered (gdb_stdlog,
                            _("[Attaching after %s %s to child %s]\n"),
                            parent_pid.c_str (),
                            has_vforked ? "vfork" : "fork",
                            child_pid.c_str ());
        }

      /* Add the new inferior first, so that the target_detach below
         doesn't unpush the target.  */

      child_inf = add_inferior (child_ptid.pid ());

      parent_inf = current_inferior ();
      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);
      child_inf->gdbarch = parent_inf->gdbarch;
      copy_inferior_target_desc_info (child_inf, parent_inf);

      parent_pspace = parent_inf->pspace;

      process_stratum_target *target = parent_inf->process_target ();

      {
        /* Hold a strong reference to the target while (maybe)
           detaching the parent.  Otherwise detaching could close the
           target.  */
        auto target_ref = target_ops_ref::new_reference (target);

        /* If we're vforking, we want to hold on to the parent until
           the child exits or execs.  At child exec or exit time we
           can remove the old breakpoints from the parent and detach
           or resume debugging it.  Otherwise, detach the parent now;
           we'll want to reuse its program/address spaces, but we
           can't set them to the child before removing breakpoints
           from the parent, otherwise, the breakpoints module could
           decide to remove breakpoints from the wrong process (since
           they'd be assigned to the same address space).  */

        if (has_vforked)
          {
            gdb_assert (child_inf->vfork_parent == NULL);
            gdb_assert (parent_inf->vfork_child == NULL);
            child_inf->vfork_parent = parent_inf;
            child_inf->pending_detach = 0;
            parent_inf->vfork_child = child_inf;
            parent_inf->pending_detach = detach_fork;
            parent_inf->waiting_for_vfork_done = 0;
          }
        else if (detach_fork)
          {
            if (print_inferior_events)
              {
                /* Ensure that we have a process ptid.  */
                ptid_t process_ptid = ptid_t (parent_ptid.pid ());

                target_terminal::ours_for_output ();
                fprintf_filtered (gdb_stdlog,
                                  _("[Detaching after fork from "
                                    "parent %s]\n"),
                                  target_pid_to_str (process_ptid).c_str ());
              }

            target_detach (parent_inf, 0);
            parent_inf = NULL;
          }

        /* Note that the detach above makes PARENT_INF dangling.  */

        /* Add the child thread to the appropriate lists, and switch
           to this new thread, before cloning the program space, and
           informing the solib layer about this new process.  */

        set_current_inferior (child_inf);
        child_inf->push_target (target);
      }

      child_thr = add_thread_silent (target, child_ptid);

      /* If this is a vfork child, then the address-space is shared
         with the parent.  If we detached from the parent, then we can
         reuse the parent's program/address spaces.  */
      if (has_vforked || detach_fork)
        {
          child_inf->pspace = parent_pspace;
          child_inf->aspace = child_inf->pspace->aspace;

          exec_on_vfork ();
        }
      else
        {
          child_inf->aspace = new_address_space ();
          child_inf->pspace = new program_space (child_inf->aspace);
          child_inf->removable = 1;
          child_inf->symfile_flags = SYMFILE_NO_READ;
          set_current_program_space (child_inf->pspace);
          clone_program_space (child_inf->pspace, parent_pspace);
        }

      switch_to_thread (child_thr);
    }

  target_follow_fork (follow_child, detach_fork);

  /* If we ended up creating a new inferior, call post_create_inferior to inform
     the various subcomponents.  */
  if (child_thr != nullptr)
    {
      scoped_restore_current_thread restore;
      switch_to_thread (child_thr);

      post_create_inferior (0);
    }

  return false;
}

/* Tell the target to follow the fork we're stopped at.  Returns true
   if the inferior should be resumed; false, if the target for some
   reason decided it's best not to resume.  */

static bool
follow_fork ()
{
  bool follow_child = (follow_fork_mode_string == follow_fork_mode_child);
  bool should_resume = true;
  struct thread_info *tp;

  /* Copy user stepping state to the new inferior thread.  FIXME: the
     followed fork child thread should have a copy of most of the
     parent thread structure's run control related fields, not just these.
     Initialized to avoid "may be used uninitialized" warnings from gcc.  */
  struct breakpoint *step_resume_breakpoint = NULL;
  struct breakpoint *exception_resume_breakpoint = NULL;
  CORE_ADDR step_range_start = 0;
  CORE_ADDR step_range_end = 0;
  int current_line = 0;
  symtab *current_symtab = NULL;
  struct frame_id step_frame_id = { 0 };
  struct thread_fsm *thread_fsm = NULL;

  if (!non_stop)
    {
      process_stratum_target *wait_target;
      ptid_t wait_ptid;
      struct target_waitstatus wait_status;

      /* Get the last target status returned by target_wait().  */
      get_last_target_status (&wait_target, &wait_ptid, &wait_status);

      /* If not stopped at a fork event, then there's nothing else to
         do.  */
      if (wait_status.kind != TARGET_WAITKIND_FORKED
          && wait_status.kind != TARGET_WAITKIND_VFORKED)
        return 1;

      /* Check if we switched over from WAIT_PTID, since the event was
         reported.  */
      if (wait_ptid != minus_one_ptid
          && (current_inferior ()->process_target () != wait_target
              || inferior_ptid != wait_ptid))
        {
          /* We did.  Switch back to WAIT_PTID thread, to tell the
             target to follow it (in either direction).  We'll
             afterwards refuse to resume, and inform the user what
             happened.  */
          thread_info *wait_thread = find_thread_ptid (wait_target, wait_ptid);
          switch_to_thread (wait_thread);
          should_resume = false;
        }
    }

  tp = inferior_thread ();

  /* If there were any forks/vforks that were caught and are now to be
     followed, then do so now.  */
  switch (tp->pending_follow.kind)
    {
    case TARGET_WAITKIND_FORKED:
    case TARGET_WAITKIND_VFORKED:
      {
        ptid_t parent, child;

        /* If the user did a next/step, etc, over a fork call,
           preserve the stepping state in the fork child.  */
        if (follow_child && should_resume)
          {
            step_resume_breakpoint = clone_momentary_breakpoint
              (tp->control.step_resume_breakpoint);
            step_range_start = tp->control.step_range_start;
            step_range_end = tp->control.step_range_end;
            current_line = tp->current_line;
            current_symtab = tp->current_symtab;
            step_frame_id = tp->control.step_frame_id;
            exception_resume_breakpoint
              = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
            thread_fsm = tp->thread_fsm;

            /* For now, delete the parent's sr breakpoint, otherwise,
               parent/child sr breakpoints are considered duplicates,
               and the child version will not be installed.  Remove
               this when the breakpoints module becomes aware of
               inferiors and address spaces.  */
            delete_step_resume_breakpoint (tp);
            tp->control.step_range_start = 0;
            tp->control.step_range_end = 0;
            tp->control.step_frame_id = null_frame_id;
            delete_exception_resume_breakpoint (tp);
            tp->thread_fsm = NULL;
          }

        parent = inferior_ptid;
        child = tp->pending_follow.value.related_pid;

        process_stratum_target *parent_targ = tp->inf->process_target ();
        /* Set up inferior(s) as specified by the caller, and tell the
           target to do whatever is necessary to follow either parent
           or child.  */
        if (follow_fork_inferior (follow_child, detach_fork))
          {
            /* Target refused to follow, or there's some other reason
               we shouldn't resume.  */
            should_resume = 0;
          }
        else
          {
            /* This pending follow fork event is now handled, one way
               or another.  The previously selected thread may be gone
               from the lists by now, but if it is still around, we
               need to clear the pending follow request.  */
            tp = find_thread_ptid (parent_targ, parent);
            if (tp)
              tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;

            /* This makes sure we don't try to apply the "Switched
               over from WAIT_PID" logic above.  */
            nullify_last_target_wait_ptid ();

            /* If we followed the child, switch to it...  */
            if (follow_child)
              {
                thread_info *child_thr = find_thread_ptid (parent_targ, child);
                switch_to_thread (child_thr);

                /* ... and preserve the stepping state, in case the
                   user was stepping over the fork call.  */
                if (should_resume)
                  {
                    tp = inferior_thread ();
                    tp->control.step_resume_breakpoint
                      = step_resume_breakpoint;
                    tp->control.step_range_start = step_range_start;
                    tp->control.step_range_end = step_range_end;
                    tp->current_line = current_line;
                    tp->current_symtab = current_symtab;
                    tp->control.step_frame_id = step_frame_id;
                    tp->control.exception_resume_breakpoint
                      = exception_resume_breakpoint;
                    tp->thread_fsm = thread_fsm;
                  }
                else
                  {
                    /* If we get here, it was because we're trying to
                       resume from a fork catchpoint, but, the user
                       has switched threads away from the thread that
                       forked.  In that case, the resume command
                       issued is most likely not applicable to the
                       child, so just warn, and refuse to resume.  */
                    warning (_("Not resuming: switched threads "
                               "before following fork child."));
                  }

                /* Reset breakpoints in the child as appropriate.  */
                follow_inferior_reset_breakpoints ();
              }
          }
      }
      break;
    case TARGET_WAITKIND_SPURIOUS:
      /* Nothing to follow.  */
      break;
    default:
      internal_error (__FILE__, __LINE__,
                      "Unexpected pending_follow.kind %d\n",
                      tp->pending_follow.kind);
      break;
    }

  return should_resume;
}

static void
follow_inferior_reset_breakpoints (void)
{
  struct thread_info *tp = inferior_thread ();

  /* Was there a step_resume breakpoint?  (There was if the user
     did a "next" at the fork() call.)  If so, explicitly reset its
     thread number.  Cloned step_resume breakpoints are disabled on
     creation, so enable it here now that it is associated with the
     correct thread.

     step_resumes are a form of bp that are made to be per-thread.
     Since we created the step_resume bp when the parent process
     was being debugged, and now are switching to the child process,
     from the breakpoint package's viewpoint, that's a switch of
     "threads".  We must update the bp's notion of which thread
     it is for, or it'll be ignored when it triggers.  */

  if (tp->control.step_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
      tp->control.step_resume_breakpoint->loc->enabled = 1;
    }

  /* Treat exception_resume breakpoints like step_resume breakpoints.  */
  if (tp->control.exception_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
      tp->control.exception_resume_breakpoint->loc->enabled = 1;
    }

  /* Reinsert all breakpoints in the child.  The user may have set
     breakpoints after catching the fork, in which case those
     were never set in the child, but only in the parent.  This makes
     sure the inserted breakpoints match the breakpoint list.  */

  breakpoint_re_set ();
  insert_breakpoints ();
}

/* The child has exited or execed: resume threads of the parent the
   user wanted to be executing.  */

static int
proceed_after_vfork_done (struct thread_info *thread,
                          void *arg)
{
  int pid = * (int *) arg;

  if (thread->ptid.pid () == pid
      && thread->state == THREAD_RUNNING
      && !thread->executing
      && !thread->stop_requested
      && thread->stop_signal () == GDB_SIGNAL_0)
    {
      infrun_debug_printf ("resuming vfork parent thread %s",
                           target_pid_to_str (thread->ptid).c_str ());

      switch_to_thread (thread);
      clear_proceed_status (0);
      proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
    }

  return 0;
}

/* Called whenever we notice an exec or exit event, to handle
   detaching or resuming a vfork parent.  */

static void
handle_vfork_child_exec_or_exit (int exec)
{
  struct inferior *inf = current_inferior ();

  if (inf->vfork_parent)
    {
      int resume_parent = -1;

      /* This exec or exit marks the end of the shared memory region
         between the parent and the child.  Break the bonds.  */
      inferior *vfork_parent = inf->vfork_parent;
      inf->vfork_parent->vfork_child = NULL;
      inf->vfork_parent = NULL;

      /* If the user wanted to detach from the parent, now is the
         time.  */
      if (vfork_parent->pending_detach)
        {
          struct program_space *pspace;
          struct address_space *aspace;

          /* follow-fork child, detach-on-fork on.  */

          vfork_parent->pending_detach = 0;

          scoped_restore_current_pspace_and_thread restore_thread;

          /* We're letting loose of the parent.  */
          thread_info *tp = any_live_thread_of_inferior (vfork_parent);
          switch_to_thread (tp);

          /* We're about to detach from the parent, which implicitly
             removes breakpoints from its address space.  There's a
             catch here: we want to reuse the spaces for the child,
             but, parent/child are still sharing the pspace at this
             point, although the exec in reality makes the kernel give
             the child a fresh set of new pages.  The problem here is
             that the breakpoints module, being unaware of this, would
             likely choose the child process to write to the parent
             address space.  Swapping the child temporarily away from
             the spaces has the desired effect.  Yes, this is "sort
             of" a hack.  */

          pspace = inf->pspace;
          aspace = inf->aspace;
          inf->aspace = NULL;
          inf->pspace = NULL;

          if (print_inferior_events)
            {
              std::string pidstr
                = target_pid_to_str (ptid_t (vfork_parent->pid));

              target_terminal::ours_for_output ();

              if (exec)
                {
                  fprintf_filtered (gdb_stdlog,
                                    _("[Detaching vfork parent %s "
                                      "after child exec]\n"), pidstr.c_str ());
                }
              else
                {
                  fprintf_filtered (gdb_stdlog,
                                    _("[Detaching vfork parent %s "
                                      "after child exit]\n"), pidstr.c_str ());
                }
            }

          target_detach (vfork_parent, 0);

          /* Put it back.  */
          inf->pspace = pspace;
          inf->aspace = aspace;
        }
      else if (exec)
        {
          /* We're staying attached to the parent, so, really give the
             child a new address space.  */
          inf->pspace = new program_space (maybe_new_address_space ());
          inf->aspace = inf->pspace->aspace;
          inf->removable = 1;
          set_current_program_space (inf->pspace);

          resume_parent = vfork_parent->pid;
        }
      else
        {
          /* If this is a vfork child exiting, then the pspace and
             aspaces were shared with the parent.  Since we're
             reporting the process exit, we'll be mourning all that is
             found in the address space, and switching to null_ptid,
             preparing to start a new inferior.  But, since we don't
             want to clobber the parent's address/program spaces, we
             go ahead and create a new one for this exiting
             inferior.  */

          /* Switch to no-thread while running clone_program_space, so
             that clone_program_space doesn't want to read the
             selected frame of a dead process.  */
          scoped_restore_current_thread restore_thread;
          switch_to_no_thread ();

          inf->pspace = new program_space (maybe_new_address_space ());
          inf->aspace = inf->pspace->aspace;
          set_current_program_space (inf->pspace);
          inf->removable = 1;
          inf->symfile_flags = SYMFILE_NO_READ;
          clone_program_space (inf->pspace, vfork_parent->pspace);

          resume_parent = vfork_parent->pid;
        }

      gdb_assert (current_program_space == inf->pspace);

      if (non_stop && resume_parent != -1)
        {
          /* If the user wanted the parent to be running, let it go
             free now.  */
          scoped_restore_current_thread restore_thread;

          infrun_debug_printf ("resuming vfork parent process %d",
                               resume_parent);

          iterate_over_threads (proceed_after_vfork_done, &resume_parent);
        }
    }
}

/* Enum strings for "set|show follow-exec-mode".  */

static const char follow_exec_mode_new[] = "new";
static const char follow_exec_mode_same[] = "same";
static const char *const follow_exec_mode_names[] =
{
  follow_exec_mode_new,
  follow_exec_mode_same,
  NULL,
};

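/* The current "set follow-exec-mode" setting; one of the strings
   above.  */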
static const char *follow_exec_mode_string = follow_exec_mode_same;
static void
show_follow_exec_mode_string (struct ui_file *file, int from_tty,
                              struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
}

/* EXEC_FILE_TARGET is assumed to be non-NULL.  */

static void
follow_exec (ptid_t ptid, const char *exec_file_target)
{
  int pid = ptid.pid ();
  ptid_t process_ptid;

  /* Switch terminal for any messages produced e.g. by
     breakpoint_re_set.  */
  target_terminal::ours_for_output ();

  /* This is an exec event that we actually wish to pay attention to.
     Refresh our symbol table to the newly exec'd program, remove any
     momentary bp's, etc.

     If there are breakpoints, they aren't really inserted now,
     since the exec() transformed our inferior into a fresh set
     of instructions.

     We want to preserve symbolic breakpoints on the list, since
     we have hopes that they can be reset after the new a.out's
     symbol table is read.

     However, any "raw" breakpoints must be removed from the list
     (e.g., the solib bp's), since their address is probably invalid
     now.

     And, we DON'T want to call delete_breakpoints() here, since
     that may write the bp's "shadow contents" (the instruction
     value that was overwritten with a TRAP instruction).  Since
     we now have a new a.out, those shadow contents aren't valid.  */

  mark_breakpoints_out ();

  /* The target reports the exec event to the main thread, even if
     some other thread does the exec, and even if the main thread was
     stopped or already gone.  We may still have non-leader threads of
     the process on our list.  E.g., on targets that don't have thread
     exit events (like remote); or on native Linux in non-stop mode if
     there were only two threads in the inferior and the non-leader
     one is the one that execs (and nothing forces an update of the
     thread list up to here).  When debugging remotely, it's best to
     avoid extra traffic, when possible, so avoid syncing the thread
     list with the target, and instead go ahead and delete all threads
     of the process but one that reported the event.  Note this must
     be done before calling update_breakpoints_after_exec, as
     otherwise clearing the threads' resources would reference stale
     thread breakpoints -- it may have been one of these threads that
     stepped across the exec.  We could just clear their stepping
     states, but as long as we're iterating, might as well delete
     them.  Deleting them now rather than at the next user-visible
     stop provides a nicer sequence of events for user and MI
     notifications.  */
  for (thread_info *th : all_threads_safe ())
    if (th->ptid.pid () == pid && th->ptid != ptid)
      delete_thread (th);

  /* We also need to clear any left over stale state for the
     leader/event thread.  E.g., if there was any step-resume
     breakpoint or similar, it's gone now.  We cannot truly
     step-to-next statement through an exec().  */
  thread_info *th = inferior_thread ();
  th->control.step_resume_breakpoint = NULL;
  th->control.exception_resume_breakpoint = NULL;
  th->control.single_step_breakpoints = NULL;
  th->control.step_range_start = 0;
  th->control.step_range_end = 0;

  /* The user may have had the main thread held stopped in the
     previous image (e.g., schedlock on, or non-stop).  Release
     it now.  */
  th->stop_requested = 0;

  update_breakpoints_after_exec ();

  /* What is this a.out's name?  */
  process_ptid = ptid_t (pid);
  printf_unfiltered (_("%s is executing new program: %s\n"),
                     target_pid_to_str (process_ptid).c_str (),
                     exec_file_target);

  /* We've followed the inferior through an exec.  Therefore, the
     inferior has essentially been killed & reborn.  */

  breakpoint_init_inferior (inf_execd);

  gdb::unique_xmalloc_ptr<char> exec_file_host
    = exec_file_find (exec_file_target, NULL);

  /* If we were unable to map the executable target pathname onto a host
     pathname, tell the user that.  Otherwise GDB's subsequent behavior
     is confusing.  Maybe it would even be better to stop at this point
     so that the user can specify a file manually before continuing.  */
  if (exec_file_host == NULL)
    warning (_("Could not load symbols for executable %s.\n"
               "Do you need \"set sysroot\"?"),
             exec_file_target);

  /* Reset the shared library package.  This ensures that we get a
     shlib event when the child reaches "_start", at which point the
     dld will have had a chance to initialize the child.  */
  /* Also, loading a symbol file below may trigger symbol lookups, and
     we don't want those to be satisfied by the libraries of the
     previous incarnation of this process.  */
  no_shared_libraries (NULL, 0);

  struct inferior *inf = current_inferior ();

  if (follow_exec_mode_string == follow_exec_mode_new)
    {
      /* The user wants to keep the old inferior and program spaces
         around.  Create a new fresh one, and switch to it.  */

      /* Do exit processing for the original inferior before setting the new
         inferior's pid.  Having two inferiors with the same pid would confuse
         find_inferior_p(t)id.  Transfer the terminal state and info from the
         old to the new inferior.  */
      inferior *new_inferior = add_inferior_with_spaces ();

      swap_terminal_info (new_inferior, inf);
      exit_inferior_silent (inf);

      new_inferior->pid = pid;
      target_follow_exec (new_inferior, ptid, exec_file_target);

      /* We continue with the new inferior.  */
      inf = new_inferior;
    }
  else
    {
      /* The old description may no longer be fit for the new image.
         E.g, a 64-bit process exec'ed a 32-bit process.  Clear the
         old description; we'll read a new one below.  No need to do
         this on "follow-exec-mode new", as the old inferior stays
         around (its description is later cleared/refetched on
         restart).  */
      target_clear_description ();
      target_follow_exec (inf, ptid, exec_file_target);
    }

  gdb_assert (current_inferior () == inf);
  gdb_assert (current_program_space == inf->pspace);

  /* Attempt to open the exec file.  SYMFILE_DEFER_BP_RESET is used
     because the proper displacement for a PIE (Position Independent
     Executable) main symbol file will only be computed by
     solib_create_inferior_hook below.  breakpoint_re_set would fail
     to insert the breakpoints with the zero displacement.  */
  try_open_exec_file (exec_file_host.get (), inf, SYMFILE_DEFER_BP_RESET);

  /* If the target can specify a description, read it.  Must do this
     after flipping to the new executable (because the target supplied
     description must be compatible with the executable's
     architecture, and the old executable may e.g., be 32-bit, while
     the new one 64-bit), and before anything involving memory or
     registers.  */
  target_find_description ();

  gdb::observers::inferior_execd.notify (inf);

  breakpoint_re_set ();

  /* Reinsert all breakpoints.  (Those which were symbolic have
     been reset to the proper address in the new a.out, thanks
     to symbol_file_command...).  */
  insert_breakpoints ();

  /* The next resume of this inferior should bring it to the shlib
     startup breakpoints.  (If the user had also set bp's on
     "main" from the old (parent) process, then they'll auto-
     matically get reset there in the new process.).  */
}

/* The chain of threads that need to do a step-over operation to get
   past e.g., a breakpoint.  What technique is used to step over the
   breakpoint/watchpoint does not matter -- all threads end up in the
   same queue, to maintain rough temporal order of execution, in order
   to avoid starvation; otherwise, we could e.g., find ourselves
   constantly stepping the same couple of threads past their
   breakpoints over and over, if the single-steps finish fast
   enough.  */
thread_step_over_list global_thread_step_over_list;

/* Bit flags indicating what the thread needs to step over.  */

enum step_over_what_flag
  {
    /* Step over a breakpoint.  */
    STEP_OVER_BREAKPOINT = 1,

    /* Step past a non-continuable watchpoint, in order to let the
       instruction execute so we can evaluate the watchpoint
       expression.  */
    STEP_OVER_WATCHPOINT = 2
  };
DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag, step_over_what);

/* Info about an instruction that is being stepped over.  */

struct step_over_info
{
  /* If we're stepping past a breakpoint, this is the address space
     and address of the instruction the breakpoint is set at.  We'll
     skip inserting all breakpoints here.  Valid iff ASPACE is
     non-NULL.  */
  const address_space *aspace = nullptr;
  CORE_ADDR address = 0;

  /* The instruction being stepped over triggers a nonsteppable
     watchpoint.  If true, we'll skip inserting watchpoints.  */
  int nonsteppable_watchpoint_p = 0;

  /* The thread's global number.  */
  int thread = -1;
};

/* The step-over info of the location that is being stepped over.

   Note that with async/breakpoint always-inserted mode, a user might
   set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
   being stepped over.  As setting a new breakpoint inserts all
   breakpoints, we need to make sure the breakpoint being stepped over
   isn't inserted then.  We do that by only clearing the step-over
   info when the step-over is actually finished (or aborted).

   Presently GDB can only step over one breakpoint at any given time.
   Given threads that can't run code in the same address space as the
   breakpoint's can't really miss the breakpoint, GDB could be taught
   to step-over at most one breakpoint per address space (so this info
   could move to the address space object if/when GDB is extended).
   The set of breakpoints being stepped over will normally be much
   smaller than the set of all breakpoints, so a flag in the
   breakpoint location structure would be wasteful.  A separate list
   also saves complexity and run-time, as otherwise we'd have to go
   through all breakpoint locations clearing their flag whenever we
   start a new sequence.  Similar considerations weigh against storing
   this info in the thread object.  Plus, not all step overs actually
   have breakpoint locations -- e.g., stepping past a single-step
   breakpoint, or stepping to complete a non-continuable
   watchpoint.  */
static struct step_over_info step_over_info;

/* Record the address of the breakpoint/instruction we're currently
   stepping over.
   N.B. We record the aspace and address now, instead of say just the thread,
   because when we need the info later the thread may be running.  */

static void
set_step_over_info (const address_space *aspace, CORE_ADDR address,
                    int nonsteppable_watchpoint_p,
                    int thread)
{
  step_over_info.aspace = aspace;
  step_over_info.address = address;
  step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
  step_over_info.thread = thread;
}

/* Called when we're no longer stepping over a breakpoint / an
   instruction, so all breakpoints are free to be (re)inserted.  */

static void
clear_step_over_info (void)
{
  infrun_debug_printf ("clearing step over info");
  step_over_info.aspace = NULL;
  step_over_info.address = 0;
  step_over_info.nonsteppable_watchpoint_p = 0;
  step_over_info.thread = -1;
}

/* See infrun.h.  */

int
stepping_past_instruction_at (struct address_space *aspace,
                              CORE_ADDR address)
{
  return (step_over_info.aspace != NULL
          && breakpoint_address_match (aspace, address,
                                       step_over_info.aspace,
                                       step_over_info.address));
}

/* See infrun.h.  */

int
thread_is_stepping_over_breakpoint (int thread)
{
  return (step_over_info.thread != -1
          && thread == step_over_info.thread);
}

/* See infrun.h.  */

int
stepping_past_nonsteppable_watchpoint (void)
{
  return step_over_info.nonsteppable_watchpoint_p;
}

/* Returns true if step-over info is valid.  */

static bool
step_over_info_valid_p (void)
{
  return (step_over_info.aspace != NULL
          || stepping_past_nonsteppable_watchpoint ());
}


/* Displaced stepping.  */

/* In non-stop debugging mode, we must take special care to manage
   breakpoints properly; in particular, the traditional strategy for
   stepping a thread past a breakpoint it has hit is unsuitable.
   'Displaced stepping' is a tactic for stepping one thread past a
   breakpoint it has hit while ensuring that other threads running
   concurrently will hit the breakpoint as they should.

   The traditional way to step a thread T off a breakpoint in a
   multi-threaded program in all-stop mode is as follows:

   a0) Initially, all threads are stopped, and breakpoints are not
       inserted.
   a1) We single-step T, leaving breakpoints uninserted.
   a2) We insert breakpoints, and resume all threads.

   In non-stop debugging, however, this strategy is unsuitable: we
   don't want to have to stop all threads in the system in order to
   continue or step T past a breakpoint.  Instead, we use displaced
   stepping:

   n0) Initially, T is stopped, other threads are running, and
       breakpoints are inserted.
   n1) We copy the instruction "under" the breakpoint to a separate
       location, outside the main code stream, making any adjustments
       to the instruction, register, and memory state as directed by
       T's architecture.
   n2) We single-step T over the instruction at its new location.
   n3) We adjust the resulting register and memory state as directed
       by T's architecture.  This includes resetting T's PC to point
       back into the main instruction stream.
   n4) We resume T.

   This approach depends on the following gdbarch methods:

   - gdbarch_max_insn_length and gdbarch_displaced_step_location
     indicate where to copy the instruction, and how much space must
     be reserved there.  We use these in step n1.

   - gdbarch_displaced_step_copy_insn copies an instruction to a new
     address, and makes any necessary adjustments to the instruction,
     register contents, and memory.  We use this in step n1.

   - gdbarch_displaced_step_fixup adjusts registers and memory after
     we have successfully single-stepped the instruction, to yield the
     same effect the instruction would have had if we had executed it
     at its original address.  We use this in step n3.

   The gdbarch_displaced_step_copy_insn and
   gdbarch_displaced_step_fixup functions must be written so that
   copying an instruction with gdbarch_displaced_step_copy_insn,
   single-stepping across the copied instruction, and then applying
   gdbarch_displaced_step_fixup should have the same effects on the
   thread's memory and registers as stepping the instruction in place
   would have.  Exactly which responsibilities fall to the copy and
   which fall to the fixup is up to the author of those functions.

   See the comments in gdbarch.sh for details.

   Note that displaced stepping and software single-step cannot
   currently be used in combination, although with some care I think
   they could be made to.  Software single-step works by placing
   breakpoints on all possible subsequent instructions; if the
   displaced instruction is a PC-relative jump, those breakpoints
   could fall in very strange places --- on pages that aren't
   executable, or at addresses that are not proper instruction
   boundaries.  (We do generally let other threads run while we wait
   to hit the software single-step breakpoint, and they might
   encounter such a corrupted instruction.)  One way to work around
   this would be to have gdbarch_displaced_step_copy_insn fully
   simulate the effect of PC-relative instructions (and return NULL)
   on architectures that use software single-stepping.

   In non-stop mode, we can have independent and simultaneous step
   requests, so more than one thread may need to simultaneously step
   over a breakpoint.  The current implementation assumes there is
   only one scratch space per process.  In this case, we have to
   serialize access to the scratch space.  If thread A wants to step
   over a breakpoint, but we are currently waiting for some other
   thread to complete a displaced step, we leave thread A stopped and
   place it in the displaced_step_request_queue.  Whenever a displaced
   step finishes, we pick the next thread in the queue and start a new
   displaced step operation on it.  See displaced_step_prepare and
   displaced_step_finish for details.  */

/* Return true if THREAD is doing a displaced step.  */

static bool
displaced_step_in_progress_thread (thread_info *thread)
{
  gdb_assert (thread != NULL);

  return thread->displaced_step_state.in_progress ();
}

/* Return true if INF has a thread doing a displaced step.  */

static bool
displaced_step_in_progress (inferior *inf)
{
  return inf->displaced_step_state.in_progress_count > 0;
}

/* Return true if any thread is doing a displaced step.  */

static bool
displaced_step_in_progress_any_thread ()
{
  for (inferior *inf : all_non_exited_inferiors ())
    {
      if (displaced_step_in_progress (inf))
        return true;
    }

  return false;
}

1487static void
1488infrun_inferior_exit (struct inferior *inf)
1489{
d20172fc 1490 inf->displaced_step_state.reset ();
fc1cf338 1491}
237fc4c9 1492
3b7a962d
SM
1493static void
1494infrun_inferior_execd (inferior *inf)
1495{
187b041e
SM
1496   /* If some threads were doing a displaced step in this inferior at the
1497      moment of the exec, they no longer exist.  Even if the exec'ing thread was
3b7a962d
SM
1498      doing a displaced step, we don't want to do any fixup nor restore displaced
1499 stepping buffer bytes. */
1500 inf->displaced_step_state.reset ();
1501
187b041e
SM
1502 for (thread_info *thread : inf->threads ())
1503 thread->displaced_step_state.reset ();
1504
3b7a962d
SM
1505 /* Since an in-line step is done with everything else stopped, if there was
1506 one in progress at the time of the exec, it must have been the exec'ing
1507 thread. */
1508 clear_step_over_info ();
1509}
1510
fff08868
HZ
1511/* If ON, and the architecture supports it, GDB will use displaced
1512 stepping to step over breakpoints. If OFF, or if the architecture
1513 doesn't support it, GDB will instead use the traditional
1514 hold-and-step approach. If AUTO (which is the default), GDB will
1515 decide which technique to use to step over breakpoints depending on
9822cb57 1516 whether the target works in a non-stop way (see use_displaced_stepping). */
fff08868 1517
72d0e2c5 1518static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
fff08868 1519
237fc4c9
PA
1520static void
1521show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1522 struct cmd_list_element *c,
1523 const char *value)
1524{
72d0e2c5 1525 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
3e43a32a
MS
1526 fprintf_filtered (file,
1527 _("Debugger's willingness to use displaced stepping "
1528 "to step over breakpoints is %s (currently %s).\n"),
fbea99ea 1529 value, target_is_non_stop_p () ? "on" : "off");
fff08868 1530 else
3e43a32a
MS
1531 fprintf_filtered (file,
1532 _("Debugger's willingness to use displaced stepping "
1533 "to step over breakpoints is %s.\n"), value);
237fc4c9
PA
1534}
1535
9822cb57
SM
1536/* Return true if the gdbarch implements the required methods to use
1537 displaced stepping. */
1538
1539static bool
1540gdbarch_supports_displaced_stepping (gdbarch *arch)
1541{
187b041e
SM
1542 /* Only check for the presence of `prepare`. The gdbarch verification ensures
1543 that if `prepare` is provided, so is `finish`. */
1544 return gdbarch_displaced_step_prepare_p (arch);
9822cb57
SM
1545}
1546
fff08868 1547 /* Return true if displaced stepping can/should be used to step
3fc8eb30 1548 over breakpoints of thread TP. */
fff08868 1549
9822cb57
SM
1550static bool
1551use_displaced_stepping (thread_info *tp)
237fc4c9 1552{
9822cb57
SM
1553 /* If the user disabled it explicitly, don't use displaced stepping. */
1554 if (can_use_displaced_stepping == AUTO_BOOLEAN_FALSE)
1555 return false;
1556
1557 /* If "auto", only use displaced stepping if the target operates in a non-stop
1558 way. */
1559 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
1560 && !target_is_non_stop_p ())
1561 return false;
1562
1563 gdbarch *gdbarch = get_thread_regcache (tp)->arch ();
1564
1565 /* If the architecture doesn't implement displaced stepping, don't use
1566 it. */
1567 if (!gdbarch_supports_displaced_stepping (gdbarch))
1568 return false;
1569
1570 /* If recording, don't use displaced stepping. */
1571 if (find_record_target () != nullptr)
1572 return false;
1573
9822cb57
SM
1574 /* If displaced stepping failed before for this inferior, don't bother trying
1575 again. */
f5f01699 1576 if (tp->inf->displaced_step_state.failed_before)
9822cb57
SM
1577 return false;
1578
1579 return true;
237fc4c9
PA
1580}
1581
187b041e 1582/* Simple function wrapper around displaced_step_thread_state::reset. */
d8d83535 1583
237fc4c9 1584static void
187b041e 1585displaced_step_reset (displaced_step_thread_state *displaced)
237fc4c9 1586{
d8d83535 1587 displaced->reset ();
237fc4c9
PA
1588}
1589
d8d83535
SM
1590/* A cleanup that wraps displaced_step_reset. We use this instead of, say,
1591 SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()". */
1592
1593using displaced_step_reset_cleanup = FORWARD_SCOPE_EXIT (displaced_step_reset);
237fc4c9 1594
136821d9
SM
1595/* See infrun.h. */
1596
1597std::string
1598displaced_step_dump_bytes (const gdb_byte *buf, size_t len)
237fc4c9 1599{
136821d9 1600 std::string ret;
237fc4c9 1601
136821d9
SM
1602 for (size_t i = 0; i < len; i++)
1603 {
1604 if (i == 0)
1605 ret += string_printf ("%02x", buf[i]);
1606 else
1607 ret += string_printf (" %02x", buf[i]);
1608 }
1609
1610 return ret;
237fc4c9
PA
1611}
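/* For instance, a buffer holding { 0x55, 0x48, 0x89, 0xe5 } is rendered
   as "55 48 89 e5"; this is the format used by the displaced-stepping
   "run ..." debug output further below.  */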
1612
1613/* Prepare to single-step, using displaced stepping.
1614
1615 Note that we cannot use displaced stepping when we have a signal to
1616 deliver. If we have a signal to deliver and an instruction to step
1617 over, then after the step, there will be no indication from the
1618 target whether the thread entered a signal handler or ignored the
1619 signal and stepped over the instruction successfully --- both cases
1620 result in a simple SIGTRAP. In the first case we mustn't do a
1621 fixup, and in the second case we must --- but we can't tell which.
1622 Comments in the code for 'random signals' in handle_inferior_event
1623 explain how we handle this case instead.
1624
bab37966
SM
1625 Returns DISPLACED_STEP_PREPARE_STATUS_OK if preparing was successful -- this
1626 thread is going to be stepped now; DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
1627 if displaced stepping this thread got queued; or
1628 DISPLACED_STEP_PREPARE_STATUS_CANT if this instruction can't be displaced
1629 stepped. */
7f03bd92 1630
bab37966 1631static displaced_step_prepare_status
00431a78 1632displaced_step_prepare_throw (thread_info *tp)
237fc4c9 1633{
00431a78 1634 regcache *regcache = get_thread_regcache (tp);
ac7936df 1635 struct gdbarch *gdbarch = regcache->arch ();
187b041e
SM
1636 displaced_step_thread_state &disp_step_thread_state
1637 = tp->displaced_step_state;
237fc4c9
PA
1638
1639 /* We should never reach this function if the architecture does not
1640 support displaced stepping. */
9822cb57 1641 gdb_assert (gdbarch_supports_displaced_stepping (gdbarch));
237fc4c9 1642
c2829269
PA
1643 /* Nor if the thread isn't meant to step over a breakpoint. */
1644 gdb_assert (tp->control.trap_expected);
1645
c1e36e3e
PA
1646 /* Disable range stepping while executing in the scratch pad. We
1647 want a single-step even if executing the displaced instruction in
1648 the scratch buffer lands within the stepping range (e.g., a
1649 jump/branch). */
1650 tp->control.may_range_step = 0;
1651
187b041e
SM
1652 /* We are about to start a displaced step for this thread. If one is already
1653 in progress, something's wrong. */
1654 gdb_assert (!disp_step_thread_state.in_progress ());
237fc4c9 1655
187b041e 1656 if (tp->inf->displaced_step_state.unavailable)
237fc4c9 1657 {
187b041e
SM
1658 /* The gdbarch tells us it's not worth asking to try a prepare because
1659 it is likely that it will return unavailable, so don't bother asking. */
237fc4c9 1660
136821d9
SM
1661 displaced_debug_printf ("deferring step of %s",
1662 target_pid_to_str (tp->ptid).c_str ());
237fc4c9 1663
28d5518b 1664 global_thread_step_over_chain_enqueue (tp);
bab37966 1665 return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
237fc4c9 1666 }
237fc4c9 1667
187b041e
SM
1668 displaced_debug_printf ("displaced-stepping %s now",
1669 target_pid_to_str (tp->ptid).c_str ());
237fc4c9 1670
00431a78
PA
1671 scoped_restore_current_thread restore_thread;
1672
1673 switch_to_thread (tp);
ad53cd71 1674
187b041e
SM
1675 CORE_ADDR original_pc = regcache_read_pc (regcache);
1676 CORE_ADDR displaced_pc;
237fc4c9 1677
187b041e
SM
1678 displaced_step_prepare_status status
1679 = gdbarch_displaced_step_prepare (gdbarch, tp, displaced_pc);
237fc4c9 1680
187b041e 1681 if (status == DISPLACED_STEP_PREPARE_STATUS_CANT)
d35ae833 1682 {
187b041e
SM
1683 displaced_debug_printf ("failed to prepare (%s)",
1684 target_pid_to_str (tp->ptid).c_str ());
d35ae833 1685
bab37966 1686 return DISPLACED_STEP_PREPARE_STATUS_CANT;
d35ae833 1687 }
187b041e 1688 else if (status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
7f03bd92 1689 {
187b041e
SM
1690 /* Not enough displaced stepping resources available, defer this
1691      request by placing it in the queue.  */
1692
1693 displaced_debug_printf ("not enough resources available, "
1694 "deferring step of %s",
1695 target_pid_to_str (tp->ptid).c_str ());
1696
1697 global_thread_step_over_chain_enqueue (tp);
1698
1699 return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
7f03bd92 1700 }
237fc4c9 1701
187b041e
SM
1702 gdb_assert (status == DISPLACED_STEP_PREPARE_STATUS_OK);
1703
9f5a595d
UW
1704 /* Save the information we need to fix things up if the step
1705 succeeds. */
187b041e 1706 disp_step_thread_state.set (gdbarch);
9f5a595d 1707
187b041e 1708 tp->inf->displaced_step_state.in_progress_count++;
ad53cd71 1709
187b041e
SM
1710 displaced_debug_printf ("prepared successfully thread=%s, "
1711 "original_pc=%s, displaced_pc=%s",
1712 target_pid_to_str (tp->ptid).c_str (),
1713 paddress (gdbarch, original_pc),
1714 paddress (gdbarch, displaced_pc));
237fc4c9 1715
bab37966 1716 return DISPLACED_STEP_PREPARE_STATUS_OK;
237fc4c9
PA
1717}
1718
3fc8eb30
PA
1719/* Wrapper for displaced_step_prepare_throw that disables further
1720 attempts at displaced stepping if we get a memory error. */
1721
bab37966 1722static displaced_step_prepare_status
00431a78 1723displaced_step_prepare (thread_info *thread)
3fc8eb30 1724{
bab37966
SM
1725 displaced_step_prepare_status status
1726 = DISPLACED_STEP_PREPARE_STATUS_CANT;
3fc8eb30 1727
a70b8144 1728 try
3fc8eb30 1729 {
bab37966 1730 status = displaced_step_prepare_throw (thread);
3fc8eb30 1731 }
230d2906 1732 catch (const gdb_exception_error &ex)
3fc8eb30 1733 {
16b41842
PA
1734 if (ex.error != MEMORY_ERROR
1735 && ex.error != NOT_SUPPORTED_ERROR)
eedc3f4f 1736 throw;
3fc8eb30 1737
1eb8556f
SM
1738 infrun_debug_printf ("caught exception, disabling displaced stepping: %s",
1739 ex.what ());
3fc8eb30
PA
1740
1741 /* Be verbose if "set displaced-stepping" is "on", silent if
1742 "auto". */
1743 if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1744 {
fd7dcb94 1745 warning (_("disabling displaced stepping: %s"),
3d6e9d23 1746 ex.what ());
3fc8eb30
PA
1747 }
1748
1749 /* Disable further displaced stepping attempts. */
f5f01699 1750 thread->inf->displaced_step_state.failed_before = 1;
3fc8eb30 1751 }
3fc8eb30 1752
bab37966 1753 return status;
3fc8eb30
PA
1754}
1755
bab37966
SM
1756/* If we displaced stepped an instruction successfully, adjust registers and
1757 memory to yield the same effect the instruction would have had if we had
1758 executed it at its original address, and return
1759 DISPLACED_STEP_FINISH_STATUS_OK. If the instruction didn't complete,
1760 relocate the PC and return DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED.
372316f1 1761
bab37966
SM
1762 If the thread wasn't displaced stepping, return
1763 DISPLACED_STEP_FINISH_STATUS_OK as well. */
1764
1765static displaced_step_finish_status
7def77a1 1766displaced_step_finish (thread_info *event_thread, enum gdb_signal signal)
237fc4c9 1767{
187b041e 1768 displaced_step_thread_state *displaced = &event_thread->displaced_step_state;
fc1cf338 1769
187b041e
SM
1770 /* Was this thread performing a displaced step? */
1771 if (!displaced->in_progress ())
bab37966 1772 return DISPLACED_STEP_FINISH_STATUS_OK;
237fc4c9 1773
187b041e
SM
1774 gdb_assert (event_thread->inf->displaced_step_state.in_progress_count > 0);
1775 event_thread->inf->displaced_step_state.in_progress_count--;
1776
cb71640d
PA
1777 /* Fixup may need to read memory/registers. Switch to the thread
1778 that we're fixing up. Also, target_stopped_by_watchpoint checks
d43b7a2d 1779 the current thread, and displaced_step_restore performs ptid-dependent
328d42d8 1780 memory accesses using current_inferior(). */
00431a78 1781 switch_to_thread (event_thread);
cb71640d 1782
d43b7a2d
TBA
1783 displaced_step_reset_cleanup cleanup (displaced);
1784
187b041e
SM
1785 /* Do the fixup, and release the resources acquired to do the displaced
1786 step. */
1787 return gdbarch_displaced_step_finish (displaced->get_original_gdbarch (),
1788 event_thread, signal);
c2829269 1789}
1c5cfe86 1790
4d9d9d04
PA
1791/* Data to be passed around while handling an event. This data is
1792 discarded between events. */
1793struct execution_control_state
1794{
5b6d1e4f 1795 process_stratum_target *target;
4d9d9d04
PA
1796 ptid_t ptid;
1797 /* The thread that got the event, if this was a thread event; NULL
1798 otherwise. */
1799 struct thread_info *event_thread;
1800
1801 struct target_waitstatus ws;
1802 int stop_func_filled_in;
1803 CORE_ADDR stop_func_start;
1804 CORE_ADDR stop_func_end;
1805 const char *stop_func_name;
1806 int wait_some_more;
1807
1808 /* True if the event thread hit the single-step breakpoint of
1809 another thread. Thus the event doesn't cause a stop, the thread
1810 needs to be single-stepped past the single-step breakpoint before
1811 we can switch back to the original stepping thread. */
1812 int hit_singlestep_breakpoint;
1813};
1814
1815/* Clear ECS and set it to point at TP. */
c2829269
PA
1816
1817static void
4d9d9d04
PA
1818reset_ecs (struct execution_control_state *ecs, struct thread_info *tp)
1819{
1820 memset (ecs, 0, sizeof (*ecs));
1821 ecs->event_thread = tp;
1822 ecs->ptid = tp->ptid;
1823}
1824
1825static void keep_going_pass_signal (struct execution_control_state *ecs);
1826static void prepare_to_wait (struct execution_control_state *ecs);
c4464ade 1827static bool keep_going_stepped_thread (struct thread_info *tp);
8d297bbf 1828static step_over_what thread_still_needs_step_over (struct thread_info *tp);
4d9d9d04
PA
1829
1830/* Are there any pending step-over requests? If so, run all we can
1831 now and return true. Otherwise, return false. */
1832
c4464ade 1833static bool
c2829269
PA
1834start_step_over (void)
1835{
3ec3145c
SM
1836 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
1837
372316f1
PA
1838 /* Don't start a new step-over if we already have an in-line
1839 step-over operation ongoing. */
1840 if (step_over_info_valid_p ())
c4464ade 1841 return false;
372316f1 1842
187b041e
SM
1843 /* Steal the global thread step over chain. As we try to initiate displaced
1844 steps, threads will be enqueued in the global chain if no buffers are
1845 available. If we iterated on the global chain directly, we might iterate
1846 indefinitely. */
8b6a69b2
SM
1847 thread_step_over_list threads_to_step
1848 = std::move (global_thread_step_over_list);
187b041e
SM
1849
1850 infrun_debug_printf ("stealing global queue of threads to step, length = %d",
1851 thread_step_over_chain_length (threads_to_step));
1852
1853 bool started = false;
1854
1855 /* On scope exit (whatever the reason, return or exception), if there are
1856      threads left in the THREADS_TO_STEP chain, put these threads back in the
1857 global list. */
1858 SCOPE_EXIT
1859 {
8b6a69b2 1860 if (threads_to_step.empty ())
187b041e
SM
1861 infrun_debug_printf ("step-over queue now empty");
1862 else
1863 {
1864 infrun_debug_printf ("putting back %d threads to step in global queue",
1865 thread_step_over_chain_length (threads_to_step));
1866
8b6a69b2
SM
1867 global_thread_step_over_chain_enqueue_chain
1868 (std::move (threads_to_step));
187b041e
SM
1869 }
1870 };
1871
8b6a69b2
SM
1872 thread_step_over_list_safe_range range
1873 = make_thread_step_over_list_safe_range (threads_to_step);
1874
1875 for (thread_info *tp : range)
237fc4c9 1876 {
4d9d9d04
PA
1877 struct execution_control_state ecss;
1878 struct execution_control_state *ecs = &ecss;
8d297bbf 1879 step_over_what step_what;
372316f1 1880 int must_be_in_line;
c2829269 1881
c65d6b55
PA
1882 gdb_assert (!tp->stop_requested);
1883
187b041e
SM
1884 if (tp->inf->displaced_step_state.unavailable)
1885 {
1886 /* The arch told us to not even try preparing another displaced step
1887 for this inferior. Just leave the thread in THREADS_TO_STEP, it
1888 will get moved to the global chain on scope exit. */
1889 continue;
1890 }
1891
1892 /* Remove thread from the THREADS_TO_STEP chain. If anything goes wrong
1893 while we try to prepare the displaced step, we don't add it back to
1894 the global step over chain. This is to avoid a thread staying in the
1895      step over chain indefinitely if something goes wrong when resuming it.
1896 If the error is intermittent and it still needs a step over, it will
1897 get enqueued again when we try to resume it normally. */
8b6a69b2 1898 threads_to_step.erase (threads_to_step.iterator_to (*tp));
c2829269 1899
372316f1
PA
1900 step_what = thread_still_needs_step_over (tp);
1901 must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT)
1902 || ((step_what & STEP_OVER_BREAKPOINT)
3fc8eb30 1903 && !use_displaced_stepping (tp)));
372316f1
PA
1904
1905 /* We currently stop all threads of all processes to step-over
1906 in-line. If we need to start a new in-line step-over, let
1907 any pending displaced steps finish first. */
187b041e
SM
1908 if (must_be_in_line && displaced_step_in_progress_any_thread ())
1909 {
1910 global_thread_step_over_chain_enqueue (tp);
1911 continue;
1912 }
c2829269 1913
372316f1 1914 if (tp->control.trap_expected
7846f3aa 1915 || tp->resumed ()
372316f1 1916 || tp->executing)
ad53cd71 1917 {
4d9d9d04
PA
1918 internal_error (__FILE__, __LINE__,
1919 "[%s] has inconsistent state: "
372316f1 1920 "trap_expected=%d, resumed=%d, executing=%d\n",
a068643d 1921 target_pid_to_str (tp->ptid).c_str (),
4d9d9d04 1922 tp->control.trap_expected,
7846f3aa 1923 tp->resumed (),
4d9d9d04 1924 tp->executing);
ad53cd71 1925 }
1c5cfe86 1926
1eb8556f
SM
1927 infrun_debug_printf ("resuming [%s] for step-over",
1928 target_pid_to_str (tp->ptid).c_str ());
4d9d9d04
PA
1929
1930 /* keep_going_pass_signal skips the step-over if the breakpoint
1931 is no longer inserted. In all-stop, we want to keep looking
1932 for a thread that needs a step-over instead of resuming TP,
1933 because we wouldn't be able to resume anything else until the
1934 target stops again. In non-stop, the resume always resumes
1935 only TP, so it's OK to let the thread resume freely. */
fbea99ea 1936 if (!target_is_non_stop_p () && !step_what)
4d9d9d04 1937 continue;
8550d3b3 1938
00431a78 1939 switch_to_thread (tp);
4d9d9d04
PA
1940 reset_ecs (ecs, tp);
1941 keep_going_pass_signal (ecs);
1c5cfe86 1942
4d9d9d04
PA
1943 if (!ecs->wait_some_more)
1944 error (_("Command aborted."));
1c5cfe86 1945
187b041e
SM
1946 /* If the thread's step over could not be initiated because no buffers
1947 were available, it was re-added to the global step over chain. */
7846f3aa 1948 if (tp->resumed ())
187b041e
SM
1949 {
1950 infrun_debug_printf ("[%s] was resumed.",
1951 target_pid_to_str (tp->ptid).c_str ());
1952 gdb_assert (!thread_is_in_step_over_chain (tp));
1953 }
1954 else
1955 {
1956 infrun_debug_printf ("[%s] was NOT resumed.",
1957 target_pid_to_str (tp->ptid).c_str ());
1958 gdb_assert (thread_is_in_step_over_chain (tp));
1959 }
372316f1
PA
1960
1961 /* If we started a new in-line step-over, we're done. */
1962 if (step_over_info_valid_p ())
1963 {
1964 gdb_assert (tp->control.trap_expected);
187b041e
SM
1965 started = true;
1966 break;
372316f1
PA
1967 }
1968
fbea99ea 1969 if (!target_is_non_stop_p ())
4d9d9d04
PA
1970 {
1971 /* On all-stop, shouldn't have resumed unless we needed a
1972 step over. */
1973 gdb_assert (tp->control.trap_expected
1974 || tp->step_after_step_resume_breakpoint);
1975
1976 /* With remote targets (at least), in all-stop, we can't
1977 issue any further remote commands until the program stops
1978 again. */
187b041e
SM
1979 started = true;
1980 break;
1c5cfe86 1981 }
c2829269 1982
4d9d9d04
PA
1983 /* Either the thread no longer needed a step-over, or a new
1984 displaced stepping sequence started. Even in the latter
1985 case, continue looking. Maybe we can also start another
1986	 displaced step on a thread of another process.  */
237fc4c9 1987 }
4d9d9d04 1988
187b041e 1989 return started;
237fc4c9
PA
1990}
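/* A minimal sketch of the queue-stealing pattern used by
   start_step_over above, written with plain standard-library types
   rather than GDB's: move the shared work list into a local one before
   iterating, so that items re-enqueued while we work cannot make the
   loop run indefinitely.  The names below are hypothetical.  */

#if 0  /* Example only; not built as part of GDB.  */
#include <list>
#include <utility>

static std::list<int> global_work;

/* Returns true if ITEM could be handled now; may push new work onto
   GLOBAL_WORK.  */
static bool try_handle (int item);

static void
drain_work_once ()
{
  /* Steal the global list; anything try_handle re-enqueues goes onto
     the (now empty) global list, not onto the list we iterate.  */
  std::list<int> local = std::move (global_work);

  while (!local.empty ())
    {
      int item = local.front ();
      local.pop_front ();

      if (!try_handle (item))
	/* Could not be handled now; queue it again for later.  */
	global_work.push_back (item);
    }
}
#endif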
1991
5231c1fd
PA
1992/* Update global variables holding ptids to hold NEW_PTID if they were
1993 holding OLD_PTID. */
1994static void
b161a60d
SM
1995infrun_thread_ptid_changed (process_stratum_target *target,
1996 ptid_t old_ptid, ptid_t new_ptid)
5231c1fd 1997{
b161a60d
SM
1998 if (inferior_ptid == old_ptid
1999 && current_inferior ()->process_target () == target)
5231c1fd 2000 inferior_ptid = new_ptid;
5231c1fd
PA
2001}
2002
237fc4c9 2003\f
c906108c 2004
53904c9e
AC
2005static const char schedlock_off[] = "off";
2006static const char schedlock_on[] = "on";
2007static const char schedlock_step[] = "step";
f2665db5 2008static const char schedlock_replay[] = "replay";
40478521 2009static const char *const scheduler_enums[] = {
ef346e04
AC
2010 schedlock_off,
2011 schedlock_on,
2012 schedlock_step,
f2665db5 2013 schedlock_replay,
ef346e04
AC
2014 NULL
2015};
f2665db5 2016static const char *scheduler_mode = schedlock_replay;
920d2a44
AC
2017static void
2018show_scheduler_mode (struct ui_file *file, int from_tty,
2019 struct cmd_list_element *c, const char *value)
2020{
3e43a32a
MS
2021 fprintf_filtered (file,
2022 _("Mode for locking scheduler "
2023 "during execution is \"%s\".\n"),
920d2a44
AC
2024 value);
2025}
c906108c
SS
2026
2027static void
eb4c3f4a 2028set_schedlock_func (const char *args, int from_tty, struct cmd_list_element *c)
c906108c 2029{
8a3ecb79 2030 if (!target_can_lock_scheduler ())
eefe576e
AC
2031 {
2032 scheduler_mode = schedlock_off;
d777bf0d
SM
2033 error (_("Target '%s' cannot support this command."),
2034 target_shortname ());
eefe576e 2035 }
c906108c
SS
2036}
2037
d4db2f36
PA
2038/* True if execution commands resume all threads of all processes by
2039 default; otherwise, resume only threads of the current inferior
2040 process. */
491144b5 2041bool sched_multi = false;
d4db2f36 2042
22b11ba9
LS
2043/* Try to set up for software single stepping.  Return true if target_resume()
2044 should use hardware single step.
2facfe5c 2045
22b11ba9 2046 GDBARCH the current gdbarch. */
2facfe5c 2047
c4464ade 2048static bool
22b11ba9 2049maybe_software_singlestep (struct gdbarch *gdbarch)
2facfe5c 2050{
c4464ade 2051 bool hw_step = true;
2facfe5c 2052
f02253f1 2053 if (execution_direction == EXEC_FORWARD
93f9a11f
YQ
2054 && gdbarch_software_single_step_p (gdbarch))
2055 hw_step = !insert_single_step_breakpoints (gdbarch);
2056
2facfe5c
DD
2057 return hw_step;
2058}
c906108c 2059
f3263aa4
PA
2060/* See infrun.h. */
2061
09cee04b
PA
2062ptid_t
2063user_visible_resume_ptid (int step)
2064{
f3263aa4 2065 ptid_t resume_ptid;
09cee04b 2066
09cee04b
PA
2067 if (non_stop)
2068 {
2069 /* With non-stop mode on, threads are always handled
2070 individually. */
2071 resume_ptid = inferior_ptid;
2072 }
2073 else if ((scheduler_mode == schedlock_on)
03d46957 2074 || (scheduler_mode == schedlock_step && step))
09cee04b 2075 {
f3263aa4
PA
2076 /* User-settable 'scheduler' mode requires solo thread
2077 resume. */
09cee04b
PA
2078 resume_ptid = inferior_ptid;
2079 }
f2665db5
MM
2080 else if ((scheduler_mode == schedlock_replay)
2081 && target_record_will_replay (minus_one_ptid, execution_direction))
2082 {
2083 /* User-settable 'scheduler' mode requires solo thread resume in replay
2084 mode. */
2085 resume_ptid = inferior_ptid;
2086 }
f3263aa4
PA
2087 else if (!sched_multi && target_supports_multi_process ())
2088 {
2089 /* Resume all threads of the current process (and none of other
2090 processes). */
e99b03dc 2091 resume_ptid = ptid_t (inferior_ptid.pid ());
f3263aa4
PA
2092 }
2093 else
2094 {
2095 /* Resume all threads of all processes. */
2096 resume_ptid = RESUME_ALL;
2097 }
09cee04b
PA
2098
2099 return resume_ptid;
2100}
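/* For example: in non-stop mode, or when scheduler locking applies,
   only INFERIOR_PTID is resumed.  Otherwise, with SCHED_MULTI off (the
   default) and a target that supports multiple processes, all threads
   of the current process are resumed; failing that, all threads of all
   processes are (RESUME_ALL).  */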
2101
5b6d1e4f
PA
2102/* See infrun.h. */
2103
2104process_stratum_target *
2105user_visible_resume_target (ptid_t resume_ptid)
2106{
2107 return (resume_ptid == minus_one_ptid && sched_multi
2108 ? NULL
2109 : current_inferior ()->process_target ());
2110}
2111
fbea99ea
PA
2112/* Return a ptid representing the set of threads that we will resume,
2113   from the perspective of the target, assuming run control handling
2114   does not require leaving some threads stopped (e.g., stepping past a
2115 breakpoint). USER_STEP indicates whether we're about to start the
2116 target for a stepping command. */
2117
2118static ptid_t
2119internal_resume_ptid (int user_step)
2120{
2121 /* In non-stop, we always control threads individually. Note that
2122 the target may always work in non-stop mode even with "set
2123 non-stop off", in which case user_visible_resume_ptid could
2124 return a wildcard ptid. */
2125 if (target_is_non_stop_p ())
2126 return inferior_ptid;
2127 else
2128 return user_visible_resume_ptid (user_step);
2129}
2130
64ce06e4
PA
2131/* Wrapper for target_resume that handles infrun-specific
2132 bookkeeping. */
2133
2134static void
c4464ade 2135do_target_resume (ptid_t resume_ptid, bool step, enum gdb_signal sig)
64ce06e4
PA
2136{
2137 struct thread_info *tp = inferior_thread ();
2138
c65d6b55
PA
2139 gdb_assert (!tp->stop_requested);
2140
64ce06e4 2141 /* Install inferior's terminal modes. */
223ffa71 2142 target_terminal::inferior ();
64ce06e4
PA
2143
2144 /* Avoid confusing the next resume, if the next stop/resume
2145 happens to apply to another thread. */
1edb66d8 2146 tp->set_stop_signal (GDB_SIGNAL_0);
64ce06e4 2147
8f572e5c
PA
2148 /* Advise target which signals may be handled silently.
2149
2150 If we have removed breakpoints because we are stepping over one
2151 in-line (in any thread), we need to receive all signals to avoid
2152 accidentally skipping a breakpoint during execution of a signal
2153 handler.
2154
2155 Likewise if we're displaced stepping, otherwise a trap for a
2156 breakpoint in a signal handler might be confused with the
7def77a1 2157 displaced step finishing. We don't make the displaced_step_finish
8f572e5c
PA
2158 step distinguish the cases instead, because:
2159
2160 - a backtrace while stopped in the signal handler would show the
2161 scratch pad as frame older than the signal handler, instead of
2162 the real mainline code.
2163
2164 - when the thread is later resumed, the signal handler would
2165 return to the scratch pad area, which would no longer be
2166 valid. */
2167 if (step_over_info_valid_p ()
00431a78 2168 || displaced_step_in_progress (tp->inf))
adc6a863 2169 target_pass_signals ({});
64ce06e4 2170 else
adc6a863 2171 target_pass_signals (signal_pass);
64ce06e4
PA
2172
2173 target_resume (resume_ptid, step, sig);
85ad3aaf 2174
5b6d1e4f
PA
2175 if (target_can_async_p ())
2176 target_async (1);
64ce06e4
PA
2177}
2178
d930703d 2179/* Resume the inferior. SIG is the signal to give the inferior
71d378ae
PA
2180 (GDB_SIGNAL_0 for none). Note: don't call this directly; instead
2181 call 'resume', which handles exceptions. */
c906108c 2182
71d378ae
PA
2183static void
2184resume_1 (enum gdb_signal sig)
c906108c 2185{
515630c5 2186 struct regcache *regcache = get_current_regcache ();
ac7936df 2187 struct gdbarch *gdbarch = regcache->arch ();
4e1c45ea 2188 struct thread_info *tp = inferior_thread ();
8b86c959 2189 const address_space *aspace = regcache->aspace ();
b0f16a3e 2190 ptid_t resume_ptid;
856e7dd6
PA
2191 /* This represents the user's step vs continue request. When
2192 deciding whether "set scheduler-locking step" applies, it's the
2193 user's intention that counts. */
2194 const int user_step = tp->control.stepping_command;
64ce06e4
PA
2195 /* This represents what we'll actually request the target to do.
2196 This can decay from a step to a continue, if e.g., we need to
2197 implement single-stepping with breakpoints (software
2198 single-step). */
c4464ade 2199 bool step;
c7e8a53c 2200
c65d6b55 2201 gdb_assert (!tp->stop_requested);
c2829269
PA
2202 gdb_assert (!thread_is_in_step_over_chain (tp));
2203
1edb66d8 2204 if (tp->has_pending_waitstatus ())
372316f1 2205 {
1eb8556f
SM
2206 infrun_debug_printf
2207 ("thread %s has pending wait "
2208 "status %s (currently_stepping=%d).",
2209 target_pid_to_str (tp->ptid).c_str (),
1edb66d8 2210 target_waitstatus_to_string (&tp->pending_waitstatus ()).c_str (),
1eb8556f 2211 currently_stepping (tp));
372316f1 2212
5b6d1e4f 2213 tp->inf->process_target ()->threads_executing = true;
7846f3aa 2214 tp->set_resumed (true);
372316f1
PA
2215
2216 /* FIXME: What should we do if we are supposed to resume this
2217 thread with a signal? Maybe we should maintain a queue of
2218 pending signals to deliver. */
2219 if (sig != GDB_SIGNAL_0)
2220 {
fd7dcb94 2221 warning (_("Couldn't deliver signal %s to %s."),
a068643d
TT
2222 gdb_signal_to_name (sig),
2223 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
2224 }
2225
1edb66d8 2226 tp->set_stop_signal (GDB_SIGNAL_0);
372316f1
PA
2227
2228 if (target_can_async_p ())
9516f85a
AB
2229 {
2230 target_async (1);
2231 /* Tell the event loop we have an event to process. */
2232 mark_async_event_handler (infrun_async_inferior_event_token);
2233 }
372316f1
PA
2234 return;
2235 }
2236
2237 tp->stepped_breakpoint = 0;
2238
6b403daa
PA
2239 /* Depends on stepped_breakpoint. */
2240 step = currently_stepping (tp);
2241
74609e71
YQ
2242 if (current_inferior ()->waiting_for_vfork_done)
2243 {
48f9886d
PA
2244 /* Don't try to single-step a vfork parent that is waiting for
2245 the child to get out of the shared memory region (by exec'ing
2246 or exiting). This is particularly important on software
2247 single-step archs, as the child process would trip on the
2248 software single step breakpoint inserted for the parent
2249 process. Since the parent will not actually execute any
2250 instruction until the child is out of the shared region (such
2251 are vfork's semantics), it is safe to simply continue it.
2252 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2253 the parent, and tell it to `keep_going', which automatically
2254      re-sets it to stepping.  */
1eb8556f 2255 infrun_debug_printf ("resume : clear step");
c4464ade 2256 step = false;
74609e71
YQ
2257 }
2258
7ca9b62a
TBA
2259 CORE_ADDR pc = regcache_read_pc (regcache);
2260
1eb8556f
SM
2261 infrun_debug_printf ("step=%d, signal=%s, trap_expected=%d, "
2262 "current thread [%s] at %s",
2263 step, gdb_signal_to_symbol_string (sig),
2264 tp->control.trap_expected,
2265 target_pid_to_str (inferior_ptid).c_str (),
2266 paddress (gdbarch, pc));
c906108c 2267
c2c6d25f
JM
2268 /* Normally, by the time we reach `resume', the breakpoints are either
2269 removed or inserted, as appropriate. The exception is if we're sitting
2270 at a permanent breakpoint; we need to step over it, but permanent
2271 breakpoints can't be removed. So we have to test for it here. */
6c95b8df 2272 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
6d350bb5 2273 {
af48d08f
PA
2274 if (sig != GDB_SIGNAL_0)
2275 {
2276 /* We have a signal to pass to the inferior. The resume
2277 may, or may not take us to the signal handler. If this
2278 is a step, we'll need to stop in the signal handler, if
2279 there's one, (if the target supports stepping into
2280 handlers), or in the next mainline instruction, if
2281 there's no handler. If this is a continue, we need to be
2282 sure to run the handler with all breakpoints inserted.
2283 In all cases, set a breakpoint at the current address
2284 (where the handler returns to), and once that breakpoint
2285 is hit, resume skipping the permanent breakpoint. If
2286 that breakpoint isn't hit, then we've stepped into the
2287 signal handler (or hit some other event). We'll delete
2288 the step-resume breakpoint then. */
2289
1eb8556f
SM
2290 infrun_debug_printf ("resume: skipping permanent breakpoint, "
2291 "deliver signal first");
af48d08f
PA
2292
2293 clear_step_over_info ();
2294 tp->control.trap_expected = 0;
2295
2296 if (tp->control.step_resume_breakpoint == NULL)
2297 {
2298 /* Set a "high-priority" step-resume, as we don't want
2299 user breakpoints at PC to trigger (again) when this
2300 hits. */
2301 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2302 gdb_assert (tp->control.step_resume_breakpoint->loc->permanent);
2303
2304 tp->step_after_step_resume_breakpoint = step;
2305 }
2306
2307 insert_breakpoints ();
2308 }
2309 else
2310 {
2311 /* There's no signal to pass, we can go ahead and skip the
2312 permanent breakpoint manually. */
1eb8556f 2313 infrun_debug_printf ("skipping permanent breakpoint");
af48d08f
PA
2314 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
2315 /* Update pc to reflect the new address from which we will
2316 execute instructions. */
2317 pc = regcache_read_pc (regcache);
2318
2319 if (step)
2320 {
2321 /* We've already advanced the PC, so the stepping part
2322 is done. Now we need to arrange for a trap to be
2323 reported to handle_inferior_event. Set a breakpoint
2324 at the current PC, and run to it. Don't update
2325 prev_pc, because if we end in
44a1ee51
PA
2326 switch_back_to_stepped_thread, we want the "expected
2327 thread advanced also" branch to be taken. IOW, we
2328 don't want this thread to step further from PC
af48d08f 2329 (overstep). */
1ac806b8 2330 gdb_assert (!step_over_info_valid_p ());
af48d08f
PA
2331 insert_single_step_breakpoint (gdbarch, aspace, pc);
2332 insert_breakpoints ();
2333
fbea99ea 2334 resume_ptid = internal_resume_ptid (user_step);
c4464ade 2335 do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
7846f3aa 2336 tp->set_resumed (true);
af48d08f
PA
2337 return;
2338 }
2339 }
6d350bb5 2340 }
c2c6d25f 2341
c1e36e3e
PA
2342 /* If we have a breakpoint to step over, make sure to do a single
2343 step only. Same if we have software watchpoints. */
2344 if (tp->control.trap_expected || bpstat_should_step ())
2345 tp->control.may_range_step = 0;
2346
7da6a5b9
LM
2347 /* If displaced stepping is enabled, step over breakpoints by executing a
2348 copy of the instruction at a different address.
237fc4c9
PA
2349
2350 We can't use displaced stepping when we have a signal to deliver;
2351 the comments for displaced_step_prepare explain why. The
2352 comments in the handle_inferior event for dealing with 'random
74609e71
YQ
2353 signals' explain what we do instead.
2354
2355      We can't use displaced stepping when we are waiting for a vfork_done
2356      event; displaced stepping would break the vfork child in the same way a
2357      software single-step breakpoint would.  */
3fc8eb30
PA
2358 if (tp->control.trap_expected
2359 && use_displaced_stepping (tp)
cb71640d 2360 && !step_over_info_valid_p ()
a493e3e2 2361 && sig == GDB_SIGNAL_0
74609e71 2362 && !current_inferior ()->waiting_for_vfork_done)
237fc4c9 2363 {
bab37966
SM
2364 displaced_step_prepare_status prepare_status
2365 = displaced_step_prepare (tp);
fc1cf338 2366
bab37966 2367 if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
d56b7306 2368 {
1eb8556f 2369 infrun_debug_printf ("Got placed in step-over queue");
4d9d9d04
PA
2370
2371 tp->control.trap_expected = 0;
d56b7306
VP
2372 return;
2373 }
bab37966 2374 else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_CANT)
3fc8eb30
PA
2375 {
2376	  /* Fall back to stepping over the breakpoint in-line.  */
2377
2378 if (target_is_non_stop_p ())
2379 stop_all_threads ();
2380
a01bda52 2381 set_step_over_info (regcache->aspace (),
21edc42f 2382 regcache_read_pc (regcache), 0, tp->global_num);
3fc8eb30 2383
22b11ba9 2384 step = maybe_software_singlestep (gdbarch);
3fc8eb30
PA
2385
2386 insert_breakpoints ();
2387 }
bab37966 2388 else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_OK)
3fc8eb30 2389 {
3fc8eb30
PA
2390 /* Update pc to reflect the new address from which we will
2391 execute instructions due to displaced stepping. */
00431a78 2392 pc = regcache_read_pc (get_thread_regcache (tp));
ca7781d2 2393
40a53766 2394 step = gdbarch_displaced_step_hw_singlestep (gdbarch);
3fc8eb30 2395 }
bab37966
SM
2396 else
2397 gdb_assert_not_reached (_("Invalid displaced_step_prepare_status "
2398 "value."));
237fc4c9
PA
2399 }
2400
2facfe5c 2401 /* Do we need to do it the hard way, w/temp breakpoints? */
99e40580 2402 else if (step)
22b11ba9 2403 step = maybe_software_singlestep (gdbarch);
c906108c 2404
30852783
UW
2405 /* Currently, our software single-step implementation leads to different
2406 results than hardware single-stepping in one situation: when stepping
2407 into delivering a signal which has an associated signal handler,
2408 hardware single-step will stop at the first instruction of the handler,
2409 while software single-step will simply skip execution of the handler.
2410
2411 For now, this difference in behavior is accepted since there is no
2412 easy way to actually implement single-stepping into a signal handler
2413 without kernel support.
2414
2415 However, there is one scenario where this difference leads to follow-on
2416 problems: if we're stepping off a breakpoint by removing all breakpoints
2417 and then single-stepping. In this case, the software single-step
2418 behavior means that even if there is a *breakpoint* in the signal
2419 handler, GDB still would not stop.
2420
2421 Fortunately, we can at least fix this particular issue. We detect
2422 here the case where we are about to deliver a signal while software
2423 single-stepping with breakpoints removed. In this situation, we
2424 revert the decisions to remove all breakpoints and insert single-
2425 step breakpoints, and instead we install a step-resume breakpoint
2426 at the current address, deliver the signal without stepping, and
2427 once we arrive back at the step-resume breakpoint, actually step
2428 over the breakpoint we originally wanted to step over. */
34b7e8a6 2429 if (thread_has_single_step_breakpoints_set (tp)
6cc83d2a
PA
2430 && sig != GDB_SIGNAL_0
2431 && step_over_info_valid_p ())
30852783
UW
2432 {
2433 /* If we have nested signals or a pending signal is delivered
7da6a5b9 2434 immediately after a handler returns, might already have
30852783
UW
2435 a step-resume breakpoint set on the earlier handler. We cannot
2436 set another step-resume breakpoint; just continue on until the
2437 original breakpoint is hit. */
2438 if (tp->control.step_resume_breakpoint == NULL)
2439 {
2c03e5be 2440 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
30852783
UW
2441 tp->step_after_step_resume_breakpoint = 1;
2442 }
2443
34b7e8a6 2444 delete_single_step_breakpoints (tp);
30852783 2445
31e77af2 2446 clear_step_over_info ();
30852783 2447 tp->control.trap_expected = 0;
31e77af2
PA
2448
2449 insert_breakpoints ();
30852783
UW
2450 }
2451
b0f16a3e
SM
2452 /* If STEP is set, it's a request to use hardware stepping
2453 facilities. But in that case, we should never
2454 use singlestep breakpoint. */
34b7e8a6 2455 gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));
dfcd3bfb 2456
fbea99ea 2457 /* Decide the set of threads to ask the target to resume. */
1946c4cc 2458 if (tp->control.trap_expected)
b0f16a3e
SM
2459 {
2460 /* We're allowing a thread to run past a breakpoint it has
1946c4cc
YQ
2461 hit, either by single-stepping the thread with the breakpoint
2462 removed, or by displaced stepping, with the breakpoint inserted.
2463 In the former case, we need to single-step only this thread,
2464 and keep others stopped, as they can miss this breakpoint if
2465 allowed to run. That's not really a problem for displaced
2466 stepping, but, we still keep other threads stopped, in case
2467 another thread is also stopped for a breakpoint waiting for
2468 its turn in the displaced stepping queue. */
b0f16a3e
SM
2469 resume_ptid = inferior_ptid;
2470 }
fbea99ea
PA
2471 else
2472 resume_ptid = internal_resume_ptid (user_step);
d4db2f36 2473
7f5ef605
PA
2474 if (execution_direction != EXEC_REVERSE
2475 && step && breakpoint_inserted_here_p (aspace, pc))
b0f16a3e 2476 {
372316f1
PA
2477 /* There are two cases where we currently need to step a
2478 breakpoint instruction when we have a signal to deliver:
2479
2480 - See handle_signal_stop where we handle random signals that
2481      could take us out of the stepping range.  Normally, in
2482 that case we end up continuing (instead of stepping) over the
7f5ef605
PA
2483 signal handler with a breakpoint at PC, but there are cases
2484 where we should _always_ single-step, even if we have a
2485 step-resume breakpoint, like when a software watchpoint is
2486 set. Assuming single-stepping and delivering a signal at the
2487      same time would take us to the signal handler, then we could
2488 have removed the breakpoint at PC to step over it. However,
2489 some hardware step targets (like e.g., Mac OS) can't step
2490 into signal handlers, and for those, we need to leave the
2491 breakpoint at PC inserted, as otherwise if the handler
2492 recurses and executes PC again, it'll miss the breakpoint.
2493 So we leave the breakpoint inserted anyway, but we need to
2494 record that we tried to step a breakpoint instruction, so
372316f1
PA
2495 that adjust_pc_after_break doesn't end up confused.
2496
dda83cd7 2497 - In non-stop if we insert a breakpoint (e.g., a step-resume)
372316f1
PA
2498 in one thread after another thread that was stepping had been
2499 momentarily paused for a step-over. When we re-resume the
2500 stepping thread, it may be resumed from that address with a
2501 breakpoint that hasn't trapped yet. Seen with
2502 gdb.threads/non-stop-fair-events.exp, on targets that don't
2503 do displaced stepping. */
2504
1eb8556f
SM
2505 infrun_debug_printf ("resume: [%s] stepped breakpoint",
2506 target_pid_to_str (tp->ptid).c_str ());
7f5ef605
PA
2507
2508 tp->stepped_breakpoint = 1;
2509
b0f16a3e
SM
2510 /* Most targets can step a breakpoint instruction, thus
2511 executing it normally. But if this one cannot, just
2512 continue and we will hit it anyway. */
7f5ef605 2513 if (gdbarch_cannot_step_breakpoint (gdbarch))
c4464ade 2514 step = false;
b0f16a3e 2515 }
ef5cf84e 2516
b0f16a3e 2517 if (debug_displaced
cb71640d 2518 && tp->control.trap_expected
3fc8eb30 2519 && use_displaced_stepping (tp)
cb71640d 2520 && !step_over_info_valid_p ())
b0f16a3e 2521 {
00431a78 2522 struct regcache *resume_regcache = get_thread_regcache (tp);
ac7936df 2523 struct gdbarch *resume_gdbarch = resume_regcache->arch ();
b0f16a3e
SM
2524 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
2525 gdb_byte buf[4];
2526
b0f16a3e 2527 read_memory (actual_pc, buf, sizeof (buf));
136821d9
SM
2528 displaced_debug_printf ("run %s: %s",
2529 paddress (resume_gdbarch, actual_pc),
2530 displaced_step_dump_bytes
2531 (buf, sizeof (buf)).c_str ());
b0f16a3e 2532 }
237fc4c9 2533
b0f16a3e
SM
2534 if (tp->control.may_range_step)
2535 {
2536 /* If we're resuming a thread with the PC out of the step
2537 range, then we're doing some nested/finer run control
2538 operation, like stepping the thread out of the dynamic
2539 linker or the displaced stepping scratch pad. We
2540 shouldn't have allowed a range step then. */
2541 gdb_assert (pc_in_thread_step_range (pc, tp));
2542 }
c1e36e3e 2543
64ce06e4 2544 do_target_resume (resume_ptid, step, sig);
7846f3aa 2545 tp->set_resumed (true);
c906108c 2546}
71d378ae
PA
2547
2548/* Resume the inferior. SIG is the signal to give the inferior
2549 (GDB_SIGNAL_0 for none). This is a wrapper around 'resume_1' that
2550 rolls back state on error. */
2551
aff4e175 2552static void
71d378ae
PA
2553resume (gdb_signal sig)
2554{
a70b8144 2555 try
71d378ae
PA
2556 {
2557 resume_1 (sig);
2558 }
230d2906 2559 catch (const gdb_exception &ex)
71d378ae
PA
2560 {
2561 /* If resuming is being aborted for any reason, delete any
2562 single-step breakpoint resume_1 may have created, to avoid
2563 confusing the following resumption, and to avoid leaving
2564 single-step breakpoints perturbing other threads, in case
2565 we're running in non-stop mode. */
2566 if (inferior_ptid != null_ptid)
2567 delete_single_step_breakpoints (inferior_thread ());
eedc3f4f 2568 throw;
71d378ae 2569 }
71d378ae
PA
2570}
2571
c906108c 2572\f
237fc4c9 2573/* Proceeding. */
c906108c 2574
4c2f2a79
PA
2575/* See infrun.h. */
2576
2577/* Counter that tracks number of user visible stops. This can be used
2578 to tell whether a command has proceeded the inferior past the
2579 current location. This allows e.g., inferior function calls in
2580 breakpoint commands to not interrupt the command list. When the
2581 call finishes successfully, the inferior is standing at the same
2582 breakpoint as if nothing happened (and so we don't call
2583 normal_stop). */
2584static ULONGEST current_stop_id;
2585
2586/* See infrun.h. */
2587
2588ULONGEST
2589get_stop_id (void)
2590{
2591 return current_stop_id;
2592}
2593
2594/* Called when we report a user visible stop. */
2595
2596static void
2597new_stop_id (void)
2598{
2599 current_stop_id++;
2600}
2601
c906108c
SS
2602/* Clear out all variables saying what to do when inferior is continued.
2603 First do this, then set the ones you want, then call `proceed'. */
2604
a7212384
UW
2605static void
2606clear_proceed_status_thread (struct thread_info *tp)
c906108c 2607{
1eb8556f 2608 infrun_debug_printf ("%s", target_pid_to_str (tp->ptid).c_str ());
d6b48e9c 2609
372316f1
PA
2610 /* If we're starting a new sequence, then the previous finished
2611 single-step is no longer relevant. */
1edb66d8 2612 if (tp->has_pending_waitstatus ())
372316f1 2613 {
1edb66d8 2614 if (tp->stop_reason () == TARGET_STOPPED_BY_SINGLE_STEP)
372316f1 2615 {
1eb8556f
SM
2616 infrun_debug_printf ("pending event of %s was a finished step. "
2617 "Discarding.",
2618 target_pid_to_str (tp->ptid).c_str ());
372316f1 2619
1edb66d8
SM
2620 tp->clear_pending_waitstatus ();
2621 tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
372316f1 2622 }
1eb8556f 2623 else
372316f1 2624 {
1eb8556f
SM
2625 infrun_debug_printf
2626 ("thread %s has pending wait status %s (currently_stepping=%d).",
2627 target_pid_to_str (tp->ptid).c_str (),
1edb66d8 2628 target_waitstatus_to_string (&tp->pending_waitstatus ()).c_str (),
1eb8556f 2629 currently_stepping (tp));
372316f1
PA
2630 }
2631 }
2632
70509625
PA
2633   /* If this signal should not be seen by the program, give it zero.
2634 Used for debugging signals. */
1edb66d8
SM
2635 if (!signal_pass_state (tp->stop_signal ()))
2636 tp->set_stop_signal (GDB_SIGNAL_0);
70509625 2637
46e3ed7f 2638 delete tp->thread_fsm;
243a9253
PA
2639 tp->thread_fsm = NULL;
2640
16c381f0
JK
2641 tp->control.trap_expected = 0;
2642 tp->control.step_range_start = 0;
2643 tp->control.step_range_end = 0;
c1e36e3e 2644 tp->control.may_range_step = 0;
16c381f0
JK
2645 tp->control.step_frame_id = null_frame_id;
2646 tp->control.step_stack_frame_id = null_frame_id;
2647 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
885eeb5b 2648 tp->control.step_start_function = NULL;
a7212384 2649 tp->stop_requested = 0;
4e1c45ea 2650
16c381f0 2651 tp->control.stop_step = 0;
32400beb 2652
16c381f0 2653 tp->control.proceed_to_finish = 0;
414c69f7 2654
856e7dd6 2655 tp->control.stepping_command = 0;
17b2616c 2656
a7212384 2657 /* Discard any remaining commands or status from previous stop. */
16c381f0 2658 bpstat_clear (&tp->control.stop_bpstat);
a7212384 2659}
32400beb 2660
a7212384 2661void
70509625 2662clear_proceed_status (int step)
a7212384 2663{
f2665db5
MM
2664 /* With scheduler-locking replay, stop replaying other threads if we're
2665 not replaying the user-visible resume ptid.
2666
2667 This is a convenience feature to not require the user to explicitly
2668 stop replaying the other threads. We're assuming that the user's
2669 intent is to resume tracing the recorded process. */
2670 if (!non_stop && scheduler_mode == schedlock_replay
2671 && target_record_is_replaying (minus_one_ptid)
2672 && !target_record_will_replay (user_visible_resume_ptid (step),
2673 execution_direction))
2674 target_record_stop_replaying ();
2675
08036331 2676 if (!non_stop && inferior_ptid != null_ptid)
6c95b8df 2677 {
08036331 2678 ptid_t resume_ptid = user_visible_resume_ptid (step);
5b6d1e4f
PA
2679 process_stratum_target *resume_target
2680 = user_visible_resume_target (resume_ptid);
70509625
PA
2681
2682 /* In all-stop mode, delete the per-thread status of all threads
2683 we're about to resume, implicitly and explicitly. */
5b6d1e4f 2684 for (thread_info *tp : all_non_exited_threads (resume_target, resume_ptid))
08036331 2685 clear_proceed_status_thread (tp);
6c95b8df
PA
2686 }
2687
d7e15655 2688 if (inferior_ptid != null_ptid)
a7212384
UW
2689 {
2690 struct inferior *inferior;
2691
2692 if (non_stop)
2693 {
6c95b8df
PA
2694 /* If in non-stop mode, only delete the per-thread status of
2695 the current thread. */
a7212384
UW
2696 clear_proceed_status_thread (inferior_thread ());
2697 }
6c95b8df 2698
d6b48e9c 2699 inferior = current_inferior ();
16c381f0 2700 inferior->control.stop_soon = NO_STOP_QUIETLY;
4e1c45ea
PA
2701 }
2702
76727919 2703 gdb::observers::about_to_proceed.notify ();
c906108c
SS
2704}
2705
99619bea
PA
2706/* Returns true if TP is still stopped at a breakpoint that needs
2707 stepping-over in order to make progress. If the breakpoint is gone
2708 meanwhile, we can skip the whole step-over dance. */
ea67f13b 2709
c4464ade 2710static bool
6c4cfb24 2711thread_still_needs_step_over_bp (struct thread_info *tp)
99619bea
PA
2712{
2713 if (tp->stepping_over_breakpoint)
2714 {
00431a78 2715 struct regcache *regcache = get_thread_regcache (tp);
99619bea 2716
a01bda52 2717 if (breakpoint_here_p (regcache->aspace (),
af48d08f
PA
2718 regcache_read_pc (regcache))
2719 == ordinary_breakpoint_here)
c4464ade 2720 return true;
99619bea
PA
2721
2722 tp->stepping_over_breakpoint = 0;
2723 }
2724
c4464ade 2725 return false;
99619bea
PA
2726}
2727
6c4cfb24
PA
2728/* Check whether thread TP still needs to start a step-over in order
2729   to make progress when resumed.  Returns a bitwise OR of enum
2730 step_over_what bits, indicating what needs to be stepped over. */
2731
8d297bbf 2732static step_over_what
6c4cfb24
PA
2733thread_still_needs_step_over (struct thread_info *tp)
2734{
8d297bbf 2735 step_over_what what = 0;
6c4cfb24
PA
2736
2737 if (thread_still_needs_step_over_bp (tp))
2738 what |= STEP_OVER_BREAKPOINT;
2739
2740 if (tp->stepping_over_watchpoint
9aed480c 2741 && !target_have_steppable_watchpoint ())
6c4cfb24
PA
2742 what |= STEP_OVER_WATCHPOINT;
2743
2744 return what;
2745}
2746
483805cf
PA
2747/* Returns true if scheduler locking applies to thread TP, taking into
2748   account whether TP is running a step/next-like command.  */
2749
c4464ade 2750static bool
856e7dd6 2751schedlock_applies (struct thread_info *tp)
483805cf
PA
2752{
2753 return (scheduler_mode == schedlock_on
2754 || (scheduler_mode == schedlock_step
f2665db5
MM
2755 && tp->control.stepping_command)
2756 || (scheduler_mode == schedlock_replay
2757 && target_record_will_replay (minus_one_ptid,
2758 execution_direction)));
483805cf
PA
2759}
2760
1192f124
SM
2761/* Set process_stratum_target::COMMIT_RESUMED_STATE in all target
2762 stacks that have threads executing and don't have threads with
2763 pending events. */
5b6d1e4f
PA
2764
2765static void
1192f124
SM
2766maybe_set_commit_resumed_all_targets ()
2767{
b4b1a226
SM
2768 scoped_restore_current_thread restore_thread;
2769
1192f124
SM
2770 for (inferior *inf : all_non_exited_inferiors ())
2771 {
2772 process_stratum_target *proc_target = inf->process_target ();
2773
2774 if (proc_target->commit_resumed_state)
2775 {
2776 /* We already set this in a previous iteration, via another
2777 inferior sharing the process_stratum target. */
2778 continue;
2779 }
2780
2781 /* If the target has no resumed threads, it would be useless to
2782 ask it to commit the resumed threads. */
2783 if (!proc_target->threads_executing)
2784 {
2785 infrun_debug_printf ("not requesting commit-resumed for target "
2786 "%s, no resumed threads",
2787 proc_target->shortname ());
2788 continue;
2789 }
2790
2791 /* As an optimization, if a thread from this target has some
2792 status to report, handle it before requiring the target to
2793 commit its resumed threads: handling the status might lead to
2794 resuming more threads. */
273dadf2 2795 if (proc_target->has_resumed_with_pending_wait_status ())
1192f124
SM
2796 {
2797 infrun_debug_printf ("not requesting commit-resumed for target %s, a"
2798 " thread has a pending waitstatus",
2799 proc_target->shortname ());
2800 continue;
2801 }
2802
b4b1a226
SM
2803 switch_to_inferior_no_thread (inf);
2804
2805 if (target_has_pending_events ())
2806 {
2807 infrun_debug_printf ("not requesting commit-resumed for target %s, "
2808 "target has pending events",
2809 proc_target->shortname ());
2810 continue;
2811 }
2812
1192f124
SM
2813 infrun_debug_printf ("enabling commit-resumed for target %s",
2814 proc_target->shortname ());
2815
2816 proc_target->commit_resumed_state = true;
2817 }
2818}
2819
2820/* See infrun.h. */
2821
2822void
2823maybe_call_commit_resumed_all_targets ()
5b6d1e4f
PA
2824{
2825 scoped_restore_current_thread restore_thread;
2826
1192f124
SM
2827 for (inferior *inf : all_non_exited_inferiors ())
2828 {
2829 process_stratum_target *proc_target = inf->process_target ();
2830
2831 if (!proc_target->commit_resumed_state)
2832 continue;
2833
2834 switch_to_inferior_no_thread (inf);
2835
2836 infrun_debug_printf ("calling commit_resumed for target %s",
2837 proc_target->shortname());
2838
2839 target_commit_resumed ();
2840 }
2841}
2842
2843/* To track nesting of scoped_disable_commit_resumed objects, ensuring
2844 that only the outermost one attempts to re-enable
2845 commit-resumed. */
2846static bool enable_commit_resumed = true;
2847
2848/* See infrun.h. */
2849
2850scoped_disable_commit_resumed::scoped_disable_commit_resumed
2851 (const char *reason)
2852 : m_reason (reason),
2853 m_prev_enable_commit_resumed (enable_commit_resumed)
2854{
2855 infrun_debug_printf ("reason=%s", m_reason);
2856
2857 enable_commit_resumed = false;
5b6d1e4f
PA
2858
2859 for (inferior *inf : all_non_exited_inferiors ())
1192f124
SM
2860 {
2861 process_stratum_target *proc_target = inf->process_target ();
5b6d1e4f 2862
1192f124
SM
2863 if (m_prev_enable_commit_resumed)
2864 {
2865 /* This is the outermost instance: force all
2866 COMMIT_RESUMED_STATE to false. */
2867 proc_target->commit_resumed_state = false;
2868 }
2869 else
2870 {
2871 /* This is not the outermost instance, we expect
2872 COMMIT_RESUMED_STATE to have been cleared by the
2873 outermost instance. */
2874 gdb_assert (!proc_target->commit_resumed_state);
2875 }
2876 }
2877}
2878
2879/* See infrun.h. */
2880
2881void
2882scoped_disable_commit_resumed::reset ()
2883{
2884 if (m_reset)
2885 return;
2886 m_reset = true;
2887
2888 infrun_debug_printf ("reason=%s", m_reason);
2889
2890 gdb_assert (!enable_commit_resumed);
2891
2892 enable_commit_resumed = m_prev_enable_commit_resumed;
2893
2894 if (m_prev_enable_commit_resumed)
5b6d1e4f 2895 {
1192f124
SM
2896 /* This is the outermost instance, re-enable
2897 COMMIT_RESUMED_STATE on the targets where it's possible. */
2898 maybe_set_commit_resumed_all_targets ();
2899 }
2900 else
2901 {
2902 /* This is not the outermost instance, we expect
2903 COMMIT_RESUMED_STATE to still be false. */
2904 for (inferior *inf : all_non_exited_inferiors ())
2905 {
2906 process_stratum_target *proc_target = inf->process_target ();
2907 gdb_assert (!proc_target->commit_resumed_state);
2908 }
2909 }
2910}
2911
2912/* See infrun.h. */
2913
2914scoped_disable_commit_resumed::~scoped_disable_commit_resumed ()
2915{
2916 reset ();
2917}
2918
2919/* See infrun.h. */
2920
2921void
2922scoped_disable_commit_resumed::reset_and_commit ()
2923{
2924 reset ();
2925 maybe_call_commit_resumed_all_targets ();
2926}
2927
2928/* See infrun.h. */
2929
2930scoped_enable_commit_resumed::scoped_enable_commit_resumed
2931 (const char *reason)
2932 : m_reason (reason),
2933 m_prev_enable_commit_resumed (enable_commit_resumed)
2934{
2935 infrun_debug_printf ("reason=%s", m_reason);
2936
2937 if (!enable_commit_resumed)
2938 {
2939 enable_commit_resumed = true;
2940
2941 /* Re-enable COMMIT_RESUMED_STATE on the targets where it's
2942 possible. */
2943 maybe_set_commit_resumed_all_targets ();
2944
2945 maybe_call_commit_resumed_all_targets ();
2946 }
2947}
2948
2949/* See infrun.h. */
2950
2951scoped_enable_commit_resumed::~scoped_enable_commit_resumed ()
2952{
2953 infrun_debug_printf ("reason=%s", m_reason);
2954
2955 gdb_assert (enable_commit_resumed);
2956
2957 enable_commit_resumed = m_prev_enable_commit_resumed;
2958
2959 if (!enable_commit_resumed)
2960 {
2961 /* Force all COMMIT_RESUMED_STATE back to false. */
2962 for (inferior *inf : all_non_exited_inferiors ())
2963 {
2964 process_stratum_target *proc_target = inf->process_target ();
2965 proc_target->commit_resumed_state = false;
2966 }
5b6d1e4f
PA
2967 }
2968}
2969
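/* A minimal standalone sketch (not GDB code) of the nesting contract
   implemented by scoped_disable_commit_resumed and
   scoped_enable_commit_resumed above: only the guard that saw the flag
   still enabled (the outermost one) restores it on exit; inner guards
   see it already disabled and leave it that way.  All "model_*" names
   are hypothetical.  */

#include <cassert>

static bool model_enable_commit_resumed = true;

struct model_disable_commit_resumed
{
  /* Capture the previous state before disabling, like
     m_prev_enable_commit_resumed does.  */
  bool m_prev = model_enable_commit_resumed;

  model_disable_commit_resumed ()
  { model_enable_commit_resumed = false; }

  ~model_disable_commit_resumed ()
  { model_enable_commit_resumed = m_prev; }
};

int
main ()
{
  {
    model_disable_commit_resumed outer;		/* true -> false */
    {
      model_disable_commit_resumed inner;	/* stays false */
      assert (!model_enable_commit_resumed);
    }
    /* The inner guard restored "false": still disabled.  */
    assert (!model_enable_commit_resumed);
  }
  /* The outer guard restored "true": re-enabled.  */
  assert (model_enable_commit_resumed);
  return 0;
}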
2f4fcf00
PA
2970/* Check that all the targets we're about to resume are in non-stop
2971 mode. Ideally, we'd only care whether all targets support
2972 target-async, but we're not there yet. E.g., stop_all_threads
2973 doesn't know how to handle all-stop targets. Also, the remote
2974 protocol in all-stop mode is synchronous, irrespective of
2975 target-async, which means that things like a breakpoint re-set
2976 triggered by one target would try to read memory from all targets
2977 and fail. */
2978
2979static void
2980check_multi_target_resumption (process_stratum_target *resume_target)
2981{
2982 if (!non_stop && resume_target == nullptr)
2983 {
2984 scoped_restore_current_thread restore_thread;
2985
2986 /* This is used to track whether we're resuming more than one
2987 target. */
2988 process_stratum_target *first_connection = nullptr;
2989
2990 /* The first inferior we see with a target that does not work in
2991 always-non-stop mode. */
2992 inferior *first_not_non_stop = nullptr;
2993
f058c521 2994 for (inferior *inf : all_non_exited_inferiors ())
2f4fcf00
PA
2995 {
2996 switch_to_inferior_no_thread (inf);
2997
55f6301a 2998 if (!target_has_execution ())
2f4fcf00
PA
2999 continue;
3000
3001 process_stratum_target *proc_target
3002 = current_inferior ()->process_target ();
3003
3004 if (!target_is_non_stop_p ())
3005 first_not_non_stop = inf;
3006
3007 if (first_connection == nullptr)
3008 first_connection = proc_target;
3009 else if (first_connection != proc_target
3010 && first_not_non_stop != nullptr)
3011 {
3012 switch_to_inferior_no_thread (first_not_non_stop);
3013
3014 proc_target = current_inferior ()->process_target ();
3015
3016 error (_("Connection %d (%s) does not support "
3017 "multi-target resumption."),
3018 proc_target->connection_number,
3019 make_target_connection_string (proc_target).c_str ());
3020 }
3021 }
3022 }
3023}
3024
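/* A hedged standalone sketch (not GDB code) of the rule enforced by
   check_multi_target_resumption above, reduced to plain data: a
   wildcard resumption is rejected as soon as it would span two
   different connections while at least one involved target is not
   always-non-stop.  The "model_*" names are hypothetical.  */

#include <stdexcept>
#include <vector>

struct model_inferior
{
  int connection;	/* Connection the inferior lives on.  */
  bool non_stop;	/* Whether that target is always-non-stop.  */
};

static void
model_check_multi_target_resumption (const std::vector<model_inferior> &infs)
{
  const model_inferior *first_connection = nullptr;
  const model_inferior *first_not_non_stop = nullptr;

  for (const model_inferior &inf : infs)
    {
      if (!inf.non_stop)
	first_not_non_stop = &inf;

      if (first_connection == nullptr)
	first_connection = &inf;
      else if (first_connection->connection != inf.connection
	       && first_not_non_stop != nullptr)
	throw std::runtime_error ("connection does not support "
				  "multi-target resumption");
    }
}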
c906108c
SS
3025/* Basic routine for continuing the program in various fashions.
3026
3027 ADDR is the address to resume at, or -1 for resume where stopped.
aff4e175
AB
3028 SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
3029 or GDB_SIGNAL_DEFAULT to act according to how it stopped.
c906108c
SS
3030
3031 You should call clear_proceed_status before calling proceed. */
3032
3033void
64ce06e4 3034proceed (CORE_ADDR addr, enum gdb_signal siggnal)
c906108c 3035{
3ec3145c
SM
3036 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
3037
e58b0e63
PA
3038 struct regcache *regcache;
3039 struct gdbarch *gdbarch;
e58b0e63 3040 CORE_ADDR pc;
4d9d9d04
PA
3041 struct execution_control_state ecss;
3042 struct execution_control_state *ecs = &ecss;
c4464ade 3043 bool started;
c906108c 3044
e58b0e63
PA
3045 /* If we're stopped at a fork/vfork, follow the branch set by the
3046 "set follow-fork-mode" command; otherwise, we'll just proceed
3047 resuming the current thread. */
3048 if (!follow_fork ())
3049 {
3050 /* The target for some reason decided not to resume. */
3051 normal_stop ();
f148b27e 3052 if (target_can_async_p ())
b1a35af2 3053 inferior_event_handler (INF_EXEC_COMPLETE);
e58b0e63
PA
3054 return;
3055 }
3056
842951eb
PA
3057 /* We'll update this if & when we switch to a new thread. */
3058 previous_inferior_ptid = inferior_ptid;
3059
e58b0e63 3060 regcache = get_current_regcache ();
ac7936df 3061 gdbarch = regcache->arch ();
8b86c959
YQ
3062 const address_space *aspace = regcache->aspace ();
3063
fc75c28b
TBA
3064 pc = regcache_read_pc_protected (regcache);
3065
08036331 3066 thread_info *cur_thr = inferior_thread ();
e58b0e63 3067
99619bea 3068 /* Fill in with reasonable starting values. */
08036331 3069 init_thread_stepping_state (cur_thr);
99619bea 3070
08036331 3071 gdb_assert (!thread_is_in_step_over_chain (cur_thr));
c2829269 3072
5b6d1e4f
PA
3073 ptid_t resume_ptid
3074 = user_visible_resume_ptid (cur_thr->control.stepping_command);
3075 process_stratum_target *resume_target
3076 = user_visible_resume_target (resume_ptid);
3077
2f4fcf00
PA
3078 check_multi_target_resumption (resume_target);
3079
2acceee2 3080 if (addr == (CORE_ADDR) -1)
c906108c 3081 {
1edb66d8 3082 if (pc == cur_thr->stop_pc ()
af48d08f 3083 && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
b2175913 3084 && execution_direction != EXEC_REVERSE)
3352ef37
AC
3085 /* There is a breakpoint at the address we will resume at,
3086 step one instruction before inserting breakpoints so that
3087 we do not stop right away (and report a second hit at this
b2175913
MS
3088 breakpoint).
3089
3090 Note, we don't do this in reverse, because we won't
3091 actually be executing the breakpoint insn anyway.
3092 We'll be (un-)executing the previous instruction. */
08036331 3093 cur_thr->stepping_over_breakpoint = 1;
515630c5
UW
3094 else if (gdbarch_single_step_through_delay_p (gdbarch)
3095 && gdbarch_single_step_through_delay (gdbarch,
3096 get_current_frame ()))
3352ef37
AC
3097 /* We stepped onto an instruction that needs to be stepped
3098 again before re-inserting the breakpoint, do so. */
08036331 3099 cur_thr->stepping_over_breakpoint = 1;
c906108c
SS
3100 }
3101 else
3102 {
515630c5 3103 regcache_write_pc (regcache, addr);
c906108c
SS
3104 }
3105
70509625 3106 if (siggnal != GDB_SIGNAL_DEFAULT)
1edb66d8 3107 cur_thr->set_stop_signal (siggnal);
70509625 3108
4d9d9d04
PA
3109 /* If an exception is thrown from this point on, make sure to
3110 propagate GDB's knowledge of the executing state to the
3111 frontend/user running state. */
5b6d1e4f 3112 scoped_finish_thread_state finish_state (resume_target, resume_ptid);
4d9d9d04
PA
3113
3114 /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
3115 threads (e.g., we might need to set threads stepping over
3116 breakpoints first), from the user/frontend's point of view, all
3117 threads in RESUME_PTID are now running. Unless we're calling an
3118 inferior function, as in that case we pretend the inferior
3119 doesn't run at all. */
08036331 3120 if (!cur_thr->control.in_infcall)
719546c4 3121 set_running (resume_target, resume_ptid, true);
17b2616c 3122
1eb8556f
SM
3123 infrun_debug_printf ("addr=%s, signal=%s", paddress (gdbarch, addr),
3124 gdb_signal_to_symbol_string (siggnal));
527159b7 3125
4d9d9d04
PA
3126 annotate_starting ();
3127
3128 /* Make sure that output from GDB appears before output from the
3129 inferior. */
3130 gdb_flush (gdb_stdout);
3131
d930703d
PA
3132 /* Since we've marked the inferior running, give it the terminal. A
3133 QUIT/Ctrl-C from here on is forwarded to the target (which can
3134 still detect attempts to unblock a stuck connection with repeated
3135 Ctrl-C from within target_pass_ctrlc). */
3136 target_terminal::inferior ();
3137
4d9d9d04
PA
3138 /* In a multi-threaded task we may select another thread and
3139 then continue or step.
3140
3141 But if a thread that we're resuming had stopped at a breakpoint,
3142 it will immediately cause another breakpoint stop without any
3143 execution (i.e. it will report a breakpoint hit incorrectly). So
3144 we must step over it first.
3145
3146 Look for threads other than the current (TP) that reported a
3147 breakpoint hit and haven't been resumed yet since. */
3148
3149 /* If scheduler locking applies, we can avoid iterating over all
3150 threads. */
08036331 3151 if (!non_stop && !schedlock_applies (cur_thr))
94cc34af 3152 {
5b6d1e4f
PA
3153 for (thread_info *tp : all_non_exited_threads (resume_target,
3154 resume_ptid))
08036331 3155 {
f3f8ece4
PA
3156 switch_to_thread_no_regs (tp);
3157
4d9d9d04
PA
3158 /* Ignore the current thread here. It's handled
3159 afterwards. */
08036331 3160 if (tp == cur_thr)
4d9d9d04 3161 continue;
c906108c 3162
4d9d9d04
PA
3163 if (!thread_still_needs_step_over (tp))
3164 continue;
3165
3166 gdb_assert (!thread_is_in_step_over_chain (tp));
c906108c 3167
1eb8556f
SM
3168 infrun_debug_printf ("need to step-over [%s] first",
3169 target_pid_to_str (tp->ptid).c_str ());
99619bea 3170
28d5518b 3171 global_thread_step_over_chain_enqueue (tp);
2adfaa28 3172 }
f3f8ece4
PA
3173
3174 switch_to_thread (cur_thr);
30852783
UW
3175 }
3176
4d9d9d04
PA
3177 /* Enqueue the current thread last, so that we move all other
3178 threads over their breakpoints first. */
08036331 3179 if (cur_thr->stepping_over_breakpoint)
28d5518b 3180 global_thread_step_over_chain_enqueue (cur_thr);
30852783 3181
4d9d9d04
PA
3182 /* If the thread isn't started, we'll still need to set its prev_pc,
3183 so that switch_back_to_stepped_thread knows the thread hasn't
3184 advanced. Must do this before resuming any thread, as in
3185 all-stop/remote, once we resume we can't send any other packet
3186 until the target stops again. */
fc75c28b 3187 cur_thr->prev_pc = regcache_read_pc_protected (regcache);
99619bea 3188
a9bc57b9 3189 {
1192f124 3190 scoped_disable_commit_resumed disable_commit_resumed ("proceeding");
85ad3aaf 3191
a9bc57b9 3192 started = start_step_over ();
c906108c 3193
a9bc57b9
TT
3194 if (step_over_info_valid_p ())
3195 {
3196 /* Either this thread started a new in-line step over, or some
3197 other thread was already doing one. In either case, don't
3198 resume anything else until the step-over is finished. */
3199 }
3200 else if (started && !target_is_non_stop_p ())
3201 {
3202 /* A new displaced stepping sequence was started. In all-stop,
3203 we can't talk to the target anymore until it next stops. */
3204 }
3205 else if (!non_stop && target_is_non_stop_p ())
3206 {
3ec3145c
SM
3207 INFRUN_SCOPED_DEBUG_START_END
3208 ("resuming threads, all-stop-on-top-of-non-stop");
3209
a9bc57b9
TT
3210 /* In all-stop, but the target is always in non-stop mode.
3211 Start all other threads that are implicitly resumed too. */
5b6d1e4f
PA
3212 for (thread_info *tp : all_non_exited_threads (resume_target,
3213 resume_ptid))
3214 {
3215 switch_to_thread_no_regs (tp);
3216
f9fac3c8
SM
3217 if (!tp->inf->has_execution ())
3218 {
1eb8556f
SM
3219 infrun_debug_printf ("[%s] target has no execution",
3220 target_pid_to_str (tp->ptid).c_str ());
f9fac3c8
SM
3221 continue;
3222 }
f3f8ece4 3223
7846f3aa 3224 if (tp->resumed ())
f9fac3c8 3225 {
1eb8556f
SM
3226 infrun_debug_printf ("[%s] resumed",
3227 target_pid_to_str (tp->ptid).c_str ());
1edb66d8 3228 gdb_assert (tp->executing || tp->has_pending_waitstatus ());
f9fac3c8
SM
3229 continue;
3230 }
fbea99ea 3231
f9fac3c8
SM
3232 if (thread_is_in_step_over_chain (tp))
3233 {
1eb8556f
SM
3234 infrun_debug_printf ("[%s] needs step-over",
3235 target_pid_to_str (tp->ptid).c_str ());
f9fac3c8
SM
3236 continue;
3237 }
fbea99ea 3238
1eb8556f 3239 infrun_debug_printf ("resuming %s",
dda83cd7 3240 target_pid_to_str (tp->ptid).c_str ());
fbea99ea 3241
f9fac3c8
SM
3242 reset_ecs (ecs, tp);
3243 switch_to_thread (tp);
3244 keep_going_pass_signal (ecs);
3245 if (!ecs->wait_some_more)
3246 error (_("Command aborted."));
3247 }
a9bc57b9 3248 }
7846f3aa 3249 else if (!cur_thr->resumed () && !thread_is_in_step_over_chain (cur_thr))
a9bc57b9
TT
3250 {
3251 /* The thread wasn't started, and isn't queued, run it now. */
08036331
PA
3252 reset_ecs (ecs, cur_thr);
3253 switch_to_thread (cur_thr);
a9bc57b9
TT
3254 keep_going_pass_signal (ecs);
3255 if (!ecs->wait_some_more)
3256 error (_("Command aborted."));
3257 }
c906108c 3258
1192f124
SM
3259 disable_commit_resumed.reset_and_commit ();
3260 }
85ad3aaf 3261
731f534f 3262 finish_state.release ();
c906108c 3263
873657b9
PA
3264 /* If we've switched threads above, switch back to the previously
3265 current thread. We don't want the user to see a different
3266 selected thread. */
3267 switch_to_thread (cur_thr);
3268
0b333c5e
PA
3269 /* Tell the event loop to wait for it to stop. If the target
3270 supports asynchronous execution, it'll do this from within
3271 target_resume. */
362646f5 3272 if (!target_can_async_p ())
0b333c5e 3273 mark_async_event_handler (infrun_async_inferior_event_token);
c906108c 3274}
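/* Hedged usage note (not taken verbatim from any caller): given the
   contract documented above, the typical "resume where we stopped and
   act on whatever signal stopped us" sequence is roughly:

     clear_proceed_status (0);
     proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);

   Real callers (e.g. the continue/step commands) do additional
   bookkeeping around these two calls.  */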
c906108c
SS
3275\f
3276
3277/* Start remote-debugging of a machine over a serial link. */
96baa820 3278
c906108c 3279void
8621d6a9 3280start_remote (int from_tty)
c906108c 3281{
5b6d1e4f
PA
3282 inferior *inf = current_inferior ();
3283 inf->control.stop_soon = STOP_QUIETLY_REMOTE;
43ff13b4 3284
1777feb0 3285 /* Always go on waiting for the target, regardless of the mode. */
6426a772 3286 /* FIXME: cagney/1999-09-23: At present it isn't possible to
7e73cedf 3287 indicate to wait_for_inferior that a target should timeout if
6426a772
JM
3288 nothing is returned (instead of just blocking). Because of this,
3289 targets expecting an immediate response need to, internally, set
3290 things up so that the target_wait() is forced to eventually
1777feb0 3291 timeout. */
6426a772
JM
3292 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
3293 differentiate to its caller what the state of the target is after
3294 the initial open has been performed. Here we're assuming that
3295 the target has stopped. It should be possible to eventually have
3296 target_open() return to the caller an indication that the target
3297 is currently running and GDB state should be set to the same as
1777feb0 3298 for an async run. */
5b6d1e4f 3299 wait_for_inferior (inf);
8621d6a9
DJ
3300
3301 /* Now that the inferior has stopped, do any bookkeeping like
3302 loading shared libraries. We want to do this before normal_stop,
3303 so that the displayed frame is up to date. */
a7aba266 3304 post_create_inferior (from_tty);
8621d6a9 3305
6426a772 3306 normal_stop ();
c906108c
SS
3307}
3308
3309/* Initialize static vars when a new inferior begins. */
3310
3311void
96baa820 3312init_wait_for_inferior (void)
c906108c
SS
3313{
3314 /* These are meaningless until the first time through wait_for_inferior. */
c906108c 3315
c906108c
SS
3316 breakpoint_init_inferior (inf_starting);
3317
70509625 3318 clear_proceed_status (0);
9f976b41 3319
ab1ddbcf 3320 nullify_last_target_wait_ptid ();
237fc4c9 3321
842951eb 3322 previous_inferior_ptid = inferior_ptid;
c906108c 3323}
237fc4c9 3324
c906108c 3325\f
488f131b 3326
ec9499be 3327static void handle_inferior_event (struct execution_control_state *ecs);
cd0fc7c3 3328
568d6575
UW
3329static void handle_step_into_function (struct gdbarch *gdbarch,
3330 struct execution_control_state *ecs);
3331static void handle_step_into_function_backward (struct gdbarch *gdbarch,
3332 struct execution_control_state *ecs);
4f5d7f63 3333static void handle_signal_stop (struct execution_control_state *ecs);
186c406b 3334static void check_exception_resume (struct execution_control_state *,
28106bc2 3335 struct frame_info *);
611c83ae 3336
bdc36728 3337static void end_stepping_range (struct execution_control_state *ecs);
22bcd14b 3338static void stop_waiting (struct execution_control_state *ecs);
d4f3574e 3339static void keep_going (struct execution_control_state *ecs);
94c57d6a 3340static void process_event_stop_test (struct execution_control_state *ecs);
c4464ade 3341static bool switch_back_to_stepped_thread (struct execution_control_state *ecs);
104c1213 3342
252fbfc8
PA
3343/* This function is attached as a "thread_stop_requested" observer.
3344 Cleanup local state that assumed the PTID was to be resumed, and
3345 report the stop to the frontend. */
3346
2c0b251b 3347static void
252fbfc8
PA
3348infrun_thread_stop_requested (ptid_t ptid)
3349{
5b6d1e4f
PA
3350 process_stratum_target *curr_target = current_inferior ()->process_target ();
3351
c65d6b55
PA
3352 /* PTID was requested to stop. If the thread was already stopped,
3353 but the user/frontend doesn't know about that yet (e.g., the
3354 thread had been temporarily paused for some step-over), set up
3355 for reporting the stop now. */
5b6d1e4f 3356 for (thread_info *tp : all_threads (curr_target, ptid))
08036331
PA
3357 {
3358 if (tp->state != THREAD_RUNNING)
3359 continue;
3360 if (tp->executing)
3361 continue;
c65d6b55 3362
08036331
PA
3363 /* Remove matching threads from the step-over queue, so
3364 start_step_over doesn't try to resume them
3365 automatically. */
3366 if (thread_is_in_step_over_chain (tp))
28d5518b 3367 global_thread_step_over_chain_remove (tp);
c65d6b55 3368
08036331
PA
3369 /* If the thread is stopped, but the user/frontend doesn't
3370 know about that yet, queue a pending event, as if the
3371 thread had just stopped now. Unless the thread already had
3372 a pending event. */
1edb66d8 3373 if (!tp->has_pending_waitstatus ())
08036331 3374 {
1edb66d8
SM
3375 target_waitstatus ws;
3376 ws.kind = TARGET_WAITKIND_STOPPED;
3377 ws.value.sig = GDB_SIGNAL_0;
3378 tp->set_pending_waitstatus (ws);
08036331 3379 }
c65d6b55 3380
08036331
PA
3381 /* Clear the inline-frame state, since we're re-processing the
3382 stop. */
5b6d1e4f 3383 clear_inline_frame_state (tp);
c65d6b55 3384
08036331
PA
3385 /* If this thread was paused because some other thread was
3386 doing an inline-step over, let that finish first. Once
3387 that happens, we'll restart all threads and consume pending
3388 stop events then. */
3389 if (step_over_info_valid_p ())
3390 continue;
3391
3392 /* Otherwise we can process the (new) pending event now. Set
3393 it so this pending event is considered by
3394 do_target_wait. */
7846f3aa 3395 tp->set_resumed (true);
08036331 3396 }
252fbfc8
PA
3397}
3398
a07daef3
PA
3399static void
3400infrun_thread_thread_exit (struct thread_info *tp, int silent)
3401{
5b6d1e4f
PA
3402 if (target_last_proc_target == tp->inf->process_target ()
3403 && target_last_wait_ptid == tp->ptid)
a07daef3
PA
3404 nullify_last_target_wait_ptid ();
3405}
3406
0cbcdb96
PA
3407/* Delete the step resume, single-step and longjmp/exception resume
3408 breakpoints of TP. */
4e1c45ea 3409
0cbcdb96
PA
3410static void
3411delete_thread_infrun_breakpoints (struct thread_info *tp)
4e1c45ea 3412{
0cbcdb96
PA
3413 delete_step_resume_breakpoint (tp);
3414 delete_exception_resume_breakpoint (tp);
34b7e8a6 3415 delete_single_step_breakpoints (tp);
4e1c45ea
PA
3416}
3417
0cbcdb96
PA
3418/* If the target still has execution, call FUNC for each thread that
3419 just stopped. In all-stop, that's all the non-exited threads; in
3420 non-stop, that's the current thread, only. */
3421
3422typedef void (*for_each_just_stopped_thread_callback_func)
3423 (struct thread_info *tp);
4e1c45ea
PA
3424
3425static void
0cbcdb96 3426for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
4e1c45ea 3427{
55f6301a 3428 if (!target_has_execution () || inferior_ptid == null_ptid)
4e1c45ea
PA
3429 return;
3430
fbea99ea 3431 if (target_is_non_stop_p ())
4e1c45ea 3432 {
0cbcdb96
PA
3433 /* If in non-stop mode, only the current thread stopped. */
3434 func (inferior_thread ());
4e1c45ea
PA
3435 }
3436 else
0cbcdb96 3437 {
0cbcdb96 3438 /* In all-stop mode, all threads have stopped. */
08036331
PA
3439 for (thread_info *tp : all_non_exited_threads ())
3440 func (tp);
0cbcdb96
PA
3441 }
3442}
3443
3444/* Delete the step resume and longjmp/exception resume breakpoints of
3445 the threads that just stopped. */
3446
3447static void
3448delete_just_stopped_threads_infrun_breakpoints (void)
3449{
3450 for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
34b7e8a6
PA
3451}
3452
3453/* Delete the single-step breakpoints of the threads that just
3454 stopped. */
7c16b83e 3455
34b7e8a6
PA
3456static void
3457delete_just_stopped_threads_single_step_breakpoints (void)
3458{
3459 for_each_just_stopped_thread (delete_single_step_breakpoints);
4e1c45ea
PA
3460}
3461
221e1a37 3462/* See infrun.h. */
223698f8 3463
221e1a37 3464void
223698f8
DE
3465print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
3466 const struct target_waitstatus *ws)
3467{
e71daf80
SM
3468 infrun_debug_printf ("target_wait (%d.%ld.%ld [%s], status) =",
3469 waiton_ptid.pid (),
3470 waiton_ptid.lwp (),
3471 waiton_ptid.tid (),
3472 target_pid_to_str (waiton_ptid).c_str ());
3473 infrun_debug_printf (" %d.%ld.%ld [%s],",
3474 result_ptid.pid (),
3475 result_ptid.lwp (),
3476 result_ptid.tid (),
3477 target_pid_to_str (result_ptid).c_str ());
3478 infrun_debug_printf (" %s", target_waitstatus_to_string (ws).c_str ());
223698f8
DE
3479}
3480
372316f1
PA
3481/* Select a thread at random, out of those which are resumed and have
3482 had events. */
3483
3484static struct thread_info *
5b6d1e4f 3485random_pending_event_thread (inferior *inf, ptid_t waiton_ptid)
372316f1 3486{
71a23490
SM
3487 process_stratum_target *proc_target = inf->process_target ();
3488 thread_info *thread
3489 = proc_target->random_resumed_with_pending_wait_status (inf, waiton_ptid);
08036331 3490
71a23490 3491 if (thread == nullptr)
08036331 3492 {
71a23490
SM
3493 infrun_debug_printf ("None found.");
3494 return nullptr;
3495 }
372316f1 3496
71a23490
SM
3497 infrun_debug_printf ("Found %s.", target_pid_to_str (thread->ptid).c_str ());
3498 gdb_assert (thread->resumed ());
3499 gdb_assert (thread->has_pending_waitstatus ());
372316f1 3500
71a23490 3501 return thread;
372316f1
PA
3502}
3503
3504/* Wrapper for target_wait that first checks whether threads have
3505 pending statuses to report before actually asking the target for
5b6d1e4f
PA
3506 more events. INF is the inferior we're using to call target_wait
3507 on. */
372316f1
PA
3508
3509static ptid_t
5b6d1e4f 3510do_target_wait_1 (inferior *inf, ptid_t ptid,
b60cea74 3511 target_waitstatus *status, target_wait_flags options)
372316f1
PA
3512{
3513 ptid_t event_ptid;
3514 struct thread_info *tp;
3515
24ed6739
AB
3516 /* We know that we are looking for an event in the target of inferior
3517 INF, but we don't know which thread the event might come from. As
3518 such we want to make sure that INFERIOR_PTID is reset so that none of
3519 the wait code relies on it - doing so is always a mistake. */
3520 switch_to_inferior_no_thread (inf);
3521
372316f1
PA
3522 /* First check if there is a resumed thread with a wait status
3523 pending. */
d7e15655 3524 if (ptid == minus_one_ptid || ptid.is_pid ())
372316f1 3525 {
5b6d1e4f 3526 tp = random_pending_event_thread (inf, ptid);
372316f1
PA
3527 }
3528 else
3529 {
1eb8556f
SM
3530 infrun_debug_printf ("Waiting for specific thread %s.",
3531 target_pid_to_str (ptid).c_str ());
372316f1
PA
3532
3533 /* We have a specific thread to check. */
5b6d1e4f 3534 tp = find_thread_ptid (inf, ptid);
372316f1 3535 gdb_assert (tp != NULL);
1edb66d8 3536 if (!tp->has_pending_waitstatus ())
372316f1
PA
3537 tp = NULL;
3538 }
3539
3540 if (tp != NULL
1edb66d8
SM
3541 && (tp->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
3542 || tp->stop_reason () == TARGET_STOPPED_BY_HW_BREAKPOINT))
372316f1 3543 {
00431a78 3544 struct regcache *regcache = get_thread_regcache (tp);
ac7936df 3545 struct gdbarch *gdbarch = regcache->arch ();
372316f1
PA
3546 CORE_ADDR pc;
3547 int discard = 0;
3548
3549 pc = regcache_read_pc (regcache);
3550
1edb66d8 3551 if (pc != tp->stop_pc ())
372316f1 3552 {
1eb8556f
SM
3553 infrun_debug_printf ("PC of %s changed. was=%s, now=%s",
3554 target_pid_to_str (tp->ptid).c_str (),
1edb66d8 3555 paddress (gdbarch, tp->stop_pc ()),
1eb8556f 3556 paddress (gdbarch, pc));
372316f1
PA
3557 discard = 1;
3558 }
a01bda52 3559 else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
372316f1 3560 {
1eb8556f
SM
3561 infrun_debug_printf ("previous breakpoint of %s, at %s gone",
3562 target_pid_to_str (tp->ptid).c_str (),
3563 paddress (gdbarch, pc));
372316f1
PA
3564
3565 discard = 1;
3566 }
3567
3568 if (discard)
3569 {
1eb8556f
SM
3570 infrun_debug_printf ("pending event of %s cancelled.",
3571 target_pid_to_str (tp->ptid).c_str ());
372316f1 3572
1edb66d8
SM
3573 tp->clear_pending_waitstatus ();
3574 target_waitstatus ws;
3575 ws.kind = TARGET_WAITKIND_SPURIOUS;
3576 tp->set_pending_waitstatus (ws);
3577 tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
372316f1
PA
3578 }
3579 }
3580
3581 if (tp != NULL)
3582 {
1eb8556f
SM
3583 infrun_debug_printf ("Using pending wait status %s for %s.",
3584 target_waitstatus_to_string
1edb66d8 3585 (&tp->pending_waitstatus ()).c_str (),
1eb8556f 3586 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
3587
3588 /* Now that we've selected our final event LWP, un-adjust its PC
3589 if it was a software breakpoint (and the target doesn't
3590 always adjust the PC itself). */
1edb66d8 3591 if (tp->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
372316f1
PA
3592 && !target_supports_stopped_by_sw_breakpoint ())
3593 {
3594 struct regcache *regcache;
3595 struct gdbarch *gdbarch;
3596 int decr_pc;
3597
00431a78 3598 regcache = get_thread_regcache (tp);
ac7936df 3599 gdbarch = regcache->arch ();
372316f1
PA
3600
3601 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
3602 if (decr_pc != 0)
3603 {
3604 CORE_ADDR pc;
3605
3606 pc = regcache_read_pc (regcache);
3607 regcache_write_pc (regcache, pc + decr_pc);
3608 }
3609 }
3610
1edb66d8
SM
3611 tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
3612 *status = tp->pending_waitstatus ();
3613 tp->clear_pending_waitstatus ();
372316f1
PA
3614
3615 /* Wake up the event loop again, until all pending events are
3616 processed. */
3617 if (target_is_async_p ())
3618 mark_async_event_handler (infrun_async_inferior_event_token);
3619 return tp->ptid;
3620 }
3621
3622 /* But if we don't find one, we'll have to wait. */
3623
d3a07122
SM
3624 /* We can't ask a non-async target to do a non-blocking wait, so this will be
3625 a blocking wait. */
3626 if (!target_can_async_p ())
3627 options &= ~TARGET_WNOHANG;
3628
372316f1
PA
3629 if (deprecated_target_wait_hook)
3630 event_ptid = deprecated_target_wait_hook (ptid, status, options);
3631 else
3632 event_ptid = target_wait (ptid, status, options);
3633
3634 return event_ptid;
3635}
3636
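/* A minimal standalone sketch (not GDB code) of the two staleness
   checks do_target_wait_1 applies above before reusing a pending
   breakpoint event: the event is discarded if the thread's PC moved
   away from the recorded stop PC, or if no breakpoint is inserted
   there anymore (in the real code a discarded event is turned into a
   spurious stop rather than dropped).  The "model_*" names are
   hypothetical.  */

static bool
model_should_discard_pending_bp_event (unsigned long current_pc,
				       unsigned long recorded_stop_pc,
				       bool breakpoint_still_inserted)
{
  if (current_pc != recorded_stop_pc)
    return true;	/* The thread moved; the event is stale.  */

  if (!breakpoint_still_inserted)
    return true;	/* The breakpoint is gone; nothing to report.  */

  return false;	/* Still valid; the event can be consumed as-is.  */
}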
5b6d1e4f
PA
3637/* Wrapper for target_wait that first checks whether threads have
3638 pending statuses to report before actually asking the target for
b3e3a4c1 3639 more events. Polls for events from all inferiors/targets. */
5b6d1e4f
PA
3640
3641static bool
ac0d67ed 3642do_target_wait (execution_control_state *ecs, target_wait_flags options)
5b6d1e4f
PA
3643{
3644 int num_inferiors = 0;
3645 int random_selector;
3646
b3e3a4c1
SM
3647 /* For fairness, we pick the first inferior/target to poll at random
3648 out of all inferiors that may report events, and then continue
3649 polling the rest of the inferior list starting from that one in a
3650 circular fashion until the whole list is polled once. */
5b6d1e4f 3651
ac0d67ed 3652 auto inferior_matches = [] (inferior *inf)
5b6d1e4f 3653 {
ac0d67ed 3654 return inf->process_target () != nullptr;
5b6d1e4f
PA
3655 };
3656
b3e3a4c1 3657 /* First see how many matching inferiors we have. */
5b6d1e4f
PA
3658 for (inferior *inf : all_inferiors ())
3659 if (inferior_matches (inf))
3660 num_inferiors++;
3661
3662 if (num_inferiors == 0)
3663 {
3664 ecs->ws.kind = TARGET_WAITKIND_IGNORE;
3665 return false;
3666 }
3667
b3e3a4c1 3668 /* Now randomly pick an inferior out of those that matched. */
5b6d1e4f
PA
3669 random_selector = (int)
3670 ((num_inferiors * (double) rand ()) / (RAND_MAX + 1.0));
3671
1eb8556f
SM
3672 if (num_inferiors > 1)
3673 infrun_debug_printf ("Found %d inferiors, starting at #%d",
3674 num_inferiors, random_selector);
5b6d1e4f 3675
b3e3a4c1 3676 /* Select the Nth inferior that matched. */
5b6d1e4f
PA
3677
3678 inferior *selected = nullptr;
3679
3680 for (inferior *inf : all_inferiors ())
3681 if (inferior_matches (inf))
3682 if (random_selector-- == 0)
3683 {
3684 selected = inf;
3685 break;
3686 }
3687
b3e3a4c1 3688 /* Now poll for events out of each of the matching inferior's
5b6d1e4f
PA
3689 targets, starting from the selected one. */
3690
3691 auto do_wait = [&] (inferior *inf)
3692 {
ac0d67ed 3693 ecs->ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs->ws, options);
5b6d1e4f
PA
3694 ecs->target = inf->process_target ();
3695 return (ecs->ws.kind != TARGET_WAITKIND_IGNORE);
3696 };
3697
b3e3a4c1
SM
3698 /* Needed in 'all-stop + target-non-stop' mode, because we end up
3699 here spuriously after the target is all stopped and we've already
5b6d1e4f
PA
3700 reported the stop to the user, polling for events. */
3701 scoped_restore_current_thread restore_thread;
3702
08bdefb5
PA
3703 intrusive_list_iterator<inferior> start
3704 = inferior_list.iterator_to (*selected);
3705
3706 for (intrusive_list_iterator<inferior> it = start;
3707 it != inferior_list.end ();
3708 ++it)
3709 {
3710 inferior *inf = &*it;
3711
3712 if (inferior_matches (inf) && do_wait (inf))
5b6d1e4f 3713 return true;
08bdefb5 3714 }
5b6d1e4f 3715
08bdefb5
PA
3716 for (intrusive_list_iterator<inferior> it = inferior_list.begin ();
3717 it != start;
3718 ++it)
3719 {
3720 inferior *inf = &*it;
3721
3722 if (inferior_matches (inf) && do_wait (inf))
5b6d1e4f 3723 return true;
08bdefb5 3724 }
5b6d1e4f
PA
3725
3726 ecs->ws.kind = TARGET_WAITKIND_IGNORE;
3727 return false;
3728}
3729
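/* A standalone sketch (not GDB code) of the fairness scheme described
   above do_target_wait: start polling at a randomly chosen element,
   then walk the whole sequence once, wrapping around, and stop at the
   first element that reports an event.  The "model_*" names are
   hypothetical; POLL is any callable returning true when the given
   element yielded an event.  */

#include <cstddef>
#include <cstdlib>
#include <vector>

template<typename T, typename Poll>
static bool
model_poll_round_robin_from_random_start (const std::vector<T> &elems,
					  Poll poll)
{
  if (elems.empty ())
    return false;

  std::size_t start = (std::size_t) std::rand () % elems.size ();

  for (std::size_t i = 0; i < elems.size (); i++)
    if (poll (elems[(start + i) % elems.size ()]))
      return true;

  return false;
}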
8ff53139
PA
3730/* An event reported by wait_one. */
3731
3732struct wait_one_event
3733{
3734 /* The target the event came out of. */
3735 process_stratum_target *target;
3736
3737 /* The PTID the event was for. */
3738 ptid_t ptid;
3739
3740 /* The waitstatus. */
3741 target_waitstatus ws;
3742};
3743
3744static bool handle_one (const wait_one_event &event);
ac7d717c 3745static void restart_threads (struct thread_info *event_thread);
8ff53139 3746
24291992
PA
3747/* Prepare and stabilize the inferior for detaching it. E.g.,
3748 detaching while a thread is displaced stepping is a recipe for
3749 crashing it, as nothing would readjust the PC out of the scratch
3750 pad. */
3751
3752void
3753prepare_for_detach (void)
3754{
3755 struct inferior *inf = current_inferior ();
f2907e49 3756 ptid_t pid_ptid = ptid_t (inf->pid);
8ff53139 3757 scoped_restore_current_thread restore_thread;
24291992 3758
9bcb1f16 3759 scoped_restore restore_detaching = make_scoped_restore (&inf->detaching, true);
24291992 3760
8ff53139
PA
3761 /* Remove all threads of INF from the global step-over chain. We
3762 want to stop any ongoing step-over, not start any new one. */
8b6a69b2
SM
3763 thread_step_over_list_safe_range range
3764 = make_thread_step_over_list_safe_range (global_thread_step_over_list);
3765
3766 for (thread_info *tp : range)
3767 if (tp->inf == inf)
3768 {
3769 infrun_debug_printf ("removing thread %s from global step over chain",
3770 target_pid_to_str (tp->ptid).c_str ());
8ff53139 3771 global_thread_step_over_chain_remove (tp);
8b6a69b2 3772 }
24291992 3773
ac7d717c
PA
3774 /* If we were already in the middle of an inline step-over, and the
3775 thread stepping belongs to the inferior we're detaching, we need
3776 to restart the threads of other inferiors. */
3777 if (step_over_info.thread != -1)
3778 {
3779 infrun_debug_printf ("inline step-over in progress while detaching");
3780
3781 thread_info *thr = find_thread_global_id (step_over_info.thread);
3782 if (thr->inf == inf)
3783 {
3784 /* Since we removed threads of INF from the step-over chain,
3785 we know this won't start a step-over for INF. */
3786 clear_step_over_info ();
3787
3788 if (target_is_non_stop_p ())
3789 {
3790 /* Start a new step-over in another thread if there's
3791 one that needs it. */
3792 start_step_over ();
3793
3794 /* Restart all other threads (except the
3795 previously-stepping thread, since that one is still
3796 running). */
3797 if (!step_over_info_valid_p ())
3798 restart_threads (thr);
3799 }
3800 }
3801 }
3802
8ff53139
PA
3803 if (displaced_step_in_progress (inf))
3804 {
3805 infrun_debug_printf ("displaced-stepping in progress while detaching");
24291992 3806
8ff53139 3807 /* Stop threads currently displaced stepping, aborting it. */
24291992 3808
8ff53139
PA
3809 for (thread_info *thr : inf->non_exited_threads ())
3810 {
3811 if (thr->displaced_step_state.in_progress ())
3812 {
3813 if (thr->executing)
3814 {
3815 if (!thr->stop_requested)
3816 {
3817 target_stop (thr->ptid);
3818 thr->stop_requested = true;
3819 }
3820 }
3821 else
7846f3aa 3822 thr->set_resumed (false);
8ff53139
PA
3823 }
3824 }
24291992 3825
8ff53139
PA
3826 while (displaced_step_in_progress (inf))
3827 {
3828 wait_one_event event;
24291992 3829
8ff53139
PA
3830 event.target = inf->process_target ();
3831 event.ptid = do_target_wait_1 (inf, pid_ptid, &event.ws, 0);
24291992 3832
8ff53139
PA
3833 if (debug_infrun)
3834 print_target_wait_results (pid_ptid, event.ptid, &event.ws);
24291992 3835
8ff53139
PA
3836 handle_one (event);
3837 }
24291992 3838
8ff53139
PA
3839 /* It's OK to leave some of the threads of INF stopped, since
3840 they'll be detached shortly. */
24291992 3841 }
24291992
PA
3842}
3843
cd0fc7c3 3844/* Wait for control to return from inferior to debugger.
ae123ec6 3845
cd0fc7c3
SS
3846 If inferior gets a signal, we may decide to start it up again
3847 instead of returning. That is why there is a loop in this function.
3848 When this function actually returns it means the inferior
3849 should be left stopped and GDB should read more commands. */
3850
5b6d1e4f
PA
3851static void
3852wait_for_inferior (inferior *inf)
cd0fc7c3 3853{
1eb8556f 3854 infrun_debug_printf ("wait_for_inferior ()");
527159b7 3855
4c41382a 3856 SCOPE_EXIT { delete_just_stopped_threads_infrun_breakpoints (); };
cd0fc7c3 3857
e6f5c25b
PA
3858 /* If an error happens while handling the event, propagate GDB's
3859 knowledge of the executing state to the frontend/user running
3860 state. */
5b6d1e4f
PA
3861 scoped_finish_thread_state finish_state
3862 (inf->process_target (), minus_one_ptid);
e6f5c25b 3863
c906108c
SS
3864 while (1)
3865 {
ae25568b
PA
3866 struct execution_control_state ecss;
3867 struct execution_control_state *ecs = &ecss;
29f49a6a 3868
ae25568b
PA
3869 memset (ecs, 0, sizeof (*ecs));
3870
ec9499be 3871 overlay_cache_invalid = 1;
ec9499be 3872
f15cb84a
YQ
3873 /* Flush target cache before starting to handle each event.
3874 Target was running and cache could be stale. This is just a
3875 heuristic. Running threads may modify target memory, but we
3876 don't get any event. */
3877 target_dcache_invalidate ();
3878
5b6d1e4f
PA
3879 ecs->ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs->ws, 0);
3880 ecs->target = inf->process_target ();
c906108c 3881
f00150c9 3882 if (debug_infrun)
5b6d1e4f 3883 print_target_wait_results (minus_one_ptid, ecs->ptid, &ecs->ws);
f00150c9 3884
cd0fc7c3
SS
3885 /* Now figure out what to do with the result of the wait. */
3886 handle_inferior_event (ecs);
c906108c 3887
cd0fc7c3
SS
3888 if (!ecs->wait_some_more)
3889 break;
3890 }
4e1c45ea 3891
e6f5c25b 3892 /* No error, don't finish the state yet. */
731f534f 3893 finish_state.release ();
cd0fc7c3 3894}
c906108c 3895
d3d4baed
PA
3896/* Cleanup that reinstalls the readline callback handler, if the
3897 target is running in the background. If while handling the target
3898 event something triggered a secondary prompt, like e.g., a
3899 pagination prompt, we'll have removed the callback handler (see
3900 gdb_readline_wrapper_line). Need to do this as we go back to the
3901 event loop, ready to process further input. Note this has no
3902 effect if the handler hasn't actually been removed, because calling
3903 rl_callback_handler_install resets the line buffer, thus losing
3904 input. */
3905
3906static void
d238133d 3907reinstall_readline_callback_handler_cleanup ()
d3d4baed 3908{
3b12939d
PA
3909 struct ui *ui = current_ui;
3910
3911 if (!ui->async)
6c400b59
PA
3912 {
3913 /* We're not going back to the top level event loop yet. Don't
3914 install the readline callback, as it'd prep the terminal,
3915 readline-style (raw, noecho) (e.g., --batch). We'll install
3916 it the next time the prompt is displayed, when we're ready
3917 for input. */
3918 return;
3919 }
3920
3b12939d 3921 if (ui->command_editing && ui->prompt_state != PROMPT_BLOCKED)
d3d4baed
PA
3922 gdb_rl_callback_handler_reinstall ();
3923}
3924
243a9253
PA
3925/* Clean up the FSMs of threads that are now stopped. In non-stop,
3926 that's just the event thread. In all-stop, that's all threads. */
3927
3928static void
3929clean_up_just_stopped_threads_fsms (struct execution_control_state *ecs)
3930{
08036331
PA
3931 if (ecs->event_thread != NULL
3932 && ecs->event_thread->thread_fsm != NULL)
46e3ed7f 3933 ecs->event_thread->thread_fsm->clean_up (ecs->event_thread);
243a9253
PA
3934
3935 if (!non_stop)
3936 {
08036331 3937 for (thread_info *thr : all_non_exited_threads ())
dda83cd7 3938 {
243a9253
PA
3939 if (thr->thread_fsm == NULL)
3940 continue;
3941 if (thr == ecs->event_thread)
3942 continue;
3943
00431a78 3944 switch_to_thread (thr);
46e3ed7f 3945 thr->thread_fsm->clean_up (thr);
243a9253
PA
3946 }
3947
3948 if (ecs->event_thread != NULL)
00431a78 3949 switch_to_thread (ecs->event_thread);
243a9253
PA
3950 }
3951}
3952
3b12939d
PA
3953/* Helper for all_uis_check_sync_execution_done that works on the
3954 current UI. */
3955
3956static void
3957check_curr_ui_sync_execution_done (void)
3958{
3959 struct ui *ui = current_ui;
3960
3961 if (ui->prompt_state == PROMPT_NEEDED
3962 && ui->async
3963 && !gdb_in_secondary_prompt_p (ui))
3964 {
223ffa71 3965 target_terminal::ours ();
76727919 3966 gdb::observers::sync_execution_done.notify ();
3eb7562a 3967 ui_register_input_event_handler (ui);
3b12939d
PA
3968 }
3969}
3970
3971/* See infrun.h. */
3972
3973void
3974all_uis_check_sync_execution_done (void)
3975{
0e454242 3976 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
3977 {
3978 check_curr_ui_sync_execution_done ();
3979 }
3980}
3981
a8836c93
PA
3982/* See infrun.h. */
3983
3984void
3985all_uis_on_sync_execution_starting (void)
3986{
0e454242 3987 SWITCH_THRU_ALL_UIS ()
a8836c93
PA
3988 {
3989 if (current_ui->prompt_state == PROMPT_NEEDED)
3990 async_disable_stdin ();
3991 }
3992}
3993
1777feb0 3994/* Asynchronous version of wait_for_inferior. It is called by the
43ff13b4 3995 event loop whenever a change of state is detected on the file
1777feb0
MS
3996 descriptor corresponding to the target. It can be called more than
3997 once to complete a single execution command. In such cases we need
3998 to keep the state in a global variable ECSS. If it is the last time
a474d7c2
PA
3999 that this function is called for a single execution command, then
4000 report to the user that the inferior has stopped, and do the
1777feb0 4001 necessary cleanups. */
43ff13b4
JM
4002
4003void
b1a35af2 4004fetch_inferior_event ()
43ff13b4 4005{
3ec3145c
SM
4006 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
4007
0d1e5fa7 4008 struct execution_control_state ecss;
a474d7c2 4009 struct execution_control_state *ecs = &ecss;
0f641c01 4010 int cmd_done = 0;
43ff13b4 4011
0d1e5fa7
PA
4012 memset (ecs, 0, sizeof (*ecs));
4013
c61db772
PA
4014 /* Events are always processed with the main UI as current UI. This
4015 way, warnings, debug output, etc. are always consistently sent to
4016 the main console. */
4b6749b9 4017 scoped_restore save_ui = make_scoped_restore (&current_ui, main_ui);
c61db772 4018
b78b3a29
TBA
4019 /* Temporarily disable pagination. Otherwise, the user would be
4020 given an option to press 'q' to quit, which would cause an early
4021 exit and could leave GDB in a half-baked state. */
4022 scoped_restore save_pagination
4023 = make_scoped_restore (&pagination_enabled, false);
4024
d3d4baed 4025 /* End up with readline processing input, if necessary. */
d238133d
TT
4026 {
4027 SCOPE_EXIT { reinstall_readline_callback_handler_cleanup (); };
4028
4029 /* We're handling a live event, so make sure we're doing live
4030 debugging. If we're looking at traceframes while the target is
4031 running, we're going to need to get back to that mode after
4032 handling the event. */
4033 gdb::optional<scoped_restore_current_traceframe> maybe_restore_traceframe;
4034 if (non_stop)
4035 {
4036 maybe_restore_traceframe.emplace ();
4037 set_current_traceframe (-1);
4038 }
43ff13b4 4039
873657b9
PA
4040 /* The user/frontend should not notice a thread switch due to
4041 internal events. Make sure we revert to the user selected
4042 thread and frame after handling the event and running any
4043 breakpoint commands. */
4044 scoped_restore_current_thread restore_thread;
d238133d
TT
4045
4046 overlay_cache_invalid = 1;
4047 /* Flush target cache before starting to handle each event. Target
4048 was running and cache could be stale. This is just a heuristic.
4049 Running threads may modify target memory, but we don't get any
4050 event. */
4051 target_dcache_invalidate ();
4052
4053 scoped_restore save_exec_dir
4054 = make_scoped_restore (&execution_direction,
4055 target_execution_direction ());
4056
1192f124
SM
4057 /* Allow targets to pause their resumed threads while we handle
4058 the event. */
4059 scoped_disable_commit_resumed disable_commit_resumed ("handling event");
4060
ac0d67ed 4061 if (!do_target_wait (ecs, TARGET_WNOHANG))
1192f124
SM
4062 {
4063 infrun_debug_printf ("do_target_wait returned no event");
4064 disable_commit_resumed.reset_and_commit ();
4065 return;
4066 }
5b6d1e4f
PA
4067
4068 gdb_assert (ecs->ws.kind != TARGET_WAITKIND_IGNORE);
4069
4070 /* Switch to the target that generated the event, so we can do
7f08fd51
TBA
4071 target calls. */
4072 switch_to_target_no_thread (ecs->target);
d238133d
TT
4073
4074 if (debug_infrun)
5b6d1e4f 4075 print_target_wait_results (minus_one_ptid, ecs->ptid, &ecs->ws);
d238133d
TT
4076
4077 /* If an error happens while handling the event, propagate GDB's
4078 knowledge of the executing state to the frontend/user running
4079 state. */
4080 ptid_t finish_ptid = !target_is_non_stop_p () ? minus_one_ptid : ecs->ptid;
5b6d1e4f 4081 scoped_finish_thread_state finish_state (ecs->target, finish_ptid);
d238133d 4082
979a0d13 4083 /* These run before the scoped_restore_current_thread above, so they
d238133d
TT
 4084 still apply to the thread which has thrown the exception. */
4085 auto defer_bpstat_clear
4086 = make_scope_exit (bpstat_clear_actions);
4087 auto defer_delete_threads
4088 = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints);
4089
4090 /* Now figure out what to do with the result of the wait. */
4091 handle_inferior_event (ecs);
4092
4093 if (!ecs->wait_some_more)
4094 {
5b6d1e4f 4095 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
758cb810 4096 bool should_stop = true;
d238133d 4097 struct thread_info *thr = ecs->event_thread;
d6b48e9c 4098
d238133d 4099 delete_just_stopped_threads_infrun_breakpoints ();
f107f563 4100
d238133d
TT
4101 if (thr != NULL)
4102 {
4103 struct thread_fsm *thread_fsm = thr->thread_fsm;
243a9253 4104
d238133d 4105 if (thread_fsm != NULL)
46e3ed7f 4106 should_stop = thread_fsm->should_stop (thr);
d238133d 4107 }
243a9253 4108
d238133d
TT
4109 if (!should_stop)
4110 {
4111 keep_going (ecs);
4112 }
4113 else
4114 {
46e3ed7f 4115 bool should_notify_stop = true;
d238133d 4116 int proceeded = 0;
1840d81a 4117
d238133d 4118 clean_up_just_stopped_threads_fsms (ecs);
243a9253 4119
d238133d 4120 if (thr != NULL && thr->thread_fsm != NULL)
46e3ed7f 4121 should_notify_stop = thr->thread_fsm->should_notify_stop ();
388a7084 4122
d238133d
TT
4123 if (should_notify_stop)
4124 {
4125 /* We may not find an inferior if this was a process exit. */
4126 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
4127 proceeded = normal_stop ();
4128 }
243a9253 4129
d238133d
TT
4130 if (!proceeded)
4131 {
b1a35af2 4132 inferior_event_handler (INF_EXEC_COMPLETE);
d238133d
TT
4133 cmd_done = 1;
4134 }
873657b9
PA
4135
4136 /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
4137 previously selected thread is gone. We have two
4138 choices - switch to no thread selected, or restore the
4139 previously selected thread (now exited). We chose the
4140 latter, just because that's what GDB used to do. After
4141 this, "info threads" says "The current thread <Thread
4142 ID 2> has terminated." instead of "No thread
4143 selected.". */
4144 if (!non_stop
4145 && cmd_done
4146 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED)
4147 restore_thread.dont_restore ();
d238133d
TT
4148 }
4149 }
4f8d22e3 4150
d238133d
TT
4151 defer_delete_threads.release ();
4152 defer_bpstat_clear.release ();
29f49a6a 4153
d238133d
TT
4154 /* No error, don't finish the thread states yet. */
4155 finish_state.release ();
731f534f 4156
1192f124
SM
4157 disable_commit_resumed.reset_and_commit ();
4158
d238133d
TT
4159 /* This scope is used to ensure that readline callbacks are
4160 reinstalled here. */
4161 }
4f8d22e3 4162
3b12939d
PA
4163 /* If a UI was in sync execution mode, and now isn't, restore its
4164 prompt (a synchronous execution command has finished, and we're
4165 ready for input). */
4166 all_uis_check_sync_execution_done ();
0f641c01
PA
4167
4168 if (cmd_done
0f641c01 4169 && exec_done_display_p
00431a78
PA
4170 && (inferior_ptid == null_ptid
4171 || inferior_thread ()->state != THREAD_RUNNING))
0f641c01 4172 printf_unfiltered (_("completed.\n"));
43ff13b4
JM
4173}
4174
29734269
SM
4175/* See infrun.h. */
4176
edb3359d 4177void
29734269
SM
4178set_step_info (thread_info *tp, struct frame_info *frame,
4179 struct symtab_and_line sal)
edb3359d 4180{
29734269
SM
4181 /* This can be removed once this function no longer implicitly relies on the
4182 inferior_ptid value. */
4183 gdb_assert (inferior_ptid == tp->ptid);
edb3359d 4184
16c381f0
JK
4185 tp->control.step_frame_id = get_frame_id (frame);
4186 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
edb3359d
DJ
4187
4188 tp->current_symtab = sal.symtab;
4189 tp->current_line = sal.line;
4190}
4191
0d1e5fa7
PA
4192/* Clear context switchable stepping state. */
4193
4194void
4e1c45ea 4195init_thread_stepping_state (struct thread_info *tss)
0d1e5fa7 4196{
7f5ef605 4197 tss->stepped_breakpoint = 0;
0d1e5fa7 4198 tss->stepping_over_breakpoint = 0;
963f9c80 4199 tss->stepping_over_watchpoint = 0;
0d1e5fa7 4200 tss->step_after_step_resume_breakpoint = 0;
cd0fc7c3
SS
4201}
4202
ab1ddbcf 4203/* See infrun.h. */
c32c64b7 4204
6efcd9a8 4205void
5b6d1e4f
PA
4206set_last_target_status (process_stratum_target *target, ptid_t ptid,
4207 target_waitstatus status)
c32c64b7 4208{
5b6d1e4f 4209 target_last_proc_target = target;
c32c64b7
DE
4210 target_last_wait_ptid = ptid;
4211 target_last_waitstatus = status;
4212}
4213
ab1ddbcf 4214/* See infrun.h. */
e02bc4cc
DS
4215
4216void
5b6d1e4f
PA
4217get_last_target_status (process_stratum_target **target, ptid_t *ptid,
4218 target_waitstatus *status)
e02bc4cc 4219{
5b6d1e4f
PA
4220 if (target != nullptr)
4221 *target = target_last_proc_target;
ab1ddbcf
PA
4222 if (ptid != nullptr)
4223 *ptid = target_last_wait_ptid;
4224 if (status != nullptr)
4225 *status = target_last_waitstatus;
e02bc4cc
DS
4226}
4227
ab1ddbcf
PA
4228/* See infrun.h. */
4229
ac264b3b
MS
4230void
4231nullify_last_target_wait_ptid (void)
4232{
5b6d1e4f 4233 target_last_proc_target = nullptr;
ac264b3b 4234 target_last_wait_ptid = minus_one_ptid;
ab1ddbcf 4235 target_last_waitstatus = {};
ac264b3b
MS
4236}
4237
dcf4fbde 4238/* Switch thread contexts. */
dd80620e
MS
4239
4240static void
00431a78 4241context_switch (execution_control_state *ecs)
dd80620e 4242{
1eb8556f 4243 if (ecs->ptid != inferior_ptid
5b6d1e4f
PA
4244 && (inferior_ptid == null_ptid
4245 || ecs->event_thread != inferior_thread ()))
fd48f117 4246 {
1eb8556f
SM
4247 infrun_debug_printf ("Switching context from %s to %s",
4248 target_pid_to_str (inferior_ptid).c_str (),
4249 target_pid_to_str (ecs->ptid).c_str ());
fd48f117
DJ
4250 }
4251
00431a78 4252 switch_to_thread (ecs->event_thread);
dd80620e
MS
4253}
4254
d8dd4d5f
PA
4255/* If the target can't tell whether we've hit breakpoints
4256 (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
4257 check whether that could have been caused by a breakpoint. If so,
4258 adjust the PC, per gdbarch_decr_pc_after_break. */
4259
4fa8626c 4260static void
d8dd4d5f 4261adjust_pc_after_break (struct thread_info *thread,
1edb66d8 4262 const target_waitstatus *ws)
4fa8626c 4263{
24a73cce
UW
4264 struct regcache *regcache;
4265 struct gdbarch *gdbarch;
118e6252 4266 CORE_ADDR breakpoint_pc, decr_pc;
4fa8626c 4267
4fa8626c
DJ
4268 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
4269 we aren't, just return.
9709f61c
DJ
4270
4271 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
b798847d
UW
4272 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
4273 implemented by software breakpoints should be handled through the normal
4274 breakpoint layer.
8fb3e588 4275
4fa8626c
DJ
4276 NOTE drow/2004-01-31: On some targets, breakpoints may generate
4277 different signals (SIGILL or SIGEMT for instance), but it is less
4278 clear where the PC is pointing afterwards. It may not match
b798847d
UW
4279 gdbarch_decr_pc_after_break. I don't know any specific target that
4280 generates these signals at breakpoints (the code has been in GDB since at
4281 least 1992) so I can not guess how to handle them here.
8fb3e588 4282
e6cf7916
UW
4283 In earlier versions of GDB, a target with
4284 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
b798847d
UW
4285 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
4286 target with both of these set in GDB history, and it seems unlikely to be
4287 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
4fa8626c 4288
d8dd4d5f 4289 if (ws->kind != TARGET_WAITKIND_STOPPED)
4fa8626c
DJ
4290 return;
4291
d8dd4d5f 4292 if (ws->value.sig != GDB_SIGNAL_TRAP)
4fa8626c
DJ
4293 return;
4294
4058b839
PA
4295 /* In reverse execution, when a breakpoint is hit, the instruction
4296 under it has already been de-executed. The reported PC always
4297 points at the breakpoint address, so adjusting it further would
4298 be wrong. E.g., consider this case on a decr_pc_after_break == 1
4299 architecture:
4300
4301 B1 0x08000000 : INSN1
4302 B2 0x08000001 : INSN2
4303 0x08000002 : INSN3
4304 PC -> 0x08000003 : INSN4
4305
4306 Say you're stopped at 0x08000003 as above. Reverse continuing
4307 from that point should hit B2 as below. Reading the PC when the
4308 SIGTRAP is reported should read 0x08000001 and INSN2 should have
4309 been de-executed already.
4310
4311 B1 0x08000000 : INSN1
4312 B2 PC -> 0x08000001 : INSN2
4313 0x08000002 : INSN3
4314 0x08000003 : INSN4
4315
4316 We can't apply the same logic as for forward execution, because
4317 we would wrongly adjust the PC to 0x08000000, since there's a
4318 breakpoint at PC - 1. We'd then report a hit on B1, although
4319 INSN1 hadn't been de-executed yet. Doing nothing is the correct
4320 behaviour. */
4321 if (execution_direction == EXEC_REVERSE)
4322 return;
4323
1cf4d951
PA
4324 /* If the target can tell whether the thread hit a SW breakpoint,
4325 trust it. Targets that can tell also adjust the PC
4326 themselves. */
4327 if (target_supports_stopped_by_sw_breakpoint ())
4328 return;
4329
4330 /* Note that relying on whether a breakpoint is planted in memory to
4331 determine this can fail. E.g,. the breakpoint could have been
4332 removed since. Or the thread could have been told to step an
4333 instruction the size of a breakpoint instruction, and only
4334 _after_ was a breakpoint inserted at its address. */
4335
24a73cce
UW
4336 /* If this target does not decrement the PC after breakpoints, then
4337 we have nothing to do. */
00431a78 4338 regcache = get_thread_regcache (thread);
ac7936df 4339 gdbarch = regcache->arch ();
118e6252 4340
527a273a 4341 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
118e6252 4342 if (decr_pc == 0)
24a73cce
UW
4343 return;
4344
8b86c959 4345 const address_space *aspace = regcache->aspace ();
6c95b8df 4346
8aad930b
AC
4347 /* Find the location where (if we've hit a breakpoint) the
4348 breakpoint would be. */
118e6252 4349 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
8aad930b 4350
1cf4d951
PA
4351 /* If the target can't tell whether a software breakpoint triggered,
4352 fallback to figuring it out based on breakpoints we think were
4353 inserted in the target, and on whether the thread was stepped or
4354 continued. */
4355
1c5cfe86
PA
4356 /* Check whether there actually is a software breakpoint inserted at
4357 that location.
4358
4359 If in non-stop mode, a race condition is possible where we've
4360 removed a breakpoint, but stop events for that breakpoint were
4361 already queued and arrive later. To suppress those spurious
4362 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
1cf4d951
PA
4363 and retire them after a number of stop events are reported. Note
4364 this is a heuristic and can thus get confused. The real fix is
4365 to get the "stopped by SW BP and needs adjustment" info out of
4366 the target/kernel (and thus never reach here; see above). */
6c95b8df 4367 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
fbea99ea
PA
4368 || (target_is_non_stop_p ()
4369 && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
8aad930b 4370 {
07036511 4371 gdb::optional<scoped_restore_tmpl<int>> restore_operation_disable;
abbb1732 4372
8213266a 4373 if (record_full_is_used ())
07036511
TT
4374 restore_operation_disable.emplace
4375 (record_full_gdb_operation_disable_set ());
96429cc8 4376
1c0fdd0e
UW
4377 /* When using hardware single-step, a SIGTRAP is reported for both
4378 a completed single-step and a software breakpoint. Need to
4379 differentiate between the two, as the latter needs adjusting
4380 but the former does not.
4381
4382 The SIGTRAP can be due to a completed hardware single-step only if
4383 - we didn't insert software single-step breakpoints
1c0fdd0e
UW
4384 - this thread is currently being stepped
4385
4386 If any of these events did not occur, we must have stopped due
4387 to hitting a software breakpoint, and have to back up to the
4388 breakpoint address.
4389
4390 As a special case, we could have hardware single-stepped a
4391 software breakpoint. In this case (prev_pc == breakpoint_pc),
4392 we also need to back up to the breakpoint address. */
4393
d8dd4d5f
PA
4394 if (thread_has_single_step_breakpoints_set (thread)
4395 || !currently_stepping (thread)
4396 || (thread->stepped_breakpoint
4397 && thread->prev_pc == breakpoint_pc))
515630c5 4398 regcache_write_pc (regcache, breakpoint_pc);
8aad930b 4399 }
4fa8626c
DJ
4400}
4401
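/* A simplified standalone sketch (not GDB code) of the adjustment
   performed by adjust_pc_after_break above, for a target whose
   breakpoint trap reports a PC DECR_PC bytes past the breakpoint
   address.  It only rewinds when the trap is believed to come from a
   known software breakpoint rather than a completed single-step; the
   real function consults several more conditions.  The "model_*"
   names and parameters are hypothetical.  */

static unsigned long
model_adjust_pc_after_break (unsigned long reported_pc,
			     unsigned long decr_pc,
			     bool sw_breakpoint_inserted_at_location,
			     bool completed_single_step)
{
  if (decr_pc == 0)
    return reported_pc;	/* Target doesn't move the PC; nothing to do.  */

  unsigned long breakpoint_pc = reported_pc - decr_pc;

  if (sw_breakpoint_inserted_at_location && !completed_single_step)
    return breakpoint_pc;	/* Back up onto the breakpoint address.  */

  return reported_pc;	/* Completed single-step; leave the PC alone.  */
}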
c4464ade 4402static bool
edb3359d
DJ
4403stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
4404{
4405 for (frame = get_prev_frame (frame);
4406 frame != NULL;
4407 frame = get_prev_frame (frame))
4408 {
4409 if (frame_id_eq (get_frame_id (frame), step_frame_id))
c4464ade
SM
4410 return true;
4411
edb3359d
DJ
4412 if (get_frame_type (frame) != INLINE_FRAME)
4413 break;
4414 }
4415
c4464ade 4416 return false;
edb3359d
DJ
4417}
4418
4a4c04f1
BE
4419/* Look for an inline frame that is marked for skip.
4420 If PREV_FRAME is TRUE start at the previous frame,
4421 otherwise start at the current frame. Stop at the
4422 first non-inline frame, or at the frame where the
4423 step started. */
4424
4425static bool
4426inline_frame_is_marked_for_skip (bool prev_frame, struct thread_info *tp)
4427{
4428 struct frame_info *frame = get_current_frame ();
4429
4430 if (prev_frame)
4431 frame = get_prev_frame (frame);
4432
4433 for (; frame != NULL; frame = get_prev_frame (frame))
4434 {
4435 const char *fn = NULL;
4436 symtab_and_line sal;
4437 struct symbol *sym;
4438
4439 if (frame_id_eq (get_frame_id (frame), tp->control.step_frame_id))
4440 break;
4441 if (get_frame_type (frame) != INLINE_FRAME)
4442 break;
4443
4444 sal = find_frame_sal (frame);
4445 sym = get_frame_function (frame);
4446
4447 if (sym != NULL)
4448 fn = sym->print_name ();
4449
4450 if (sal.line != 0
4451 && function_name_is_marked_for_skip (fn, sal))
4452 return true;
4453 }
4454
4455 return false;
4456}
4457
c65d6b55
PA
4458/* If the event thread has the stop requested flag set, pretend it
4459 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
4460 target_stop). */
4461
4462static bool
4463handle_stop_requested (struct execution_control_state *ecs)
4464{
4465 if (ecs->event_thread->stop_requested)
4466 {
4467 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
4468 ecs->ws.value.sig = GDB_SIGNAL_0;
4469 handle_signal_stop (ecs);
4470 return true;
4471 }
4472 return false;
4473}
4474
a96d9b2e 4475/* Auxiliary function that handles syscall entry/return events.
c4464ade
SM
4476 It returns true if the inferior should keep going (and GDB
4477 should ignore the event), or false if the event deserves to be
a96d9b2e 4478 processed. */
ca2163eb 4479
c4464ade 4480static bool
ca2163eb 4481handle_syscall_event (struct execution_control_state *ecs)
a96d9b2e 4482{
ca2163eb 4483 struct regcache *regcache;
ca2163eb
PA
4484 int syscall_number;
4485
00431a78 4486 context_switch (ecs);
ca2163eb 4487
00431a78 4488 regcache = get_thread_regcache (ecs->event_thread);
f90263c1 4489 syscall_number = ecs->ws.value.syscall_number;
1edb66d8 4490 ecs->event_thread->set_stop_pc (regcache_read_pc (regcache));
ca2163eb 4491
a96d9b2e
SDJ
4492 if (catch_syscall_enabled () > 0
4493 && catching_syscall_number (syscall_number) > 0)
4494 {
1eb8556f 4495 infrun_debug_printf ("syscall number=%d", syscall_number);
a96d9b2e 4496
16c381f0 4497 ecs->event_thread->control.stop_bpstat
a01bda52 4498 = bpstat_stop_status (regcache->aspace (),
1edb66d8 4499 ecs->event_thread->stop_pc (),
f2ffa92b 4500 ecs->event_thread, &ecs->ws);
ab04a2af 4501
c65d6b55 4502 if (handle_stop_requested (ecs))
c4464ade 4503 return false;
c65d6b55 4504
ce12b012 4505 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
ca2163eb
PA
4506 {
4507 /* Catchpoint hit. */
c4464ade 4508 return false;
ca2163eb 4509 }
a96d9b2e 4510 }
ca2163eb 4511
c65d6b55 4512 if (handle_stop_requested (ecs))
c4464ade 4513 return false;
c65d6b55 4514
ca2163eb 4515 /* If no catchpoint triggered for this, then keep going. */
ca2163eb 4516 keep_going (ecs);
c4464ade
SM
4517
4518 return true;
a96d9b2e
SDJ
4519}
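/* Editorial note: handle_syscall_event is shared by the
   TARGET_WAITKIND_SYSCALL_ENTRY and TARGET_WAITKIND_SYSCALL_RETURN
   cases of handle_inferior_event below.  Only a pending stop request
   or a "catch syscall" catchpoint matching ecs->ws.value.syscall_number
   turns the event into a user-visible stop; otherwise keep_going
   resumes the thread and the event is ignored.  */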
4520
7e324e48
GB
4521/* Lazily fill in the execution_control_state's stop_func_* fields. */
4522
4523static void
4524fill_in_stop_func (struct gdbarch *gdbarch,
4525 struct execution_control_state *ecs)
4526{
4527 if (!ecs->stop_func_filled_in)
4528 {
98a617f8 4529 const block *block;
fe830662 4530 const general_symbol_info *gsi;
98a617f8 4531
7e324e48
GB
4532 /* Don't care about return value; stop_func_start and stop_func_name
4533 will both be 0 if it doesn't work. */
1edb66d8 4534 find_pc_partial_function_sym (ecs->event_thread->stop_pc (),
fe830662
TT
4535 &gsi,
4536 &ecs->stop_func_start,
4537 &ecs->stop_func_end,
4538 &block);
4539 ecs->stop_func_name = gsi == nullptr ? nullptr : gsi->print_name ();
98a617f8
KB
4540
4541 /* The call to find_pc_partial_function, above, will set
4542 stop_func_start and stop_func_end to the start and end
4543 of the range containing the stop pc. If this range
4544 contains the entry pc for the block (which is always the
4545 case for contiguous blocks), advance stop_func_start past
4546 the function's start offset and entrypoint. Note that
4547 stop_func_start is NOT advanced when in a range of a
4548 non-contiguous block that does not contain the entry pc. */
4549 if (block != nullptr
4550 && ecs->stop_func_start <= BLOCK_ENTRY_PC (block)
4551 && BLOCK_ENTRY_PC (block) < ecs->stop_func_end)
4552 {
4553 ecs->stop_func_start
4554 += gdbarch_deprecated_function_start_offset (gdbarch);
4555
4556 if (gdbarch_skip_entrypoint_p (gdbarch))
4557 ecs->stop_func_start
4558 = gdbarch_skip_entrypoint (gdbarch, ecs->stop_func_start);
4559 }
591a12a1 4560
7e324e48
GB
4561 ecs->stop_func_filled_in = 1;
4562 }
4563}
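/* Editorial note: fill_in_stop_func is deliberately lazy -- callers in
   the stepping logic (not shown in this excerpt) invoke it only on the
   paths that actually need stop_func_start/stop_func_end, and the
   stop_func_filled_in flag makes repeated calls for the same event
   cheap.  */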
4564
4f5d7f63 4565
00431a78 4566/* Return the STOP_SOON field of the inferior pointed at by ECS. */
4f5d7f63
PA
4567
4568static enum stop_kind
00431a78 4569get_inferior_stop_soon (execution_control_state *ecs)
4f5d7f63 4570{
5b6d1e4f 4571 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
4f5d7f63
PA
4572
4573 gdb_assert (inf != NULL);
4574 return inf->control.stop_soon;
4575}
4576
5b6d1e4f
PA
4577/* Poll for one event out of the current target. Store the resulting
4578 waitstatus in WS, and return the event ptid. Does not block. */
372316f1
PA
4579
4580static ptid_t
5b6d1e4f 4581poll_one_curr_target (struct target_waitstatus *ws)
372316f1
PA
4582{
4583 ptid_t event_ptid;
372316f1
PA
4584
4585 overlay_cache_invalid = 1;
4586
4587 /* Flush target cache before starting to handle each event.
4588 Target was running and cache could be stale. This is just a
4589 heuristic. Running threads may modify target memory, but we
4590 don't get any event. */
4591 target_dcache_invalidate ();
4592
4593 if (deprecated_target_wait_hook)
5b6d1e4f 4594 event_ptid = deprecated_target_wait_hook (minus_one_ptid, ws, TARGET_WNOHANG);
372316f1 4595 else
5b6d1e4f 4596 event_ptid = target_wait (minus_one_ptid, ws, TARGET_WNOHANG);
372316f1
PA
4597
4598 if (debug_infrun)
5b6d1e4f 4599 print_target_wait_results (minus_one_ptid, event_ptid, ws);
372316f1
PA
4600
4601 return event_ptid;
4602}
4603
5b6d1e4f
PA
4604/* Wait for one event out of any target. */
4605
4606static wait_one_event
4607wait_one ()
4608{
4609 while (1)
4610 {
4611 for (inferior *inf : all_inferiors ())
4612 {
4613 process_stratum_target *target = inf->process_target ();
4614 if (target == NULL
4615 || !target->is_async_p ()
4616 || !target->threads_executing)
4617 continue;
4618
4619 switch_to_inferior_no_thread (inf);
4620
4621 wait_one_event event;
4622 event.target = target;
4623 event.ptid = poll_one_curr_target (&event.ws);
4624
4625 if (event.ws.kind == TARGET_WAITKIND_NO_RESUMED)
4626 {
4627 /* If nothing is resumed, remove the target from the
4628 event loop. */
4629 target_async (0);
4630 }
4631 else if (event.ws.kind != TARGET_WAITKIND_IGNORE)
4632 return event;
4633 }
4634
4635 /* Block waiting for some event. */
4636
4637 fd_set readfds;
4638 int nfds = 0;
4639
4640 FD_ZERO (&readfds);
4641
4642 for (inferior *inf : all_inferiors ())
4643 {
4644 process_stratum_target *target = inf->process_target ();
4645 if (target == NULL
4646 || !target->is_async_p ()
4647 || !target->threads_executing)
4648 continue;
4649
4650 int fd = target->async_wait_fd ();
4651 FD_SET (fd, &readfds);
4652 if (nfds <= fd)
4653 nfds = fd + 1;
4654 }
4655
4656 if (nfds == 0)
4657 {
4658 /* No waitable targets left. All must be stopped. */
4659 return {NULL, minus_one_ptid, {TARGET_WAITKIND_NO_RESUMED}};
4660 }
4661
4662 QUIT;
4663
4664 int numfds = interruptible_select (nfds, &readfds, 0, NULL, 0);
4665 if (numfds < 0)
4666 {
4667 if (errno == EINTR)
4668 continue;
4669 else
4670 perror_with_name ("interruptible_select");
4671 }
4672 }
4673}
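/* Editorial sketch (not part of upstream infrun.c): the loop above is
   essentially a poll/select pair over all async process targets:

     while (1)
       {
	 for each non-exited target T with executing threads:
	   poll T with TARGET_WNOHANG;	      // poll_one_curr_target
	   if an event other than TARGET_WAITKIND_IGNORE arrived,
	     return it;

	 build an fd_set from each target's async_wait_fd ();
	 if the set is empty, report TARGET_WAITKIND_NO_RESUMED;
	 interruptible_select (...);	      // block until readable
       }

   so a target with nothing resumed simply drops out of both the poll
   pass and the fd_set until something resumes it again.  */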
4674
372316f1
PA
4675/* Save the thread's event and stop reason to process it later. */
4676
4677static void
5b6d1e4f 4678save_waitstatus (struct thread_info *tp, const target_waitstatus *ws)
372316f1 4679{
1eb8556f
SM
4680 infrun_debug_printf ("saving status %s for %d.%ld.%ld",
4681 target_waitstatus_to_string (ws).c_str (),
4682 tp->ptid.pid (),
4683 tp->ptid.lwp (),
4684 tp->ptid.tid ());
372316f1
PA
4685
4686 /* Record for later. */
1edb66d8 4687 tp->set_pending_waitstatus (*ws);
372316f1 4688
372316f1
PA
4689 if (ws->kind == TARGET_WAITKIND_STOPPED
4690 && ws->value.sig == GDB_SIGNAL_TRAP)
4691 {
89ba430c
SM
4692 struct regcache *regcache = get_thread_regcache (tp);
4693 const address_space *aspace = regcache->aspace ();
372316f1
PA
4694 CORE_ADDR pc = regcache_read_pc (regcache);
4695
1edb66d8 4696 adjust_pc_after_break (tp, &tp->pending_waitstatus ());
372316f1 4697
18493a00
PA
4698 scoped_restore_current_thread restore_thread;
4699 switch_to_thread (tp);
4700
4701 if (target_stopped_by_watchpoint ())
1edb66d8 4702 tp->set_stop_reason (TARGET_STOPPED_BY_WATCHPOINT);
372316f1 4703 else if (target_supports_stopped_by_sw_breakpoint ()
18493a00 4704 && target_stopped_by_sw_breakpoint ())
1edb66d8 4705 tp->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT);
372316f1 4706 else if (target_supports_stopped_by_hw_breakpoint ()
18493a00 4707 && target_stopped_by_hw_breakpoint ())
1edb66d8 4708 tp->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT);
372316f1 4709 else if (!target_supports_stopped_by_hw_breakpoint ()
1edb66d8
SM
4710 && hardware_breakpoint_inserted_here_p (aspace, pc))
4711 tp->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT);
372316f1 4712 else if (!target_supports_stopped_by_sw_breakpoint ()
1edb66d8
SM
4713 && software_breakpoint_inserted_here_p (aspace, pc))
4714 tp->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT);
372316f1
PA
4715 else if (!thread_has_single_step_breakpoints_set (tp)
4716 && currently_stepping (tp))
1edb66d8 4717 tp->set_stop_reason (TARGET_STOPPED_BY_SINGLE_STEP);
372316f1
PA
4718 }
4719}
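/* Editorial note: the stop-reason probing above mirrors what
   handle_signal_stop later expects to find, so a thread whose event is
   stashed away (rather than processed immediately) still remembers
   whether its SIGTRAP came from a watchpoint, a software or hardware
   breakpoint, or a completed single-step.  */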
4720
293b3ebc
TBA
4721/* Mark the non-executing threads accordingly. In all-stop, all
4722 threads of all processes are stopped when we get any event
4723 reported. In non-stop mode, only the event thread stops. */
4724
4725static void
4726mark_non_executing_threads (process_stratum_target *target,
4727 ptid_t event_ptid,
4728 struct target_waitstatus ws)
4729{
4730 ptid_t mark_ptid;
4731
4732 if (!target_is_non_stop_p ())
4733 mark_ptid = minus_one_ptid;
4734 else if (ws.kind == TARGET_WAITKIND_SIGNALLED
4735 || ws.kind == TARGET_WAITKIND_EXITED)
4736 {
4737 /* If we're handling a process exit in non-stop mode, even
4738 though threads haven't been deleted yet, one would think
4739 that there is nothing to do, as threads of the dead process
4740 will be soon deleted, and threads of any other process were
4741 left running. However, on some targets, threads survive a
4742 process exit event. E.g., for the "checkpoint" command,
4743 when the current checkpoint/fork exits, linux-fork.c
4744 automatically switches to another fork from within
4745 target_mourn_inferior, by associating the same
4746 inferior/thread to another fork. We haven't mourned yet at
4747 this point, but we must mark any threads left in the
4748 process as not-executing so that finish_thread_state marks
4749 them stopped (in the user's perspective) if/when we present
4750 the stop to the user. */
4751 mark_ptid = ptid_t (event_ptid.pid ());
4752 }
4753 else
4754 mark_ptid = event_ptid;
4755
4756 set_executing (target, mark_ptid, false);
4757
4758 /* Likewise the resumed flag. */
4759 set_resumed (target, mark_ptid, false);
4760}
4761
d758e62c
PA
4762/* Handle one event after stopping threads. If the eventing thread
4763 reports back any interesting event, we leave it pending. If the
4764 eventing thread was in the middle of a displaced step, we
8ff53139
PA
4765 cancel/finish it, and unless the thread's inferior is being
4766 detached, put the thread back in the step-over chain. Returns true
4767 if there are no resumed threads left in the target (thus there's no
4768 point in waiting further), false otherwise. */
d758e62c
PA
4769
4770static bool
4771handle_one (const wait_one_event &event)
4772{
4773 infrun_debug_printf
4774 ("%s %s", target_waitstatus_to_string (&event.ws).c_str (),
4775 target_pid_to_str (event.ptid).c_str ());
4776
4777 if (event.ws.kind == TARGET_WAITKIND_NO_RESUMED)
4778 {
4779 /* All resumed threads exited. */
4780 return true;
4781 }
4782 else if (event.ws.kind == TARGET_WAITKIND_THREAD_EXITED
4783 || event.ws.kind == TARGET_WAITKIND_EXITED
4784 || event.ws.kind == TARGET_WAITKIND_SIGNALLED)
4785 {
4786 /* One thread/process exited/signalled. */
4787
4788 thread_info *t = nullptr;
4789
4790 /* The target may have reported just a pid. If so, try
4791 the first non-exited thread. */
4792 if (event.ptid.is_pid ())
4793 {
4794 int pid = event.ptid.pid ();
4795 inferior *inf = find_inferior_pid (event.target, pid);
4796 for (thread_info *tp : inf->non_exited_threads ())
4797 {
4798 t = tp;
4799 break;
4800 }
4801
4802 /* If there is no available thread, the event would
4803 have to be appended to a per-inferior event list,
4804 which does not exist (and if it did, we'd have
4805	     to adjust the run control commands to be able to
4806 resume such an inferior). We assert here instead
4807 of going into an infinite loop. */
4808 gdb_assert (t != nullptr);
4809
4810 infrun_debug_printf
4811 ("using %s", target_pid_to_str (t->ptid).c_str ());
4812 }
4813 else
4814 {
4815 t = find_thread_ptid (event.target, event.ptid);
4816 /* Check if this is the first time we see this thread.
4817 Don't bother adding if it individually exited. */
4818 if (t == nullptr
4819 && event.ws.kind != TARGET_WAITKIND_THREAD_EXITED)
4820 t = add_thread (event.target, event.ptid);
4821 }
4822
4823 if (t != nullptr)
4824 {
4825 /* Set the threads as non-executing to avoid
4826 another stop attempt on them. */
4827 switch_to_thread_no_regs (t);
4828 mark_non_executing_threads (event.target, event.ptid,
4829 event.ws);
4830 save_waitstatus (t, &event.ws);
4831 t->stop_requested = false;
4832 }
4833 }
4834 else
4835 {
4836 thread_info *t = find_thread_ptid (event.target, event.ptid);
4837 if (t == NULL)
4838 t = add_thread (event.target, event.ptid);
4839
4840 t->stop_requested = 0;
4841 t->executing = 0;
7846f3aa 4842 t->set_resumed (false);
d758e62c
PA
4843 t->control.may_range_step = 0;
4844
4845 /* This may be the first time we see the inferior report
4846 a stop. */
4847 inferior *inf = find_inferior_ptid (event.target, event.ptid);
4848 if (inf->needs_setup)
4849 {
4850 switch_to_thread_no_regs (t);
4851 setup_inferior (0);
4852 }
4853
4854 if (event.ws.kind == TARGET_WAITKIND_STOPPED
4855 && event.ws.value.sig == GDB_SIGNAL_0)
4856 {
4857 /* We caught the event that we intended to catch, so
1edb66d8 4858 there's no event to save as pending. */
d758e62c
PA
4859
4860 if (displaced_step_finish (t, GDB_SIGNAL_0)
4861 == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
4862 {
4863 /* Add it back to the step-over queue. */
4864 infrun_debug_printf
4865 ("displaced-step of %s canceled",
4866 target_pid_to_str (t->ptid).c_str ());
4867
4868 t->control.trap_expected = 0;
8ff53139
PA
4869 if (!t->inf->detaching)
4870 global_thread_step_over_chain_enqueue (t);
d758e62c
PA
4871 }
4872 }
4873 else
4874 {
4875 enum gdb_signal sig;
4876 struct regcache *regcache;
4877
4878 infrun_debug_printf
4879 ("target_wait %s, saving status for %d.%ld.%ld",
4880 target_waitstatus_to_string (&event.ws).c_str (),
4881 t->ptid.pid (), t->ptid.lwp (), t->ptid.tid ());
4882
4883 /* Record for later. */
4884 save_waitstatus (t, &event.ws);
4885
4886 sig = (event.ws.kind == TARGET_WAITKIND_STOPPED
4887 ? event.ws.value.sig : GDB_SIGNAL_0);
4888
4889 if (displaced_step_finish (t, sig)
4890 == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
4891 {
4892 /* Add it back to the step-over queue. */
4893 t->control.trap_expected = 0;
8ff53139
PA
4894 if (!t->inf->detaching)
4895 global_thread_step_over_chain_enqueue (t);
d758e62c
PA
4896 }
4897
4898 regcache = get_thread_regcache (t);
1edb66d8 4899 t->set_stop_pc (regcache_read_pc (regcache));
d758e62c
PA
4900
4901 infrun_debug_printf ("saved stop_pc=%s for %s "
4902 "(currently_stepping=%d)",
1edb66d8 4903 paddress (target_gdbarch (), t->stop_pc ()),
d758e62c
PA
4904 target_pid_to_str (t->ptid).c_str (),
4905 currently_stepping (t));
4906 }
4907 }
4908
4909 return false;
4910}
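/* Editorial note: handle_one is the per-event half of stop_all_threads
   below.  A TARGET_WAITKIND_NO_RESUMED event means nothing is left to
   wait for (return true); a plain GDB_SIGNAL_0 stop is the stop we
   ourselves requested and needs no saving; anything else is stashed on
   its thread via save_waitstatus so do_target_wait can report it once
   every thread has stopped.  A thread caught mid displaced-step is
   re-queued for a later step-over unless its inferior is detaching.  */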
4911
6efcd9a8 4912/* See infrun.h. */
372316f1 4913
6efcd9a8 4914void
372316f1
PA
4915stop_all_threads (void)
4916{
4917 /* We may need multiple passes to discover all threads. */
4918 int pass;
4919 int iterations = 0;
372316f1 4920
53cccef1 4921 gdb_assert (exists_non_stop_target ());
372316f1 4922
1eb8556f 4923 infrun_debug_printf ("starting");
372316f1 4924
00431a78 4925 scoped_restore_current_thread restore_thread;
372316f1 4926
6ad82919
TBA
4927 /* Enable thread events of all targets. */
4928 for (auto *target : all_non_exited_process_targets ())
4929 {
4930 switch_to_target_no_thread (target);
4931 target_thread_events (true);
4932 }
4933
4934 SCOPE_EXIT
4935 {
4936 /* Disable thread events of all targets. */
4937 for (auto *target : all_non_exited_process_targets ())
4938 {
4939 switch_to_target_no_thread (target);
4940 target_thread_events (false);
4941 }
4942
17417fb0 4943 /* Use debug_prefixed_printf directly to get a meaningful function
dda83cd7 4944 name. */
6ad82919 4945 if (debug_infrun)
17417fb0 4946 debug_prefixed_printf ("infrun", "stop_all_threads", "done");
6ad82919 4947 };
65706a29 4948
372316f1
PA
4949 /* Request threads to stop, and then wait for the stops. Because
4950 threads we already know about can spawn more threads while we're
4951 trying to stop them, and we only learn about new threads when we
4952 update the thread list, do this in a loop, and keep iterating
4953 until two passes find no threads that need to be stopped. */
4954 for (pass = 0; pass < 2; pass++, iterations++)
4955 {
1eb8556f 4956 infrun_debug_printf ("pass=%d, iterations=%d", pass, iterations);
372316f1
PA
4957 while (1)
4958 {
29d6859f 4959 int waits_needed = 0;
372316f1 4960
a05575d3
TBA
4961 for (auto *target : all_non_exited_process_targets ())
4962 {
4963 switch_to_target_no_thread (target);
4964 update_thread_list ();
4965 }
372316f1
PA
4966
4967 /* Go through all threads looking for threads that we need
4968 to tell the target to stop. */
08036331 4969 for (thread_info *t : all_non_exited_threads ())
372316f1 4970 {
53cccef1
TBA
4971 /* For a single-target setting with an all-stop target,
4972 we would not even arrive here. For a multi-target
4973 setting, until GDB is able to handle a mixture of
4974 all-stop and non-stop targets, simply skip all-stop
4975 targets' threads. This should be fine due to the
4976 protection of 'check_multi_target_resumption'. */
4977
4978 switch_to_thread_no_regs (t);
4979 if (!target_is_non_stop_p ())
4980 continue;
4981
372316f1
PA
4982 if (t->executing)
4983 {
4984 /* If already stopping, don't request a stop again.
4985 We just haven't seen the notification yet. */
4986 if (!t->stop_requested)
4987 {
1eb8556f
SM
4988 infrun_debug_printf (" %s executing, need stop",
4989 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
4990 target_stop (t->ptid);
4991 t->stop_requested = 1;
4992 }
4993 else
4994 {
1eb8556f
SM
4995 infrun_debug_printf (" %s executing, already stopping",
4996 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
4997 }
4998
4999 if (t->stop_requested)
29d6859f 5000 waits_needed++;
372316f1
PA
5001 }
5002 else
5003 {
1eb8556f
SM
5004 infrun_debug_printf (" %s not executing",
5005 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
5006
5007 /* The thread may be not executing, but still be
5008 resumed with a pending status to process. */
7846f3aa 5009 t->set_resumed (false);
372316f1
PA
5010 }
5011 }
5012
29d6859f 5013 if (waits_needed == 0)
372316f1
PA
5014 break;
5015
5016 /* If we find new threads on the second iteration, restart
5017 over. We want to see two iterations in a row with all
5018 threads stopped. */
5019 if (pass > 0)
5020 pass = -1;
5021
29d6859f 5022 for (int i = 0; i < waits_needed; i++)
c29705b7 5023 {
29d6859f 5024 wait_one_event event = wait_one ();
d758e62c
PA
5025 if (handle_one (event))
5026 break;
372316f1
PA
5027 }
5028 }
5029 }
372316f1
PA
5030}
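/* Editorial sketch (not part of upstream infrun.c): the two-pass scheme
   above can be read as

     for (pass = 0; pass < 2; pass++)
       while (some thread still needs a stop)
	 {
	   update_thread_list ();	    // may discover new threads
	   request target_stop () where needed;
	   wait_one () / handle_one () once per outstanding stop;
	   if (new work was found && pass > 0)
	     pass = -1;			    // restart the two-pass window
	 }

   i.e. GDB keeps iterating until two consecutive passes find every
   known thread already stopped, which covers threads spawned while the
   stop requests were still in flight.  */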
5031
f4836ba9
PA
5032/* Handle a TARGET_WAITKIND_NO_RESUMED event. */
5033
c4464ade 5034static bool
f4836ba9
PA
5035handle_no_resumed (struct execution_control_state *ecs)
5036{
3b12939d 5037 if (target_can_async_p ())
f4836ba9 5038 {
c4464ade 5039 bool any_sync = false;
f4836ba9 5040
2dab0c7b 5041 for (ui *ui : all_uis ())
3b12939d
PA
5042 {
5043 if (ui->prompt_state == PROMPT_BLOCKED)
5044 {
c4464ade 5045 any_sync = true;
3b12939d
PA
5046 break;
5047 }
5048 }
5049 if (!any_sync)
5050 {
5051 /* There were no unwaited-for children left in the target, but,
5052 we're not synchronously waiting for events either. Just
5053 ignore. */
5054
1eb8556f 5055 infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED (ignoring: bg)");
3b12939d 5056 prepare_to_wait (ecs);
c4464ade 5057 return true;
3b12939d 5058 }
f4836ba9
PA
5059 }
5060
5061 /* Otherwise, if we were running a synchronous execution command, we
5062 may need to cancel it and give the user back the terminal.
5063
5064 In non-stop mode, the target can't tell whether we've already
5065 consumed previous stop events, so it can end up sending us a
5066 no-resumed event like so:
5067
5068 #0 - thread 1 is left stopped
5069
5070 #1 - thread 2 is resumed and hits breakpoint
dda83cd7 5071 -> TARGET_WAITKIND_STOPPED
f4836ba9
PA
5072
5073 #2 - thread 3 is resumed and exits
dda83cd7 5074 this is the last resumed thread, so
f4836ba9
PA
5075 -> TARGET_WAITKIND_NO_RESUMED
5076
5077 #3 - gdb processes stop for thread 2 and decides to re-resume
dda83cd7 5078 it.
f4836ba9
PA
5079
5080 #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
dda83cd7 5081 thread 2 is now resumed, so the event should be ignored.
f4836ba9
PA
5082
5083 IOW, if the stop for thread 2 doesn't end a foreground command,
5084 then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
5085 event. But it could be that the event meant that thread 2 itself
5086 (or whatever other thread was the last resumed thread) exited.
5087
5088 To address this we refresh the thread list and check whether we
5089 have resumed threads _now_. In the example above, this removes
5090 thread 3 from the thread list. If thread 2 was re-resumed, we
5091 ignore this event. If we find no thread resumed, then we cancel
7d3badc6
PA
5092     the synchronous command and show "no unwaited-for children left" to the
5093 user. */
f4836ba9 5094
d6cc5d98 5095 inferior *curr_inf = current_inferior ();
7d3badc6 5096
d6cc5d98
PA
5097 scoped_restore_current_thread restore_thread;
5098
5099 for (auto *target : all_non_exited_process_targets ())
5100 {
5101 switch_to_target_no_thread (target);
5102 update_thread_list ();
5103 }
5104
5105 /* If:
5106
5107 - the current target has no thread executing, and
5108 - the current inferior is native, and
5109 - the current inferior is the one which has the terminal, and
5110 - we did nothing,
5111
5112 then a Ctrl-C from this point on would remain stuck in the
5113 kernel, until a thread resumes and dequeues it. That would
5114 result in the GDB CLI not reacting to Ctrl-C, not able to
5115 interrupt the program. To address this, if the current inferior
5116 no longer has any thread executing, we give the terminal to some
5117 other inferior that has at least one thread executing. */
5118 bool swap_terminal = true;
5119
5120 /* Whether to ignore this TARGET_WAITKIND_NO_RESUMED event, or
5121 whether to report it to the user. */
5122 bool ignore_event = false;
7d3badc6
PA
5123
5124 for (thread_info *thread : all_non_exited_threads ())
f4836ba9 5125 {
d6cc5d98
PA
5126 if (swap_terminal && thread->executing)
5127 {
5128 if (thread->inf != curr_inf)
5129 {
5130 target_terminal::ours ();
5131
5132 switch_to_thread (thread);
5133 target_terminal::inferior ();
5134 }
5135 swap_terminal = false;
5136 }
5137
5138 if (!ignore_event
1edb66d8 5139 && (thread->executing || thread->has_pending_waitstatus ()))
f4836ba9 5140 {
7d3badc6
PA
5141 /* Either there were no unwaited-for children left in the
5142 target at some point, but there are now, or some target
5143 other than the eventing one has unwaited-for children
5144 left. Just ignore. */
1eb8556f
SM
5145 infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED "
5146 "(ignoring: found resumed)");
d6cc5d98
PA
5147
5148 ignore_event = true;
f4836ba9 5149 }
d6cc5d98
PA
5150
5151 if (ignore_event && !swap_terminal)
5152 break;
5153 }
5154
5155 if (ignore_event)
5156 {
5157 switch_to_inferior_no_thread (curr_inf);
5158 prepare_to_wait (ecs);
c4464ade 5159 return true;
f4836ba9
PA
5160 }
5161
5162 /* Go ahead and report the event. */
c4464ade 5163 return false;
f4836ba9
PA
5164}
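/* Editorial note: the numbered scenario in the comment above is the
   reason handle_no_resumed re-checks the thread list instead of
   trusting the event -- a TARGET_WAITKIND_NO_RESUMED report can be
   stale by the time it is processed, so it is only shown to the user
   when, after refreshing every target's thread list, no thread is
   executing and none has a pending waitstatus.  */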
5165
05ba8510
PA
5166/* Given an execution control state that has been freshly filled in by
5167 an event from the inferior, figure out what it means and take
5168 appropriate action.
5169
5170 The alternatives are:
5171
22bcd14b 5172 1) stop_waiting and return; to really stop and return to the
05ba8510
PA
5173 debugger.
5174
5175 2) keep_going and return; to wait for the next event (set
5176 ecs->event_thread->stepping_over_breakpoint to 1 to single step
5177 once). */
c906108c 5178
ec9499be 5179static void
595915c1 5180handle_inferior_event (struct execution_control_state *ecs)
cd0fc7c3 5181{
595915c1
TT
5182 /* Make sure that all temporary struct value objects that were
5183 created during the handling of the event get deleted at the
5184 end. */
5185 scoped_value_mark free_values;
5186
1eb8556f 5187 infrun_debug_printf ("%s", target_waitstatus_to_string (&ecs->ws).c_str ());
c29705b7 5188
28736962
PA
5189 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
5190 {
5191 /* We had an event in the inferior, but we are not interested in
5192 handling it at this level. The lower layers have already
5193 done what needs to be done, if anything.
5194
5195 One of the possible circumstances for this is when the
5196 inferior produces output for the console. The inferior has
5197 not stopped, and we are ignoring the event. Another possible
5198 circumstance is any event which the lower level knows will be
5199 reported multiple times without an intervening resume. */
28736962
PA
5200 prepare_to_wait (ecs);
5201 return;
5202 }
5203
65706a29
PA
5204 if (ecs->ws.kind == TARGET_WAITKIND_THREAD_EXITED)
5205 {
65706a29
PA
5206 prepare_to_wait (ecs);
5207 return;
5208 }
5209
0e5bf2a8 5210 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
f4836ba9
PA
5211 && handle_no_resumed (ecs))
5212 return;
0e5bf2a8 5213
5b6d1e4f
PA
5214 /* Cache the last target/ptid/waitstatus. */
5215 set_last_target_status (ecs->target, ecs->ptid, ecs->ws);
e02bc4cc 5216
ca005067 5217 /* Always clear state belonging to the previous time we stopped. */
aa7d318d 5218 stop_stack_dummy = STOP_NONE;
ca005067 5219
0e5bf2a8
PA
5220 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
5221 {
5222 /* No unwaited-for children left. IOW, all resumed children
5223 have exited. */
c4464ade 5224 stop_print_frame = false;
22bcd14b 5225 stop_waiting (ecs);
0e5bf2a8
PA
5226 return;
5227 }
5228
8c90c137 5229 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
64776a0b 5230 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
359f5fe6 5231 {
5b6d1e4f 5232 ecs->event_thread = find_thread_ptid (ecs->target, ecs->ptid);
359f5fe6
PA
5233 /* If it's a new thread, add it to the thread database. */
5234 if (ecs->event_thread == NULL)
5b6d1e4f 5235 ecs->event_thread = add_thread (ecs->target, ecs->ptid);
c1e36e3e
PA
5236
5237 /* Disable range stepping. If the next step request could use a
5238 range, this will be end up re-enabled then. */
5239 ecs->event_thread->control.may_range_step = 0;
359f5fe6 5240 }
88ed393a
JK
5241
5242 /* Dependent on valid ECS->EVENT_THREAD. */
d8dd4d5f 5243 adjust_pc_after_break (ecs->event_thread, &ecs->ws);
88ed393a
JK
5244
5245 /* Dependent on the current PC value modified by adjust_pc_after_break. */
5246 reinit_frame_cache ();
5247
28736962
PA
5248 breakpoint_retire_moribund ();
5249
2b009048
DJ
5250 /* First, distinguish signals caused by the debugger from signals
5251 that have to do with the program's own actions. Note that
5252 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
5253 on the operating system version. Here we detect when a SIGILL or
5254 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
5255 something similar for SIGSEGV, since a SIGSEGV will be generated
5256 when we're trying to execute a breakpoint instruction on a
5257 non-executable stack. This happens for call dummy breakpoints
5258 for architectures like SPARC that place call dummies on the
5259 stack. */
2b009048 5260 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
a493e3e2
PA
5261 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
5262 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
5263 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
2b009048 5264 {
00431a78 5265 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
de0a0249 5266
a01bda52 5267 if (breakpoint_inserted_here_p (regcache->aspace (),
de0a0249
UW
5268 regcache_read_pc (regcache)))
5269 {
1eb8556f 5270 infrun_debug_printf ("Treating signal as SIGTRAP");
a493e3e2 5271 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
de0a0249 5272 }
2b009048
DJ
5273 }
5274
293b3ebc 5275 mark_non_executing_threads (ecs->target, ecs->ptid, ecs->ws);
8c90c137 5276
488f131b
JB
5277 switch (ecs->ws.kind)
5278 {
5279 case TARGET_WAITKIND_LOADED:
72d383bb
SM
5280 {
5281 context_switch (ecs);
5282 /* Ignore gracefully during startup of the inferior, as it might
5283 be the shell which has just loaded some objects, otherwise
5284 add the symbols for the newly loaded objects. Also ignore at
5285 the beginning of an attach or remote session; we will query
5286 the full list of libraries once the connection is
5287 established. */
5288
5289 stop_kind stop_soon = get_inferior_stop_soon (ecs);
5290 if (stop_soon == NO_STOP_QUIETLY)
5291 {
5292 struct regcache *regcache;
edcc5120 5293
72d383bb 5294 regcache = get_thread_regcache (ecs->event_thread);
edcc5120 5295
72d383bb 5296 handle_solib_event ();
ab04a2af 5297
72d383bb
SM
5298 ecs->event_thread->control.stop_bpstat
5299 = bpstat_stop_status (regcache->aspace (),
1edb66d8 5300 ecs->event_thread->stop_pc (),
72d383bb 5301 ecs->event_thread, &ecs->ws);
c65d6b55 5302
72d383bb 5303 if (handle_stop_requested (ecs))
94c57d6a 5304 return;
488f131b 5305
72d383bb
SM
5306 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
5307 {
5308 /* A catchpoint triggered. */
5309 process_event_stop_test (ecs);
5310 return;
5311 }
55409f9d 5312
72d383bb
SM
5313 /* If requested, stop when the dynamic linker notifies
5314 gdb of events. This allows the user to get control
5315 and place breakpoints in initializer routines for
5316 dynamically loaded objects (among other things). */
1edb66d8 5317 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
72d383bb
SM
5318 if (stop_on_solib_events)
5319 {
5320 /* Make sure we print "Stopped due to solib-event" in
5321 normal_stop. */
5322 stop_print_frame = true;
b0f4b84b 5323
72d383bb
SM
5324 stop_waiting (ecs);
5325 return;
5326 }
5327 }
b0f4b84b 5328
72d383bb
SM
5329 /* If we are skipping through a shell, or through shared library
5330 loading that we aren't interested in, resume the program. If
5331 we're running the program normally, also resume. */
5332 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
5333 {
5334 /* Loading of shared libraries might have changed breakpoint
5335 addresses. Make sure new breakpoints are inserted. */
5336 if (stop_soon == NO_STOP_QUIETLY)
5337 insert_breakpoints ();
5338 resume (GDB_SIGNAL_0);
5339 prepare_to_wait (ecs);
5340 return;
5341 }
5c09a2c5 5342
72d383bb
SM
5343 /* But stop if we're attaching or setting up a remote
5344 connection. */
5345 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
5346 || stop_soon == STOP_QUIETLY_REMOTE)
5347 {
5348 infrun_debug_printf ("quietly stopped");
5349 stop_waiting (ecs);
5350 return;
5351 }
5352
5353 internal_error (__FILE__, __LINE__,
5354 _("unhandled stop_soon: %d"), (int) stop_soon);
5355 }
c5aa993b 5356
488f131b 5357 case TARGET_WAITKIND_SPURIOUS:
c65d6b55
PA
5358 if (handle_stop_requested (ecs))
5359 return;
00431a78 5360 context_switch (ecs);
64ce06e4 5361 resume (GDB_SIGNAL_0);
488f131b
JB
5362 prepare_to_wait (ecs);
5363 return;
c5aa993b 5364
65706a29 5365 case TARGET_WAITKIND_THREAD_CREATED:
c65d6b55
PA
5366 if (handle_stop_requested (ecs))
5367 return;
00431a78 5368 context_switch (ecs);
65706a29
PA
5369 if (!switch_back_to_stepped_thread (ecs))
5370 keep_going (ecs);
5371 return;
5372
488f131b 5373 case TARGET_WAITKIND_EXITED:
940c3c06 5374 case TARGET_WAITKIND_SIGNALLED:
18493a00
PA
5375 {
5376 /* Depending on the system, ecs->ptid may point to a thread or
5377 to a process. On some targets, target_mourn_inferior may
5378 need to have access to the just-exited thread. That is the
5379 case of GNU/Linux's "checkpoint" support, for example.
5380 Call the switch_to_xxx routine as appropriate. */
5381 thread_info *thr = find_thread_ptid (ecs->target, ecs->ptid);
5382 if (thr != nullptr)
5383 switch_to_thread (thr);
5384 else
5385 {
5386 inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
5387 switch_to_inferior_no_thread (inf);
5388 }
5389 }
6c95b8df 5390 handle_vfork_child_exec_or_exit (0);
223ffa71 5391 target_terminal::ours (); /* Must do this before mourn anyway. */
488f131b 5392
0c557179
SDJ
5393      /* Clear any previous state of convenience variables.  */
5394 clear_exit_convenience_vars ();
5395
940c3c06
PA
5396 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
5397 {
5398 /* Record the exit code in the convenience variable $_exitcode, so
5399 that the user can inspect this again later. */
5400 set_internalvar_integer (lookup_internalvar ("_exitcode"),
5401 (LONGEST) ecs->ws.value.integer);
5402
5403 /* Also record this in the inferior itself. */
5404 current_inferior ()->has_exit_code = 1;
5405 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
8cf64490 5406
98eb56a4
PA
5407 /* Support the --return-child-result option. */
5408 return_child_result_value = ecs->ws.value.integer;
5409
76727919 5410 gdb::observers::exited.notify (ecs->ws.value.integer);
940c3c06
PA
5411 }
5412 else
0c557179 5413 {
00431a78 5414 struct gdbarch *gdbarch = current_inferior ()->gdbarch;
0c557179
SDJ
5415
5416 if (gdbarch_gdb_signal_to_target_p (gdbarch))
5417 {
5418 /* Set the value of the internal variable $_exitsignal,
5419 which holds the signal uncaught by the inferior. */
5420 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
5421 gdbarch_gdb_signal_to_target (gdbarch,
5422 ecs->ws.value.sig));
5423 }
5424 else
5425 {
5426 /* We don't have access to the target's method used for
5427 converting between signal numbers (GDB's internal
5428 representation <-> target's representation).
5429 Therefore, we cannot do a good job at displaying this
5430 information to the user. It's better to just warn
5431 her about it (if infrun debugging is enabled), and
5432 give up. */
1eb8556f
SM
5433 infrun_debug_printf ("Cannot fill $_exitsignal with the correct "
5434 "signal number.");
0c557179
SDJ
5435 }
5436
76727919 5437 gdb::observers::signal_exited.notify (ecs->ws.value.sig);
0c557179 5438 }
8cf64490 5439
488f131b 5440 gdb_flush (gdb_stdout);
bc1e6c81 5441 target_mourn_inferior (inferior_ptid);
c4464ade 5442 stop_print_frame = false;
22bcd14b 5443 stop_waiting (ecs);
488f131b 5444 return;
c5aa993b 5445
488f131b 5446 case TARGET_WAITKIND_FORKED:
deb3b17b 5447 case TARGET_WAITKIND_VFORKED:
e2d96639
YQ
5448 /* Check whether the inferior is displaced stepping. */
5449 {
00431a78 5450 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
ac7936df 5451 struct gdbarch *gdbarch = regcache->arch ();
c0aba012 5452 inferior *parent_inf = find_inferior_ptid (ecs->target, ecs->ptid);
e2d96639 5453
aeeb758d
JB
5454 /* If this is a fork (child gets its own address space copy)
5455 and some displaced step buffers were in use at the time of
5456 the fork, restore the displaced step buffer bytes in the
5457 child process.
5458
5459 Architectures which support displaced stepping and fork
5460 events must supply an implementation of
5461 gdbarch_displaced_step_restore_all_in_ptid. This is not
5462 enforced during gdbarch validation to support architectures
5463 which support displaced stepping but not forks. */
5464 if (ecs->ws.kind == TARGET_WAITKIND_FORKED
5465 && gdbarch_supports_displaced_stepping (gdbarch))
187b041e
SM
5466 gdbarch_displaced_step_restore_all_in_ptid
5467 (gdbarch, parent_inf, ecs->ws.value.related_pid);
c0aba012
SM
5468
5469 /* If displaced stepping is supported, and thread ecs->ptid is
5470 displaced stepping. */
00431a78 5471 if (displaced_step_in_progress_thread (ecs->event_thread))
e2d96639 5472 {
e2d96639
YQ
5473 struct regcache *child_regcache;
5474 CORE_ADDR parent_pc;
5475
5476 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
5477 indicating that the displaced stepping of syscall instruction
5478 has been done. Perform cleanup for parent process here. Note
5479 that this operation also cleans up the child process for vfork,
5480 because their pages are shared. */
7def77a1 5481 displaced_step_finish (ecs->event_thread, GDB_SIGNAL_TRAP);
c2829269
PA
5482 /* Start a new step-over in another thread if there's one
5483 that needs it. */
5484 start_step_over ();
e2d96639 5485
e2d96639
YQ
5486 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
5487 the child's PC is also within the scratchpad. Set the child's PC
5488 to the parent's PC value, which has already been fixed up.
5489 FIXME: we use the parent's aspace here, although we're touching
5490 the child, because the child hasn't been added to the inferior
5491 list yet at this point. */
5492
5493 child_regcache
5b6d1e4f
PA
5494 = get_thread_arch_aspace_regcache (parent_inf->process_target (),
5495 ecs->ws.value.related_pid,
e2d96639
YQ
5496 gdbarch,
5497 parent_inf->aspace);
5498 /* Read PC value of parent process. */
5499 parent_pc = regcache_read_pc (regcache);
5500
136821d9
SM
5501 displaced_debug_printf ("write child pc from %s to %s",
5502 paddress (gdbarch,
5503 regcache_read_pc (child_regcache)),
5504 paddress (gdbarch, parent_pc));
e2d96639
YQ
5505
5506 regcache_write_pc (child_regcache, parent_pc);
5507 }
5508 }
5509
00431a78 5510 context_switch (ecs);
5a2901d9 5511
b242c3c2
PA
5512 /* Immediately detach breakpoints from the child before there's
5513 any chance of letting the user delete breakpoints from the
5514 breakpoint lists. If we don't do this early, it's easy to
5515 leave left over traps in the child, vis: "break foo; catch
5516 fork; c; <fork>; del; c; <child calls foo>". We only follow
5517 the fork on the last `continue', and by that time the
5518 breakpoint at "foo" is long gone from the breakpoint table.
5519 If we vforked, then we don't need to unpatch here, since both
5520 parent and child are sharing the same memory pages; we'll
5521 need to unpatch at follow/detach time instead to be certain
5522 that new breakpoints added between catchpoint hit time and
5523 vfork follow are detached. */
5524 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
5525 {
b242c3c2
PA
5526 /* This won't actually modify the breakpoint list, but will
5527 physically remove the breakpoints from the child. */
d80ee84f 5528 detach_breakpoints (ecs->ws.value.related_pid);
b242c3c2
PA
5529 }
5530
34b7e8a6 5531 delete_just_stopped_threads_single_step_breakpoints ();
d03285ec 5532
e58b0e63
PA
5533 /* In case the event is caught by a catchpoint, remember that
5534 the event is to be followed at the next resume of the thread,
5535 and not immediately. */
5536 ecs->event_thread->pending_follow = ecs->ws;
5537
1edb66d8
SM
5538 ecs->event_thread->set_stop_pc
5539 (regcache_read_pc (get_thread_regcache (ecs->event_thread)));
675bf4cb 5540
16c381f0 5541 ecs->event_thread->control.stop_bpstat
a01bda52 5542 = bpstat_stop_status (get_current_regcache ()->aspace (),
1edb66d8 5543 ecs->event_thread->stop_pc (),
f2ffa92b 5544 ecs->event_thread, &ecs->ws);
675bf4cb 5545
c65d6b55
PA
5546 if (handle_stop_requested (ecs))
5547 return;
5548
ce12b012
PA
5549 /* If no catchpoint triggered for this, then keep going. Note
5550 that we're interested in knowing the bpstat actually causes a
5551 stop, not just if it may explain the signal. Software
5552 watchpoints, for example, always appear in the bpstat. */
5553 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
04e68871 5554 {
5ab2fbf1 5555 bool follow_child
3e43a32a 5556 = (follow_fork_mode_string == follow_fork_mode_child);
e58b0e63 5557
1edb66d8 5558 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
e58b0e63 5559
5b6d1e4f
PA
5560 process_stratum_target *targ
5561 = ecs->event_thread->inf->process_target ();
5562
5ab2fbf1 5563 bool should_resume = follow_fork ();
e58b0e63 5564
5b6d1e4f
PA
5565 /* Note that one of these may be an invalid pointer,
5566 depending on detach_fork. */
00431a78 5567 thread_info *parent = ecs->event_thread;
5b6d1e4f
PA
5568 thread_info *child
5569 = find_thread_ptid (targ, ecs->ws.value.related_pid);
6c95b8df 5570
a2077e25
PA
5571 /* At this point, the parent is marked running, and the
5572 child is marked stopped. */
5573
5574 /* If not resuming the parent, mark it stopped. */
5575 if (follow_child && !detach_fork && !non_stop && !sched_multi)
00431a78 5576 parent->set_running (false);
a2077e25
PA
5577
5578 /* If resuming the child, mark it running. */
5579 if (follow_child || (!detach_fork && (non_stop || sched_multi)))
00431a78 5580 child->set_running (true);
a2077e25 5581
6c95b8df 5582 /* In non-stop mode, also resume the other branch. */
fbea99ea
PA
5583 if (!detach_fork && (non_stop
5584 || (sched_multi && target_is_non_stop_p ())))
6c95b8df
PA
5585 {
5586 if (follow_child)
5587 switch_to_thread (parent);
5588 else
5589 switch_to_thread (child);
5590
5591 ecs->event_thread = inferior_thread ();
5592 ecs->ptid = inferior_ptid;
5593 keep_going (ecs);
5594 }
5595
5596 if (follow_child)
5597 switch_to_thread (child);
5598 else
5599 switch_to_thread (parent);
5600
e58b0e63
PA
5601 ecs->event_thread = inferior_thread ();
5602 ecs->ptid = inferior_ptid;
5603
5604 if (should_resume)
5605 keep_going (ecs);
5606 else
22bcd14b 5607 stop_waiting (ecs);
04e68871
DJ
5608 return;
5609 }
94c57d6a
PA
5610 process_event_stop_test (ecs);
5611 return;
488f131b 5612
6c95b8df
PA
5613 case TARGET_WAITKIND_VFORK_DONE:
5614 /* Done with the shared memory region. Re-insert breakpoints in
5615 the parent, and keep going. */
5616
00431a78 5617 context_switch (ecs);
6c95b8df
PA
5618
5619 current_inferior ()->waiting_for_vfork_done = 0;
56710373 5620 current_inferior ()->pspace->breakpoints_not_allowed = 0;
c65d6b55
PA
5621
5622 if (handle_stop_requested (ecs))
5623 return;
5624
6c95b8df
PA
5625 /* This also takes care of reinserting breakpoints in the
5626 previously locked inferior. */
5627 keep_going (ecs);
5628 return;
5629
488f131b 5630 case TARGET_WAITKIND_EXECD:
488f131b 5631
cbd2b4e3
PA
5632 /* Note we can't read registers yet (the stop_pc), because we
5633 don't yet know the inferior's post-exec architecture.
5634 'stop_pc' is explicitly read below instead. */
00431a78 5635 switch_to_thread_no_regs (ecs->event_thread);
5a2901d9 5636
6c95b8df
PA
5637 /* Do whatever is necessary to the parent branch of the vfork. */
5638 handle_vfork_child_exec_or_exit (1);
5639
795e548f 5640 /* This causes the eventpoints and symbol table to be reset.
dda83cd7
SM
5641 Must do this now, before trying to determine whether to
5642 stop. */
71b43ef8 5643 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
795e548f 5644
17d8546e
DB
5645 /* In follow_exec we may have deleted the original thread and
5646 created a new one. Make sure that the event thread is the
5647 execd thread for that case (this is a nop otherwise). */
5648 ecs->event_thread = inferior_thread ();
5649
1edb66d8
SM
5650 ecs->event_thread->set_stop_pc
5651 (regcache_read_pc (get_thread_regcache (ecs->event_thread)));
ecdc3a72 5652
16c381f0 5653 ecs->event_thread->control.stop_bpstat
a01bda52 5654 = bpstat_stop_status (get_current_regcache ()->aspace (),
1edb66d8 5655 ecs->event_thread->stop_pc (),
f2ffa92b 5656 ecs->event_thread, &ecs->ws);
795e548f 5657
71b43ef8
PA
5658 /* Note that this may be referenced from inside
5659 bpstat_stop_status above, through inferior_has_execd. */
5660 xfree (ecs->ws.value.execd_pathname);
5661 ecs->ws.value.execd_pathname = NULL;
5662
c65d6b55
PA
5663 if (handle_stop_requested (ecs))
5664 return;
5665
04e68871 5666 /* If no catchpoint triggered for this, then keep going. */
ce12b012 5667 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
04e68871 5668 {
1edb66d8 5669 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
04e68871
DJ
5670 keep_going (ecs);
5671 return;
5672 }
94c57d6a
PA
5673 process_event_stop_test (ecs);
5674 return;
488f131b 5675
b4dc5ffa 5676 /* Be careful not to try to gather much state about a thread
dda83cd7 5677 that's in a syscall. It's frequently a losing proposition. */
488f131b 5678 case TARGET_WAITKIND_SYSCALL_ENTRY:
1777feb0 5679 /* Getting the current syscall number. */
94c57d6a
PA
5680 if (handle_syscall_event (ecs) == 0)
5681 process_event_stop_test (ecs);
5682 return;
c906108c 5683
488f131b 5684 /* Before examining the threads further, step this thread to
dda83cd7
SM
5685 get it entirely out of the syscall. (We get notice of the
5686 event when the thread is just on the verge of exiting a
5687 syscall. Stepping one instruction seems to get it back
5688 into user code.) */
488f131b 5689 case TARGET_WAITKIND_SYSCALL_RETURN:
94c57d6a
PA
5690 if (handle_syscall_event (ecs) == 0)
5691 process_event_stop_test (ecs);
5692 return;
c906108c 5693
488f131b 5694 case TARGET_WAITKIND_STOPPED:
4f5d7f63
PA
5695 handle_signal_stop (ecs);
5696 return;
c906108c 5697
b2175913
MS
5698 case TARGET_WAITKIND_NO_HISTORY:
5699 /* Reverse execution: target ran out of history info. */
eab402df 5700
d1988021 5701 /* Switch to the stopped thread. */
00431a78 5702 context_switch (ecs);
1eb8556f 5703 infrun_debug_printf ("stopped");
d1988021 5704
34b7e8a6 5705 delete_just_stopped_threads_single_step_breakpoints ();
1edb66d8
SM
5706 ecs->event_thread->set_stop_pc
5707 (regcache_read_pc (get_thread_regcache (inferior_thread ())));
c65d6b55
PA
5708
5709 if (handle_stop_requested (ecs))
5710 return;
5711
76727919 5712 gdb::observers::no_history.notify ();
22bcd14b 5713 stop_waiting (ecs);
b2175913 5714 return;
488f131b 5715 }
4f5d7f63
PA
5716}
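/* Editorial note: handle_inferior_event is the central dispatch of
   this file -- each target_waitstatus kind above either ends in
   stop_waiting (present the stop to the user), in keep_going/resume
   plus prepare_to_wait (stay in the event loop), or falls through to
   handle_signal_stop / process_event_stop_test for the breakpoint and
   stepping logic.  */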
5717
372316f1
PA
5718/* Restart threads back to what they were trying to do back when we
5719 paused them for an in-line step-over. The EVENT_THREAD thread is
5720 ignored. */
4d9d9d04
PA
5721
5722static void
372316f1
PA
5723restart_threads (struct thread_info *event_thread)
5724{
372316f1
PA
5725 /* In case the instruction just stepped spawned a new thread. */
5726 update_thread_list ();
5727
08036331 5728 for (thread_info *tp : all_non_exited_threads ())
372316f1 5729 {
ac7d717c
PA
5730 if (tp->inf->detaching)
5731 {
5732 infrun_debug_printf ("restart threads: [%s] inferior detaching",
5733 target_pid_to_str (tp->ptid).c_str ());
5734 continue;
5735 }
5736
f3f8ece4
PA
5737 switch_to_thread_no_regs (tp);
5738
372316f1
PA
5739 if (tp == event_thread)
5740 {
1eb8556f
SM
5741 infrun_debug_printf ("restart threads: [%s] is event thread",
5742 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5743 continue;
5744 }
5745
5746 if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
5747 {
1eb8556f
SM
5748 infrun_debug_printf ("restart threads: [%s] not meant to be running",
5749 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5750 continue;
5751 }
5752
7846f3aa 5753 if (tp->resumed ())
372316f1 5754 {
1eb8556f
SM
5755 infrun_debug_printf ("restart threads: [%s] resumed",
5756 target_pid_to_str (tp->ptid).c_str ());
1edb66d8 5757 gdb_assert (tp->executing || tp->has_pending_waitstatus ());
372316f1
PA
5758 continue;
5759 }
5760
5761 if (thread_is_in_step_over_chain (tp))
5762 {
1eb8556f
SM
5763 infrun_debug_printf ("restart threads: [%s] needs step-over",
5764 target_pid_to_str (tp->ptid).c_str ());
7846f3aa 5765 gdb_assert (!tp->resumed ());
372316f1
PA
5766 continue;
5767 }
5768
5769
1edb66d8 5770 if (tp->has_pending_waitstatus ())
372316f1 5771 {
1eb8556f
SM
5772 infrun_debug_printf ("restart threads: [%s] has pending status",
5773 target_pid_to_str (tp->ptid).c_str ());
7846f3aa 5774 tp->set_resumed (true);
372316f1
PA
5775 continue;
5776 }
5777
c65d6b55
PA
5778 gdb_assert (!tp->stop_requested);
5779
372316f1
PA
5780 /* If some thread needs to start a step-over at this point, it
5781 should still be in the step-over queue, and thus skipped
5782 above. */
5783 if (thread_still_needs_step_over (tp))
5784 {
5785 internal_error (__FILE__, __LINE__,
5786 "thread [%s] needs a step-over, but not in "
5787 "step-over queue\n",
a068643d 5788 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5789 }
5790
5791 if (currently_stepping (tp))
5792 {
1eb8556f
SM
5793 infrun_debug_printf ("restart threads: [%s] was stepping",
5794 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5795 keep_going_stepped_thread (tp);
5796 }
5797 else
5798 {
5799 struct execution_control_state ecss;
5800 struct execution_control_state *ecs = &ecss;
5801
1eb8556f
SM
5802 infrun_debug_printf ("restart threads: [%s] continuing",
5803 target_pid_to_str (tp->ptid).c_str ());
372316f1 5804 reset_ecs (ecs, tp);
00431a78 5805 switch_to_thread (tp);
372316f1
PA
5806 keep_going_pass_signal (ecs);
5807 }
5808 }
5809}
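/* Editorial note: restart_threads only resumes threads that were
   paused for the benefit of an in-line step-over -- anything exited,
   detaching, already resumed, queued for its own step-over, or holding
   a pending event is skipped, which is why each branch above logs its
   reason before continuing.  */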
5810
5811/* Callback for iterate_over_threads. Find a resumed thread that has
5812 a pending waitstatus. */
5813
5814static int
5815resumed_thread_with_pending_status (struct thread_info *tp,
5816 void *arg)
5817{
1edb66d8 5818 return tp->resumed () && tp->has_pending_waitstatus ();
372316f1
PA
5819}
5820
5821/* Called when we get an event that may finish an in-line or
5822 out-of-line (displaced stepping) step-over started previously.
5823 Return true if the event is processed and we should go back to the
5824 event loop; false if the caller should continue processing the
5825 event. */
5826
5827static int
4d9d9d04
PA
5828finish_step_over (struct execution_control_state *ecs)
5829{
1edb66d8 5830 displaced_step_finish (ecs->event_thread, ecs->event_thread->stop_signal ());
4d9d9d04 5831
c4464ade 5832 bool had_step_over_info = step_over_info_valid_p ();
372316f1
PA
5833
5834 if (had_step_over_info)
4d9d9d04
PA
5835 {
5836 /* If we're stepping over a breakpoint with all threads locked,
5837 then only the thread that was stepped should be reporting
5838 back an event. */
5839 gdb_assert (ecs->event_thread->control.trap_expected);
5840
c65d6b55 5841 clear_step_over_info ();
4d9d9d04
PA
5842 }
5843
fbea99ea 5844 if (!target_is_non_stop_p ())
372316f1 5845 return 0;
4d9d9d04
PA
5846
5847 /* Start a new step-over in another thread if there's one that
5848 needs it. */
5849 start_step_over ();
372316f1
PA
5850
5851 /* If we were stepping over a breakpoint before, and haven't started
5852 a new in-line step-over sequence, then restart all other threads
5853 (except the event thread). We can't do this in all-stop, as then
5854 e.g., we wouldn't be able to issue any other remote packet until
5855 these other threads stop. */
5856 if (had_step_over_info && !step_over_info_valid_p ())
5857 {
5858 struct thread_info *pending;
5859
5860 /* If we only have threads with pending statuses, the restart
5861 below won't restart any thread and so nothing re-inserts the
5862 breakpoint we just stepped over. But we need it inserted
5863 when we later process the pending events, otherwise if
5864 another thread has a pending event for this breakpoint too,
5865 we'd discard its event (because the breakpoint that
5866 originally caused the event was no longer inserted). */
00431a78 5867 context_switch (ecs);
372316f1
PA
5868 insert_breakpoints ();
5869
5870 restart_threads (ecs->event_thread);
5871
5872 /* If we have events pending, go through handle_inferior_event
5873 again, picking up a pending event at random. This avoids
5874 thread starvation. */
5875
5876 /* But not if we just stepped over a watchpoint in order to let
5877 the instruction execute so we can evaluate its expression.
5878 The set of watchpoints that triggered is recorded in the
5879 breakpoint objects themselves (see bp->watchpoint_triggered).
5880 If we processed another event first, that other event could
5881 clobber this info. */
5882 if (ecs->event_thread->stepping_over_watchpoint)
5883 return 0;
5884
5885 pending = iterate_over_threads (resumed_thread_with_pending_status,
5886 NULL);
5887 if (pending != NULL)
5888 {
5889 struct thread_info *tp = ecs->event_thread;
5890 struct regcache *regcache;
5891
1eb8556f
SM
5892 infrun_debug_printf ("found resumed threads with "
5893 "pending events, saving status");
372316f1
PA
5894
5895 gdb_assert (pending != tp);
5896
5897 /* Record the event thread's event for later. */
5898 save_waitstatus (tp, &ecs->ws);
5899 /* This was cleared early, by handle_inferior_event. Set it
5900 so this pending event is considered by
5901 do_target_wait. */
7846f3aa 5902 tp->set_resumed (true);
372316f1
PA
5903
5904 gdb_assert (!tp->executing);
5905
00431a78 5906 regcache = get_thread_regcache (tp);
1edb66d8 5907 tp->set_stop_pc (regcache_read_pc (regcache));
372316f1 5908
1eb8556f
SM
5909 infrun_debug_printf ("saved stop_pc=%s for %s "
5910 "(currently_stepping=%d)",
1edb66d8 5911 paddress (target_gdbarch (), tp->stop_pc ()),
1eb8556f
SM
5912 target_pid_to_str (tp->ptid).c_str (),
5913 currently_stepping (tp));
372316f1
PA
5914
5915 /* This in-line step-over finished; clear this so we won't
5916 start a new one. This is what handle_signal_stop would
5917 do, if we returned false. */
5918 tp->stepping_over_breakpoint = 0;
5919
5920 /* Wake up the event loop again. */
5921 mark_async_event_handler (infrun_async_inferior_event_token);
5922
5923 prepare_to_wait (ecs);
5924 return 1;
5925 }
5926 }
5927
5928 return 0;
4d9d9d04
PA
5929}
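/* Editorial note: the pending-events handling at the end of
   finish_step_over avoids thread starvation -- when other restarted
   threads already have stashed waitstatuses, the current event is
   itself saved on its thread (save_waitstatus + set_resumed (true))
   and control returns to the event loop, so do_target_wait later picks
   the next pending event at random instead of always favoring the
   thread that just finished the step-over.  */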
5930
4f5d7f63
PA
5931/* Come here when the program has stopped with a signal. */
5932
5933static void
5934handle_signal_stop (struct execution_control_state *ecs)
5935{
5936 struct frame_info *frame;
5937 struct gdbarch *gdbarch;
5938 int stopped_by_watchpoint;
5939 enum stop_kind stop_soon;
5940 int random_signal;
c906108c 5941
f0407826
DE
5942 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
5943
1edb66d8 5944 ecs->event_thread->set_stop_signal (ecs->ws.value.sig);
c65d6b55 5945
f0407826
DE
5946 /* Do we need to clean up the state of a thread that has
5947 completed a displaced single-step? (Doing so usually affects
5948 the PC, so do it here, before we set stop_pc.) */
372316f1
PA
5949 if (finish_step_over (ecs))
5950 return;
f0407826
DE
5951
5952 /* If we either finished a single-step or hit a breakpoint, but
5953 the user wanted this thread to be stopped, pretend we got a
5954 SIG0 (generic unsignaled stop). */
5955 if (ecs->event_thread->stop_requested
1edb66d8
SM
5956 && ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
5957 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
237fc4c9 5958
1edb66d8
SM
5959 ecs->event_thread->set_stop_pc
5960 (regcache_read_pc (get_thread_regcache (ecs->event_thread)));
488f131b 5961
2ab76a18
PA
5962 context_switch (ecs);
5963
5964 if (deprecated_context_hook)
5965 deprecated_context_hook (ecs->event_thread->global_num);
5966
527159b7 5967 if (debug_infrun)
237fc4c9 5968 {
00431a78 5969 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
b926417a 5970 struct gdbarch *reg_gdbarch = regcache->arch ();
7f82dfc7 5971
1edb66d8
SM
5972 infrun_debug_printf
5973 ("stop_pc=%s", paddress (reg_gdbarch, ecs->event_thread->stop_pc ()));
d92524f1 5974 if (target_stopped_by_watchpoint ())
237fc4c9 5975 {
dda83cd7 5976 CORE_ADDR addr;
abbb1732 5977
1eb8556f 5978 infrun_debug_printf ("stopped by watchpoint");
237fc4c9 5979
328d42d8
SM
5980 if (target_stopped_data_address (current_inferior ()->top_target (),
5981 &addr))
1eb8556f 5982 infrun_debug_printf ("stopped data address=%s",
dda83cd7
SM
5983 paddress (reg_gdbarch, addr));
5984 else
1eb8556f 5985 infrun_debug_printf ("(no data address available)");
237fc4c9
PA
5986 }
5987 }
527159b7 5988
36fa8042
PA
5989 /* This is originated from start_remote(), start_inferior() and
5990 shared libraries hook functions. */
00431a78 5991 stop_soon = get_inferior_stop_soon (ecs);
36fa8042
PA
5992 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
5993 {
1eb8556f 5994 infrun_debug_printf ("quietly stopped");
c4464ade 5995 stop_print_frame = true;
22bcd14b 5996 stop_waiting (ecs);
36fa8042
PA
5997 return;
5998 }
5999
36fa8042
PA
6000 /* This originates from attach_command(). We need to overwrite
6001 the stop_signal here, because some kernels don't ignore a
6002 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
6003 See more comments in inferior.h. On the other hand, if we
6004 get a non-SIGSTOP, report it to the user - assume the backend
6005 will handle the SIGSTOP if it should show up later.
6006
6007 Also consider that the attach is complete when we see a
6008 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
6009 target extended-remote report it instead of a SIGSTOP
6010 (e.g. gdbserver). We already rely on SIGTRAP being our
6011 signal, so this is no exception.
6012
6013 Also consider that the attach is complete when we see a
6014 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
6015 the target to stop all threads of the inferior, in case the
6016 low level attach operation doesn't stop them implicitly. If
6017 they weren't stopped implicitly, then the stub will report a
6018 GDB_SIGNAL_0, meaning: stopped for no particular reason
6019 other than GDB's request. */
6020 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
1edb66d8
SM
6021 && (ecs->event_thread->stop_signal () == GDB_SIGNAL_STOP
6022 || ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6023 || ecs->event_thread->stop_signal () == GDB_SIGNAL_0))
36fa8042 6024 {
c4464ade 6025 stop_print_frame = true;
22bcd14b 6026 stop_waiting (ecs);
1edb66d8 6027 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
36fa8042
PA
6028 return;
6029 }
6030
568d6575
UW
6031 /* At this point, get hold of the now-current thread's frame. */
6032 frame = get_current_frame ();
6033 gdbarch = get_frame_arch (frame);
6034
2adfaa28 6035 /* Pull the single step breakpoints out of the target. */
1edb66d8 6036 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
488f131b 6037 {
af48d08f 6038 struct regcache *regcache;
af48d08f 6039 CORE_ADDR pc;
2adfaa28 6040
00431a78 6041 regcache = get_thread_regcache (ecs->event_thread);
8b86c959
YQ
6042 const address_space *aspace = regcache->aspace ();
6043
af48d08f 6044 pc = regcache_read_pc (regcache);
34b7e8a6 6045
af48d08f
PA
6046 /* However, before doing so, if this single-step breakpoint was
6047 actually for another thread, set this thread up for moving
6048 past it. */
6049 if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
6050 aspace, pc))
6051 {
6052 if (single_step_breakpoint_inserted_here_p (aspace, pc))
2adfaa28 6053 {
1eb8556f
SM
6054 infrun_debug_printf ("[%s] hit another thread's single-step "
6055 "breakpoint",
6056 target_pid_to_str (ecs->ptid).c_str ());
af48d08f
PA
6057 ecs->hit_singlestep_breakpoint = 1;
6058 }
6059 }
6060 else
6061 {
1eb8556f
SM
6062 infrun_debug_printf ("[%s] hit its single-step breakpoint",
6063 target_pid_to_str (ecs->ptid).c_str ());
2adfaa28 6064 }
488f131b 6065 }
af48d08f 6066 delete_just_stopped_threads_single_step_breakpoints ();
c906108c 6067
1edb66d8 6068 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
963f9c80
PA
6069 && ecs->event_thread->control.trap_expected
6070 && ecs->event_thread->stepping_over_watchpoint)
d983da9c
DJ
6071 stopped_by_watchpoint = 0;
6072 else
6073 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
6074
6075 /* If necessary, step over this watchpoint. We'll be back to display
6076 it in a moment. */
6077 if (stopped_by_watchpoint
9aed480c 6078 && (target_have_steppable_watchpoint ()
568d6575 6079 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
488f131b 6080 {
488f131b 6081 /* At this point, we are stopped at an instruction which has
dda83cd7
SM
6082 attempted to write to a piece of memory under control of
6083 a watchpoint. The instruction hasn't actually executed
6084 yet. If we were to evaluate the watchpoint expression
6085 now, we would get the old value, and therefore no change
6086 would seem to have occurred.
6087
6088 In order to make watchpoints work `right', we really need
6089 to complete the memory write, and then evaluate the
6090 watchpoint expression. We do this by single-stepping the
d983da9c
DJ
6091 target.
6092
7f89fd65 6093 It may not be necessary to disable the watchpoint to step over
d983da9c
DJ
6094 it. For example, the PA can (with some kernel cooperation)
6095 single step over a watchpoint without disabling the watchpoint.
6096
6097 It is far more common to need to disable a watchpoint to step
6098 the inferior over it. If we have non-steppable watchpoints,
6099 we must disable the current watchpoint; it's simplest to
963f9c80
PA
6100 disable all watchpoints.
6101
6102 Any breakpoint at PC must also be stepped over -- if there's
6103 one, it will have already triggered before the watchpoint
6104 triggered, and we either already reported it to the user, or
6105 it didn't cause a stop and we called keep_going. In either
6106 case, if there was a breakpoint at PC, we must be trying to
6107 step past it. */
6108 ecs->event_thread->stepping_over_watchpoint = 1;
6109 keep_going (ecs);
488f131b
JB
6110 return;
6111 }
6112
4e1c45ea 6113 ecs->event_thread->stepping_over_breakpoint = 0;
963f9c80 6114 ecs->event_thread->stepping_over_watchpoint = 0;
16c381f0
JK
6115 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
6116 ecs->event_thread->control.stop_step = 0;
c4464ade 6117 stop_print_frame = true;
488f131b 6118 stopped_by_random_signal = 0;
ddfe970e 6119 bpstat stop_chain = NULL;
488f131b 6120
edb3359d
DJ
6121 /* Hide inlined functions starting here, unless we just performed stepi or
6122 nexti. After stepi and nexti, always show the innermost frame (not any
6123 inline function call sites). */
16c381f0 6124 if (ecs->event_thread->control.step_range_end != 1)
0574c78f 6125 {
00431a78
PA
6126 const address_space *aspace
6127 = get_thread_regcache (ecs->event_thread)->aspace ();
0574c78f
GB
6128
6129 /* skip_inline_frames is expensive, so we avoid it if we can
6130 determine that the address is one where functions cannot have
6131 been inlined. This improves performance with inferiors that
6132 load a lot of shared libraries, because the solib event
6133 breakpoint is defined as the address of a function (i.e. not
6134 inline). Note that we have to check the previous PC as well
6135 as the current one to catch cases when we have just
6136 single-stepped off a breakpoint prior to reinstating it.
6137 Note that we're assuming that the code we single-step to is
6138 not inline, but that's not definitive: there's nothing
6139 preventing the event breakpoint function from containing
6140 inlined code, and the single-step ending up there. If the
6141 user had set a breakpoint on that inlined code, the missing
6142 skip_inline_frames call would break things. Fortunately
6143 that's an extremely unlikely scenario. */
f2ffa92b 6144 if (!pc_at_non_inline_function (aspace,
1edb66d8 6145 ecs->event_thread->stop_pc (),
f2ffa92b 6146 &ecs->ws)
1edb66d8 6147 && !(ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
a210c238
MR
6148 && ecs->event_thread->control.trap_expected
6149 && pc_at_non_inline_function (aspace,
6150 ecs->event_thread->prev_pc,
09ac7c10 6151 &ecs->ws)))
1c5a993e 6152 {
f2ffa92b 6153 stop_chain = build_bpstat_chain (aspace,
1edb66d8 6154 ecs->event_thread->stop_pc (),
f2ffa92b 6155 &ecs->ws);
00431a78 6156 skip_inline_frames (ecs->event_thread, stop_chain);
1c5a993e
MR
6157
6158 /* Re-fetch current thread's frame in case that invalidated
6159 the frame cache. */
6160 frame = get_current_frame ();
6161 gdbarch = get_frame_arch (frame);
6162 }
0574c78f 6163 }
edb3359d 6164
1edb66d8 6165 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
16c381f0 6166 && ecs->event_thread->control.trap_expected
568d6575 6167 && gdbarch_single_step_through_delay_p (gdbarch)
4e1c45ea 6168 && currently_stepping (ecs->event_thread))
3352ef37 6169 {
b50d7442 6170 /* We're trying to step off a breakpoint. Turns out that we're
3352ef37 6171 also on an instruction that needs to be stepped multiple
1777feb0 6172 times before it has fully executed. E.g., architectures
3352ef37
AC
6173 with a delay slot. It needs to be stepped twice, once for
6174 the instruction and once for the delay slot. */
6175 int step_through_delay
568d6575 6176 = gdbarch_single_step_through_delay (gdbarch, frame);
abbb1732 6177
1eb8556f
SM
6178 if (step_through_delay)
6179 infrun_debug_printf ("step through delay");
6180
16c381f0
JK
6181 if (ecs->event_thread->control.step_range_end == 0
6182 && step_through_delay)
3352ef37
AC
6183 {
6184 /* The user issued a continue when stopped at a breakpoint.
6185 Set up for another trap and get out of here. */
dda83cd7
SM
6186 ecs->event_thread->stepping_over_breakpoint = 1;
6187 keep_going (ecs);
6188 return;
3352ef37
AC
6189 }
6190 else if (step_through_delay)
6191 {
6192 /* The user issued a step when stopped at a breakpoint.
6193 Maybe we should stop, maybe we should not - the delay
6194 slot *might* correspond to a line of source. In any
ca67fcb8
VP
6195 case, don't decide that here, just set
6196 ecs->stepping_over_breakpoint, making sure we
6197 single-step again before breakpoints are re-inserted. */
4e1c45ea 6198 ecs->event_thread->stepping_over_breakpoint = 1;
3352ef37
AC
6199 }
6200 }
6201
ab04a2af
TT
6202 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
6203 handles this event. */
6204 ecs->event_thread->control.stop_bpstat
a01bda52 6205 = bpstat_stop_status (get_current_regcache ()->aspace (),
1edb66d8 6206 ecs->event_thread->stop_pc (),
f2ffa92b 6207 ecs->event_thread, &ecs->ws, stop_chain);
db82e815 6208
ab04a2af
TT
 6209 /* The following is in case a breakpoint condition called a
 6210 function. */
c4464ade 6211 stop_print_frame = true;
73dd234f 6212
ab04a2af
TT
6213 /* This is where we handle "moribund" watchpoints. Unlike
 6214 software breakpoint traps, hardware watchpoint traps are
6215 always distinguishable from random traps. If no high-level
6216 watchpoint is associated with the reported stop data address
6217 anymore, then the bpstat does not explain the signal ---
6218 simply make sure to ignore it if `stopped_by_watchpoint' is
6219 set. */
6220
1edb66d8 6221 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
47591c29 6222 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
427cd150 6223 GDB_SIGNAL_TRAP)
ab04a2af 6224 && stopped_by_watchpoint)
1eb8556f
SM
6225 {
6226 infrun_debug_printf ("no user watchpoint explains watchpoint SIGTRAP, "
6227 "ignoring");
6228 }
73dd234f 6229
bac7d97b 6230 /* NOTE: cagney/2003-03-29: These checks for a random signal
ab04a2af
TT
6231 at one stage in the past included checks for an inferior
6232 function call's call dummy's return breakpoint. The original
6233 comment, that went with the test, read:
03cebad2 6234
ab04a2af
TT
6235 ``End of a stack dummy. Some systems (e.g. Sony news) give
6236 another signal besides SIGTRAP, so check here as well as
6237 above.''
73dd234f 6238
ab04a2af
TT
 6239 If someone ever tries to get call dummies on a
6240 non-executable stack to work (where the target would stop
6241 with something like a SIGSEGV), then those tests might need
6242 to be re-instated. Given, however, that the tests were only
6243 enabled when momentary breakpoints were not being used, I
6244 suspect that it won't be the case.
488f131b 6245
ab04a2af
TT
6246 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
6247 be necessary for call dummies on a non-executable stack on
6248 SPARC. */
488f131b 6249
bac7d97b 6250 /* See if the breakpoints module can explain the signal. */
47591c29
PA
6251 random_signal
6252 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
1edb66d8 6253 ecs->event_thread->stop_signal ());
bac7d97b 6254
1cf4d951
PA
6255 /* Maybe this was a trap for a software breakpoint that has since
6256 been removed. */
6257 if (random_signal && target_stopped_by_sw_breakpoint ())
6258 {
5133a315 6259 if (gdbarch_program_breakpoint_here_p (gdbarch,
1edb66d8 6260 ecs->event_thread->stop_pc ()))
1cf4d951
PA
6261 {
6262 struct regcache *regcache;
6263 int decr_pc;
6264
6265 /* Re-adjust PC to what the program would see if GDB was not
6266 debugging it. */
00431a78 6267 regcache = get_thread_regcache (ecs->event_thread);
527a273a 6268 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
1cf4d951
PA
6269 if (decr_pc != 0)
6270 {
07036511
TT
6271 gdb::optional<scoped_restore_tmpl<int>>
6272 restore_operation_disable;
1cf4d951
PA
6273
6274 if (record_full_is_used ())
07036511
TT
6275 restore_operation_disable.emplace
6276 (record_full_gdb_operation_disable_set ());
1cf4d951 6277
f2ffa92b 6278 regcache_write_pc (regcache,
1edb66d8 6279 ecs->event_thread->stop_pc () + decr_pc);
1cf4d951
PA
6280 }
6281 }
6282 else
6283 {
6284 /* A delayed software breakpoint event. Ignore the trap. */
1eb8556f 6285 infrun_debug_printf ("delayed software breakpoint trap, ignoring");
1cf4d951
PA
6286 random_signal = 0;
6287 }
6288 }
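/* Illustrative example (not part of the original infrun.c): on x86
   the breakpoint instruction is the one-byte "int3", and the trap is
   reported with the PC pointing just past it, so
   gdbarch_decr_pc_after_break () returns 1 there.  The adjustment
   above rewinds the PC by that amount so a program that plants its
   own int3 observes the same PC it would see when not being
   debugged.  */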
6289
6290 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
6291 has since been removed. */
6292 if (random_signal && target_stopped_by_hw_breakpoint ())
6293 {
6294 /* A delayed hardware breakpoint event. Ignore the trap. */
1eb8556f
SM
6295 infrun_debug_printf ("delayed hardware breakpoint/watchpoint "
6296 "trap, ignoring");
1cf4d951
PA
6297 random_signal = 0;
6298 }
6299
bac7d97b
PA
6300 /* If not, perhaps stepping/nexting can. */
6301 if (random_signal)
1edb66d8 6302 random_signal = !(ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
bac7d97b 6303 && currently_stepping (ecs->event_thread));
ab04a2af 6304
2adfaa28
PA
6305 /* Perhaps the thread hit a single-step breakpoint of _another_
6306 thread. Single-step breakpoints are transparent to the
6307 breakpoints module. */
6308 if (random_signal)
6309 random_signal = !ecs->hit_singlestep_breakpoint;
6310
bac7d97b
PA
6311 /* No? Perhaps we got a moribund watchpoint. */
6312 if (random_signal)
6313 random_signal = !stopped_by_watchpoint;
ab04a2af 6314
c65d6b55
PA
6315 /* Always stop if the user explicitly requested this thread to
6316 remain stopped. */
6317 if (ecs->event_thread->stop_requested)
6318 {
6319 random_signal = 1;
1eb8556f 6320 infrun_debug_printf ("user-requested stop");
c65d6b55
PA
6321 }
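/* Recap (an illustrative note, not part of the original infrun.c):
   after the checks above, random_signal is set when nothing explains
   the stop -- e.g. the inferior receives a SIGUSR1 sent by another
   process: no bpstat entry claims it, it is not a delayed software
   or hardware breakpoint trap, not another thread's single-step
   breakpoint, and not a watchpoint hit -- and it is also forced on
   when the user explicitly requested that this thread stay stopped.
   The signal-table handling below then decides whether to stop,
   print, and/or pass such a signal.  */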
6322
488f131b
JB
6323 /* For the program's own signals, act according to
6324 the signal handling tables. */
6325
ce12b012 6326 if (random_signal)
488f131b
JB
6327 {
6328 /* Signal not for debugging purposes. */
1edb66d8 6329 enum gdb_signal stop_signal = ecs->event_thread->stop_signal ();
488f131b 6330
1eb8556f
SM
6331 infrun_debug_printf ("random signal (%s)",
6332 gdb_signal_to_symbol_string (stop_signal));
527159b7 6333
488f131b
JB
6334 stopped_by_random_signal = 1;
6335
252fbfc8
PA
6336 /* Always stop on signals if we're either just gaining control
6337 of the program, or the user explicitly requested this thread
6338 to remain stopped. */
d6b48e9c 6339 if (stop_soon != NO_STOP_QUIETLY
252fbfc8 6340 || ecs->event_thread->stop_requested
1edb66d8 6341 || signal_stop_state (ecs->event_thread->stop_signal ()))
488f131b 6342 {
22bcd14b 6343 stop_waiting (ecs);
488f131b
JB
6344 return;
6345 }
b57bacec
PA
6346
6347 /* Notify observers the signal has "handle print" set. Note we
6348 returned early above if stopping; normal_stop handles the
6349 printing in that case. */
1edb66d8 6350 if (signal_print[ecs->event_thread->stop_signal ()])
b57bacec
PA
6351 {
6352 /* The signal table tells us to print about this signal. */
223ffa71 6353 target_terminal::ours_for_output ();
1edb66d8 6354 gdb::observers::signal_received.notify (ecs->event_thread->stop_signal ());
223ffa71 6355 target_terminal::inferior ();
b57bacec 6356 }
488f131b
JB
6357
6358 /* Clear the signal if it should not be passed. */
1edb66d8
SM
6359 if (signal_program[ecs->event_thread->stop_signal ()] == 0)
6360 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
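/* Illustrative example (not part of the original infrun.c): the
   signal_stop, signal_print and signal_program tables consulted
   above are driven by the user's "handle" command.  For instance

       (gdb) handle SIGUSR1 nostop print pass

   clears signal_stop for SIGUSR1, sets signal_print, and leaves
   signal_program set, so this code announces the signal, does not
   stop, and keeps the signal so it is delivered to the inferior
   when it is next resumed.  */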
488f131b 6361
1edb66d8 6362 if (ecs->event_thread->prev_pc == ecs->event_thread->stop_pc ()
16c381f0 6363 && ecs->event_thread->control.trap_expected
8358c15c 6364 && ecs->event_thread->control.step_resume_breakpoint == NULL)
68f53502
AC
6365 {
6366 /* We were just starting a new sequence, attempting to
6367 single-step off of a breakpoint and expecting a SIGTRAP.
237fc4c9 6368 Instead this signal arrives. This signal will take us out
68f53502
AC
6369 of the stepping range so GDB needs to remember to, when
6370 the signal handler returns, resume stepping off that
6371 breakpoint. */
6372 /* To simplify things, "continue" is forced to use the same
6373 code paths as single-step - set a breakpoint at the
6374 signal return address and then, once hit, step off that
6375 breakpoint. */
1eb8556f 6376 infrun_debug_printf ("signal arrived while stepping over breakpoint");
d3169d93 6377
2c03e5be 6378 insert_hp_step_resume_breakpoint_at_frame (frame);
4e1c45ea 6379 ecs->event_thread->step_after_step_resume_breakpoint = 1;
2455069d
UW
6380 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6381 ecs->event_thread->control.trap_expected = 0;
d137e6dc
PA
6382
6383 /* If we were nexting/stepping some other thread, switch to
6384 it, so that we don't continue it, losing control. */
6385 if (!switch_back_to_stepped_thread (ecs))
6386 keep_going (ecs);
9d799f85 6387 return;
68f53502 6388 }
9d799f85 6389
1edb66d8
SM
6390 if (ecs->event_thread->stop_signal () != GDB_SIGNAL_0
6391 && (pc_in_thread_step_range (ecs->event_thread->stop_pc (),
f2ffa92b 6392 ecs->event_thread)
e5f8a7cc 6393 || ecs->event_thread->control.step_range_end == 1)
edb3359d 6394 && frame_id_eq (get_stack_frame_id (frame),
16c381f0 6395 ecs->event_thread->control.step_stack_frame_id)
8358c15c 6396 && ecs->event_thread->control.step_resume_breakpoint == NULL)
d303a6c7
AC
6397 {
6398 /* The inferior is about to take a signal that will take it
6399 out of the single step range. Set a breakpoint at the
6400 current PC (which is presumably where the signal handler
6401 will eventually return) and then allow the inferior to
6402 run free.
6403
6404 Note that this is only needed for a signal delivered
6405 while in the single-step range. Nested signals aren't a
6406 problem as they eventually all return. */
1eb8556f 6407 infrun_debug_printf ("signal may take us out of single-step range");
237fc4c9 6408
372316f1 6409 clear_step_over_info ();
2c03e5be 6410 insert_hp_step_resume_breakpoint_at_frame (frame);
e5f8a7cc 6411 ecs->event_thread->step_after_step_resume_breakpoint = 1;
2455069d
UW
6412 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6413 ecs->event_thread->control.trap_expected = 0;
9d799f85
AC
6414 keep_going (ecs);
6415 return;
d303a6c7 6416 }
9d799f85 6417
85102364 6418 /* Note: step_resume_breakpoint may be non-NULL. This occurs
9d799f85
AC
6419 when either there's a nested signal, or when there's a
6420 pending signal enabled just as the signal handler returns
6421 (leaving the inferior at the step-resume-breakpoint without
6422 actually executing it). Either way continue until the
6423 breakpoint is really hit. */
c447ac0b
PA
6424
6425 if (!switch_back_to_stepped_thread (ecs))
6426 {
1eb8556f 6427 infrun_debug_printf ("random signal, keep going");
c447ac0b
PA
6428
6429 keep_going (ecs);
6430 }
6431 return;
488f131b 6432 }
94c57d6a
PA
6433
6434 process_event_stop_test (ecs);
6435}
6436
6437/* Come here when we've got some debug event / signal we can explain
6438 (IOW, not a random signal), and test whether it should cause a
6439 stop, or whether we should resume the inferior (transparently).
6440 E.g., could be a breakpoint whose condition evaluates false; we
6441 could be still stepping within the line; etc. */
6442
6443static void
6444process_event_stop_test (struct execution_control_state *ecs)
6445{
6446 struct symtab_and_line stop_pc_sal;
6447 struct frame_info *frame;
6448 struct gdbarch *gdbarch;
cdaa5b73
PA
6449 CORE_ADDR jmp_buf_pc;
6450 struct bpstat_what what;
94c57d6a 6451
cdaa5b73 6452 /* Handle cases caused by hitting a breakpoint. */
611c83ae 6453
cdaa5b73
PA
6454 frame = get_current_frame ();
6455 gdbarch = get_frame_arch (frame);
fcf3daef 6456
cdaa5b73 6457 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
611c83ae 6458
cdaa5b73
PA
6459 if (what.call_dummy)
6460 {
6461 stop_stack_dummy = what.call_dummy;
6462 }
186c406b 6463
243a9253
PA
6464 /* A few breakpoint types have callbacks associated (e.g.,
6465 bp_jit_event). Run them now. */
6466 bpstat_run_callbacks (ecs->event_thread->control.stop_bpstat);
6467
cdaa5b73
PA
6468 /* If we hit an internal event that triggers symbol changes, the
6469 current frame will be invalidated within bpstat_what (e.g., if we
6470 hit an internal solib event). Re-fetch it. */
6471 frame = get_current_frame ();
6472 gdbarch = get_frame_arch (frame);
e2e4d78b 6473
cdaa5b73
PA
6474 switch (what.main_action)
6475 {
6476 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
6477 /* If we hit the breakpoint at longjmp while stepping, we
6478 install a momentary breakpoint at the target of the
6479 jmp_buf. */
186c406b 6480
1eb8556f 6481 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME");
186c406b 6482
cdaa5b73 6483 ecs->event_thread->stepping_over_breakpoint = 1;
611c83ae 6484
cdaa5b73
PA
6485 if (what.is_longjmp)
6486 {
6487 struct value *arg_value;
6488
6489 /* If we set the longjmp breakpoint via a SystemTap probe,
6490 then use it to extract the arguments. The destination PC
6491 is the third argument to the probe. */
6492 arg_value = probe_safe_evaluate_at_pc (frame, 2);
6493 if (arg_value)
8fa0c4f8
AA
6494 {
6495 jmp_buf_pc = value_as_address (arg_value);
6496 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
6497 }
cdaa5b73
PA
6498 else if (!gdbarch_get_longjmp_target_p (gdbarch)
6499 || !gdbarch_get_longjmp_target (gdbarch,
6500 frame, &jmp_buf_pc))
e2e4d78b 6501 {
1eb8556f
SM
6502 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME "
6503 "(!gdbarch_get_longjmp_target)");
cdaa5b73
PA
6504 keep_going (ecs);
6505 return;
e2e4d78b 6506 }
e2e4d78b 6507
cdaa5b73
PA
6508 /* Insert a breakpoint at resume address. */
6509 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
6510 }
6511 else
6512 check_exception_resume (ecs, frame);
6513 keep_going (ecs);
6514 return;
e81a37f7 6515
cdaa5b73
PA
6516 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
6517 {
6518 struct frame_info *init_frame;
e81a37f7 6519
cdaa5b73 6520 /* There are several cases to consider.
c906108c 6521
cdaa5b73
PA
6522 1. The initiating frame no longer exists. In this case we
6523 must stop, because the exception or longjmp has gone too
6524 far.
2c03e5be 6525
cdaa5b73
PA
6526 2. The initiating frame exists, and is the same as the
6527 current frame. We stop, because the exception or longjmp
6528 has been caught.
2c03e5be 6529
cdaa5b73
PA
6530 3. The initiating frame exists and is different from the
6531 current frame. This means the exception or longjmp has
6532 been caught beneath the initiating frame, so keep going.
c906108c 6533
cdaa5b73
PA
6534 4. longjmp breakpoint has been placed just to protect
6535 against stale dummy frames and user is not interested in
6536 stopping around longjmps. */
c5aa993b 6537
1eb8556f 6538 infrun_debug_printf ("BPSTAT_WHAT_CLEAR_LONGJMP_RESUME");
c5aa993b 6539
cdaa5b73
PA
6540 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
6541 != NULL);
6542 delete_exception_resume_breakpoint (ecs->event_thread);
c5aa993b 6543
cdaa5b73
PA
6544 if (what.is_longjmp)
6545 {
b67a2c6f 6546 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
c5aa993b 6547
cdaa5b73 6548 if (!frame_id_p (ecs->event_thread->initiating_frame))
e5ef252a 6549 {
cdaa5b73
PA
6550 /* Case 4. */
6551 keep_going (ecs);
6552 return;
e5ef252a 6553 }
cdaa5b73 6554 }
c5aa993b 6555
cdaa5b73 6556 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
527159b7 6557
cdaa5b73
PA
6558 if (init_frame)
6559 {
6560 struct frame_id current_id
6561 = get_frame_id (get_current_frame ());
6562 if (frame_id_eq (current_id,
6563 ecs->event_thread->initiating_frame))
6564 {
6565 /* Case 2. Fall through. */
6566 }
6567 else
6568 {
6569 /* Case 3. */
6570 keep_going (ecs);
6571 return;
6572 }
68f53502 6573 }
488f131b 6574
cdaa5b73
PA
6575 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
6576 exists. */
6577 delete_step_resume_breakpoint (ecs->event_thread);
e5ef252a 6578
bdc36728 6579 end_stepping_range (ecs);
cdaa5b73
PA
6580 }
6581 return;
e5ef252a 6582
cdaa5b73 6583 case BPSTAT_WHAT_SINGLE:
1eb8556f 6584 infrun_debug_printf ("BPSTAT_WHAT_SINGLE");
cdaa5b73
PA
6585 ecs->event_thread->stepping_over_breakpoint = 1;
6586 /* Still need to check other stuff, at least the case where we
6587 are stepping and step out of the right range. */
6588 break;
e5ef252a 6589
cdaa5b73 6590 case BPSTAT_WHAT_STEP_RESUME:
1eb8556f 6591 infrun_debug_printf ("BPSTAT_WHAT_STEP_RESUME");
e5ef252a 6592
cdaa5b73
PA
6593 delete_step_resume_breakpoint (ecs->event_thread);
6594 if (ecs->event_thread->control.proceed_to_finish
6595 && execution_direction == EXEC_REVERSE)
6596 {
6597 struct thread_info *tp = ecs->event_thread;
6598
6599 /* We are finishing a function in reverse, and just hit the
6600 step-resume breakpoint at the start address of the
6601 function, and we're almost there -- just need to back up
6602 by one more single-step, which should take us back to the
6603 function call. */
6604 tp->control.step_range_start = tp->control.step_range_end = 1;
6605 keep_going (ecs);
e5ef252a 6606 return;
cdaa5b73
PA
6607 }
6608 fill_in_stop_func (gdbarch, ecs);
1edb66d8 6609 if (ecs->event_thread->stop_pc () == ecs->stop_func_start
cdaa5b73
PA
6610 && execution_direction == EXEC_REVERSE)
6611 {
6612 /* We are stepping over a function call in reverse, and just
6613 hit the step-resume breakpoint at the start address of
6614 the function. Go back to single-stepping, which should
6615 take us back to the function call. */
6616 ecs->event_thread->stepping_over_breakpoint = 1;
6617 keep_going (ecs);
6618 return;
6619 }
6620 break;
e5ef252a 6621
cdaa5b73 6622 case BPSTAT_WHAT_STOP_NOISY:
1eb8556f 6623 infrun_debug_printf ("BPSTAT_WHAT_STOP_NOISY");
c4464ade 6624 stop_print_frame = true;
e5ef252a 6625
33bf4c5c 6626 /* Assume the thread stopped for a breakpoint. We'll still check
99619bea
PA
6627 whether a/the breakpoint is there when the thread is next
6628 resumed. */
6629 ecs->event_thread->stepping_over_breakpoint = 1;
e5ef252a 6630
22bcd14b 6631 stop_waiting (ecs);
cdaa5b73 6632 return;
e5ef252a 6633
cdaa5b73 6634 case BPSTAT_WHAT_STOP_SILENT:
1eb8556f 6635 infrun_debug_printf ("BPSTAT_WHAT_STOP_SILENT");
c4464ade 6636 stop_print_frame = false;
e5ef252a 6637
33bf4c5c 6638 /* Assume the thread stopped for a breakpoint. We'll still check
99619bea
PA
6639 whether a/the breakpoint is there when the thread is next
6640 resumed. */
6641 ecs->event_thread->stepping_over_breakpoint = 1;
22bcd14b 6642 stop_waiting (ecs);
cdaa5b73
PA
6643 return;
6644
6645 case BPSTAT_WHAT_HP_STEP_RESUME:
1eb8556f 6646 infrun_debug_printf ("BPSTAT_WHAT_HP_STEP_RESUME");
cdaa5b73
PA
6647
6648 delete_step_resume_breakpoint (ecs->event_thread);
6649 if (ecs->event_thread->step_after_step_resume_breakpoint)
6650 {
6651 /* Back when the step-resume breakpoint was inserted, we
6652 were trying to single-step off a breakpoint. Go back to
6653 doing that. */
6654 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6655 ecs->event_thread->stepping_over_breakpoint = 1;
6656 keep_going (ecs);
6657 return;
e5ef252a 6658 }
cdaa5b73
PA
6659 break;
6660
6661 case BPSTAT_WHAT_KEEP_CHECKING:
6662 break;
e5ef252a 6663 }
c906108c 6664
af48d08f
PA
6665 /* If we stepped a permanent breakpoint and we had a high priority
6666 step-resume breakpoint for the address we stepped, but we didn't
6667 hit it, then we must have stepped into the signal handler. The
6668 step-resume was only necessary to catch the case of _not_
6669 stepping into the handler, so delete it, and fall through to
6670 checking whether the step finished. */
6671 if (ecs->event_thread->stepped_breakpoint)
6672 {
6673 struct breakpoint *sr_bp
6674 = ecs->event_thread->control.step_resume_breakpoint;
6675
8d707a12
PA
6676 if (sr_bp != NULL
6677 && sr_bp->loc->permanent
af48d08f
PA
6678 && sr_bp->type == bp_hp_step_resume
6679 && sr_bp->loc->address == ecs->event_thread->prev_pc)
6680 {
1eb8556f 6681 infrun_debug_printf ("stepped permanent breakpoint, stopped in handler");
af48d08f
PA
6682 delete_step_resume_breakpoint (ecs->event_thread);
6683 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6684 }
6685 }
6686
cdaa5b73
PA
6687 /* We come here if we hit a breakpoint but should not stop for it.
6688 Possibly we also were stepping and should stop for that. So fall
6689 through and test for stepping. But, if not stepping, do not
6690 stop. */
c906108c 6691
a7212384
UW
6692 /* In all-stop mode, if we're currently stepping but have stopped in
6693 some other thread, we need to switch back to the stepped thread. */
c447ac0b
PA
6694 if (switch_back_to_stepped_thread (ecs))
6695 return;
776f04fa 6696
8358c15c 6697 if (ecs->event_thread->control.step_resume_breakpoint)
488f131b 6698 {
1eb8556f 6699 infrun_debug_printf ("step-resume breakpoint is inserted");
527159b7 6700
488f131b 6701 /* Having a step-resume breakpoint overrides anything
dda83cd7
SM
6702 else having to do with stepping commands until
6703 that breakpoint is reached. */
488f131b
JB
6704 keep_going (ecs);
6705 return;
6706 }
c5aa993b 6707
16c381f0 6708 if (ecs->event_thread->control.step_range_end == 0)
488f131b 6709 {
1eb8556f 6710 infrun_debug_printf ("no stepping, continue");
488f131b 6711 /* Likewise if we aren't even stepping. */
488f131b
JB
6712 keep_going (ecs);
6713 return;
6714 }
c5aa993b 6715
4b7703ad
JB
6716 /* Re-fetch current thread's frame in case the code above caused
6717 the frame cache to be re-initialized, making our FRAME variable
6718 a dangling pointer. */
6719 frame = get_current_frame ();
628fe4e4 6720 gdbarch = get_frame_arch (frame);
7e324e48 6721 fill_in_stop_func (gdbarch, ecs);
4b7703ad 6722
488f131b 6723 /* If stepping through a line, keep going if still within it.
c906108c 6724
488f131b
JB
6725 Note that step_range_end is the address of the first instruction
6726 beyond the step range, and NOT the address of the last instruction
31410e84
MS
6727 within it!
6728
6729 Note also that during reverse execution, we may be stepping
6730 through a function epilogue and therefore must detect when
6731 the current-frame changes in the middle of a line. */
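/* Illustrative example (not part of the original infrun.c): if the
   line being stepped occupies addresses 0x4005d0 up to but not
   including 0x4005e8, then step_range_start is 0x4005d0 and
   step_range_end is 0x4005e8; a stop at, say, 0x4005dc falls inside
   the range, so the code below simply keeps going.  */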
6732
1edb66d8 6733 if (pc_in_thread_step_range (ecs->event_thread->stop_pc (),
f2ffa92b 6734 ecs->event_thread)
31410e84 6735 && (execution_direction != EXEC_REVERSE
388a8562 6736 || frame_id_eq (get_frame_id (frame),
16c381f0 6737 ecs->event_thread->control.step_frame_id)))
488f131b 6738 {
1eb8556f
SM
6739 infrun_debug_printf
6740 ("stepping inside range [%s-%s]",
6741 paddress (gdbarch, ecs->event_thread->control.step_range_start),
6742 paddress (gdbarch, ecs->event_thread->control.step_range_end));
b2175913 6743
c1e36e3e
PA
6744 /* Tentatively re-enable range stepping; `resume' disables it if
6745 necessary (e.g., if we're stepping over a breakpoint or we
6746 have software watchpoints). */
6747 ecs->event_thread->control.may_range_step = 1;
6748
b2175913
MS
6749 /* When stepping backward, stop at beginning of line range
6750 (unless it's the function entry point, in which case
6751 keep going back to the call point). */
1edb66d8 6752 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
16c381f0 6753 if (stop_pc == ecs->event_thread->control.step_range_start
b2175913
MS
6754 && stop_pc != ecs->stop_func_start
6755 && execution_direction == EXEC_REVERSE)
bdc36728 6756 end_stepping_range (ecs);
b2175913
MS
6757 else
6758 keep_going (ecs);
6759
488f131b
JB
6760 return;
6761 }
c5aa993b 6762
488f131b 6763 /* We stepped out of the stepping range. */
c906108c 6764
488f131b 6765 /* If we are stepping at the source level and entered the runtime
388a8562
MS
6766 loader dynamic symbol resolution code...
6767
6768 EXEC_FORWARD: we keep on single stepping until we exit the run
6769 time loader code and reach the callee's address.
6770
6771 EXEC_REVERSE: we've already executed the callee (backward), and
6772 the runtime loader code is handled just like any other
6773 undebuggable function call. Now we need only keep stepping
6774 backward through the trampoline code, and that's handled further
6775 down, so there is nothing for us to do here. */
6776
6777 if (execution_direction != EXEC_REVERSE
16c381f0 6778 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
1edb66d8 6779 && in_solib_dynsym_resolve_code (ecs->event_thread->stop_pc ()))
488f131b 6780 {
4c8c40e6 6781 CORE_ADDR pc_after_resolver =
1edb66d8 6782 gdbarch_skip_solib_resolver (gdbarch, ecs->event_thread->stop_pc ());
c906108c 6783
1eb8556f 6784 infrun_debug_printf ("stepped into dynsym resolve code");
527159b7 6785
488f131b
JB
6786 if (pc_after_resolver)
6787 {
6788 /* Set up a step-resume breakpoint at the address
6789 indicated by SKIP_SOLIB_RESOLVER. */
51abb421 6790 symtab_and_line sr_sal;
488f131b 6791 sr_sal.pc = pc_after_resolver;
6c95b8df 6792 sr_sal.pspace = get_frame_program_space (frame);
488f131b 6793
a6d9a66e
UW
6794 insert_step_resume_breakpoint_at_sal (gdbarch,
6795 sr_sal, null_frame_id);
c5aa993b 6796 }
c906108c 6797
488f131b
JB
6798 keep_going (ecs);
6799 return;
6800 }
c906108c 6801
1d509aa6
MM
6802 /* Step through an indirect branch thunk. */
6803 if (ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
f2ffa92b 6804 && gdbarch_in_indirect_branch_thunk (gdbarch,
1edb66d8 6805 ecs->event_thread->stop_pc ()))
1d509aa6 6806 {
1eb8556f 6807 infrun_debug_printf ("stepped into indirect branch thunk");
1d509aa6
MM
6808 keep_going (ecs);
6809 return;
6810 }
6811
16c381f0
JK
6812 if (ecs->event_thread->control.step_range_end != 1
6813 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
6814 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
568d6575 6815 && get_frame_type (frame) == SIGTRAMP_FRAME)
488f131b 6816 {
1eb8556f 6817 infrun_debug_printf ("stepped into signal trampoline");
42edda50 6818 /* The inferior, while doing a "step" or "next", has ended up in
dda83cd7
SM
6819 a signal trampoline (either by a signal being delivered or by
6820 the signal handler returning). Just single-step until the
6821 inferior leaves the trampoline (either by calling the handler
6822 or returning). */
488f131b
JB
6823 keep_going (ecs);
6824 return;
6825 }
c906108c 6826
14132e89
MR
6827 /* If we're in the return path from a shared library trampoline,
6828 we want to proceed through the trampoline when stepping. */
6829 /* macro/2012-04-25: This needs to come before the subroutine
6830 call check below as on some targets return trampolines look
6831 like subroutine calls (MIPS16 return thunks). */
6832 if (gdbarch_in_solib_return_trampoline (gdbarch,
1edb66d8 6833 ecs->event_thread->stop_pc (),
f2ffa92b 6834 ecs->stop_func_name)
14132e89
MR
6835 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
6836 {
6837 /* Determine where this trampoline returns. */
1edb66d8 6838 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
f2ffa92b
PA
6839 CORE_ADDR real_stop_pc
6840 = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
14132e89 6841
1eb8556f 6842 infrun_debug_printf ("stepped into solib return tramp");
14132e89
MR
6843
6844 /* Only proceed through if we know where it's going. */
6845 if (real_stop_pc)
6846 {
6847 /* And put the step-breakpoint there and go until there. */
51abb421 6848 symtab_and_line sr_sal;
14132e89
MR
6849 sr_sal.pc = real_stop_pc;
6850 sr_sal.section = find_pc_overlay (sr_sal.pc);
6851 sr_sal.pspace = get_frame_program_space (frame);
6852
6853 /* Do not specify what the fp should be when we stop since
6854 on some machines the prologue is where the new fp value
6855 is established. */
6856 insert_step_resume_breakpoint_at_sal (gdbarch,
6857 sr_sal, null_frame_id);
6858
6859 /* Restart without fiddling with the step ranges or
6860 other state. */
6861 keep_going (ecs);
6862 return;
6863 }
6864 }
6865
c17eaafe
DJ
6866 /* Check for subroutine calls. The check for the current frame
6867 equalling the step ID is not necessary - the check of the
6868 previous frame's ID is sufficient - but it is a common case and
6869 cheaper than checking the previous frame's ID.
14e60db5
DJ
6870
6871 NOTE: frame_id_eq will never report two invalid frame IDs as
6872 being equal, so to get into this block, both the current and
6873 previous frame must have valid frame IDs. */
005ca36a
JB
6874 /* The outer_frame_id check is a heuristic to detect stepping
6875 through startup code. If we step over an instruction which
6876 sets the stack pointer from an invalid value to a valid value,
6877 we may detect that as a subroutine call from the mythical
6878 "outermost" function. This could be fixed by marking
6879 outermost frames as !stack_p,code_p,special_p. Then the
6880 initial outermost frame, before sp was valid, would
ce6cca6d 6881 have code_addr == &_start. See the comment in frame_id_eq
005ca36a 6882 for more. */
edb3359d 6883 if (!frame_id_eq (get_stack_frame_id (frame),
16c381f0 6884 ecs->event_thread->control.step_stack_frame_id)
005ca36a 6885 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
16c381f0
JK
6886 ecs->event_thread->control.step_stack_frame_id)
6887 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
005ca36a 6888 outer_frame_id)
885eeb5b 6889 || (ecs->event_thread->control.step_start_function
1edb66d8 6890 != find_pc_function (ecs->event_thread->stop_pc ())))))
488f131b 6891 {
1edb66d8 6892 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
95918acb 6893 CORE_ADDR real_stop_pc;
8fb3e588 6894
1eb8556f 6895 infrun_debug_printf ("stepped into subroutine");
527159b7 6896
b7a084be 6897 if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
95918acb
AC
6898 {
6899 /* I presume that step_over_calls is only 0 when we're
6900 supposed to be stepping at the assembly language level
6901 ("stepi"). Just stop. */
388a8562 6902 /* And this works the same backward as frontward. MVS */
bdc36728 6903 end_stepping_range (ecs);
95918acb
AC
6904 return;
6905 }
8fb3e588 6906
388a8562
MS
6907 /* Reverse stepping through solib trampolines. */
6908
6909 if (execution_direction == EXEC_REVERSE
16c381f0 6910 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
388a8562
MS
6911 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
6912 || (ecs->stop_func_start == 0
6913 && in_solib_dynsym_resolve_code (stop_pc))))
6914 {
6915 /* Any solib trampoline code can be handled in reverse
6916 by simply continuing to single-step. We have already
6917 executed the solib function (backwards), and a few
6918 steps will take us back through the trampoline to the
6919 caller. */
6920 keep_going (ecs);
6921 return;
6922 }
6923
16c381f0 6924 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
8567c30f 6925 {
b2175913
MS
6926 /* We're doing a "next".
6927
6928 Normal (forward) execution: set a breakpoint at the
6929 callee's return address (the address at which the caller
6930 will resume).
6931
 6932 Reverse (backward) execution: set the step-resume
6933 breakpoint at the start of the function that we just
6934 stepped into (backwards), and continue to there. When we
6130d0b7 6935 get there, we'll need to single-step back to the caller. */
b2175913
MS
6936
6937 if (execution_direction == EXEC_REVERSE)
6938 {
acf9414f
JK
6939 /* If we're already at the start of the function, we've either
6940 just stepped backward into a single instruction function,
6941 or stepped back out of a signal handler to the first instruction
6942 of the function. Just keep going, which will single-step back
6943 to the caller. */
58c48e72 6944 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
acf9414f 6945 {
acf9414f 6946 /* Normal function call return (static or dynamic). */
51abb421 6947 symtab_and_line sr_sal;
acf9414f
JK
6948 sr_sal.pc = ecs->stop_func_start;
6949 sr_sal.pspace = get_frame_program_space (frame);
6950 insert_step_resume_breakpoint_at_sal (gdbarch,
6951 sr_sal, null_frame_id);
6952 }
b2175913
MS
6953 }
6954 else
568d6575 6955 insert_step_resume_breakpoint_at_caller (frame);
b2175913 6956
8567c30f
AC
6957 keep_going (ecs);
6958 return;
6959 }
a53c66de 6960
95918acb 6961 /* If we are in a function call trampoline (a stub between the
dda83cd7
SM
6962 calling routine and the real function), locate the real
6963 function. That's what tells us (a) whether we want to step
6964 into it at all, and (b) what prologue we want to run to the
6965 end of, if we do step into it. */
568d6575 6966 real_stop_pc = skip_language_trampoline (frame, stop_pc);
95918acb 6967 if (real_stop_pc == 0)
568d6575 6968 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
95918acb
AC
6969 if (real_stop_pc != 0)
6970 ecs->stop_func_start = real_stop_pc;
8fb3e588 6971
db5f024e 6972 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
1b2bfbb9 6973 {
51abb421 6974 symtab_and_line sr_sal;
1b2bfbb9 6975 sr_sal.pc = ecs->stop_func_start;
6c95b8df 6976 sr_sal.pspace = get_frame_program_space (frame);
1b2bfbb9 6977
a6d9a66e
UW
6978 insert_step_resume_breakpoint_at_sal (gdbarch,
6979 sr_sal, null_frame_id);
8fb3e588
AC
6980 keep_going (ecs);
6981 return;
1b2bfbb9
RC
6982 }
6983
95918acb 6984 /* If we have line number information for the function we are
1bfeeb0f
JL
6985 thinking of stepping into and the function isn't on the skip
6986 list, step into it.
95918acb 6987
dda83cd7
SM
6988 If there are several symtabs at that PC (e.g. with include
 6989 files), we just want to know whether *any* of them have line
6990 numbers. find_pc_line handles this. */
95918acb
AC
6991 {
6992 struct symtab_and_line tmp_sal;
8fb3e588 6993
95918acb 6994 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
2b914b52 6995 if (tmp_sal.line != 0
85817405 6996 && !function_name_is_marked_for_skip (ecs->stop_func_name,
4a4c04f1
BE
6997 tmp_sal)
6998 && !inline_frame_is_marked_for_skip (true, ecs->event_thread))
95918acb 6999 {
b2175913 7000 if (execution_direction == EXEC_REVERSE)
568d6575 7001 handle_step_into_function_backward (gdbarch, ecs);
b2175913 7002 else
568d6575 7003 handle_step_into_function (gdbarch, ecs);
95918acb
AC
7004 return;
7005 }
7006 }
7007
7008 /* If we have no line number and the step-stop-if-no-debug is
dda83cd7
SM
7009 set, we stop the step so that the user has a chance to switch
 7010 to assembly mode. */
16c381f0 7011 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
078130d0 7012 && step_stop_if_no_debug)
95918acb 7013 {
bdc36728 7014 end_stepping_range (ecs);
95918acb
AC
7015 return;
7016 }
7017
b2175913
MS
7018 if (execution_direction == EXEC_REVERSE)
7019 {
acf9414f
JK
7020 /* If we're already at the start of the function, we've either just
7021 stepped backward into a single instruction function without line
7022 number info, or stepped back out of a signal handler to the first
7023 instruction of the function without line number info. Just keep
7024 going, which will single-step back to the caller. */
7025 if (ecs->stop_func_start != stop_pc)
7026 {
7027 /* Set a breakpoint at callee's start address.
7028 From there we can step once and be back in the caller. */
51abb421 7029 symtab_and_line sr_sal;
acf9414f
JK
7030 sr_sal.pc = ecs->stop_func_start;
7031 sr_sal.pspace = get_frame_program_space (frame);
7032 insert_step_resume_breakpoint_at_sal (gdbarch,
7033 sr_sal, null_frame_id);
7034 }
b2175913
MS
7035 }
7036 else
7037 /* Set a breakpoint at callee's return address (the address
7038 at which the caller will resume). */
568d6575 7039 insert_step_resume_breakpoint_at_caller (frame);
b2175913 7040
95918acb 7041 keep_going (ecs);
488f131b 7042 return;
488f131b 7043 }
c906108c 7044
fdd654f3
MS
7045 /* Reverse stepping through solib trampolines. */
7046
7047 if (execution_direction == EXEC_REVERSE
16c381f0 7048 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
fdd654f3 7049 {
1edb66d8 7050 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
f2ffa92b 7051
fdd654f3
MS
7052 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
7053 || (ecs->stop_func_start == 0
7054 && in_solib_dynsym_resolve_code (stop_pc)))
7055 {
7056 /* Any solib trampoline code can be handled in reverse
7057 by simply continuing to single-step. We have already
7058 executed the solib function (backwards), and a few
7059 steps will take us back through the trampoline to the
7060 caller. */
7061 keep_going (ecs);
7062 return;
7063 }
7064 else if (in_solib_dynsym_resolve_code (stop_pc))
7065 {
7066 /* Stepped backward into the solib dynsym resolver.
7067 Set a breakpoint at its start and continue, then
7068 one more step will take us out. */
51abb421 7069 symtab_and_line sr_sal;
fdd654f3 7070 sr_sal.pc = ecs->stop_func_start;
9d1807c3 7071 sr_sal.pspace = get_frame_program_space (frame);
fdd654f3
MS
7072 insert_step_resume_breakpoint_at_sal (gdbarch,
7073 sr_sal, null_frame_id);
7074 keep_going (ecs);
7075 return;
7076 }
7077 }
7078
8c95582d
AB
7079 /* This always returns the sal for the inner-most frame when we are in a
7080 stack of inlined frames, even if GDB actually believes that it is in a
7081 more outer frame. This is checked for below by calls to
7082 inline_skipped_frames. */
1edb66d8 7083 stop_pc_sal = find_pc_line (ecs->event_thread->stop_pc (), 0);
7ed0fe66 7084
1b2bfbb9
RC
7085 /* NOTE: tausq/2004-05-24: This if block used to be done before all
7086 the trampoline processing logic, however, there are some trampolines
7087 that have no names, so we should do trampoline handling first. */
16c381f0 7088 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7ed0fe66 7089 && ecs->stop_func_name == NULL
2afb61aa 7090 && stop_pc_sal.line == 0)
1b2bfbb9 7091 {
1eb8556f 7092 infrun_debug_printf ("stepped into undebuggable function");
527159b7 7093
1b2bfbb9 7094 /* The inferior just stepped into, or returned to, an
dda83cd7
SM
7095 undebuggable function (where there is no debugging information
7096 and no line number corresponding to the address where the
7097 inferior stopped). Since we want to skip this kind of code,
7098 we keep going until the inferior returns from this
7099 function - unless the user has asked us not to (via
7100 set step-mode) or we no longer know how to get back
7101 to the call site. */
14e60db5 7102 if (step_stop_if_no_debug
c7ce8faa 7103 || !frame_id_p (frame_unwind_caller_id (frame)))
1b2bfbb9
RC
7104 {
7105 /* If we have no line number and the step-stop-if-no-debug
7106 is set, we stop the step so that the user has a chance to
 7107 switch to assembly mode. */
bdc36728 7108 end_stepping_range (ecs);
1b2bfbb9
RC
7109 return;
7110 }
7111 else
7112 {
7113 /* Set a breakpoint at callee's return address (the address
7114 at which the caller will resume). */
568d6575 7115 insert_step_resume_breakpoint_at_caller (frame);
1b2bfbb9
RC
7116 keep_going (ecs);
7117 return;
7118 }
7119 }
7120
16c381f0 7121 if (ecs->event_thread->control.step_range_end == 1)
1b2bfbb9
RC
7122 {
7123 /* It is stepi or nexti. We always want to stop stepping after
dda83cd7 7124 one instruction. */
1eb8556f 7125 infrun_debug_printf ("stepi/nexti");
bdc36728 7126 end_stepping_range (ecs);
1b2bfbb9
RC
7127 return;
7128 }
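/* Illustrative note (not part of the original infrun.c):
   control.step_range_end doubles as a small state encoding -- 0
   means "not stepping" (checked earlier in this function), 1 means a
   stepi/nexti that stops after exactly one instruction (the case
   just handled), and any other value is the address one past the
   range of the line being stepped.  */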
7129
2afb61aa 7130 if (stop_pc_sal.line == 0)
488f131b
JB
7131 {
7132 /* We have no line number information. That means to stop
dda83cd7
SM
7133 stepping (does this always happen right after one instruction,
7134 when we do "s" in a function with no line numbers,
7135 or can this happen as a result of a return or longjmp?). */
1eb8556f 7136 infrun_debug_printf ("line number info");
bdc36728 7137 end_stepping_range (ecs);
488f131b
JB
7138 return;
7139 }
c906108c 7140
edb3359d
DJ
7141 /* Look for "calls" to inlined functions, part one. If the inline
7142 frame machinery detected some skipped call sites, we have entered
7143 a new inline function. */
7144
7145 if (frame_id_eq (get_frame_id (get_current_frame ()),
16c381f0 7146 ecs->event_thread->control.step_frame_id)
00431a78 7147 && inline_skipped_frames (ecs->event_thread))
edb3359d 7148 {
1eb8556f 7149 infrun_debug_printf ("stepped into inlined function");
edb3359d 7150
51abb421 7151 symtab_and_line call_sal = find_frame_sal (get_current_frame ());
edb3359d 7152
16c381f0 7153 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
edb3359d
DJ
7154 {
7155 /* For "step", we're going to stop. But if the call site
7156 for this inlined function is on the same source line as
7157 we were previously stepping, go down into the function
7158 first. Otherwise stop at the call site. */
7159
7160 if (call_sal.line == ecs->event_thread->current_line
7161 && call_sal.symtab == ecs->event_thread->current_symtab)
4a4c04f1
BE
7162 {
7163 step_into_inline_frame (ecs->event_thread);
7164 if (inline_frame_is_marked_for_skip (false, ecs->event_thread))
7165 {
7166 keep_going (ecs);
7167 return;
7168 }
7169 }
edb3359d 7170
bdc36728 7171 end_stepping_range (ecs);
edb3359d
DJ
7172 return;
7173 }
7174 else
7175 {
7176 /* For "next", we should stop at the call site if it is on a
7177 different source line. Otherwise continue through the
7178 inlined function. */
7179 if (call_sal.line == ecs->event_thread->current_line
7180 && call_sal.symtab == ecs->event_thread->current_symtab)
7181 keep_going (ecs);
7182 else
bdc36728 7183 end_stepping_range (ecs);
edb3359d
DJ
7184 return;
7185 }
7186 }
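/* Illustrative example (not part of the original infrun.c): given

     static inline int g (int x) { return x + 1; }  // inlined into f
     int f (int x) { return g (x) * 2; }

   a "step" whose skipped call site is on the line we were already
   stepping descends into g's inline frame and stops there, while a
   "next" keeps going through the inlined body; if the call site is
   on a different source line, both commands stop at the call
   site.  */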
7187
7188 /* Look for "calls" to inlined functions, part two. If we are still
7189 in the same real function we were stepping through, but we have
7190 to go further up to find the exact frame ID, we are stepping
7191 through a more inlined call beyond its call site. */
7192
7193 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
7194 && !frame_id_eq (get_frame_id (get_current_frame ()),
16c381f0 7195 ecs->event_thread->control.step_frame_id)
edb3359d 7196 && stepped_in_from (get_current_frame (),
16c381f0 7197 ecs->event_thread->control.step_frame_id))
edb3359d 7198 {
1eb8556f 7199 infrun_debug_printf ("stepping through inlined function");
edb3359d 7200
4a4c04f1
BE
7201 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL
7202 || inline_frame_is_marked_for_skip (false, ecs->event_thread))
edb3359d
DJ
7203 keep_going (ecs);
7204 else
bdc36728 7205 end_stepping_range (ecs);
edb3359d
DJ
7206 return;
7207 }
7208
8c95582d 7209 bool refresh_step_info = true;
1edb66d8 7210 if ((ecs->event_thread->stop_pc () == stop_pc_sal.pc)
4e1c45ea 7211 && (ecs->event_thread->current_line != stop_pc_sal.line
24b21115 7212 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
488f131b 7213 {
ebde6f2d
TV
7214 /* We are at a different line. */
7215
8c95582d
AB
7216 if (stop_pc_sal.is_stmt)
7217 {
ebde6f2d
TV
7218 /* We are at the start of a statement.
7219
7220 So stop. Note that we don't stop if we step into the middle of a
7221 statement. That is said to make things like for (;;) statements
7222 work better. */
1eb8556f 7223 infrun_debug_printf ("stepped to a different line");
8c95582d
AB
7224 end_stepping_range (ecs);
7225 return;
7226 }
7227 else if (frame_id_eq (get_frame_id (get_current_frame ()),
ebde6f2d 7228 ecs->event_thread->control.step_frame_id))
8c95582d 7229 {
ebde6f2d
TV
7230 /* We are not at the start of a statement, and we have not changed
7231 frame.
7232
7233 We ignore this line table entry, and continue stepping forward,
8c95582d
AB
7234 looking for a better place to stop. */
7235 refresh_step_info = false;
1eb8556f
SM
7236 infrun_debug_printf ("stepped to a different line, but "
7237 "it's not the start of a statement");
8c95582d 7238 }
ebde6f2d
TV
7239 else
7240 {
 7241 /* We are not at the start of a statement, and we have changed frame.
7242
7243 We ignore this line table entry, and continue stepping forward,
7244 looking for a better place to stop. Keep refresh_step_info at
7245 true to note that the frame has changed, but ignore the line
7246 number to make sure we don't ignore a subsequent entry with the
7247 same line number. */
7248 stop_pc_sal.line = 0;
7249 infrun_debug_printf ("stepped to a different frame, but "
7250 "it's not the start of a statement");
7251 }
488f131b 7252 }
c906108c 7253
488f131b 7254 /* We aren't done stepping.
c906108c 7255
488f131b
JB
7256 Optimize by setting the stepping range to the line.
7257 (We might not be in the original line, but if we entered a
7258 new line in mid-statement, we continue stepping. This makes
8c95582d
AB
7259 things like for(;;) statements work better.)
7260
7261 If we entered a SAL that indicates a non-statement line table entry,
7262 then we update the stepping range, but we don't update the step info,
7263 which includes things like the line number we are stepping away from.
7264 This means we will stop when we find a line table entry that is marked
7265 as is-statement, even if it matches the non-statement one we just
7266 stepped into. */
c906108c 7267
16c381f0
JK
7268 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
7269 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
c1e36e3e 7270 ecs->event_thread->control.may_range_step = 1;
8c95582d
AB
7271 if (refresh_step_info)
7272 set_step_info (ecs->event_thread, frame, stop_pc_sal);
488f131b 7273
1eb8556f 7274 infrun_debug_printf ("keep going");
488f131b 7275 keep_going (ecs);
104c1213
JM
7276}
7277
408f6686
PA
7278static bool restart_stepped_thread (process_stratum_target *resume_target,
7279 ptid_t resume_ptid);
7280
c447ac0b
PA
7281/* In all-stop mode, if we're currently stepping but have stopped in
7282 some other thread, we may need to switch back to the stepped
 7283 thread. Returns true if we set the inferior running, false if we left
7284 it stopped (and the event needs further processing). */
7285
c4464ade 7286static bool
c447ac0b
PA
7287switch_back_to_stepped_thread (struct execution_control_state *ecs)
7288{
fbea99ea 7289 if (!target_is_non_stop_p ())
c447ac0b 7290 {
99619bea
PA
7291 /* If any thread is blocked on some internal breakpoint, and we
7292 simply need to step over that breakpoint to get it going
7293 again, do that first. */
7294
7295 /* However, if we see an event for the stepping thread, then we
7296 know all other threads have been moved past their breakpoints
7297 already. Let the caller check whether the step is finished,
7298 etc., before deciding to move it past a breakpoint. */
7299 if (ecs->event_thread->control.step_range_end != 0)
c4464ade 7300 return false;
99619bea
PA
7301
7302 /* Check if the current thread is blocked on an incomplete
7303 step-over, interrupted by a random signal. */
7304 if (ecs->event_thread->control.trap_expected
1edb66d8 7305 && ecs->event_thread->stop_signal () != GDB_SIGNAL_TRAP)
c447ac0b 7306 {
1eb8556f
SM
7307 infrun_debug_printf
7308 ("need to finish step-over of [%s]",
7309 target_pid_to_str (ecs->event_thread->ptid).c_str ());
99619bea 7310 keep_going (ecs);
c4464ade 7311 return true;
99619bea 7312 }
2adfaa28 7313
99619bea
PA
7314 /* Check if the current thread is blocked by a single-step
7315 breakpoint of another thread. */
7316 if (ecs->hit_singlestep_breakpoint)
7317 {
1eb8556f
SM
7318 infrun_debug_printf ("need to step [%s] over single-step breakpoint",
7319 target_pid_to_str (ecs->ptid).c_str ());
99619bea 7320 keep_going (ecs);
c4464ade 7321 return true;
99619bea
PA
7322 }
7323
4d9d9d04
PA
7324 /* If this thread needs yet another step-over (e.g., stepping
7325 through a delay slot), do it first before moving on to
7326 another thread. */
7327 if (thread_still_needs_step_over (ecs->event_thread))
7328 {
1eb8556f
SM
7329 infrun_debug_printf
7330 ("thread [%s] still needs step-over",
7331 target_pid_to_str (ecs->event_thread->ptid).c_str ());
4d9d9d04 7332 keep_going (ecs);
c4464ade 7333 return true;
4d9d9d04 7334 }
70509625 7335
483805cf
PA
7336 /* If scheduler locking applies even if not stepping, there's no
7337 need to walk over threads. Above we've checked whether the
7338 current thread is stepping. If some other thread not the
7339 event thread is stepping, then it must be that scheduler
7340 locking is not in effect. */
856e7dd6 7341 if (schedlock_applies (ecs->event_thread))
c4464ade 7342 return false;
483805cf 7343
4d9d9d04
PA
7344 /* Otherwise, we no longer expect a trap in the current thread.
7345 Clear the trap_expected flag before switching back -- this is
7346 what keep_going does as well, if we call it. */
7347 ecs->event_thread->control.trap_expected = 0;
7348
7349 /* Likewise, clear the signal if it should not be passed. */
1edb66d8
SM
7350 if (!signal_program[ecs->event_thread->stop_signal ()])
7351 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
4d9d9d04 7352
408f6686 7353 if (restart_stepped_thread (ecs->target, ecs->ptid))
4d9d9d04
PA
7354 {
7355 prepare_to_wait (ecs);
c4464ade 7356 return true;
4d9d9d04
PA
7357 }
7358
408f6686
PA
7359 switch_to_thread (ecs->event_thread);
7360 }
4d9d9d04 7361
408f6686
PA
7362 return false;
7363}
f3f8ece4 7364
408f6686
PA
7365/* Look for the thread that was stepping, and resume it.
7366 RESUME_TARGET / RESUME_PTID indicate the set of threads the caller
7367 is resuming. Return true if a thread was started, false
7368 otherwise. */
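/* Illustrative note (not part of the original infrun.c): the two
   passes below are ordered deliberately -- threads still in the
   middle of a step-over (trap_expected) are restarted first, and
   only then is the thread doing a source-level step resumed, so the
   stepping thread is not set running while a breakpoint still needs
   to be stepped over by someone else.  */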
483805cf 7369
408f6686
PA
7370static bool
7371restart_stepped_thread (process_stratum_target *resume_target,
7372 ptid_t resume_ptid)
7373{
7374 /* Do all pending step-overs before actually proceeding with
7375 step/next/etc. */
7376 if (start_step_over ())
7377 return true;
483805cf 7378
408f6686
PA
7379 for (thread_info *tp : all_threads_safe ())
7380 {
7381 if (tp->state == THREAD_EXITED)
7382 continue;
7383
1edb66d8 7384 if (tp->has_pending_waitstatus ())
408f6686 7385 continue;
483805cf 7386
408f6686
PA
7387 /* Ignore threads of processes the caller is not
7388 resuming. */
7389 if (!sched_multi
7390 && (tp->inf->process_target () != resume_target
7391 || tp->inf->pid != resume_ptid.pid ()))
7392 continue;
483805cf 7393
408f6686
PA
7394 if (tp->control.trap_expected)
7395 {
7396 infrun_debug_printf ("switching back to stepped thread (step-over)");
483805cf 7397
408f6686
PA
7398 if (keep_going_stepped_thread (tp))
7399 return true;
99619bea 7400 }
408f6686
PA
7401 }
7402
7403 for (thread_info *tp : all_threads_safe ())
7404 {
7405 if (tp->state == THREAD_EXITED)
7406 continue;
7407
1edb66d8 7408 if (tp->has_pending_waitstatus ())
408f6686 7409 continue;
99619bea 7410
408f6686
PA
7411 /* Ignore threads of processes the caller is not
7412 resuming. */
7413 if (!sched_multi
7414 && (tp->inf->process_target () != resume_target
7415 || tp->inf->pid != resume_ptid.pid ()))
7416 continue;
7417
7418 /* Did we find the stepping thread? */
7419 if (tp->control.step_range_end)
99619bea 7420 {
408f6686 7421 infrun_debug_printf ("switching back to stepped thread (stepping)");
c447ac0b 7422
408f6686
PA
7423 if (keep_going_stepped_thread (tp))
7424 return true;
2ac7589c
PA
7425 }
7426 }
2adfaa28 7427
c4464ade 7428 return false;
2ac7589c 7429}
2adfaa28 7430
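/* Editor's note: the block below is an illustrative sketch added for
   exposition; it is not part of infrun.c.  It restates the two-pass
   scan above in a self-contained form: threads that still owe a
   step-over (trap_expected) are restarted before a thread that was
   merely inside a stepping range.  All names are hypothetical.  */
#if 0
#include <stdbool.h>
#include <stddef.h>

struct toy_thread
{
  bool exited;
  bool trap_expected;   /* Mid step-over; restart these first.  */
  bool in_step_range;   /* Was stepping a source range.  */
};

/* Return the index of the thread to restart first, or -1 if none.  */
static int
pick_thread_to_restart (const struct toy_thread *thr, size_t count)
{
  for (size_t i = 0; i < count; i++)      /* First pass: step-overs.  */
    if (!thr[i].exited && thr[i].trap_expected)
      return (int) i;

  for (size_t i = 0; i < count; i++)      /* Second pass: stepping thread.  */
    if (!thr[i].exited && thr[i].in_step_range)
      return (int) i;

  return -1;
}
#endif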
408f6686
PA
7431/* See infrun.h. */
7432
7433void
7434restart_after_all_stop_detach (process_stratum_target *proc_target)
7435{
7436 /* Note we don't check target_is_non_stop_p() here, because the
7437 current inferior may no longer have a process_stratum target
7438 pushed, as we just detached. */
7439
7440 /* See if we have a THREAD_RUNNING thread that needs to be
7441 re-resumed. If we have any thread that is already executing,
7442 then we don't need to resume the target -- it has already been
7443 resumed. With the remote target (in all-stop), it's even
7444 impossible to issue another resumption if the target is already
7445 resumed, until the target reports a stop. */
7446 for (thread_info *thr : all_threads (proc_target))
7447 {
7448 if (thr->state != THREAD_RUNNING)
7449 continue;
7450
7451 /* If we have any thread that is already executing, then we
7452 don't need to resume the target -- it has already been
7453 resumed. */
7454 if (thr->executing)
7455 return;
7456
7457 /* If we have a pending event to process, skip resuming the
7458 target and go straight to processing it. */
1edb66d8 7459 if (thr->resumed () && thr->has_pending_waitstatus ())
408f6686
PA
7460 return;
7461 }
7462
7463 /* Alright, we need to re-resume the target. If a thread was
7464 stepping, we need to restart it stepping. */
7465 if (restart_stepped_thread (proc_target, minus_one_ptid))
7466 return;
7467
7468 /* Otherwise, find the first THREAD_RUNNING thread and resume
7469 it. */
7470 for (thread_info *thr : all_threads (proc_target))
7471 {
7472 if (thr->state != THREAD_RUNNING)
7473 continue;
7474
7475 execution_control_state ecs;
7476 reset_ecs (&ecs, thr);
7477 switch_to_thread (thr);
7478 keep_going (&ecs);
7479 return;
7480 }
7481}
7482
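/* Editor's note: illustrative sketch only, not part of infrun.c.  It
   restates the decision ladder of restart_after_all_stop_detach with
   hypothetical names: do nothing if something is already running on
   the target or an event is pending, otherwise prefer restarting the
   thread that was stepping, otherwise resume the first running
   thread.  */
#if 0
#include <stdbool.h>
#include <stddef.h>

enum detach_restart
{
  DR_NOTHING,        /* Already resumed, pending event, or nothing runnable.  */
  DR_RESTART_STEP,   /* Restart the thread that was stepping.  */
  DR_RESUME_FIRST    /* Plain resume of the first running thread.  */
};

struct toy_thr
{
  bool running;
  bool executing;
  bool pending_event;
  bool was_stepping;
};

static enum detach_restart
choose_restart (const struct toy_thr *thr, size_t count)
{
  bool any_running = false;

  for (size_t i = 0; i < count; i++)
    if (thr[i].running && (thr[i].executing || thr[i].pending_event))
      return DR_NOTHING;

  for (size_t i = 0; i < count; i++)
    if (thr[i].running)
      {
	any_running = true;
	if (thr[i].was_stepping)
	  return DR_RESTART_STEP;
      }

  return any_running ? DR_RESUME_FIRST : DR_NOTHING;
}
#endif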
2ac7589c
PA
7483/* Set a previously stepped thread back to stepping. Returns true on
7484 success, false if the resume is not possible (e.g., the thread
7485 vanished). */
7486
c4464ade 7487static bool
2ac7589c
PA
7488keep_going_stepped_thread (struct thread_info *tp)
7489{
7490 struct frame_info *frame;
2ac7589c
PA
7491 struct execution_control_state ecss;
7492 struct execution_control_state *ecs = &ecss;
2adfaa28 7493
2ac7589c
PA
7494 /* If the stepping thread exited, then don't try to switch back and
7495 resume it, which could fail in several different ways depending
7496 on the target. Instead, just keep going.
2adfaa28 7497
2ac7589c
PA
7498 We can find a stepping dead thread in the thread list in two
7499 cases:
2adfaa28 7500
2ac7589c
PA
7501 - The target supports thread exit events, and when the target
7502 tries to delete the thread from the thread list, inferior_ptid
7503 pointed at the exiting thread. In such a case, calling
7504 delete_thread does not really remove the thread from the list;
7505 instead, the thread is left listed, with 'exited' state.
64ce06e4 7506
2ac7589c
PA
7507 - The target's debug interface does not support thread exit
7508 events, and so we have no idea whatsoever if the previously
7509 stepping thread is still alive. For that reason, we need to
7510 synchronously query the target now. */
2adfaa28 7511
00431a78 7512 if (tp->state == THREAD_EXITED || !target_thread_alive (tp->ptid))
2ac7589c 7513 {
1eb8556f
SM
7514 infrun_debug_printf ("not resuming previously stepped thread, it has "
7515 "vanished");
2ac7589c 7516
00431a78 7517 delete_thread (tp);
c4464ade 7518 return false;
c447ac0b 7519 }
2ac7589c 7520
1eb8556f 7521 infrun_debug_printf ("resuming previously stepped thread");
2ac7589c
PA
7522
7523 reset_ecs (ecs, tp);
00431a78 7524 switch_to_thread (tp);
2ac7589c 7525
1edb66d8 7526 tp->set_stop_pc (regcache_read_pc (get_thread_regcache (tp)));
2ac7589c 7527 frame = get_current_frame ();
2ac7589c
PA
7528
7529 /* If the PC of the thread we were trying to single-step has
7530 changed, then that thread has trapped or been signaled, but the
7531 event has not been reported to GDB yet. Re-poll the target
7532 looking for this particular thread's event (i.e. temporarily
7533 enable schedlock) by:
7534
7535 - setting a break at the current PC
7536 - resuming that particular thread, only (by setting trap
7537 expected)
7538
7539 This prevents us from continuously moving the single-step breakpoint
7540 forward, one instruction at a time, overstepping. */
7541
1edb66d8 7542 if (tp->stop_pc () != tp->prev_pc)
2ac7589c
PA
7543 {
7544 ptid_t resume_ptid;
7545
1eb8556f
SM
7546 infrun_debug_printf ("expected thread advanced also (%s -> %s)",
7547 paddress (target_gdbarch (), tp->prev_pc),
1edb66d8 7548 paddress (target_gdbarch (), tp->stop_pc ()));
2ac7589c
PA
7549
7550 /* Clear the info of the previous step-over, as it's no longer
7551 valid (if the thread was trying to step over a breakpoint, it
7552 has already succeeded). It's what keep_going would do too,
7553 if we called it. Do this before trying to insert the sss
7554 breakpoint, otherwise if we were previously trying to step
7555 over this exact address in another thread, the breakpoint is
7556 skipped. */
7557 clear_step_over_info ();
7558 tp->control.trap_expected = 0;
7559
7560 insert_single_step_breakpoint (get_frame_arch (frame),
7561 get_frame_address_space (frame),
1edb66d8 7562 tp->stop_pc ());
2ac7589c 7563
7846f3aa 7564 tp->set_resumed (true);
fbea99ea 7565 resume_ptid = internal_resume_ptid (tp->control.stepping_command);
c4464ade 7566 do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
2ac7589c
PA
7567 }
7568 else
7569 {
1eb8556f 7570 infrun_debug_printf ("expected thread still hasn't advanced");
2ac7589c
PA
7571
7572 keep_going_pass_signal (ecs);
7573 }
c4464ade
SM
7574
7575 return true;
c447ac0b
PA
7576}
7577
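/* Editor's note: illustrative sketch, not part of infrun.c.  Once the
   thread above is known to be alive, the choice reduces to a PC
   comparison: if the PC moved while no event was reported, plant a
   single-step breakpoint at the new PC and re-poll; otherwise resume
   as before.  Names are hypothetical.  */
#if 0
#include <stdint.h>

enum restep_action
{
  RESTEP_BREAK_AT_NEW_PC,  /* PC moved: breakpoint the new PC, re-poll.  */
  RESTEP_KEEP_GOING        /* PC unchanged: resume as before.  */
};

static enum restep_action
choose_restep_action (uint64_t prev_pc, uint64_t stop_pc)
{
  return stop_pc != prev_pc ? RESTEP_BREAK_AT_NEW_PC : RESTEP_KEEP_GOING;
}
#endif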
8b061563
PA
7578/* Is thread TP in the middle of (software or hardware)
7579 single-stepping? (Note the result of this function must never be
7580 passed directly as target_resume's STEP parameter.) */
104c1213 7581
c4464ade 7582static bool
b3444185 7583currently_stepping (struct thread_info *tp)
a7212384 7584{
8358c15c
JK
7585 return ((tp->control.step_range_end
7586 && tp->control.step_resume_breakpoint == NULL)
7587 || tp->control.trap_expected
af48d08f 7588 || tp->stepped_breakpoint
8358c15c 7589 || bpstat_should_step ());
a7212384
UW
7590}
7591
b2175913
MS
7592/* Inferior has stepped into a subroutine call with source code that
7593 we should not step over. Step to the first line of code in
7594 it. */
c2c6d25f
JM
7595
7596static void
568d6575
UW
7597handle_step_into_function (struct gdbarch *gdbarch,
7598 struct execution_control_state *ecs)
c2c6d25f 7599{
7e324e48
GB
7600 fill_in_stop_func (gdbarch, ecs);
7601
f2ffa92b 7602 compunit_symtab *cust
1edb66d8 7603 = find_pc_compunit_symtab (ecs->event_thread->stop_pc ());
43f3e411 7604 if (cust != NULL && compunit_language (cust) != language_asm)
46a62268
YQ
7605 ecs->stop_func_start
7606 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
c2c6d25f 7607
51abb421 7608 symtab_and_line stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
c2c6d25f
JM
7609 /* Use the step_resume_break to step until the end of the prologue,
7610 even if that involves jumps (as it seems to on the vax under
7611 4.2). */
7612 /* If the prologue ends in the middle of a source line, continue to
7613 the end of that source line (if it is still within the function).
7614 Otherwise, just go to end of prologue. */
2afb61aa
PA
7615 if (stop_func_sal.end
7616 && stop_func_sal.pc != ecs->stop_func_start
7617 && stop_func_sal.end < ecs->stop_func_end)
7618 ecs->stop_func_start = stop_func_sal.end;
c2c6d25f 7619
2dbd5e30
KB
7620 /* Architectures which require breakpoint adjustment might not be able
7621 to place a breakpoint at the computed address. If so, the test
7622 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
7623 ecs->stop_func_start to an address at which a breakpoint may be
7624 legitimately placed.
8fb3e588 7625
2dbd5e30
KB
7626 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
7627 made, GDB will enter an infinite loop when stepping through
7628 optimized code consisting of VLIW instructions which contain
7629 subinstructions corresponding to different source lines. On
7630 FR-V, it's not permitted to place a breakpoint on any but the
7631 first subinstruction of a VLIW instruction. When a breakpoint is
7632 set, GDB will adjust the breakpoint address to the beginning of
7633 the VLIW instruction. Thus, we need to make the corresponding
7634 adjustment here when computing the stop address. */
8fb3e588 7635
568d6575 7636 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
2dbd5e30
KB
7637 {
7638 ecs->stop_func_start
568d6575 7639 = gdbarch_adjust_breakpoint_address (gdbarch,
8fb3e588 7640 ecs->stop_func_start);
2dbd5e30
KB
7641 }
7642
1edb66d8 7643 if (ecs->stop_func_start == ecs->event_thread->stop_pc ())
c2c6d25f
JM
7644 {
7645 /* We are already there: stop now. */
bdc36728 7646 end_stepping_range (ecs);
c2c6d25f
JM
7647 return;
7648 }
7649 else
7650 {
7651 /* Put the step-breakpoint there and go until there. */
51abb421 7652 symtab_and_line sr_sal;
c2c6d25f
JM
7653 sr_sal.pc = ecs->stop_func_start;
7654 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
6c95b8df 7655 sr_sal.pspace = get_frame_program_space (get_current_frame ());
44cbf7b5 7656
c2c6d25f 7657 /* Do not specify what the fp should be when we stop since on
dda83cd7
SM
7658 some machines the prologue is where the new fp value is
7659 established. */
a6d9a66e 7660 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
c2c6d25f
JM
7661
7662 /* And make sure stepping stops right away then. */
16c381f0 7663 ecs->event_thread->control.step_range_end
dda83cd7 7664 = ecs->event_thread->control.step_range_start;
c2c6d25f
JM
7665 }
7666 keep_going (ecs);
7667}
d4f3574e 7668
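/* Editor's note: illustrative sketch, not part of infrun.c.  It
   isolates the prologue-skip rule used above: if the prologue ends in
   the middle of a source line, run to the end of that line, provided
   the end is still inside the function.  All names are hypothetical;
   LINE_START/LINE_END correspond to the sal found at the
   post-prologue address.  */
#if 0
#include <stdint.h>

static uint64_t
adjust_past_prologue (uint64_t post_prologue_pc, uint64_t func_end,
		      uint64_t line_start, uint64_t line_end)
{
  if (line_end != 0
      && line_start != post_prologue_pc
      && line_end < func_end)
    return line_end;
  return post_prologue_pc;
}
#endif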
b2175913
MS
7669/* Inferior has stepped backward into a subroutine call with source
7670 code that we should not step over. Step to the beginning of the
7671 last line of code in it. */
7672
7673static void
568d6575
UW
7674handle_step_into_function_backward (struct gdbarch *gdbarch,
7675 struct execution_control_state *ecs)
b2175913 7676{
43f3e411 7677 struct compunit_symtab *cust;
167e4384 7678 struct symtab_and_line stop_func_sal;
b2175913 7679
7e324e48
GB
7680 fill_in_stop_func (gdbarch, ecs);
7681
1edb66d8 7682 cust = find_pc_compunit_symtab (ecs->event_thread->stop_pc ());
43f3e411 7683 if (cust != NULL && compunit_language (cust) != language_asm)
46a62268
YQ
7684 ecs->stop_func_start
7685 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
b2175913 7686
1edb66d8 7687 stop_func_sal = find_pc_line (ecs->event_thread->stop_pc (), 0);
b2175913
MS
7688
7689 /* OK, we're just going to keep stepping here. */
1edb66d8 7690 if (stop_func_sal.pc == ecs->event_thread->stop_pc ())
b2175913
MS
7691 {
7692 /* We're there already. Just stop stepping now. */
bdc36728 7693 end_stepping_range (ecs);
b2175913
MS
7694 }
7695 else
7696 {
7697 /* Else just reset the step range and keep going.
7698 No step-resume breakpoint, they don't work for
7699 epilogues, which can have multiple entry paths. */
16c381f0
JK
7700 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
7701 ecs->event_thread->control.step_range_end = stop_func_sal.end;
b2175913
MS
7702 keep_going (ecs);
7703 }
7704 return;
7705}
7706
d3169d93 7707/* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
44cbf7b5
AC
7708 This is used both to skip over functions and to skip over code. */
7709
7710static void
2c03e5be
PA
7711insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
7712 struct symtab_and_line sr_sal,
7713 struct frame_id sr_id,
7714 enum bptype sr_type)
44cbf7b5 7715{
611c83ae
PA
7716 /* There should never be more than one step-resume or longjmp-resume
7717 breakpoint per thread, so we should never be setting a new
44cbf7b5 7718 step_resume_breakpoint when one is already active. */
8358c15c 7719 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
2c03e5be 7720 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
d3169d93 7721
1eb8556f
SM
7722 infrun_debug_printf ("inserting step-resume breakpoint at %s",
7723 paddress (gdbarch, sr_sal.pc));
d3169d93 7724
8358c15c 7725 inferior_thread ()->control.step_resume_breakpoint
454dafbd 7726 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type).release ();
2c03e5be
PA
7727}
7728
9da8c2a0 7729void
2c03e5be
PA
7730insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
7731 struct symtab_and_line sr_sal,
7732 struct frame_id sr_id)
7733{
7734 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
7735 sr_sal, sr_id,
7736 bp_step_resume);
44cbf7b5 7737}
7ce450bd 7738
2c03e5be
PA
7739/* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
7740 This is used to skip a potential signal handler.
7ce450bd 7741
14e60db5
DJ
7742 This is called with the interrupted function's frame. The signal
7743 handler, when it returns, will resume the interrupted function at
7744 RETURN_FRAME.pc. */
d303a6c7
AC
7745
7746static void
2c03e5be 7747insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
d303a6c7 7748{
f4c1edd8 7749 gdb_assert (return_frame != NULL);
d303a6c7 7750
51abb421
PA
7751 struct gdbarch *gdbarch = get_frame_arch (return_frame);
7752
7753 symtab_and_line sr_sal;
568d6575 7754 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
d303a6c7 7755 sr_sal.section = find_pc_overlay (sr_sal.pc);
6c95b8df 7756 sr_sal.pspace = get_frame_program_space (return_frame);
d303a6c7 7757
2c03e5be
PA
7758 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
7759 get_stack_frame_id (return_frame),
7760 bp_hp_step_resume);
d303a6c7
AC
7761}
7762
2c03e5be
PA
7763/* Insert a "step-resume breakpoint" at the previous frame's PC. This
7764 is used to skip a function after stepping into it (for "next" or if
7765 the called function has no debugging information).
14e60db5
DJ
7766
7767 The current function has almost always been reached by single
7768 stepping a call or return instruction. NEXT_FRAME belongs to the
7769 current function, and the breakpoint will be set at the caller's
7770 resume address.
7771
7772 This is a separate function rather than reusing
2c03e5be 7773 insert_hp_step_resume_breakpoint_at_frame in order to avoid
14e60db5 7774 get_prev_frame, which may stop prematurely (see the implementation
c7ce8faa 7775 of frame_unwind_caller_id for an example). */
14e60db5
DJ
7776
7777static void
7778insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
7779{
14e60db5
DJ
7780 /* We shouldn't have gotten here if we don't know where the call site
7781 is. */
c7ce8faa 7782 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
14e60db5 7783
51abb421 7784 struct gdbarch *gdbarch = frame_unwind_caller_arch (next_frame);
14e60db5 7785
51abb421 7786 symtab_and_line sr_sal;
c7ce8faa
DJ
7787 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
7788 frame_unwind_caller_pc (next_frame));
14e60db5 7789 sr_sal.section = find_pc_overlay (sr_sal.pc);
6c95b8df 7790 sr_sal.pspace = frame_unwind_program_space (next_frame);
14e60db5 7791
a6d9a66e 7792 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
c7ce8faa 7793 frame_unwind_caller_id (next_frame));
14e60db5
DJ
7794}
7795
611c83ae
PA
7796/* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
7797 new breakpoint at the target of a jmp_buf. The handling of
7798 longjmp-resume uses the same mechanisms used for handling
7799 "step-resume" breakpoints. */
7800
7801static void
a6d9a66e 7802insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
611c83ae 7803{
e81a37f7
TT
7804 /* There should never be more than one longjmp-resume breakpoint per
7805 thread, so we should never be setting a new
611c83ae 7806 longjmp_resume_breakpoint when one is already active. */
e81a37f7 7807 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
611c83ae 7808
1eb8556f
SM
7809 infrun_debug_printf ("inserting longjmp-resume breakpoint at %s",
7810 paddress (gdbarch, pc));
611c83ae 7811
e81a37f7 7812 inferior_thread ()->control.exception_resume_breakpoint =
454dafbd 7813 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume).release ();
611c83ae
PA
7814}
7815
186c406b
TT
7816/* Insert an exception resume breakpoint. TP is the thread throwing
7817 the exception. The block B is the block of the unwinder debug hook
7818 function. FRAME is the frame corresponding to the call to this
7819 function. SYM is the symbol of the function argument holding the
7820 target PC of the exception. */
7821
7822static void
7823insert_exception_resume_breakpoint (struct thread_info *tp,
3977b71f 7824 const struct block *b,
186c406b
TT
7825 struct frame_info *frame,
7826 struct symbol *sym)
7827{
a70b8144 7828 try
186c406b 7829 {
63e43d3a 7830 struct block_symbol vsym;
186c406b
TT
7831 struct value *value;
7832 CORE_ADDR handler;
7833 struct breakpoint *bp;
7834
987012b8 7835 vsym = lookup_symbol_search_name (sym->search_name (),
de63c46b 7836 b, VAR_DOMAIN);
63e43d3a 7837 value = read_var_value (vsym.symbol, vsym.block, frame);
186c406b
TT
7838 /* If the value was optimized out, revert to the old behavior. */
7839 if (! value_optimized_out (value))
7840 {
7841 handler = value_as_address (value);
7842
1eb8556f
SM
7843 infrun_debug_printf ("exception resume at %lx",
7844 (unsigned long) handler);
186c406b
TT
7845
7846 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
454dafbd
TT
7847 handler,
7848 bp_exception_resume).release ();
c70a6932
JK
7849
7850 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
7851 frame = NULL;
7852
5d5658a1 7853 bp->thread = tp->global_num;
186c406b
TT
7854 inferior_thread ()->control.exception_resume_breakpoint = bp;
7855 }
7856 }
230d2906 7857 catch (const gdb_exception_error &e)
492d29ea
PA
7858 {
7859 /* We want to ignore errors here. */
7860 }
186c406b
TT
7861}
7862
28106bc2
SDJ
7863/* A helper for check_exception_resume that sets an
7864 exception-breakpoint based on a SystemTap probe. */
7865
7866static void
7867insert_exception_resume_from_probe (struct thread_info *tp,
729662a5 7868 const struct bound_probe *probe,
28106bc2
SDJ
7869 struct frame_info *frame)
7870{
7871 struct value *arg_value;
7872 CORE_ADDR handler;
7873 struct breakpoint *bp;
7874
7875 arg_value = probe_safe_evaluate_at_pc (frame, 1);
7876 if (!arg_value)
7877 return;
7878
7879 handler = value_as_address (arg_value);
7880
1eb8556f
SM
7881 infrun_debug_printf ("exception resume at %s",
7882 paddress (probe->objfile->arch (), handler));
28106bc2
SDJ
7883
7884 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
454dafbd 7885 handler, bp_exception_resume).release ();
5d5658a1 7886 bp->thread = tp->global_num;
28106bc2
SDJ
7887 inferior_thread ()->control.exception_resume_breakpoint = bp;
7888}
7889
186c406b
TT
7890/* This is called when an exception has been intercepted. Check to
7891 see whether the exception's destination is of interest, and if so,
7892 set an exception resume breakpoint there. */
7893
7894static void
7895check_exception_resume (struct execution_control_state *ecs,
28106bc2 7896 struct frame_info *frame)
186c406b 7897{
729662a5 7898 struct bound_probe probe;
28106bc2
SDJ
7899 struct symbol *func;
7900
7901 /* First see if this exception unwinding breakpoint was set via a
7902 SystemTap probe point. If so, the probe has two arguments: the
7903 CFA and the HANDLER. We ignore the CFA, extract the handler, and
7904 set a breakpoint there. */
6bac7473 7905 probe = find_probe_by_pc (get_frame_pc (frame));
935676c9 7906 if (probe.prob)
28106bc2 7907 {
729662a5 7908 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
28106bc2
SDJ
7909 return;
7910 }
7911
7912 func = get_frame_function (frame);
7913 if (!func)
7914 return;
186c406b 7915
a70b8144 7916 try
186c406b 7917 {
3977b71f 7918 const struct block *b;
8157b174 7919 struct block_iterator iter;
186c406b
TT
7920 struct symbol *sym;
7921 int argno = 0;
7922
7923 /* The exception breakpoint is a thread-specific breakpoint on
7924 the unwinder's debug hook, declared as:
7925
7926 void _Unwind_DebugHook (void *cfa, void *handler);
7927
7928 The CFA argument indicates the frame to which control is
7929 about to be transferred. HANDLER is the destination PC.
7930
7931 We ignore the CFA and set a temporary breakpoint at HANDLER.
7932 This is not extremely efficient but it avoids issues in gdb
7933 with computing the DWARF CFA, and it also works even in weird
7934 cases such as throwing an exception from inside a signal
7935 handler. */
7936
7937 b = SYMBOL_BLOCK_VALUE (func);
7938 ALL_BLOCK_SYMBOLS (b, iter, sym)
7939 {
7940 if (!SYMBOL_IS_ARGUMENT (sym))
7941 continue;
7942
7943 if (argno == 0)
7944 ++argno;
7945 else
7946 {
7947 insert_exception_resume_breakpoint (ecs->event_thread,
7948 b, frame, sym);
7949 break;
7950 }
7951 }
7952 }
230d2906 7953 catch (const gdb_exception_error &e)
492d29ea
PA
7954 {
7955 }
186c406b
TT
7956}
7957
104c1213 7958static void
22bcd14b 7959stop_waiting (struct execution_control_state *ecs)
104c1213 7960{
1eb8556f 7961 infrun_debug_printf ("stop_waiting");
527159b7 7962
cd0fc7c3
SS
7963 /* Let callers know we don't want to wait for the inferior anymore. */
7964 ecs->wait_some_more = 0;
fbea99ea 7965
53cccef1 7966 /* If all-stop, but there exists a non-stop target, stop all
fbea99ea 7967 threads now that we're presenting the stop to the user. */
53cccef1 7968 if (!non_stop && exists_non_stop_target ())
fbea99ea 7969 stop_all_threads ();
cd0fc7c3
SS
7970}
7971
4d9d9d04
PA
7972/* Like keep_going, but passes the signal to the inferior, even if the
7973 signal is set to nopass. */
d4f3574e
SS
7974
7975static void
4d9d9d04 7976keep_going_pass_signal (struct execution_control_state *ecs)
d4f3574e 7977{
d7e15655 7978 gdb_assert (ecs->event_thread->ptid == inferior_ptid);
7846f3aa 7979 gdb_assert (!ecs->event_thread->resumed ());
4d9d9d04 7980
d4f3574e 7981 /* Save the pc before execution, to compare with pc after stop. */
fb14de7b 7982 ecs->event_thread->prev_pc
fc75c28b 7983 = regcache_read_pc_protected (get_thread_regcache (ecs->event_thread));
d4f3574e 7984
4d9d9d04 7985 if (ecs->event_thread->control.trap_expected)
d4f3574e 7986 {
4d9d9d04
PA
7987 struct thread_info *tp = ecs->event_thread;
7988
1eb8556f
SM
7989 infrun_debug_printf ("%s has trap_expected set, "
7990 "resuming to collect trap",
7991 target_pid_to_str (tp->ptid).c_str ());
4d9d9d04 7992
a9ba6bae
PA
7993 /* We haven't yet gotten our trap, and either: intercepted a
7994 non-signal event (e.g., a fork); or took a signal which we
7995 are supposed to pass through to the inferior. Simply
7996 continue. */
1edb66d8 7997 resume (ecs->event_thread->stop_signal ());
d4f3574e 7998 }
372316f1
PA
7999 else if (step_over_info_valid_p ())
8000 {
8001 /* Another thread is stepping over a breakpoint in-line. If
8002 this thread needs a step-over too, queue the request. In
8003 either case, this resume must be deferred for later. */
8004 struct thread_info *tp = ecs->event_thread;
8005
8006 if (ecs->hit_singlestep_breakpoint
8007 || thread_still_needs_step_over (tp))
8008 {
1eb8556f
SM
8009 infrun_debug_printf ("step-over already in progress: "
8010 "step-over for %s deferred",
8011 target_pid_to_str (tp->ptid).c_str ());
28d5518b 8012 global_thread_step_over_chain_enqueue (tp);
372316f1
PA
8013 }
8014 else
8015 {
1eb8556f
SM
8016 infrun_debug_printf ("step-over in progress: resume of %s deferred",
8017 target_pid_to_str (tp->ptid).c_str ());
372316f1 8018 }
372316f1 8019 }
d4f3574e
SS
8020 else
8021 {
31e77af2 8022 struct regcache *regcache = get_current_regcache ();
963f9c80
PA
8023 int remove_bp;
8024 int remove_wps;
8d297bbf 8025 step_over_what step_what;
31e77af2 8026
d4f3574e 8027 /* Either the trap was not expected, but we are continuing
a9ba6bae
PA
8028 anyway (if we got a signal, the user asked it be passed to
8029 the child)
8030 -- or --
8031 We got our expected trap, but decided we should resume from
8032 it.
d4f3574e 8033
a9ba6bae 8034 We're going to run this baby now!
d4f3574e 8035
c36b740a
VP
8036 Note that insert_breakpoints won't try to re-insert
8037 already inserted breakpoints. Therefore, we don't
8038 care if breakpoints were already inserted, or not. */
a9ba6bae 8039
31e77af2
PA
8040 /* If we need to step over a breakpoint, and we're not using
8041 displaced stepping to do so, insert all breakpoints
8042 (watchpoints, etc.) but the one we're stepping over, step one
8043 instruction, and then re-insert the breakpoint when that step
8044 is finished. */
963f9c80 8045
6c4cfb24
PA
8046 step_what = thread_still_needs_step_over (ecs->event_thread);
8047
963f9c80 8048 remove_bp = (ecs->hit_singlestep_breakpoint
6c4cfb24
PA
8049 || (step_what & STEP_OVER_BREAKPOINT));
8050 remove_wps = (step_what & STEP_OVER_WATCHPOINT);
963f9c80 8051
cb71640d
PA
8052 /* We can't use displaced stepping if we need to step past a
8053 watchpoint. The instruction copied to the scratch pad would
8054 still trigger the watchpoint. */
8055 if (remove_bp
3fc8eb30 8056 && (remove_wps || !use_displaced_stepping (ecs->event_thread)))
45e8c884 8057 {
a01bda52 8058 set_step_over_info (regcache->aspace (),
21edc42f
YQ
8059 regcache_read_pc (regcache), remove_wps,
8060 ecs->event_thread->global_num);
45e8c884 8061 }
963f9c80 8062 else if (remove_wps)
21edc42f 8063 set_step_over_info (NULL, 0, remove_wps, -1);
372316f1
PA
8064
8065 /* If we now need to do an in-line step-over, we need to stop
8066 all other threads. Note this must be done before
8067 insert_breakpoints below, because that removes the breakpoint
8068 we're about to step over, otherwise other threads could miss
8069 it. */
fbea99ea 8070 if (step_over_info_valid_p () && target_is_non_stop_p ())
372316f1 8071 stop_all_threads ();
abbb1732 8072
31e77af2 8073 /* Stop stepping if inserting breakpoints fails. */
a70b8144 8074 try
31e77af2
PA
8075 {
8076 insert_breakpoints ();
8077 }
230d2906 8078 catch (const gdb_exception_error &e)
31e77af2
PA
8079 {
8080 exception_print (gdb_stderr, e);
22bcd14b 8081 stop_waiting (ecs);
bdf2a94a 8082 clear_step_over_info ();
31e77af2 8083 return;
d4f3574e
SS
8084 }
8085
963f9c80 8086 ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);
d4f3574e 8087
1edb66d8 8088 resume (ecs->event_thread->stop_signal ());
d4f3574e
SS
8089 }
8090
488f131b 8091 prepare_to_wait (ecs);
d4f3574e
SS
8092}
8093
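/* Editor's note: illustrative sketch, not part of infrun.c.  It
   restates the flag computation in the final branch above: an in-line
   step-over (stop other threads, pull the breakpoint out, single-step)
   is required when a breakpoint must be stepped over and displaced
   stepping cannot be used, or when a watchpoint must be stepped past,
   since a displaced copy of the instruction would still trigger the
   watchpoint.  Names and macros are hypothetical stand-ins.  */
#if 0
#include <stdbool.h>

#define TOY_STEP_OVER_BREAKPOINT 0x1
#define TOY_STEP_OVER_WATCHPOINT 0x2

static bool
needs_inline_step_over (unsigned step_what, bool hit_sss_bp,
			bool displaced_ok)
{
  bool remove_bp = hit_sss_bp || (step_what & TOY_STEP_OVER_BREAKPOINT) != 0;
  bool remove_wps = (step_what & TOY_STEP_OVER_WATCHPOINT) != 0;

  return remove_bp && (remove_wps || !displaced_ok);
}
#endif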
4d9d9d04
PA
8094/* Called when we should continue running the inferior, because the
8095 current event doesn't cause a user visible stop. This does the
8096 resuming part; waiting for the next event is done elsewhere. */
8097
8098static void
8099keep_going (struct execution_control_state *ecs)
8100{
8101 if (ecs->event_thread->control.trap_expected
1edb66d8 8102 && ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
4d9d9d04
PA
8103 ecs->event_thread->control.trap_expected = 0;
8104
1edb66d8
SM
8105 if (!signal_program[ecs->event_thread->stop_signal ()])
8106 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
4d9d9d04
PA
8107 keep_going_pass_signal (ecs);
8108}
8109
104c1213
JM
8110/* This function normally comes after a resume, before
8111 handle_inferior_event exits. It takes care of any last bits of
8112 housekeeping, and sets the all-important wait_some_more flag. */
cd0fc7c3 8113
104c1213
JM
8114static void
8115prepare_to_wait (struct execution_control_state *ecs)
cd0fc7c3 8116{
1eb8556f 8117 infrun_debug_printf ("prepare_to_wait");
104c1213 8118
104c1213 8119 ecs->wait_some_more = 1;
0b333c5e 8120
42bd97a6
PA
8121 /* If the target can't async, emulate it by marking the infrun event
8122 handler such that as soon as we get back to the event-loop, we
8123 immediately end up in fetch_inferior_event again calling
8124 target_wait. */
8125 if (!target_can_async_p ())
0b333c5e 8126 mark_infrun_async_event_handler ();
c906108c 8127}
11cf8741 8128
fd664c91 8129/* We are done with the step range of a step/next/si/ni command.
b57bacec 8130 Called once for each n of a "step n" operation. */
fd664c91
PA
8131
8132static void
bdc36728 8133end_stepping_range (struct execution_control_state *ecs)
fd664c91 8134{
bdc36728 8135 ecs->event_thread->control.stop_step = 1;
bdc36728 8136 stop_waiting (ecs);
fd664c91
PA
8137}
8138
33d62d64
JK
8139/* Several print_*_reason functions to print why the inferior has stopped.
8140 We always print something when the inferior exits, or receives a signal.
8141 The rest of the cases are dealt with later on in normal_stop and
8142 print_it_typical. Ideally there should be a call to one of these
8143 print_*_reason functions from handle_inferior_event each time
22bcd14b 8144 stop_waiting is called.
33d62d64 8145
fd664c91
PA
8146 Note that we don't call these directly, instead we delegate that to
8147 the interpreters, through observers. Interpreters then call these
8148 with whatever uiout is right. */
33d62d64 8149
fd664c91
PA
8150void
8151print_end_stepping_range_reason (struct ui_out *uiout)
33d62d64 8152{
fd664c91 8153 /* For CLI-like interpreters, print nothing. */
33d62d64 8154
112e8700 8155 if (uiout->is_mi_like_p ())
fd664c91 8156 {
112e8700 8157 uiout->field_string ("reason",
fd664c91
PA
8158 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
8159 }
8160}
33d62d64 8161
fd664c91
PA
8162void
8163print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
11cf8741 8164{
33d62d64 8165 annotate_signalled ();
112e8700
SM
8166 if (uiout->is_mi_like_p ())
8167 uiout->field_string
8168 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
8169 uiout->text ("\nProgram terminated with signal ");
33d62d64 8170 annotate_signal_name ();
112e8700 8171 uiout->field_string ("signal-name",
2ea28649 8172 gdb_signal_to_name (siggnal));
33d62d64 8173 annotate_signal_name_end ();
112e8700 8174 uiout->text (", ");
33d62d64 8175 annotate_signal_string ();
112e8700 8176 uiout->field_string ("signal-meaning",
2ea28649 8177 gdb_signal_to_string (siggnal));
33d62d64 8178 annotate_signal_string_end ();
112e8700
SM
8179 uiout->text (".\n");
8180 uiout->text ("The program no longer exists.\n");
33d62d64
JK
8181}
8182
fd664c91
PA
8183void
8184print_exited_reason (struct ui_out *uiout, int exitstatus)
33d62d64 8185{
fda326dd 8186 struct inferior *inf = current_inferior ();
a068643d 8187 std::string pidstr = target_pid_to_str (ptid_t (inf->pid));
fda326dd 8188
33d62d64
JK
8189 annotate_exited (exitstatus);
8190 if (exitstatus)
8191 {
112e8700
SM
8192 if (uiout->is_mi_like_p ())
8193 uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED));
6a831f06
PA
8194 std::string exit_code_str
8195 = string_printf ("0%o", (unsigned int) exitstatus);
8196 uiout->message ("[Inferior %s (%s) exited with code %pF]\n",
8197 plongest (inf->num), pidstr.c_str (),
8198 string_field ("exit-code", exit_code_str.c_str ()));
33d62d64
JK
8199 }
8200 else
11cf8741 8201 {
112e8700
SM
8202 if (uiout->is_mi_like_p ())
8203 uiout->field_string
8204 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
6a831f06
PA
8205 uiout->message ("[Inferior %s (%s) exited normally]\n",
8206 plongest (inf->num), pidstr.c_str ());
33d62d64 8207 }
33d62d64
JK
8208}
8209
fd664c91
PA
8210void
8211print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
33d62d64 8212{
f303dbd6
PA
8213 struct thread_info *thr = inferior_thread ();
8214
33d62d64
JK
8215 annotate_signal ();
8216
112e8700 8217 if (uiout->is_mi_like_p ())
f303dbd6
PA
8218 ;
8219 else if (show_thread_that_caused_stop ())
33d62d64 8220 {
f303dbd6 8221 const char *name;
33d62d64 8222
112e8700 8223 uiout->text ("\nThread ");
33eca680 8224 uiout->field_string ("thread-id", print_thread_id (thr));
f303dbd6
PA
8225
8226 name = thr->name != NULL ? thr->name : target_thread_name (thr);
8227 if (name != NULL)
8228 {
112e8700 8229 uiout->text (" \"");
33eca680 8230 uiout->field_string ("name", name);
112e8700 8231 uiout->text ("\"");
f303dbd6 8232 }
33d62d64 8233 }
f303dbd6 8234 else
112e8700 8235 uiout->text ("\nProgram");
f303dbd6 8236
112e8700
SM
8237 if (siggnal == GDB_SIGNAL_0 && !uiout->is_mi_like_p ())
8238 uiout->text (" stopped");
33d62d64
JK
8239 else
8240 {
112e8700 8241 uiout->text (" received signal ");
8b93c638 8242 annotate_signal_name ();
112e8700
SM
8243 if (uiout->is_mi_like_p ())
8244 uiout->field_string
8245 ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
8246 uiout->field_string ("signal-name", gdb_signal_to_name (siggnal));
8b93c638 8247 annotate_signal_name_end ();
112e8700 8248 uiout->text (", ");
8b93c638 8249 annotate_signal_string ();
112e8700 8250 uiout->field_string ("signal-meaning", gdb_signal_to_string (siggnal));
012b3a21 8251
272bb05c
JB
8252 struct regcache *regcache = get_current_regcache ();
8253 struct gdbarch *gdbarch = regcache->arch ();
8254 if (gdbarch_report_signal_info_p (gdbarch))
8255 gdbarch_report_signal_info (gdbarch, uiout, siggnal);
8256
8b93c638 8257 annotate_signal_string_end ();
33d62d64 8258 }
112e8700 8259 uiout->text (".\n");
33d62d64 8260}
252fbfc8 8261
fd664c91
PA
8262void
8263print_no_history_reason (struct ui_out *uiout)
33d62d64 8264{
112e8700 8265 uiout->text ("\nNo more reverse-execution history.\n");
11cf8741 8266}
43ff13b4 8267
0c7e1a46
PA
8268/* Print current location without a level number, if we have changed
8269 functions or hit a breakpoint. Print source line if we have one.
8270 bpstat_print contains the logic deciding in detail what to print,
8271 based on the event(s) that just occurred. */
8272
243a9253
PA
8273static void
8274print_stop_location (struct target_waitstatus *ws)
0c7e1a46
PA
8275{
8276 int bpstat_ret;
f486487f 8277 enum print_what source_flag;
0c7e1a46
PA
8278 int do_frame_printing = 1;
8279 struct thread_info *tp = inferior_thread ();
8280
8281 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
8282 switch (bpstat_ret)
8283 {
8284 case PRINT_UNKNOWN:
8285 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
8286 should) carry around the function and does (or should) use
8287 that when doing a frame comparison. */
8288 if (tp->control.stop_step
8289 && frame_id_eq (tp->control.step_frame_id,
8290 get_frame_id (get_current_frame ()))
f2ffa92b 8291 && (tp->control.step_start_function
1edb66d8 8292 == find_pc_function (tp->stop_pc ())))
0c7e1a46
PA
8293 {
8294 /* Finished step, just print source line. */
8295 source_flag = SRC_LINE;
8296 }
8297 else
8298 {
8299 /* Print location and source line. */
8300 source_flag = SRC_AND_LOC;
8301 }
8302 break;
8303 case PRINT_SRC_AND_LOC:
8304 /* Print location and source line. */
8305 source_flag = SRC_AND_LOC;
8306 break;
8307 case PRINT_SRC_ONLY:
8308 source_flag = SRC_LINE;
8309 break;
8310 case PRINT_NOTHING:
8311 /* Something bogus. */
8312 source_flag = SRC_LINE;
8313 do_frame_printing = 0;
8314 break;
8315 default:
8316 internal_error (__FILE__, __LINE__, _("Unknown value."));
8317 }
8318
8319 /* The behavior of this routine with respect to the source
8320 flag is:
8321 SRC_LINE: Print only source line
8322 LOCATION: Print only location
8323 SRC_AND_LOC: Print location and source line. */
8324 if (do_frame_printing)
8325 print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
243a9253
PA
8326}
8327
243a9253
PA
8328/* See infrun.h. */
8329
8330void
4c7d57e7 8331print_stop_event (struct ui_out *uiout, bool displays)
243a9253 8332{
243a9253 8333 struct target_waitstatus last;
243a9253
PA
8334 struct thread_info *tp;
8335
5b6d1e4f 8336 get_last_target_status (nullptr, nullptr, &last);
243a9253 8337
67ad9399
TT
8338 {
8339 scoped_restore save_uiout = make_scoped_restore (&current_uiout, uiout);
0c7e1a46 8340
67ad9399 8341 print_stop_location (&last);
243a9253 8342
67ad9399 8343 /* Display the auto-display expressions. */
4c7d57e7
TT
8344 if (displays)
8345 do_displays ();
67ad9399 8346 }
243a9253
PA
8347
8348 tp = inferior_thread ();
8349 if (tp->thread_fsm != NULL
46e3ed7f 8350 && tp->thread_fsm->finished_p ())
243a9253
PA
8351 {
8352 struct return_value_info *rv;
8353
46e3ed7f 8354 rv = tp->thread_fsm->return_value ();
243a9253
PA
8355 if (rv != NULL)
8356 print_return_value (uiout, rv);
8357 }
0c7e1a46
PA
8358}
8359
388a7084
PA
8360/* See infrun.h. */
8361
8362void
8363maybe_remove_breakpoints (void)
8364{
55f6301a 8365 if (!breakpoints_should_be_inserted_now () && target_has_execution ())
388a7084
PA
8366 {
8367 if (remove_breakpoints ())
8368 {
223ffa71 8369 target_terminal::ours_for_output ();
388a7084
PA
8370 printf_filtered (_("Cannot remove breakpoints because "
8371 "program is no longer writable.\nFurther "
8372 "execution is probably impossible.\n"));
8373 }
8374 }
8375}
8376
4c2f2a79
PA
8377/* The execution context that just caused a normal stop. */
8378
8379struct stop_context
8380{
2d844eaf 8381 stop_context ();
2d844eaf
TT
8382
8383 DISABLE_COPY_AND_ASSIGN (stop_context);
8384
8385 bool changed () const;
8386
4c2f2a79
PA
8387 /* The stop ID. */
8388 ULONGEST stop_id;
c906108c 8389
4c2f2a79 8390 /* The event PTID. */
c906108c 8391
4c2f2a79
PA
8392 ptid_t ptid;
8393
8394 /* If stopped for a thread event, this is the thread that caused the
8395 stop. */
d634cd0b 8396 thread_info_ref thread;
4c2f2a79
PA
8397
8398 /* The inferior that caused the stop. */
8399 int inf_num;
8400};
8401
2d844eaf 8402/* Initializes a new stop context. If stopped for a thread event, this
4c2f2a79
PA
8403 takes a strong reference to the thread. */
8404
2d844eaf 8405stop_context::stop_context ()
4c2f2a79 8406{
2d844eaf
TT
8407 stop_id = get_stop_id ();
8408 ptid = inferior_ptid;
8409 inf_num = current_inferior ()->num;
4c2f2a79 8410
d7e15655 8411 if (inferior_ptid != null_ptid)
4c2f2a79
PA
8412 {
8413 /* Take a strong reference so that the thread can't be deleted
8414 yet. */
d634cd0b 8415 thread = thread_info_ref::new_reference (inferior_thread ());
4c2f2a79 8416 }
4c2f2a79
PA
8417}
8418
8419/* Return true if the current context no longer matches the saved stop
8420 context. */
8421
2d844eaf
TT
8422bool
8423stop_context::changed () const
8424{
8425 if (ptid != inferior_ptid)
8426 return true;
8427 if (inf_num != current_inferior ()->num)
8428 return true;
8429 if (thread != NULL && thread->state != THREAD_STOPPED)
8430 return true;
8431 if (get_stop_id () != stop_id)
8432 return true;
8433 return false;
4c2f2a79
PA
8434}
8435
8436/* See infrun.h. */
8437
8438int
96baa820 8439normal_stop (void)
c906108c 8440{
73b65bb0 8441 struct target_waitstatus last;
73b65bb0 8442
5b6d1e4f 8443 get_last_target_status (nullptr, nullptr, &last);
73b65bb0 8444
4c2f2a79
PA
8445 new_stop_id ();
8446
29f49a6a
PA
8447 /* If an exception is thrown from this point on, make sure to
8448 propagate GDB's knowledge of the executing state to the
8449 frontend/user running state. A QUIT is an easy exception to see
8450 here, so do this before any filtered output. */
731f534f 8451
5b6d1e4f 8452 ptid_t finish_ptid = null_ptid;
731f534f 8453
c35b1492 8454 if (!non_stop)
5b6d1e4f 8455 finish_ptid = minus_one_ptid;
e1316e60
PA
8456 else if (last.kind == TARGET_WAITKIND_SIGNALLED
8457 || last.kind == TARGET_WAITKIND_EXITED)
8458 {
8459 /* On some targets, we may still have live threads in the
8460 inferior when we get a process exit event. E.g., for
8461 "checkpoint", when the current checkpoint/fork exits,
8462 linux-fork.c automatically switches to another fork from
8463 within target_mourn_inferior. */
731f534f 8464 if (inferior_ptid != null_ptid)
5b6d1e4f 8465 finish_ptid = ptid_t (inferior_ptid.pid ());
e1316e60
PA
8466 }
8467 else if (last.kind != TARGET_WAITKIND_NO_RESUMED)
5b6d1e4f
PA
8468 finish_ptid = inferior_ptid;
8469
8470 gdb::optional<scoped_finish_thread_state> maybe_finish_thread_state;
8471 if (finish_ptid != null_ptid)
8472 {
8473 maybe_finish_thread_state.emplace
8474 (user_visible_resume_target (finish_ptid), finish_ptid);
8475 }
29f49a6a 8476
b57bacec
PA
8477 /* As we're presenting a stop, and potentially removing breakpoints,
8478 update the thread list so we can tell whether there are threads
8479 running on the target. With target remote, for example, we can
8480 only learn about new threads when we explicitly update the thread
8481 list. Do this before notifying the interpreters about signal
8482 stops, end of stepping ranges, etc., so that the "new thread"
8483 output is emitted before e.g., "Program received signal FOO",
8484 instead of after. */
8485 update_thread_list ();
8486
8487 if (last.kind == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
1edb66d8 8488 gdb::observers::signal_received.notify (inferior_thread ()->stop_signal ());
b57bacec 8489
c906108c
SS
8490 /* As with the notification of thread events, we want to delay
8491 notifying the user that we've switched thread context until
8492 the inferior actually stops.
8493
73b65bb0
DJ
8494 There's no point in saying anything if the inferior has exited.
8495 Note that SIGNALLED here means "exited with a signal", not
b65dc60b
PA
8496 "received a signal".
8497
8498 Also skip saying anything in non-stop mode. In that mode, as we
8499 don't want GDB to switch threads behind the user's back, to avoid
8500 races where the user is typing a command to apply to thread x,
8501 but GDB switches to thread y before the user finishes entering
8502 the command, fetch_inferior_event installs a cleanup to restore
8503 the current thread back to the thread the user had selected right
8504 after this event is handled, so we're not really switching, only
8505 informing of a stop. */
4f8d22e3 8506 if (!non_stop
731f534f 8507 && previous_inferior_ptid != inferior_ptid
55f6301a 8508 && target_has_execution ()
73b65bb0 8509 && last.kind != TARGET_WAITKIND_SIGNALLED
0e5bf2a8
PA
8510 && last.kind != TARGET_WAITKIND_EXITED
8511 && last.kind != TARGET_WAITKIND_NO_RESUMED)
c906108c 8512 {
0e454242 8513 SWITCH_THRU_ALL_UIS ()
3b12939d 8514 {
223ffa71 8515 target_terminal::ours_for_output ();
3b12939d 8516 printf_filtered (_("[Switching to %s]\n"),
a068643d 8517 target_pid_to_str (inferior_ptid).c_str ());
3b12939d
PA
8518 annotate_thread_changed ();
8519 }
39f77062 8520 previous_inferior_ptid = inferior_ptid;
c906108c 8521 }
c906108c 8522
0e5bf2a8
PA
8523 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
8524 {
0e454242 8525 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
8526 if (current_ui->prompt_state == PROMPT_BLOCKED)
8527 {
223ffa71 8528 target_terminal::ours_for_output ();
3b12939d
PA
8529 printf_filtered (_("No unwaited-for children left.\n"));
8530 }
0e5bf2a8
PA
8531 }
8532
b57bacec 8533 /* Note: this depends on the update_thread_list call above. */
388a7084 8534 maybe_remove_breakpoints ();
c906108c 8535
c906108c
SS
8536 /* If an auto-display called a function and that got a signal,
8537 delete that auto-display to avoid an infinite recursion. */
8538
8539 if (stopped_by_random_signal)
8540 disable_current_display ();
8541
0e454242 8542 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
8543 {
8544 async_enable_stdin ();
8545 }
c906108c 8546
388a7084 8547 /* Let the user/frontend see the threads as stopped. */
731f534f 8548 maybe_finish_thread_state.reset ();
388a7084
PA
8549
8550 /* Select innermost stack frame - i.e., current frame is frame 0,
8551 and current location is based on that. Handle the case where the
8552 dummy call is returning after being stopped. E.g. the dummy call
8553 previously hit a breakpoint. (If the dummy call returns
8554 normally, we won't reach here.) Do this before the stop hook is
8555 run, so that it doesn't get to see the temporary dummy frame,
8556 which is not where we'll present the stop. */
8557 if (has_stack_frames ())
8558 {
8559 if (stop_stack_dummy == STOP_STACK_DUMMY)
8560 {
8561 /* Pop the empty frame that contains the stack dummy. This
8562 also restores inferior state prior to the call (struct
8563 infcall_suspend_state). */
8564 struct frame_info *frame = get_current_frame ();
8565
8566 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
8567 frame_pop (frame);
8568 /* frame_pop calls reinit_frame_cache as the last thing it
8569 does which means there's now no selected frame. */
8570 }
8571
8572 select_frame (get_current_frame ());
8573
8574 /* Set the current source location. */
8575 set_current_sal_from_frame (get_current_frame ());
8576 }
dd7e2d2b
PA
8577
8578 /* Look up the hook_stop and run it (CLI internally handles problem
8579 of stop_command's pre-hook not existing). */
4c2f2a79
PA
8580 if (stop_command != NULL)
8581 {
2d844eaf 8582 stop_context saved_context;
4c2f2a79 8583
a70b8144 8584 try
bf469271
PA
8585 {
8586 execute_cmd_pre_hook (stop_command);
8587 }
230d2906 8588 catch (const gdb_exception &ex)
bf469271
PA
8589 {
8590 exception_fprintf (gdb_stderr, ex,
8591 "Error while running hook_stop:\n");
8592 }
4c2f2a79
PA
8593
8594 /* If the stop hook resumes the target, then there's no point in
8595 trying to notify about the previous stop; its context is
8596 gone. Likewise if the command switches thread or inferior --
8597 the observers would print a stop for the wrong
8598 thread/inferior. */
2d844eaf
TT
8599 if (saved_context.changed ())
8600 return 1;
4c2f2a79 8601 }
dd7e2d2b 8602
388a7084
PA
8603 /* Notify observers about the stop. This is where the interpreters
8604 print the stop event. */
d7e15655 8605 if (inferior_ptid != null_ptid)
76727919 8606 gdb::observers::normal_stop.notify (inferior_thread ()->control.stop_bpstat,
24a7f1b5 8607 stop_print_frame);
388a7084 8608 else
76727919 8609 gdb::observers::normal_stop.notify (NULL, stop_print_frame);
347bddb7 8610
243a9253
PA
8611 annotate_stopped ();
8612
55f6301a 8613 if (target_has_execution ())
48844aa6
PA
8614 {
8615 if (last.kind != TARGET_WAITKIND_SIGNALLED
fe726667
PA
8616 && last.kind != TARGET_WAITKIND_EXITED
8617 && last.kind != TARGET_WAITKIND_NO_RESUMED)
48844aa6
PA
8618 /* Delete the breakpoint we stopped at, if it wants to be deleted.
8619 Delete any breakpoint that is to be deleted at the next stop. */
16c381f0 8620 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
94cc34af 8621 }
6c95b8df
PA
8622
8623 /* Try to get rid of automatically added inferiors that are no
8624 longer needed. Keeping those around slows down things linearly.
8625 Note that this never removes the current inferior. */
8626 prune_inferiors ();
4c2f2a79
PA
8627
8628 return 0;
c906108c 8629}
c906108c 8630\f
c5aa993b 8631int
96baa820 8632signal_stop_state (int signo)
c906108c 8633{
d6b48e9c 8634 return signal_stop[signo];
c906108c
SS
8635}
8636
c5aa993b 8637int
96baa820 8638signal_print_state (int signo)
c906108c
SS
8639{
8640 return signal_print[signo];
8641}
8642
c5aa993b 8643int
96baa820 8644signal_pass_state (int signo)
c906108c
SS
8645{
8646 return signal_program[signo];
8647}
8648
2455069d
UW
8649static void
8650signal_cache_update (int signo)
8651{
8652 if (signo == -1)
8653 {
a493e3e2 8654 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
2455069d
UW
8655 signal_cache_update (signo);
8656
8657 return;
8658 }
8659
8660 signal_pass[signo] = (signal_stop[signo] == 0
8661 && signal_print[signo] == 0
ab04a2af
TT
8662 && signal_program[signo] == 1
8663 && signal_catch[signo] == 0);
2455069d
UW
8664}
8665
488f131b 8666int
7bda5e4a 8667signal_stop_update (int signo, int state)
d4f3574e
SS
8668{
8669 int ret = signal_stop[signo];
abbb1732 8670
d4f3574e 8671 signal_stop[signo] = state;
2455069d 8672 signal_cache_update (signo);
d4f3574e
SS
8673 return ret;
8674}
8675
488f131b 8676int
7bda5e4a 8677signal_print_update (int signo, int state)
d4f3574e
SS
8678{
8679 int ret = signal_print[signo];
abbb1732 8680
d4f3574e 8681 signal_print[signo] = state;
2455069d 8682 signal_cache_update (signo);
d4f3574e
SS
8683 return ret;
8684}
8685
488f131b 8686int
7bda5e4a 8687signal_pass_update (int signo, int state)
d4f3574e
SS
8688{
8689 int ret = signal_program[signo];
abbb1732 8690
d4f3574e 8691 signal_program[signo] = state;
2455069d 8692 signal_cache_update (signo);
d4f3574e
SS
8693 return ret;
8694}
8695
ab04a2af
TT
8696/* Update the global 'signal_catch' from INFO and notify the
8697 target. */
8698
8699void
8700signal_catch_update (const unsigned int *info)
8701{
8702 int i;
8703
8704 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
8705 signal_catch[i] = info[i] > 0;
8706 signal_cache_update (-1);
adc6a863 8707 target_pass_signals (signal_pass);
ab04a2af
TT
8708}
8709
c906108c 8710static void
96baa820 8711sig_print_header (void)
c906108c 8712{
3e43a32a
MS
8713 printf_filtered (_("Signal Stop\tPrint\tPass "
8714 "to program\tDescription\n"));
c906108c
SS
8715}
8716
8717static void
2ea28649 8718sig_print_info (enum gdb_signal oursig)
c906108c 8719{
2ea28649 8720 const char *name = gdb_signal_to_name (oursig);
c906108c 8721 int name_padding = 13 - strlen (name);
96baa820 8722
c906108c
SS
8723 if (name_padding <= 0)
8724 name_padding = 0;
8725
8726 printf_filtered ("%s", name);
488f131b 8727 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
c906108c
SS
8728 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
8729 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
8730 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
2ea28649 8731 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
c906108c
SS
8732}
8733
8734/* Specify how various signals in the inferior should be handled. */
8735
8736static void
0b39b52e 8737handle_command (const char *args, int from_tty)
c906108c 8738{
c906108c 8739 int digits, wordlen;
b926417a 8740 int sigfirst, siglast;
2ea28649 8741 enum gdb_signal oursig;
c906108c 8742 int allsigs;
c906108c
SS
8743
8744 if (args == NULL)
8745 {
e2e0b3e5 8746 error_no_arg (_("signal to handle"));
c906108c
SS
8747 }
8748
1777feb0 8749 /* Allocate and zero an array of flags for which signals to handle. */
c906108c 8750
adc6a863
PA
8751 const size_t nsigs = GDB_SIGNAL_LAST;
8752 unsigned char sigs[nsigs] {};
c906108c 8753
1777feb0 8754 /* Break the command line up into args. */
c906108c 8755
773a1edc 8756 gdb_argv built_argv (args);
c906108c
SS
8757
8758 /* Walk through the args, looking for signal oursigs, signal names, and
8759 actions. Signal numbers and signal names may be interspersed with
8760 actions, with the actions being performed for all signals cumulatively
1777feb0 8761 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
c906108c 8762
773a1edc 8763 for (char *arg : built_argv)
c906108c 8764 {
773a1edc
TT
8765 wordlen = strlen (arg);
8766 for (digits = 0; isdigit (arg[digits]); digits++)
c906108c
SS
8767 {;
8768 }
8769 allsigs = 0;
8770 sigfirst = siglast = -1;
8771
773a1edc 8772 if (wordlen >= 1 && !strncmp (arg, "all", wordlen))
c906108c
SS
8773 {
8774 /* Apply action to all signals except those used by the
1777feb0 8775 debugger. Silently skip those. */
c906108c
SS
8776 allsigs = 1;
8777 sigfirst = 0;
8778 siglast = nsigs - 1;
8779 }
773a1edc 8780 else if (wordlen >= 1 && !strncmp (arg, "stop", wordlen))
c906108c
SS
8781 {
8782 SET_SIGS (nsigs, sigs, signal_stop);
8783 SET_SIGS (nsigs, sigs, signal_print);
8784 }
773a1edc 8785 else if (wordlen >= 1 && !strncmp (arg, "ignore", wordlen))
c906108c
SS
8786 {
8787 UNSET_SIGS (nsigs, sigs, signal_program);
8788 }
773a1edc 8789 else if (wordlen >= 2 && !strncmp (arg, "print", wordlen))
c906108c
SS
8790 {
8791 SET_SIGS (nsigs, sigs, signal_print);
8792 }
773a1edc 8793 else if (wordlen >= 2 && !strncmp (arg, "pass", wordlen))
c906108c
SS
8794 {
8795 SET_SIGS (nsigs, sigs, signal_program);
8796 }
773a1edc 8797 else if (wordlen >= 3 && !strncmp (arg, "nostop", wordlen))
c906108c
SS
8798 {
8799 UNSET_SIGS (nsigs, sigs, signal_stop);
8800 }
773a1edc 8801 else if (wordlen >= 3 && !strncmp (arg, "noignore", wordlen))
c906108c
SS
8802 {
8803 SET_SIGS (nsigs, sigs, signal_program);
8804 }
773a1edc 8805 else if (wordlen >= 4 && !strncmp (arg, "noprint", wordlen))
c906108c
SS
8806 {
8807 UNSET_SIGS (nsigs, sigs, signal_print);
8808 UNSET_SIGS (nsigs, sigs, signal_stop);
8809 }
773a1edc 8810 else if (wordlen >= 4 && !strncmp (arg, "nopass", wordlen))
c906108c
SS
8811 {
8812 UNSET_SIGS (nsigs, sigs, signal_program);
8813 }
8814 else if (digits > 0)
8815 {
8816 /* It is numeric. The numeric signal refers to our own
8817 internal signal numbering from target.h, not to host/target
8818 signal number. This is a feature; users really should be
8819 using symbolic names anyway, and the common ones like
8820 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
8821
8822 sigfirst = siglast = (int)
773a1edc
TT
8823 gdb_signal_from_command (atoi (arg));
8824 if (arg[digits] == '-')
c906108c
SS
8825 {
8826 siglast = (int)
773a1edc 8827 gdb_signal_from_command (atoi (arg + digits + 1));
c906108c
SS
8828 }
8829 if (sigfirst > siglast)
8830 {
1777feb0 8831 /* Bet he didn't figure we'd think of this case... */
b926417a 8832 std::swap (sigfirst, siglast);
c906108c
SS
8833 }
8834 }
8835 else
8836 {
773a1edc 8837 oursig = gdb_signal_from_name (arg);
a493e3e2 8838 if (oursig != GDB_SIGNAL_UNKNOWN)
c906108c
SS
8839 {
8840 sigfirst = siglast = (int) oursig;
8841 }
8842 else
8843 {
8844 /* Not a number and not a recognized flag word => complain. */
773a1edc 8845 error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg);
c906108c
SS
8846 }
8847 }
8848
8849 /* If any signal numbers or symbol names were found, set flags for
dda83cd7 8850 which signals to apply actions to. */
c906108c 8851
b926417a 8852 for (int signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
c906108c 8853 {
2ea28649 8854 switch ((enum gdb_signal) signum)
c906108c 8855 {
a493e3e2
PA
8856 case GDB_SIGNAL_TRAP:
8857 case GDB_SIGNAL_INT:
c906108c
SS
8858 if (!allsigs && !sigs[signum])
8859 {
9e2f0ad4 8860 if (query (_("%s is used by the debugger.\n\
3e43a32a 8861Are you sure you want to change it? "),
2ea28649 8862 gdb_signal_to_name ((enum gdb_signal) signum)))
c906108c
SS
8863 {
8864 sigs[signum] = 1;
8865 }
8866 else
c119e040 8867 printf_unfiltered (_("Not confirmed, unchanged.\n"));
c906108c
SS
8868 }
8869 break;
a493e3e2
PA
8870 case GDB_SIGNAL_0:
8871 case GDB_SIGNAL_DEFAULT:
8872 case GDB_SIGNAL_UNKNOWN:
c906108c
SS
8873 /* Make sure that "all" doesn't print these. */
8874 break;
8875 default:
8876 sigs[signum] = 1;
8877 break;
8878 }
8879 }
c906108c
SS
8880 }
8881
b926417a 8882 for (int signum = 0; signum < nsigs; signum++)
3a031f65
PA
8883 if (sigs[signum])
8884 {
2455069d 8885 signal_cache_update (-1);
adc6a863
PA
8886 target_pass_signals (signal_pass);
8887 target_program_signals (signal_program);
c906108c 8888
3a031f65
PA
8889 if (from_tty)
8890 {
8891 /* Show the results. */
8892 sig_print_header ();
8893 for (; signum < nsigs; signum++)
8894 if (sigs[signum])
aead7601 8895 sig_print_info ((enum gdb_signal) signum);
3a031f65
PA
8896 }
8897
8898 break;
8899 }
c906108c
SS
8900}
8901
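/* Editor's note, illustrative only (not part of infrun.c): given the
   parser above, action words may be abbreviated down to the minimum
   lengths enforced by the wordlen checks, and numeric ranges are
   written <LOW>-<HIGH>.  For example, all of these are accepted:

       handle SIGUSR1 nostop noprint pass
       handle 10-12 print
       handle all pass

   Numeric signals refer to GDB's own signal numbering and only 1-15
   are accepted in numeric form (see gdb_signal_from_command below).  */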
de0bea00
MF
8902/* Complete the "handle" command. */
8903
eb3ff9a5 8904static void
de0bea00 8905handle_completer (struct cmd_list_element *ignore,
eb3ff9a5 8906 completion_tracker &tracker,
6f937416 8907 const char *text, const char *word)
de0bea00 8908{
de0bea00
MF
8909 static const char * const keywords[] =
8910 {
8911 "all",
8912 "stop",
8913 "ignore",
8914 "print",
8915 "pass",
8916 "nostop",
8917 "noignore",
8918 "noprint",
8919 "nopass",
8920 NULL,
8921 };
8922
eb3ff9a5
PA
8923 signal_completer (ignore, tracker, text, word);
8924 complete_on_enum (tracker, keywords, word, word);
de0bea00
MF
8925}
8926
2ea28649
PA
8927enum gdb_signal
8928gdb_signal_from_command (int num)
ed01b82c
PA
8929{
8930 if (num >= 1 && num <= 15)
2ea28649 8931 return (enum gdb_signal) num;
ed01b82c
PA
8932 error (_("Only signals 1-15 are valid as numeric signals.\n\
8933Use \"info signals\" for a list of symbolic signals."));
8934}
8935
c906108c
SS
8936/* Print current contents of the tables set by the handle command.
8937 It is possible we should just be printing signals actually used
8938 by the current target (but for things to work right when switching
8939 targets, all signals should be in the signal tables). */
8940
8941static void
1d12d88f 8942info_signals_command (const char *signum_exp, int from_tty)
c906108c 8943{
2ea28649 8944 enum gdb_signal oursig;
abbb1732 8945
c906108c
SS
8946 sig_print_header ();
8947
8948 if (signum_exp)
8949 {
8950 /* First see if this is a symbol name. */
2ea28649 8951 oursig = gdb_signal_from_name (signum_exp);
a493e3e2 8952 if (oursig == GDB_SIGNAL_UNKNOWN)
c906108c
SS
8953 {
8954 /* No, try numeric. */
8955 oursig =
2ea28649 8956 gdb_signal_from_command (parse_and_eval_long (signum_exp));
c906108c
SS
8957 }
8958 sig_print_info (oursig);
8959 return;
8960 }
8961
8962 printf_filtered ("\n");
8963 /* These ugly casts brought to you by the native VAX compiler. */
a493e3e2
PA
8964 for (oursig = GDB_SIGNAL_FIRST;
8965 (int) oursig < (int) GDB_SIGNAL_LAST;
2ea28649 8966 oursig = (enum gdb_signal) ((int) oursig + 1))
c906108c
SS
8967 {
8968 QUIT;
8969
a493e3e2
PA
8970 if (oursig != GDB_SIGNAL_UNKNOWN
8971 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
c906108c
SS
8972 sig_print_info (oursig);
8973 }
8974
3e43a32a
MS
8975 printf_filtered (_("\nUse the \"handle\" command "
8976 "to change these tables.\n"));
c906108c 8977}
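/* Editorial illustration: a typical use of the command above; the exact
   column layout comes from sig_print_header and is only approximated
   here:

     (gdb) info signals SIGINT
     Signal        Stop      Print   Pass to program Description
     SIGINT        Yes       Yes     No              Interrupt

   With no argument every signal is listed, except GDB_SIGNAL_UNKNOWN,
   GDB_SIGNAL_DEFAULT and GDB_SIGNAL_0, which the loop above skips.  */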
4aa995e1
PA
8978
8979/* The $_siginfo convenience variable is a bit special. We don't know
8980 for sure the type of the value until we actually have a chance to
7a9dd1b2 8981 fetch the data. The type can change depending on gdbarch, so it is
4aa995e1
PA
 8982 also dependent on which thread you have selected. This is dealt with by:
8983
8984 1. making $_siginfo be an internalvar that creates a new value on
8985 access.
8986
8987 2. making the value of $_siginfo be an lval_computed value. */
8988
8989/* This function implements the lval_computed support for reading a
8990 $_siginfo value. */
8991
8992static void
8993siginfo_value_read (struct value *v)
8994{
8995 LONGEST transferred;
8996
a911d87a
PA
 8997 /* If we can access registers, we can also access $_siginfo, and
 8998 vice versa. */
8999 validate_registers_access ();
c709acd1 9000
4aa995e1 9001 transferred =
328d42d8
SM
9002 target_read (current_inferior ()->top_target (),
9003 TARGET_OBJECT_SIGNAL_INFO,
4aa995e1
PA
9004 NULL,
9005 value_contents_all_raw (v),
9006 value_offset (v),
9007 TYPE_LENGTH (value_type (v)));
9008
9009 if (transferred != TYPE_LENGTH (value_type (v)))
9010 error (_("Unable to read siginfo"));
9011}
9012
9013/* This function implements the lval_computed support for writing a
9014 $_siginfo value. */
9015
9016static void
9017siginfo_value_write (struct value *v, struct value *fromval)
9018{
9019 LONGEST transferred;
9020
a911d87a
PA
 9021 /* If we can access registers, we can also access $_siginfo, and
 9022 vice versa. */
9023 validate_registers_access ();
c709acd1 9024
328d42d8 9025 transferred = target_write (current_inferior ()->top_target (),
4aa995e1
PA
9026 TARGET_OBJECT_SIGNAL_INFO,
9027 NULL,
9028 value_contents_all_raw (fromval),
9029 value_offset (v),
9030 TYPE_LENGTH (value_type (fromval)));
9031
9032 if (transferred != TYPE_LENGTH (value_type (fromval)))
9033 error (_("Unable to write siginfo"));
9034}
9035
c8f2448a 9036static const struct lval_funcs siginfo_value_funcs =
4aa995e1
PA
9037 {
9038 siginfo_value_read,
9039 siginfo_value_write
9040 };
9041
9042/* Return a new value with the correct type for the siginfo object of
78267919
UW
9043 the current thread using architecture GDBARCH. Return a void value
9044 if there's no object available. */
4aa995e1 9045
2c0b251b 9046static struct value *
22d2b532
SDJ
9047siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
9048 void *ignore)
4aa995e1 9049{
841de120 9050 if (target_has_stack ()
d7e15655 9051 && inferior_ptid != null_ptid
78267919 9052 && gdbarch_get_siginfo_type_p (gdbarch))
4aa995e1 9053 {
78267919 9054 struct type *type = gdbarch_get_siginfo_type (gdbarch);
abbb1732 9055
78267919 9056 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
4aa995e1
PA
9057 }
9058
78267919 9059 return allocate_value (builtin_type (gdbarch)->builtin_void);
4aa995e1
PA
9060}
9061
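/* Editorial illustration: from the user's side, the machinery above is
   what makes sessions like the following possible on targets whose
   gdbarch provides a siginfo type; the field names shown follow the
   common GNU/Linux layout and vary by platform:

     (gdb) continue
     Program received signal SIGSEGV, Segmentation fault.
     (gdb) print $_siginfo.si_signo
     $1 = 11
     (gdb) print $_siginfo._sifields._sigfault.si_addr
     $2 = (void *) 0x0

   Reads and writes of the variable funnel through siginfo_value_read
   and siginfo_value_write, i.e. through TARGET_OBJECT_SIGNAL_INFO.  */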
c906108c 9062\f
16c381f0
JK
9063/* infcall_suspend_state contains state about the program itself like its
9064 registers and any signal it received when it last stopped.
9065 This state must be restored regardless of how the inferior function call
9066 ends (either successfully, or after it hits a breakpoint or signal)
9067 if the program is to properly continue where it left off. */
9068
6bf78e29 9069class infcall_suspend_state
7a292a7a 9070{
6bf78e29
AB
9071public:
9072 /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
9073 once the inferior function call has finished. */
9074 infcall_suspend_state (struct gdbarch *gdbarch,
dda83cd7
SM
9075 const struct thread_info *tp,
9076 struct regcache *regcache)
1edb66d8 9077 : m_registers (new readonly_detached_regcache (*regcache))
6bf78e29 9078 {
1edb66d8
SM
9079 tp->save_suspend_to (m_thread_suspend);
9080
6bf78e29
AB
9081 gdb::unique_xmalloc_ptr<gdb_byte> siginfo_data;
9082
9083 if (gdbarch_get_siginfo_type_p (gdbarch))
9084 {
dda83cd7
SM
9085 struct type *type = gdbarch_get_siginfo_type (gdbarch);
9086 size_t len = TYPE_LENGTH (type);
6bf78e29 9087
dda83cd7 9088 siginfo_data.reset ((gdb_byte *) xmalloc (len));
6bf78e29 9089
328d42d8
SM
9090 if (target_read (current_inferior ()->top_target (),
9091 TARGET_OBJECT_SIGNAL_INFO, NULL,
dda83cd7
SM
9092 siginfo_data.get (), 0, len) != len)
9093 {
9094 /* Errors ignored. */
9095 siginfo_data.reset (nullptr);
9096 }
6bf78e29
AB
9097 }
9098
9099 if (siginfo_data)
9100 {
dda83cd7
SM
9101 m_siginfo_gdbarch = gdbarch;
9102 m_siginfo_data = std::move (siginfo_data);
6bf78e29
AB
9103 }
9104 }
9105
9106 /* Return a pointer to the stored register state. */
16c381f0 9107
6bf78e29
AB
9108 readonly_detached_regcache *registers () const
9109 {
9110 return m_registers.get ();
9111 }
9112
9113 /* Restores the stored state into GDBARCH, TP, and REGCACHE. */
9114
9115 void restore (struct gdbarch *gdbarch,
dda83cd7
SM
9116 struct thread_info *tp,
9117 struct regcache *regcache) const
6bf78e29 9118 {
1edb66d8 9119 tp->restore_suspend_from (m_thread_suspend);
6bf78e29
AB
9120
9121 if (m_siginfo_gdbarch == gdbarch)
9122 {
dda83cd7 9123 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6bf78e29 9124
dda83cd7 9125 /* Errors ignored. */
328d42d8
SM
9126 target_write (current_inferior ()->top_target (),
9127 TARGET_OBJECT_SIGNAL_INFO, NULL,
dda83cd7 9128 m_siginfo_data.get (), 0, TYPE_LENGTH (type));
6bf78e29
AB
9129 }
9130
9131 /* The inferior can be gone if the user types "print exit(0)"
9132 (and perhaps other times). */
55f6301a 9133 if (target_has_execution ())
6bf78e29
AB
9134 /* NB: The register write goes through to the target. */
9135 regcache->restore (registers ());
9136 }
9137
9138private:
9139 /* How the current thread stopped before the inferior function call was
9140 executed. */
9141 struct thread_suspend_state m_thread_suspend;
9142
9143 /* The registers before the inferior function call was executed. */
9144 std::unique_ptr<readonly_detached_regcache> m_registers;
1736ad11 9145
35515841 9146 /* The gdbarch describing the format of SIGINFO_DATA, or nullptr if no siginfo data was captured. */
6bf78e29 9147 struct gdbarch *m_siginfo_gdbarch = nullptr;
1736ad11
JK
9148
 9149 /* The inferior format depends on SIGINFO_GDBARCH and has a length of
 9150 TYPE_LENGTH (gdbarch_get_siginfo_type ()). For a different gdbarch the
 9151 content would be invalid. */
6bf78e29 9152 gdb::unique_xmalloc_ptr<gdb_byte> m_siginfo_data;
b89667eb
DE
9153};
9154
cb524840
TT
9155infcall_suspend_state_up
9156save_infcall_suspend_state ()
b89667eb 9157{
b89667eb 9158 struct thread_info *tp = inferior_thread ();
1736ad11 9159 struct regcache *regcache = get_current_regcache ();
ac7936df 9160 struct gdbarch *gdbarch = regcache->arch ();
1736ad11 9161
6bf78e29
AB
9162 infcall_suspend_state_up inf_state
9163 (new struct infcall_suspend_state (gdbarch, tp, regcache));
1736ad11 9164
6bf78e29
AB
9165 /* Having saved the current state, adjust the thread state, discarding
9166 any stop signal information. The stop signal is not useful when
9167 starting an inferior function call, and run_inferior_call will not use
9168 the signal due to its `proceed' call with GDB_SIGNAL_0. */
1edb66d8 9169 tp->set_stop_signal (GDB_SIGNAL_0);
35515841 9170
b89667eb
DE
9171 return inf_state;
9172}
9173
9174/* Restore inferior session state to INF_STATE. */
9175
9176void
16c381f0 9177restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
b89667eb
DE
9178{
9179 struct thread_info *tp = inferior_thread ();
1736ad11 9180 struct regcache *regcache = get_current_regcache ();
ac7936df 9181 struct gdbarch *gdbarch = regcache->arch ();
b89667eb 9182
6bf78e29 9183 inf_state->restore (gdbarch, tp, regcache);
16c381f0 9184 discard_infcall_suspend_state (inf_state);
b89667eb
DE
9185}
9186
b89667eb 9187void
16c381f0 9188discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
b89667eb 9189{
dd848631 9190 delete inf_state;
b89667eb
DE
9191}
9192
daf6667d 9193readonly_detached_regcache *
16c381f0 9194get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
b89667eb 9195{
6bf78e29 9196 return inf_state->registers ();
b89667eb
DE
9197}
9198
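/* Editorial sketch: the expected calling pattern for the suspend-state
   API above, roughly as an inferior-call implementation would use it
   (the surrounding call machinery is only hinted at, error paths
   omitted):

     infcall_suspend_state_up caller_state
       = save_infcall_suspend_state ();
     ... set up the dummy frame and proceed with the call ...
     restore_infcall_suspend_state (caller_state.release ());

   Restoring consumes the state (it ends in discard_infcall_suspend_state);
   on paths that deliberately keep the new stop state, the unique pointer
   is simply allowed to go out of scope instead.  */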
16c381f0
JK
9199/* infcall_control_state contains state regarding gdb's control of the
9200 inferior itself like stepping control. It also contains session state like
9201 the user's currently selected frame. */
b89667eb 9202
16c381f0 9203struct infcall_control_state
b89667eb 9204{
16c381f0
JK
9205 struct thread_control_state thread_control;
9206 struct inferior_control_state inferior_control;
d82142e2
JK
9207
9208 /* Other fields: */
ee841dd8
TT
9209 enum stop_stack_kind stop_stack_dummy = STOP_NONE;
9210 int stopped_by_random_signal = 0;
7a292a7a 9211
79952e69
PA
9212 /* ID and level of the selected frame when the inferior function
9213 call was made. */
ee841dd8 9214 struct frame_id selected_frame_id {};
79952e69 9215 int selected_frame_level = -1;
7a292a7a
SS
9216};
9217
c906108c 9218/* Save all of the information associated with the inferior<==>gdb
b89667eb 9219 connection. */
c906108c 9220
cb524840
TT
9221infcall_control_state_up
9222save_infcall_control_state ()
c906108c 9223{
cb524840 9224 infcall_control_state_up inf_status (new struct infcall_control_state);
4e1c45ea 9225 struct thread_info *tp = inferior_thread ();
d6b48e9c 9226 struct inferior *inf = current_inferior ();
7a292a7a 9227
16c381f0
JK
9228 inf_status->thread_control = tp->control;
9229 inf_status->inferior_control = inf->control;
d82142e2 9230
8358c15c 9231 tp->control.step_resume_breakpoint = NULL;
5b79abe7 9232 tp->control.exception_resume_breakpoint = NULL;
8358c15c 9233
16c381f0
JK
9234 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
9235 chain. If caller's caller is walking the chain, they'll be happier if we
9236 hand them back the original chain when restore_infcall_control_state is
9237 called. */
9238 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
d82142e2
JK
9239
9240 /* Other fields: */
9241 inf_status->stop_stack_dummy = stop_stack_dummy;
9242 inf_status->stopped_by_random_signal = stopped_by_random_signal;
c5aa993b 9243
79952e69
PA
9244 save_selected_frame (&inf_status->selected_frame_id,
9245 &inf_status->selected_frame_level);
b89667eb 9246
7a292a7a 9247 return inf_status;
c906108c
SS
9248}
9249
b89667eb
DE
9250/* Restore inferior session state to INF_STATUS. */
9251
c906108c 9252void
16c381f0 9253restore_infcall_control_state (struct infcall_control_state *inf_status)
c906108c 9254{
4e1c45ea 9255 struct thread_info *tp = inferior_thread ();
d6b48e9c 9256 struct inferior *inf = current_inferior ();
4e1c45ea 9257
8358c15c
JK
9258 if (tp->control.step_resume_breakpoint)
9259 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
9260
5b79abe7
TT
9261 if (tp->control.exception_resume_breakpoint)
9262 tp->control.exception_resume_breakpoint->disposition
9263 = disp_del_at_next_stop;
9264
d82142e2 9265 /* Handle the bpstat_copy of the chain. */
16c381f0 9266 bpstat_clear (&tp->control.stop_bpstat);
d82142e2 9267
16c381f0
JK
9268 tp->control = inf_status->thread_control;
9269 inf->control = inf_status->inferior_control;
d82142e2
JK
9270
9271 /* Other fields: */
9272 stop_stack_dummy = inf_status->stop_stack_dummy;
9273 stopped_by_random_signal = inf_status->stopped_by_random_signal;
c906108c 9274
841de120 9275 if (target_has_stack ())
c906108c 9276 {
79952e69
PA
9277 restore_selected_frame (inf_status->selected_frame_id,
9278 inf_status->selected_frame_level);
c906108c 9279 }
c906108c 9280
ee841dd8 9281 delete inf_status;
7a292a7a 9282}
c906108c
SS
9283
9284void
16c381f0 9285discard_infcall_control_state (struct infcall_control_state *inf_status)
7a292a7a 9286{
8358c15c
JK
9287 if (inf_status->thread_control.step_resume_breakpoint)
9288 inf_status->thread_control.step_resume_breakpoint->disposition
9289 = disp_del_at_next_stop;
9290
5b79abe7
TT
9291 if (inf_status->thread_control.exception_resume_breakpoint)
9292 inf_status->thread_control.exception_resume_breakpoint->disposition
9293 = disp_del_at_next_stop;
9294
1777feb0 9295 /* See save_infcall_control_state for info on stop_bpstat. */
16c381f0 9296 bpstat_clear (&inf_status->thread_control.stop_bpstat);
8358c15c 9297
ee841dd8 9298 delete inf_status;
7a292a7a 9299}
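/* Editorial sketch: control state is normally saved and restored in
   tandem with the suspend state above, around the same inferior call
   (hypothetical caller, error handling omitted):

     infcall_control_state_up control = save_infcall_control_state ();
     ... run the dummy-frame call ...
     restore_infcall_control_state (control.release ());

   discard_infcall_control_state is the alternative ending for paths
   where the pre-call stepping state and selected frame should not be
   reinstated.  */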
b89667eb 9300\f
7f89fd65 9301/* See infrun.h. */
0c557179
SDJ
9302
9303void
9304clear_exit_convenience_vars (void)
9305{
9306 clear_internalvar (lookup_internalvar ("_exitsignal"));
9307 clear_internalvar (lookup_internalvar ("_exitcode"));
9308}
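/* Editorial illustration: the two convenience variables cleared above
   back sessions such as the following (values hypothetical):

     (gdb) run
     [Inferior 1 (process 1234) exited with code 02]
     (gdb) print $_exitcode
     $1 = 2
     (gdb) print $_exitsignal
     $2 = void

   Both are cleared before the relevant one is set when an inferior
   exits, so only the variable matching how the process ended carries a
   value.  */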
c5aa993b 9309\f
488f131b 9310
b2175913
MS
9311/* User interface for reverse debugging:
9312 Set exec-direction / show exec-direction commands
9313 (returns error unless target implements to_set_exec_direction method). */
9314
170742de 9315enum exec_direction_kind execution_direction = EXEC_FORWARD;
b2175913
MS
9316static const char exec_forward[] = "forward";
9317static const char exec_reverse[] = "reverse";
9318static const char *exec_direction = exec_forward;
40478521 9319static const char *const exec_direction_names[] = {
b2175913
MS
9320 exec_forward,
9321 exec_reverse,
9322 NULL
9323};
9324
9325static void
eb4c3f4a 9326set_exec_direction_func (const char *args, int from_tty,
b2175913
MS
9327 struct cmd_list_element *cmd)
9328{
05374cfd 9329 if (target_can_execute_reverse ())
b2175913
MS
9330 {
9331 if (!strcmp (exec_direction, exec_forward))
9332 execution_direction = EXEC_FORWARD;
9333 else if (!strcmp (exec_direction, exec_reverse))
9334 execution_direction = EXEC_REVERSE;
9335 }
8bbed405
MS
9336 else
9337 {
9338 exec_direction = exec_forward;
9339 error (_("Target does not support this operation."));
9340 }
b2175913
MS
9341}
9342
9343static void
9344show_exec_direction_func (struct ui_file *out, int from_tty,
9345 struct cmd_list_element *cmd, const char *value)
9346{
9347 switch (execution_direction) {
9348 case EXEC_FORWARD:
9349 fprintf_filtered (out, _("Forward.\n"));
9350 break;
9351 case EXEC_REVERSE:
9352 fprintf_filtered (out, _("Reverse.\n"));
9353 break;
b2175913 9354 default:
d8b34453
PA
9355 internal_error (__FILE__, __LINE__,
9356 _("bogus execution_direction value: %d"),
9357 (int) execution_direction);
b2175913
MS
9358 }
9359}
9360
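/* Editorial illustration: the setting above only takes effect when the
   target can execute in reverse (e.g. under the record target); a
   session might look roughly like this:

     (gdb) record full
     (gdb) next
     (gdb) set exec-direction reverse
     (gdb) next                       <= now steps backwards
     (gdb) set exec-direction forward

   Otherwise set_exec_direction_func resets the variable to "forward"
   and reports "Target does not support this operation.".  */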
d4db2f36
PA
9361static void
9362show_schedule_multiple (struct ui_file *file, int from_tty,
9363 struct cmd_list_element *c, const char *value)
9364{
3e43a32a
MS
9365 fprintf_filtered (file, _("Resuming the execution of threads "
9366 "of all processes is %s.\n"), value);
d4db2f36 9367}
ad52ddc6 9368
22d2b532
SDJ
9369/* Implementation of `siginfo' variable. */
9370
9371static const struct internalvar_funcs siginfo_funcs =
9372{
9373 siginfo_make_value,
9374 NULL,
9375 NULL
9376};
9377
372316f1
PA
9378/* Callback for infrun's target events source. This is marked when a
9379 thread has a pending status to process. */
9380
9381static void
9382infrun_async_inferior_event_handler (gdb_client_data data)
9383{
6b36ddeb 9384 clear_async_event_handler (infrun_async_inferior_event_token);
b1a35af2 9385 inferior_event_handler (INF_REG_EVENT);
372316f1
PA
9386}
9387
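/* Editorial note: the handler above only runs once the token has been
   marked elsewhere in infrun, along the lines of

     mark_async_event_handler (infrun_async_inferior_event_token);

   after which the event loop invokes the callback, which clears the
   token and lets inferior_event_handler pick up the pending target
   event.  */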
8087c3fa 9388#if GDB_SELF_TEST
b161a60d
SM
9389namespace selftests
9390{
9391
9392/* Verify that when two threads with the same ptid exist (from two different
9393 targets) and one of them changes ptid, we only update inferior_ptid if
9394 it is appropriate. */
9395
9396static void
9397infrun_thread_ptid_changed ()
9398{
9399 gdbarch *arch = current_inferior ()->gdbarch;
9400
9401 /* The thread which inferior_ptid represents changes ptid. */
9402 {
9403 scoped_restore_current_pspace_and_thread restore;
9404
9405 scoped_mock_context<test_target_ops> target1 (arch);
9406 scoped_mock_context<test_target_ops> target2 (arch);
b161a60d
SM
9407
9408 ptid_t old_ptid (111, 222);
9409 ptid_t new_ptid (111, 333);
9410
9411 target1.mock_inferior.pid = old_ptid.pid ();
9412 target1.mock_thread.ptid = old_ptid;
922cc93d
SM
9413 target1.mock_inferior.ptid_thread_map.clear ();
9414 target1.mock_inferior.ptid_thread_map[old_ptid] = &target1.mock_thread;
9415
b161a60d
SM
9416 target2.mock_inferior.pid = old_ptid.pid ();
9417 target2.mock_thread.ptid = old_ptid;
922cc93d
SM
9418 target2.mock_inferior.ptid_thread_map.clear ();
9419 target2.mock_inferior.ptid_thread_map[old_ptid] = &target2.mock_thread;
b161a60d
SM
9420
9421 auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
9422 set_current_inferior (&target1.mock_inferior);
9423
9424 thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);
9425
9426 gdb_assert (inferior_ptid == new_ptid);
9427 }
9428
9429 /* A thread with the same ptid as inferior_ptid, but from another target,
9430 changes ptid. */
9431 {
9432 scoped_restore_current_pspace_and_thread restore;
9433
9434 scoped_mock_context<test_target_ops> target1 (arch);
9435 scoped_mock_context<test_target_ops> target2 (arch);
b161a60d
SM
9436
9437 ptid_t old_ptid (111, 222);
9438 ptid_t new_ptid (111, 333);
9439
9440 target1.mock_inferior.pid = old_ptid.pid ();
9441 target1.mock_thread.ptid = old_ptid;
922cc93d
SM
9442 target1.mock_inferior.ptid_thread_map.clear ();
9443 target1.mock_inferior.ptid_thread_map[old_ptid] = &target1.mock_thread;
9444
b161a60d
SM
9445 target2.mock_inferior.pid = old_ptid.pid ();
9446 target2.mock_thread.ptid = old_ptid;
922cc93d
SM
9447 target2.mock_inferior.ptid_thread_map.clear ();
9448 target2.mock_inferior.ptid_thread_map[old_ptid] = &target2.mock_thread;
b161a60d
SM
9449
9450 auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
9451 set_current_inferior (&target2.mock_inferior);
9452
9453 thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);
9454
9455 gdb_assert (inferior_ptid == old_ptid);
9456 }
9457}
9458
9459} /* namespace selftests */
9460
8087c3fa
JB
9461#endif /* GDB_SELF_TEST */
9462
6c265988 9463void _initialize_infrun ();
c906108c 9464void
6c265988 9465_initialize_infrun ()
c906108c 9466{
de0bea00 9467 struct cmd_list_element *c;
c906108c 9468
372316f1
PA
9469 /* Register extra event sources in the event loop. */
9470 infrun_async_inferior_event_token
db20ebdf
SM
9471 = create_async_event_handler (infrun_async_inferior_event_handler, NULL,
9472 "infrun");
372316f1 9473
e0f25bd9
SM
9474 cmd_list_element *info_signals_cmd
9475 = add_info ("signals", info_signals_command, _("\
1bedd215
AC
9476What debugger does when program gets various signals.\n\
9477Specify a signal as argument to print info on that signal only."));
e0f25bd9 9478 add_info_alias ("handle", info_signals_cmd, 0);
c906108c 9479
de0bea00 9480 c = add_com ("handle", class_run, handle_command, _("\
dfbd5e7b 9481Specify how to handle signals.\n\
486c7739 9482Usage: handle SIGNAL [ACTIONS]\n\
c906108c 9483Args are signals and actions to apply to those signals.\n\
dfbd5e7b 9484If no actions are specified, the current settings for the specified signals\n\
486c7739
MF
9485will be displayed instead.\n\
9486\n\
c906108c
SS
9487Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
9488from 1-15 are allowed for compatibility with old versions of GDB.\n\
9489Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
9490The special arg \"all\" is recognized to mean all signals except those\n\
1bedd215 9491used by the debugger, typically SIGTRAP and SIGINT.\n\
486c7739 9492\n\
1bedd215 9493Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
c906108c
SS
9494\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
9495Stop means reenter debugger if this signal happens (implies print).\n\
9496Print means print a message if this signal happens.\n\
9497Pass means let program see this signal; otherwise program doesn't know.\n\
9498Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
dfbd5e7b
PA
9499Pass and Stop may be combined.\n\
9500\n\
9501Multiple signals may be specified. Signal numbers and signal names\n\
9502may be interspersed with actions, with the actions being performed for\n\
9503all signals cumulatively specified."));
de0bea00 9504 set_cmd_completer (c, handle_completer);
486c7739 9505
c906108c 9506 if (!dbx_commands)
1a966eab
AC
9507 stop_command = add_cmd ("stop", class_obscure,
9508 not_just_help_class_command, _("\
9509There is no `stop' command, but you can set a hook on `stop'.\n\
c906108c 9510This allows you to set a list of commands to be run each time execution\n\
1a966eab 9511of the program stops."), &cmdlist);
c906108c 9512
94ba44a6
SM
9513 add_setshow_boolean_cmd
9514 ("infrun", class_maintenance, &debug_infrun,
9515 _("Set inferior debugging."),
9516 _("Show inferior debugging."),
9517 _("When non-zero, inferior specific debugging is enabled."),
9518 NULL, show_debug_infrun, &setdebuglist, &showdebuglist);
527159b7 9519
ad52ddc6
PA
9520 add_setshow_boolean_cmd ("non-stop", no_class,
9521 &non_stop_1, _("\
9522Set whether gdb controls the inferior in non-stop mode."), _("\
9523Show whether gdb controls the inferior in non-stop mode."), _("\
9524When debugging a multi-threaded program and this setting is\n\
9525off (the default, also called all-stop mode), when one thread stops\n\
9526(for a breakpoint, watchpoint, exception, or similar event), GDB stops\n\
9527all other threads in the program while you interact with the thread of\n\
9528interest. When you continue or step a thread, you can allow the other\n\
9529threads to run, or have them remain stopped, but while you inspect any\n\
9530thread's state, all threads stop.\n\
9531\n\
9532In non-stop mode, when one thread stops, other threads can continue\n\
9533to run freely. You'll be able to step each thread independently,\n\
9534leave it stopped or free to run as needed."),
9535 set_non_stop,
9536 show_non_stop,
9537 &setlist,
9538 &showlist);
9539
adc6a863 9540 for (size_t i = 0; i < GDB_SIGNAL_LAST; i++)
c906108c
SS
9541 {
9542 signal_stop[i] = 1;
9543 signal_print[i] = 1;
9544 signal_program[i] = 1;
ab04a2af 9545 signal_catch[i] = 0;
c906108c
SS
9546 }
9547
4d9d9d04
PA
9548 /* Signals caused by debugger's own actions should not be given to
9549 the program afterwards.
9550
9551 Do not deliver GDB_SIGNAL_TRAP by default, except when the user
9552 explicitly specifies that it should be delivered to the target
9553 program. Typically, that would occur when a user is debugging a
9554 target monitor on a simulator: the target monitor sets a
9555 breakpoint; the simulator encounters this breakpoint and halts
9556 the simulation handing control to GDB; GDB, noting that the stop
9557 address doesn't map to any known breakpoint, returns control back
9558 to the simulator; the simulator then delivers the hardware
9559 equivalent of a GDB_SIGNAL_TRAP to the program being
9560 debugged. */
a493e3e2
PA
9561 signal_program[GDB_SIGNAL_TRAP] = 0;
9562 signal_program[GDB_SIGNAL_INT] = 0;
c906108c
SS
9563
9564 /* Signals that are not errors should not normally enter the debugger. */
a493e3e2
PA
9565 signal_stop[GDB_SIGNAL_ALRM] = 0;
9566 signal_print[GDB_SIGNAL_ALRM] = 0;
9567 signal_stop[GDB_SIGNAL_VTALRM] = 0;
9568 signal_print[GDB_SIGNAL_VTALRM] = 0;
9569 signal_stop[GDB_SIGNAL_PROF] = 0;
9570 signal_print[GDB_SIGNAL_PROF] = 0;
9571 signal_stop[GDB_SIGNAL_CHLD] = 0;
9572 signal_print[GDB_SIGNAL_CHLD] = 0;
9573 signal_stop[GDB_SIGNAL_IO] = 0;
9574 signal_print[GDB_SIGNAL_IO] = 0;
9575 signal_stop[GDB_SIGNAL_POLL] = 0;
9576 signal_print[GDB_SIGNAL_POLL] = 0;
9577 signal_stop[GDB_SIGNAL_URG] = 0;
9578 signal_print[GDB_SIGNAL_URG] = 0;
9579 signal_stop[GDB_SIGNAL_WINCH] = 0;
9580 signal_print[GDB_SIGNAL_WINCH] = 0;
9581 signal_stop[GDB_SIGNAL_PRIO] = 0;
9582 signal_print[GDB_SIGNAL_PRIO] = 0;
c906108c 9583
cd0fc7c3
SS
9584 /* These signals are used internally by user-level thread
9585 implementations. (See signal(5) on Solaris.) Like the above
9586 signals, a healthy program receives and handles them as part of
9587 its normal operation. */
a493e3e2
PA
9588 signal_stop[GDB_SIGNAL_LWP] = 0;
9589 signal_print[GDB_SIGNAL_LWP] = 0;
9590 signal_stop[GDB_SIGNAL_WAITING] = 0;
9591 signal_print[GDB_SIGNAL_WAITING] = 0;
9592 signal_stop[GDB_SIGNAL_CANCEL] = 0;
9593 signal_print[GDB_SIGNAL_CANCEL] = 0;
bc7b765a
JB
9594 signal_stop[GDB_SIGNAL_LIBRT] = 0;
9595 signal_print[GDB_SIGNAL_LIBRT] = 0;
cd0fc7c3 9596
2455069d
UW
9597 /* Update cached state. */
9598 signal_cache_update (-1);
9599
85c07804
AC
9600 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
9601 &stop_on_solib_events, _("\
9602Set stopping for shared library events."), _("\
9603Show stopping for shared library events."), _("\
c906108c
SS
9604If nonzero, gdb will give control to the user when the dynamic linker\n\
9605notifies gdb of shared library events. The most common event of interest\n\
85c07804 9606to the user would be loading/unloading of a new library."),
f9e14852 9607 set_stop_on_solib_events,
920d2a44 9608 show_stop_on_solib_events,
85c07804 9609 &setlist, &showlist);
c906108c 9610
7ab04401
AC
9611 add_setshow_enum_cmd ("follow-fork-mode", class_run,
9612 follow_fork_mode_kind_names,
9613 &follow_fork_mode_string, _("\
9614Set debugger response to a program call of fork or vfork."), _("\
9615Show debugger response to a program call of fork or vfork."), _("\
c906108c
SS
9616A fork or vfork creates a new process. follow-fork-mode can be:\n\
9617 parent - the original process is debugged after a fork\n\
9618 child - the new process is debugged after a fork\n\
ea1dd7bc 9619The unfollowed process will continue to run.\n\
7ab04401
AC
9620By default, the debugger will follow the parent process."),
9621 NULL,
920d2a44 9622 show_follow_fork_mode_string,
7ab04401
AC
9623 &setlist, &showlist);
9624
6c95b8df
PA
9625 add_setshow_enum_cmd ("follow-exec-mode", class_run,
9626 follow_exec_mode_names,
9627 &follow_exec_mode_string, _("\
9628Set debugger response to a program call of exec."), _("\
9629Show debugger response to a program call of exec."), _("\
9630An exec call replaces the program image of a process.\n\
9631\n\
9632follow-exec-mode can be:\n\
9633\n\
cce7e648 9634 new - the debugger creates a new inferior and rebinds the process\n\
6c95b8df
PA
9635to this new inferior. The program the process was running before\n\
9636the exec call can be restarted afterwards by restarting the original\n\
9637inferior.\n\
9638\n\
9639 same - the debugger keeps the process bound to the same inferior.\n\
9640The new executable image replaces the previous executable loaded in\n\
9641the inferior. Restarting the inferior after the exec call restarts\n\
9642the executable the process was running after the exec call.\n\
9643\n\
9644By default, the debugger will use the same inferior."),
9645 NULL,
9646 show_follow_exec_mode_string,
9647 &setlist, &showlist);
9648
7ab04401
AC
9649 add_setshow_enum_cmd ("scheduler-locking", class_run,
9650 scheduler_enums, &scheduler_mode, _("\
9651Set mode for locking scheduler during execution."), _("\
9652Show mode for locking scheduler during execution."), _("\
f2665db5
MM
9653off == no locking (threads may preempt at any time)\n\
9654on == full locking (no thread except the current thread may run)\n\
dda83cd7 9655 This applies to both normal execution and replay mode.\n\
f2665db5 9656step == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
dda83cd7
SM
9657 In this mode, other threads may run during other commands.\n\
9658 This applies to both normal execution and replay mode.\n\
f2665db5 9659replay == scheduler locked in replay mode and unlocked during normal execution."),
7ab04401 9660 set_schedlock_func, /* traps on target vector */
920d2a44 9661 show_scheduler_mode,
7ab04401 9662 &setlist, &showlist);
5fbbeb29 9663
d4db2f36
PA
9664 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
9665Set mode for resuming threads of all processes."), _("\
9666Show mode for resuming threads of all processes."), _("\
9667When on, execution commands (such as 'continue' or 'next') resume all\n\
9668threads of all processes. When off (which is the default), execution\n\
9669commands only resume the threads of the current process. The set of\n\
9670threads that are resumed is further refined by the scheduler-locking\n\
9671mode (see help set scheduler-locking)."),
9672 NULL,
9673 show_schedule_multiple,
9674 &setlist, &showlist);
9675
5bf193a2
AC
9676 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
9677Set mode of the step operation."), _("\
9678Show mode of the step operation."), _("\
9679When set, doing a step over a function without debug line information\n\
9680will stop at the first instruction of that function. Otherwise, the\n\
9681function is skipped and the step command stops at a different source line."),
9682 NULL,
920d2a44 9683 show_step_stop_if_no_debug,
5bf193a2 9684 &setlist, &showlist);
ca6724c1 9685
72d0e2c5
YQ
9686 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
9687 &can_use_displaced_stepping, _("\
237fc4c9
PA
9688Set debugger's willingness to use displaced stepping."), _("\
9689Show debugger's willingness to use displaced stepping."), _("\
fff08868
HZ
9690If on, gdb will use displaced stepping to step over breakpoints if it is\n\
9691supported by the target architecture. If off, gdb will not use displaced\n\
9692stepping to step over breakpoints, even if such is supported by the target\n\
9693architecture. If auto (which is the default), gdb will use displaced stepping\n\
9694if the target architecture supports it and non-stop mode is active, but will not\n\
9695use it in all-stop mode (see help set non-stop)."),
72d0e2c5
YQ
9696 NULL,
9697 show_can_use_displaced_stepping,
9698 &setlist, &showlist);
237fc4c9 9699
b2175913
MS
9700 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
9701 &exec_direction, _("Set direction of execution.\n\
9702Options are 'forward' or 'reverse'."),
9703 _("Show direction of execution (forward/reverse)."),
9704 _("Tells gdb whether to execute forward or backward."),
9705 set_exec_direction_func, show_exec_direction_func,
9706 &setlist, &showlist);
9707
6c95b8df
PA
9708 /* Set/show detach-on-fork: user-settable mode. */
9709
9710 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
9711Set whether gdb will detach the child of a fork."), _("\
9712Show whether gdb will detach the child of a fork."), _("\
9713Tells gdb whether to detach the child of a fork."),
9714 NULL, NULL, &setlist, &showlist);
9715
03583c20
UW
9716 /* Set/show disable address space randomization mode. */
9717
9718 add_setshow_boolean_cmd ("disable-randomization", class_support,
9719 &disable_randomization, _("\
9720Set disabling of debuggee's virtual address space randomization."), _("\
9721Show disabling of debuggee's virtual address space randomization."), _("\
9722When this mode is on (which is the default), randomization of the virtual\n\
9723address space is disabled. Standalone programs run with the randomization\n\
9724enabled by default on some platforms."),
9725 &set_disable_randomization,
9726 &show_disable_randomization,
9727 &setlist, &showlist);
9728
ca6724c1 9729 /* ptid initializations */
ca6724c1
KB
9730 inferior_ptid = null_ptid;
9731 target_last_wait_ptid = minus_one_ptid;
5231c1fd 9732
c90e7d63
SM
9733 gdb::observers::thread_ptid_changed.attach (infrun_thread_ptid_changed,
9734 "infrun");
9735 gdb::observers::thread_stop_requested.attach (infrun_thread_stop_requested,
9736 "infrun");
9737 gdb::observers::thread_exit.attach (infrun_thread_thread_exit, "infrun");
9738 gdb::observers::inferior_exit.attach (infrun_inferior_exit, "infrun");
9739 gdb::observers::inferior_execd.attach (infrun_inferior_execd, "infrun");
4aa995e1
PA
9740
9741 /* Explicitly create without lookup, since that tries to create a
9742 value with a void typed value, and when we get here, gdbarch
9743 isn't initialized yet. At this point, we're quite sure there
9744 isn't another convenience variable of the same name. */
22d2b532 9745 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);
d914c394
SS
9746
9747 add_setshow_boolean_cmd ("observer", no_class,
9748 &observer_mode_1, _("\
9749Set whether gdb controls the inferior in observer mode."), _("\
9750Show whether gdb controls the inferior in observer mode."), _("\
9751In observer mode, GDB can get data from the inferior, but not\n\
9752affect its execution. Registers and memory may not be changed,\n\
9753breakpoints may not be set, and the program cannot be interrupted\n\
9754or signalled."),
9755 set_observer_mode,
9756 show_observer_mode,
9757 &setlist,
9758 &showlist);
b161a60d
SM
9759
9760#if GDB_SELF_TEST
9761 selftests::register_test ("infrun_thread_ptid_changed",
9762 selftests::infrun_thread_ptid_changed);
9763#endif
c906108c 9764}