/* Target-struct-independent code to start (run) and stop an inferior
   process.

   Copyright (C) 1986-2019 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "infrun.h"
#include <ctype.h>
#include "symtab.h"
#include "frame.h"
#include "inferior.h"
#include "breakpoint.h"
#include "gdbsupport/gdb_wait.h"
#include "gdbcore.h"
#include "gdbcmd.h"
#include "cli/cli-script.h"
#include "target.h"
#include "gdbthread.h"
#include "annotate.h"
#include "symfile.h"
#include "top.h"
#include <signal.h>
#include "inf-loop.h"
#include "regcache.h"
#include "value.h"
#include "observable.h"
#include "language.h"
#include "solib.h"
#include "main.h"
#include "dictionary.h"
#include "block.h"
#include "mi/mi-common.h"
#include "event-top.h"
#include "record.h"
#include "record-full.h"
#include "inline-frame.h"
#include "jit.h"
#include "tracepoint.h"
#include "continuations.h"
#include "interps.h"
#include "skip.h"
#include "probe.h"
#include "objfiles.h"
#include "completer.h"
#include "target-descriptions.h"
#include "target-dcache.h"
#include "terminal.h"
#include "solist.h"
#include "event-loop.h"
#include "thread-fsm.h"
#include "gdbsupport/enum-flags.h"
#include "progspace-and-thread.h"
#include "gdbsupport/gdb_optional.h"
#include "arch-utils.h"
#include "gdbsupport/scope-exit.h"
#include "gdbsupport/forward-scope-exit.h"

/* Prototypes for local functions */

static void sig_print_info (enum gdb_signal);

static void sig_print_header (void);

static int follow_fork (void);

static int follow_fork_inferior (int follow_child, int detach_fork);

static void follow_inferior_reset_breakpoints (void);

static int currently_stepping (struct thread_info *tp);

void nullify_last_target_wait_ptid (void);

static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);

static void insert_step_resume_breakpoint_at_caller (struct frame_info *);

static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);

static int maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc);

static void resume (gdb_signal sig);

/* Asynchronous signal handler registered as event loop source for
   when we have pending events ready to be passed to the core.  */
static struct async_event_handler *infrun_async_inferior_event_token;

/* Stores whether infrun_async was previously enabled or disabled.
   Starts off as -1, indicating "never enabled/disabled".  */
static int infrun_is_async = -1;

/* See infrun.h.  */

void
infrun_async (int enable)
{
  if (infrun_is_async != enable)
    {
      infrun_is_async = enable;

      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: infrun_async(%d)\n",
			    enable);

      if (enable)
	mark_async_event_handler (infrun_async_inferior_event_token);
      else
	clear_async_event_handler (infrun_async_inferior_event_token);
    }
}

/* See infrun.h.  */

void
mark_infrun_async_event_handler (void)
{
  mark_async_event_handler (infrun_async_inferior_event_token);
}

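/* Illustrative sketch, not part of the original file: how the pieces
   above are typically driven.  infrun marks its async event handler
   when target events may be pending and the event loop later invokes
   the registered callback; the helper below and its EVENTS_PENDING
   parameter are hypothetical.  */
#if 0
static void
example_async_cycle (int events_pending)
{
  /* Enable the machinery once an execution command resumes the
     target.  */
  infrun_async (1);

  /* Tell the event loop there is work for infrun to do.  */
  if (events_pending)
    mark_infrun_async_event_handler ();

  /* ... the event loop calls back into infrun to consume the
     event ...  */

  /* Disable again once nothing is resumed anymore.  */
  infrun_async (0);
}
#endif
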
/* When set, stop the 'step' command if we enter a function which has
   no line number information.  The normal behavior is that we step
   over such function.  */
int step_stop_if_no_debug = 0;
static void
show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
}

/* proceed and normal_stop use this to notify the user when the
   inferior stopped in a different thread than it had been running
   in.  */

static ptid_t previous_inferior_ptid;

/* If set (default for legacy reasons), when following a fork, GDB
   will detach from one of the fork branches, child or parent.
   Exactly which branch is detached depends on 'set follow-fork-mode'
   setting.  */

static int detach_fork = 1;

int debug_displaced = 0;
static void
show_debug_displaced (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Displace stepping debugging is %s.\n"), value);
}

unsigned int debug_infrun = 0;
static void
show_debug_infrun (struct ui_file *file, int from_tty,
		   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
}


/* Support for disabling address space randomization.  */

int disable_randomization = 1;

static void
show_disable_randomization (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  if (target_supports_disable_randomization ())
    fprintf_filtered (file,
		      _("Disabling randomization of debuggee's "
			"virtual address space is %s.\n"),
		      value);
  else
    fputs_filtered (_("Disabling randomization of debuggee's "
		      "virtual address space is unsupported on\n"
		      "this platform.\n"), file);
}

static void
set_disable_randomization (const char *args, int from_tty,
			   struct cmd_list_element *c)
{
  if (!target_supports_disable_randomization ())
    error (_("Disabling randomization of debuggee's "
	     "virtual address space is unsupported on\n"
	     "this platform."));
}

/* User interface for non-stop mode.  */

int non_stop = 0;
static int non_stop_1 = 0;

static void
set_non_stop (const char *args, int from_tty,
	      struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      non_stop_1 = non_stop;
      error (_("Cannot change this setting while the inferior is running."));
    }

  non_stop = non_stop_1;
}

static void
show_non_stop (struct ui_file *file, int from_tty,
	       struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in non-stop mode is %s.\n"),
		    value);
}

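/* Illustrative sketch, not part of the original file: the shadow
   variable pattern used by set_non_stop above (and set_observer_mode
   below).  The "set" command presumably stores the requested value in
   the _1 shadow; the hook then either reverts the shadow and errors
   out while the inferior is live, or commits it to the real variable.
   The EXAMPLE_* names are hypothetical.  */
#if 0
static int example_value = 0;	/* What the rest of GDB reads.  */
static int example_value_1 = 0;	/* Shadow written by the command.  */

static void
example_set_hook (int inferior_is_live)
{
  if (inferior_is_live)
    {
      example_value_1 = example_value;	/* Revert the shadow.  */
      error (_("Cannot change this setting while the inferior is running."));
    }

  example_value = example_value_1;	/* Commit the new value.  */
}
#endif
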
/* "Observer mode" is somewhat like a more extreme version of
   non-stop, in which all GDB operations that might affect the
   target's execution have been disabled.  */

int observer_mode = 0;
static int observer_mode_1 = 0;

static void
set_observer_mode (const char *args, int from_tty,
		   struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      observer_mode_1 = observer_mode;
      error (_("Cannot change this setting while the inferior is running."));
    }

  observer_mode = observer_mode_1;

  may_write_registers = !observer_mode;
  may_write_memory = !observer_mode;
  may_insert_breakpoints = !observer_mode;
  may_insert_tracepoints = !observer_mode;
  /* We can insert fast tracepoints in or out of observer mode,
     but enable them if we're going into this mode.  */
  if (observer_mode)
    may_insert_fast_tracepoints = 1;
  may_stop = !observer_mode;
  update_target_permissions ();

  /* Going *into* observer mode we must force non-stop, then
     going out we leave it that way.  */
  if (observer_mode)
    {
      pagination_enabled = 0;
      non_stop = non_stop_1 = 1;
    }

  if (from_tty)
    printf_filtered (_("Observer mode is now %s.\n"),
		     (observer_mode ? "on" : "off"));
}

static void
show_observer_mode (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Observer mode is %s.\n"), value);
}

/* This updates the value of observer mode based on changes in
   permissions.  Note that we are deliberately ignoring the values of
   may-write-registers and may-write-memory, since the user may have
   reason to enable these during a session, for instance to turn on a
   debugging-related global.  */

void
update_observer_mode (void)
{
  int newval;

  newval = (!may_insert_breakpoints
	    && !may_insert_tracepoints
	    && may_insert_fast_tracepoints
	    && !may_stop
	    && non_stop);

  /* Let the user know if things change.  */
  if (newval != observer_mode)
    printf_filtered (_("Observer mode is now %s.\n"),
		     (newval ? "on" : "off"));

  observer_mode = observer_mode_1 = newval;
}

/* Tables of how to react to signals; the user sets them.  */

static unsigned char signal_stop[GDB_SIGNAL_LAST];
static unsigned char signal_print[GDB_SIGNAL_LAST];
static unsigned char signal_program[GDB_SIGNAL_LAST];

/* Table of signals that are registered with "catch signal".  A
   non-zero entry indicates that the signal is caught by some "catch
   signal" command.  */
static unsigned char signal_catch[GDB_SIGNAL_LAST];

/* Table of signals that the target may silently handle.
   This is automatically determined from the flags above,
   and simply cached here.  */
static unsigned char signal_pass[GDB_SIGNAL_LAST];

#define SET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
	(flags)[signum] = 1; \
  } while (0)

#define UNSET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
	(flags)[signum] = 0; \
  } while (0)

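/* Illustrative sketch, not part of the original file: how the
   SET_SIGS/UNSET_SIGS helpers above are meant to be used.  SIGS acts
   as a mask selecting which FLAGS entries to force to 1 or 0, which
   is how a command like "handle" can update several tables at once.
   The function is hypothetical.  */
#if 0
static void
example_mark_signals (void)
{
  unsigned char chosen[GDB_SIGNAL_LAST] = { 0 };

  chosen[GDB_SIGNAL_INT] = 1;
  chosen[GDB_SIGNAL_TRAP] = 1;

  /* Stop and print for the chosen signals...  */
  SET_SIGS (GDB_SIGNAL_LAST, chosen, signal_stop);
  SET_SIGS (GDB_SIGNAL_LAST, chosen, signal_print);

  /* ...but do not pass them to the program.  */
  UNSET_SIGS (GDB_SIGNAL_LAST, chosen, signal_program);
}
#endif
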
/* Update the target's copy of SIGNAL_PROGRAM.  The sole purpose of
   this function is to avoid exporting `signal_program'.  */

void
update_signals_program_target (void)
{
  target_program_signals (signal_program);
}

/* Value to pass to target_resume() to cause all threads to resume.  */

#define RESUME_ALL minus_one_ptid

/* Command list pointer for the "stop" placeholder.  */

static struct cmd_list_element *stop_command;

/* Nonzero if we want to give control to the user when we're notified
   of shared library events by the dynamic linker.  */
int stop_on_solib_events;

/* Enable or disable optional shared library event breakpoints
   as appropriate when the above flag is changed.  */

static void
set_stop_on_solib_events (const char *args,
			  int from_tty, struct cmd_list_element *c)
{
  update_solib_breakpoints ();
}

static void
show_stop_on_solib_events (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
		    value);
}

/* Nonzero after stop if current stack frame should be printed.  */

static int stop_print_frame;

/* This is a cached copy of the pid/waitstatus of the last event
   returned by target_wait()/deprecated_target_wait_hook().  This
   information is returned by get_last_target_status().  */
static ptid_t target_last_wait_ptid;
static struct target_waitstatus target_last_waitstatus;

void init_thread_stepping_state (struct thread_info *tss);

static const char follow_fork_mode_child[] = "child";
static const char follow_fork_mode_parent[] = "parent";

static const char *const follow_fork_mode_kind_names[] = {
  follow_fork_mode_child,
  follow_fork_mode_parent,
  NULL
};

static const char *follow_fork_mode_string = follow_fork_mode_parent;
static void
show_follow_fork_mode_string (struct ui_file *file, int from_tty,
			      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Debugger response to a program "
		      "call of fork or vfork is \"%s\".\n"),
		    value);
}


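/* Illustrative sketch, not part of the original file: because the
   follow-fork-mode strings above are interned constants, later code
   can test the current selection with a pointer comparison instead of
   strcmp, which is exactly what follow_fork does further down.  The
   helper is hypothetical.  */
#if 0
static int
example_following_child (void)
{
  return follow_fork_mode_string == follow_fork_mode_child;
}
#endif
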
d83ad864
DB
412/* Handle changes to the inferior list based on the type of fork,
413 which process is being followed, and whether the other process
414 should be detached. On entry inferior_ptid must be the ptid of
415 the fork parent. At return inferior_ptid is the ptid of the
416 followed inferior. */
417
418static int
419follow_fork_inferior (int follow_child, int detach_fork)
420{
421 int has_vforked;
79639e11 422 ptid_t parent_ptid, child_ptid;
d83ad864
DB
423
424 has_vforked = (inferior_thread ()->pending_follow.kind
425 == TARGET_WAITKIND_VFORKED);
79639e11
PA
426 parent_ptid = inferior_ptid;
427 child_ptid = inferior_thread ()->pending_follow.value.related_pid;
d83ad864
DB
428
429 if (has_vforked
430 && !non_stop /* Non-stop always resumes both branches. */
3b12939d 431 && current_ui->prompt_state == PROMPT_BLOCKED
d83ad864
DB
432 && !(follow_child || detach_fork || sched_multi))
433 {
434 /* The parent stays blocked inside the vfork syscall until the
435 child execs or exits. If we don't let the child run, then
436 the parent stays blocked. If we're telling the parent to run
437 in the foreground, the user will not be able to ctrl-c to get
438 back the terminal, effectively hanging the debug session. */
439 fprintf_filtered (gdb_stderr, _("\
440Can not resume the parent process over vfork in the foreground while\n\
441holding the child stopped. Try \"set detach-on-fork\" or \
442\"set schedule-multiple\".\n"));
443 /* FIXME output string > 80 columns. */
444 return 1;
445 }
446
447 if (!follow_child)
448 {
449 /* Detach new forked process? */
450 if (detach_fork)
451 {
d83ad864
DB
452 /* Before detaching from the child, remove all breakpoints
453 from it. If we forked, then this has already been taken
454 care of by infrun.c. If we vforked however, any
455 breakpoint inserted in the parent is visible in the
456 child, even those added while stopped in a vfork
457 catchpoint. This will remove the breakpoints from the
458 parent also, but they'll be reinserted below. */
459 if (has_vforked)
460 {
461 /* Keep breakpoints list in sync. */
00431a78 462 remove_breakpoints_inf (current_inferior ());
d83ad864
DB
463 }
464
f67c0c91 465 if (print_inferior_events)
d83ad864 466 {
8dd06f7a 467 /* Ensure that we have a process ptid. */
e99b03dc 468 ptid_t process_ptid = ptid_t (child_ptid.pid ());
8dd06f7a 469
223ffa71 470 target_terminal::ours_for_output ();
d83ad864 471 fprintf_filtered (gdb_stdlog,
f67c0c91 472 _("[Detaching after %s from child %s]\n"),
6f259a23 473 has_vforked ? "vfork" : "fork",
a068643d 474 target_pid_to_str (process_ptid).c_str ());
d83ad864
DB
475 }
476 }
477 else
478 {
479 struct inferior *parent_inf, *child_inf;
d83ad864
DB
480
481 /* Add process to GDB's tables. */
e99b03dc 482 child_inf = add_inferior (child_ptid.pid ());
d83ad864
DB
483
484 parent_inf = current_inferior ();
485 child_inf->attach_flag = parent_inf->attach_flag;
486 copy_terminal_info (child_inf, parent_inf);
487 child_inf->gdbarch = parent_inf->gdbarch;
488 copy_inferior_target_desc_info (child_inf, parent_inf);
489
5ed8105e 490 scoped_restore_current_pspace_and_thread restore_pspace_thread;
d83ad864 491
79639e11 492 inferior_ptid = child_ptid;
f67c0c91 493 add_thread_silent (inferior_ptid);
2a00d7ce 494 set_current_inferior (child_inf);
d83ad864
DB
495 child_inf->symfile_flags = SYMFILE_NO_READ;
496
497 /* If this is a vfork child, then the address-space is
498 shared with the parent. */
499 if (has_vforked)
500 {
501 child_inf->pspace = parent_inf->pspace;
502 child_inf->aspace = parent_inf->aspace;
503
504 /* The parent will be frozen until the child is done
505 with the shared region. Keep track of the
506 parent. */
507 child_inf->vfork_parent = parent_inf;
508 child_inf->pending_detach = 0;
509 parent_inf->vfork_child = child_inf;
510 parent_inf->pending_detach = 0;
511 }
512 else
513 {
514 child_inf->aspace = new_address_space ();
564b1e3f 515 child_inf->pspace = new program_space (child_inf->aspace);
d83ad864
DB
516 child_inf->removable = 1;
517 set_current_program_space (child_inf->pspace);
518 clone_program_space (child_inf->pspace, parent_inf->pspace);
519
520 /* Let the shared library layer (e.g., solib-svr4) learn
521 about this new process, relocate the cloned exec, pull
522 in shared libraries, and install the solib event
523 breakpoint. If a "cloned-VM" event was propagated
524 better throughout the core, this wouldn't be
525 required. */
526 solib_create_inferior_hook (0);
527 }
d83ad864
DB
528 }
529
530 if (has_vforked)
531 {
532 struct inferior *parent_inf;
533
534 parent_inf = current_inferior ();
535
536 /* If we detached from the child, then we have to be careful
537 to not insert breakpoints in the parent until the child
538 is done with the shared memory region. However, if we're
539 staying attached to the child, then we can and should
540 insert breakpoints, so that we can debug it. A
	     subsequent child exec or exit is enough to know when the
	     child stops using the parent's address space.  */
543 parent_inf->waiting_for_vfork_done = detach_fork;
544 parent_inf->pspace->breakpoints_not_allowed = detach_fork;
545 }
546 }
547 else
548 {
549 /* Follow the child. */
550 struct inferior *parent_inf, *child_inf;
551 struct program_space *parent_pspace;
552
f67c0c91 553 if (print_inferior_events)
d83ad864 554 {
f67c0c91
SDJ
555 std::string parent_pid = target_pid_to_str (parent_ptid);
556 std::string child_pid = target_pid_to_str (child_ptid);
557
223ffa71 558 target_terminal::ours_for_output ();
6f259a23 559 fprintf_filtered (gdb_stdlog,
f67c0c91
SDJ
560 _("[Attaching after %s %s to child %s]\n"),
561 parent_pid.c_str (),
6f259a23 562 has_vforked ? "vfork" : "fork",
f67c0c91 563 child_pid.c_str ());
d83ad864
DB
564 }
565
566 /* Add the new inferior first, so that the target_detach below
567 doesn't unpush the target. */
568
e99b03dc 569 child_inf = add_inferior (child_ptid.pid ());
d83ad864
DB
570
571 parent_inf = current_inferior ();
572 child_inf->attach_flag = parent_inf->attach_flag;
573 copy_terminal_info (child_inf, parent_inf);
574 child_inf->gdbarch = parent_inf->gdbarch;
575 copy_inferior_target_desc_info (child_inf, parent_inf);
576
577 parent_pspace = parent_inf->pspace;
578
579 /* If we're vforking, we want to hold on to the parent until the
580 child exits or execs. At child exec or exit time we can
581 remove the old breakpoints from the parent and detach or
582 resume debugging it. Otherwise, detach the parent now; we'll
	 want to reuse its program/address spaces, but we can't set
584 them to the child before removing breakpoints from the
585 parent, otherwise, the breakpoints module could decide to
586 remove breakpoints from the wrong process (since they'd be
587 assigned to the same address space). */
588
589 if (has_vforked)
590 {
591 gdb_assert (child_inf->vfork_parent == NULL);
592 gdb_assert (parent_inf->vfork_child == NULL);
593 child_inf->vfork_parent = parent_inf;
594 child_inf->pending_detach = 0;
595 parent_inf->vfork_child = child_inf;
596 parent_inf->pending_detach = detach_fork;
597 parent_inf->waiting_for_vfork_done = 0;
598 }
599 else if (detach_fork)
6f259a23 600 {
f67c0c91 601 if (print_inferior_events)
6f259a23 602 {
8dd06f7a 603 /* Ensure that we have a process ptid. */
e99b03dc 604 ptid_t process_ptid = ptid_t (parent_ptid.pid ());
8dd06f7a 605
223ffa71 606 target_terminal::ours_for_output ();
6f259a23 607 fprintf_filtered (gdb_stdlog,
f67c0c91
SDJ
608 _("[Detaching after fork from "
609 "parent %s]\n"),
a068643d 610 target_pid_to_str (process_ptid).c_str ());
6f259a23
DB
611 }
612
6e1e1966 613 target_detach (parent_inf, 0);
6f259a23 614 }
d83ad864
DB
615
616 /* Note that the detach above makes PARENT_INF dangling. */
617
618 /* Add the child thread to the appropriate lists, and switch to
619 this new thread, before cloning the program space, and
620 informing the solib layer about this new process. */
621
79639e11 622 inferior_ptid = child_ptid;
f67c0c91 623 add_thread_silent (inferior_ptid);
2a00d7ce 624 set_current_inferior (child_inf);
d83ad864
DB
625
626 /* If this is a vfork child, then the address-space is shared
627 with the parent. If we detached from the parent, then we can
628 reuse the parent's program/address spaces. */
629 if (has_vforked || detach_fork)
630 {
631 child_inf->pspace = parent_pspace;
632 child_inf->aspace = child_inf->pspace->aspace;
633 }
634 else
635 {
636 child_inf->aspace = new_address_space ();
564b1e3f 637 child_inf->pspace = new program_space (child_inf->aspace);
d83ad864
DB
638 child_inf->removable = 1;
639 child_inf->symfile_flags = SYMFILE_NO_READ;
640 set_current_program_space (child_inf->pspace);
641 clone_program_space (child_inf->pspace, parent_pspace);
642
643 /* Let the shared library layer (e.g., solib-svr4) learn
644 about this new process, relocate the cloned exec, pull in
645 shared libraries, and install the solib event breakpoint.
646 If a "cloned-VM" event was propagated better throughout
647 the core, this wouldn't be required. */
648 solib_create_inferior_hook (0);
649 }
650 }
651
652 return target_follow_fork (follow_child, detach_fork);
653}
654
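/* Illustrative sketch, not part of the original file: the condition
   under which follow_fork_inferior above refuses to resume.  Holding
   a vfork child stopped while the foreground parent is blocked inside
   the vfork syscall would hang the session, so GDB only proceeds when
   at least one escape hatch applies.  The helper and its parameters
   are hypothetical restatements of the check at the top of that
   function.  */
#if 0
static int
example_vfork_would_hang (int has_vforked, int non_stop_mode,
			  int prompt_blocked, int follow_child,
			  int detach_fork, int sched_multi)
{
  return (has_vforked
	  && !non_stop_mode	/* Non-stop resumes both branches.  */
	  && prompt_blocked	/* Parent would hold the foreground.  */
	  && !(follow_child || detach_fork || sched_multi));
}
#endif
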
e58b0e63
PA
655/* Tell the target to follow the fork we're stopped at. Returns true
656 if the inferior should be resumed; false, if the target for some
657 reason decided it's best not to resume. */
658
6604731b 659static int
4ef3f3be 660follow_fork (void)
c906108c 661{
ea1dd7bc 662 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
e58b0e63
PA
663 int should_resume = 1;
664 struct thread_info *tp;
665
666 /* Copy user stepping state to the new inferior thread. FIXME: the
667 followed fork child thread should have a copy of most of the
4e3990f4
DE
668 parent thread structure's run control related fields, not just these.
669 Initialized to avoid "may be used uninitialized" warnings from gcc. */
670 struct breakpoint *step_resume_breakpoint = NULL;
186c406b 671 struct breakpoint *exception_resume_breakpoint = NULL;
4e3990f4
DE
672 CORE_ADDR step_range_start = 0;
673 CORE_ADDR step_range_end = 0;
674 struct frame_id step_frame_id = { 0 };
8980e177 675 struct thread_fsm *thread_fsm = NULL;
e58b0e63
PA
676
677 if (!non_stop)
678 {
679 ptid_t wait_ptid;
680 struct target_waitstatus wait_status;
681
682 /* Get the last target status returned by target_wait(). */
683 get_last_target_status (&wait_ptid, &wait_status);
684
685 /* If not stopped at a fork event, then there's nothing else to
686 do. */
687 if (wait_status.kind != TARGET_WAITKIND_FORKED
688 && wait_status.kind != TARGET_WAITKIND_VFORKED)
689 return 1;
690
691 /* Check if we switched over from WAIT_PTID, since the event was
692 reported. */
00431a78
PA
693 if (wait_ptid != minus_one_ptid
694 && inferior_ptid != wait_ptid)
e58b0e63
PA
695 {
696 /* We did. Switch back to WAIT_PTID thread, to tell the
697 target to follow it (in either direction). We'll
698 afterwards refuse to resume, and inform the user what
699 happened. */
00431a78
PA
700 thread_info *wait_thread
701 = find_thread_ptid (wait_ptid);
702 switch_to_thread (wait_thread);
e58b0e63
PA
703 should_resume = 0;
704 }
705 }
706
707 tp = inferior_thread ();
708
709 /* If there were any forks/vforks that were caught and are now to be
710 followed, then do so now. */
711 switch (tp->pending_follow.kind)
712 {
713 case TARGET_WAITKIND_FORKED:
714 case TARGET_WAITKIND_VFORKED:
715 {
716 ptid_t parent, child;
717
718 /* If the user did a next/step, etc, over a fork call,
719 preserve the stepping state in the fork child. */
720 if (follow_child && should_resume)
721 {
8358c15c
JK
722 step_resume_breakpoint = clone_momentary_breakpoint
723 (tp->control.step_resume_breakpoint);
16c381f0
JK
724 step_range_start = tp->control.step_range_start;
725 step_range_end = tp->control.step_range_end;
726 step_frame_id = tp->control.step_frame_id;
186c406b
TT
727 exception_resume_breakpoint
728 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
8980e177 729 thread_fsm = tp->thread_fsm;
e58b0e63
PA
730
731 /* For now, delete the parent's sr breakpoint, otherwise,
732 parent/child sr breakpoints are considered duplicates,
733 and the child version will not be installed. Remove
734 this when the breakpoints module becomes aware of
735 inferiors and address spaces. */
736 delete_step_resume_breakpoint (tp);
16c381f0
JK
737 tp->control.step_range_start = 0;
738 tp->control.step_range_end = 0;
739 tp->control.step_frame_id = null_frame_id;
186c406b 740 delete_exception_resume_breakpoint (tp);
8980e177 741 tp->thread_fsm = NULL;
e58b0e63
PA
742 }
743
744 parent = inferior_ptid;
745 child = tp->pending_follow.value.related_pid;
746
d83ad864
DB
747 /* Set up inferior(s) as specified by the caller, and tell the
748 target to do whatever is necessary to follow either parent
749 or child. */
750 if (follow_fork_inferior (follow_child, detach_fork))
e58b0e63
PA
751 {
752 /* Target refused to follow, or there's some other reason
753 we shouldn't resume. */
754 should_resume = 0;
755 }
756 else
757 {
758 /* This pending follow fork event is now handled, one way
759 or another. The previous selected thread may be gone
760 from the lists by now, but if it is still around, need
761 to clear the pending follow request. */
e09875d4 762 tp = find_thread_ptid (parent);
e58b0e63
PA
763 if (tp)
764 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
765
766 /* This makes sure we don't try to apply the "Switched
767 over from WAIT_PID" logic above. */
768 nullify_last_target_wait_ptid ();
769
1777feb0 770 /* If we followed the child, switch to it... */
e58b0e63
PA
771 if (follow_child)
772 {
00431a78
PA
773 thread_info *child_thr = find_thread_ptid (child);
774 switch_to_thread (child_thr);
e58b0e63
PA
775
776 /* ... and preserve the stepping state, in case the
777 user was stepping over the fork call. */
778 if (should_resume)
779 {
780 tp = inferior_thread ();
8358c15c
JK
781 tp->control.step_resume_breakpoint
782 = step_resume_breakpoint;
16c381f0
JK
783 tp->control.step_range_start = step_range_start;
784 tp->control.step_range_end = step_range_end;
785 tp->control.step_frame_id = step_frame_id;
186c406b
TT
786 tp->control.exception_resume_breakpoint
787 = exception_resume_breakpoint;
8980e177 788 tp->thread_fsm = thread_fsm;
e58b0e63
PA
789 }
790 else
791 {
792 /* If we get here, it was because we're trying to
793 resume from a fork catchpoint, but, the user
794 has switched threads away from the thread that
795 forked. In that case, the resume command
796 issued is most likely not applicable to the
797 child, so just warn, and refuse to resume. */
3e43a32a 798 warning (_("Not resuming: switched threads "
fd7dcb94 799 "before following fork child."));
e58b0e63
PA
800 }
801
802 /* Reset breakpoints in the child as appropriate. */
803 follow_inferior_reset_breakpoints ();
804 }
e58b0e63
PA
805 }
806 }
807 break;
808 case TARGET_WAITKIND_SPURIOUS:
809 /* Nothing to follow. */
810 break;
811 default:
812 internal_error (__FILE__, __LINE__,
813 "Unexpected pending_follow.kind %d\n",
814 tp->pending_follow.kind);
815 break;
816 }
c906108c 817
e58b0e63 818 return should_resume;
c906108c
SS
819}
820
d83ad864 821static void
6604731b 822follow_inferior_reset_breakpoints (void)
c906108c 823{
4e1c45ea
PA
824 struct thread_info *tp = inferior_thread ();
825
6604731b
DJ
826 /* Was there a step_resume breakpoint? (There was if the user
827 did a "next" at the fork() call.) If so, explicitly reset its
a1aa2221
LM
828 thread number. Cloned step_resume breakpoints are disabled on
829 creation, so enable it here now that it is associated with the
830 correct thread.
6604731b
DJ
831
832 step_resumes are a form of bp that are made to be per-thread.
833 Since we created the step_resume bp when the parent process
834 was being debugged, and now are switching to the child process,
835 from the breakpoint package's viewpoint, that's a switch of
836 "threads". We must update the bp's notion of which thread
837 it is for, or it'll be ignored when it triggers. */
838
8358c15c 839 if (tp->control.step_resume_breakpoint)
a1aa2221
LM
840 {
841 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
842 tp->control.step_resume_breakpoint->loc->enabled = 1;
843 }
6604731b 844
a1aa2221 845 /* Treat exception_resume breakpoints like step_resume breakpoints. */
186c406b 846 if (tp->control.exception_resume_breakpoint)
a1aa2221
LM
847 {
848 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
849 tp->control.exception_resume_breakpoint->loc->enabled = 1;
850 }
186c406b 851
6604731b
DJ
852 /* Reinsert all breakpoints in the child. The user may have set
853 breakpoints after catching the fork, in which case those
854 were never set in the child, but only in the parent. This makes
855 sure the inserted breakpoints match the breakpoint list. */
856
857 breakpoint_re_set ();
858 insert_breakpoints ();
c906108c 859}
c906108c 860
6c95b8df
PA
861/* The child has exited or execed: resume threads of the parent the
862 user wanted to be executing. */
863
864static int
865proceed_after_vfork_done (struct thread_info *thread,
866 void *arg)
867{
868 int pid = * (int *) arg;
869
00431a78
PA
870 if (thread->ptid.pid () == pid
871 && thread->state == THREAD_RUNNING
872 && !thread->executing
6c95b8df 873 && !thread->stop_requested
a493e3e2 874 && thread->suspend.stop_signal == GDB_SIGNAL_0)
6c95b8df
PA
875 {
876 if (debug_infrun)
877 fprintf_unfiltered (gdb_stdlog,
878 "infrun: resuming vfork parent thread %s\n",
a068643d 879 target_pid_to_str (thread->ptid).c_str ());
6c95b8df 880
00431a78 881 switch_to_thread (thread);
70509625 882 clear_proceed_status (0);
64ce06e4 883 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
6c95b8df
PA
884 }
885
886 return 0;
887}
888
5ed8105e
PA
889/* Save/restore inferior_ptid, current program space and current
890 inferior. Only use this if the current context points at an exited
891 inferior (and therefore there's no current thread to save). */
892class scoped_restore_exited_inferior
893{
894public:
895 scoped_restore_exited_inferior ()
896 : m_saved_ptid (&inferior_ptid)
897 {}
898
899private:
900 scoped_restore_tmpl<ptid_t> m_saved_ptid;
901 scoped_restore_current_program_space m_pspace;
902 scoped_restore_current_inferior m_inferior;
903};
904
6c95b8df
PA
905/* Called whenever we notice an exec or exit event, to handle
906 detaching or resuming a vfork parent. */
907
908static void
909handle_vfork_child_exec_or_exit (int exec)
910{
911 struct inferior *inf = current_inferior ();
912
913 if (inf->vfork_parent)
914 {
915 int resume_parent = -1;
916
917 /* This exec or exit marks the end of the shared memory region
b73715df
TV
918 between the parent and the child. Break the bonds. */
919 inferior *vfork_parent = inf->vfork_parent;
920 inf->vfork_parent->vfork_child = NULL;
921 inf->vfork_parent = NULL;
6c95b8df 922
b73715df
TV
923 /* If the user wanted to detach from the parent, now is the
924 time. */
925 if (vfork_parent->pending_detach)
6c95b8df
PA
926 {
927 struct thread_info *tp;
6c95b8df
PA
928 struct program_space *pspace;
929 struct address_space *aspace;
930
1777feb0 931 /* follow-fork child, detach-on-fork on. */
6c95b8df 932
b73715df 933 vfork_parent->pending_detach = 0;
68c9da30 934
5ed8105e
PA
935 gdb::optional<scoped_restore_exited_inferior>
936 maybe_restore_inferior;
937 gdb::optional<scoped_restore_current_pspace_and_thread>
938 maybe_restore_thread;
939
940 /* If we're handling a child exit, then inferior_ptid points
941 at the inferior's pid, not to a thread. */
f50f4e56 942 if (!exec)
5ed8105e 943 maybe_restore_inferior.emplace ();
f50f4e56 944 else
5ed8105e 945 maybe_restore_thread.emplace ();
6c95b8df
PA
946
947 /* We're letting loose of the parent. */
b73715df 948 tp = any_live_thread_of_inferior (vfork_parent);
00431a78 949 switch_to_thread (tp);
6c95b8df
PA
950
951 /* We're about to detach from the parent, which implicitly
952 removes breakpoints from its address space. There's a
953 catch here: we want to reuse the spaces for the child,
954 but, parent/child are still sharing the pspace at this
955 point, although the exec in reality makes the kernel give
956 the child a fresh set of new pages. The problem here is
957 that the breakpoints module being unaware of this, would
	     likely choose the child process to write to the parent
959 address space. Swapping the child temporarily away from
960 the spaces has the desired effect. Yes, this is "sort
961 of" a hack. */
962
963 pspace = inf->pspace;
964 aspace = inf->aspace;
965 inf->aspace = NULL;
966 inf->pspace = NULL;
967
f67c0c91 968 if (print_inferior_events)
6c95b8df 969 {
a068643d 970 std::string pidstr
b73715df 971 = target_pid_to_str (ptid_t (vfork_parent->pid));
f67c0c91 972
223ffa71 973 target_terminal::ours_for_output ();
6c95b8df
PA
974
975 if (exec)
6f259a23
DB
976 {
977 fprintf_filtered (gdb_stdlog,
f67c0c91 978 _("[Detaching vfork parent %s "
a068643d 979 "after child exec]\n"), pidstr.c_str ());
6f259a23 980 }
6c95b8df 981 else
6f259a23
DB
982 {
983 fprintf_filtered (gdb_stdlog,
f67c0c91 984 _("[Detaching vfork parent %s "
a068643d 985 "after child exit]\n"), pidstr.c_str ());
6f259a23 986 }
6c95b8df
PA
987 }
988
b73715df 989 target_detach (vfork_parent, 0);
6c95b8df
PA
990
991 /* Put it back. */
992 inf->pspace = pspace;
993 inf->aspace = aspace;
6c95b8df
PA
994 }
995 else if (exec)
996 {
997 /* We're staying attached to the parent, so, really give the
998 child a new address space. */
564b1e3f 999 inf->pspace = new program_space (maybe_new_address_space ());
6c95b8df
PA
1000 inf->aspace = inf->pspace->aspace;
1001 inf->removable = 1;
1002 set_current_program_space (inf->pspace);
1003
b73715df 1004 resume_parent = vfork_parent->pid;
6c95b8df
PA
1005 }
1006 else
1007 {
6c95b8df
PA
1008 struct program_space *pspace;
1009
1010 /* If this is a vfork child exiting, then the pspace and
1011 aspaces were shared with the parent. Since we're
1012 reporting the process exit, we'll be mourning all that is
1013 found in the address space, and switching to null_ptid,
1014 preparing to start a new inferior. But, since we don't
1015 want to clobber the parent's address/program spaces, we
1016 go ahead and create a new one for this exiting
1017 inferior. */
1018
5ed8105e
PA
1019 /* Switch to null_ptid while running clone_program_space, so
1020 that clone_program_space doesn't want to read the
1021 selected frame of a dead process. */
1022 scoped_restore restore_ptid
1023 = make_scoped_restore (&inferior_ptid, null_ptid);
6c95b8df
PA
1024
1025 /* This inferior is dead, so avoid giving the breakpoints
1026 module the option to write through to it (cloning a
1027 program space resets breakpoints). */
1028 inf->aspace = NULL;
1029 inf->pspace = NULL;
564b1e3f 1030 pspace = new program_space (maybe_new_address_space ());
6c95b8df
PA
1031 set_current_program_space (pspace);
1032 inf->removable = 1;
7dcd53a0 1033 inf->symfile_flags = SYMFILE_NO_READ;
b73715df 1034 clone_program_space (pspace, vfork_parent->pspace);
6c95b8df
PA
1035 inf->pspace = pspace;
1036 inf->aspace = pspace->aspace;
1037
b73715df 1038 resume_parent = vfork_parent->pid;
6c95b8df
PA
1039 }
1040
6c95b8df
PA
1041 gdb_assert (current_program_space == inf->pspace);
1042
1043 if (non_stop && resume_parent != -1)
1044 {
1045 /* If the user wanted the parent to be running, let it go
1046 free now. */
5ed8105e 1047 scoped_restore_current_thread restore_thread;
6c95b8df
PA
1048
1049 if (debug_infrun)
3e43a32a
MS
1050 fprintf_unfiltered (gdb_stdlog,
1051 "infrun: resuming vfork parent process %d\n",
6c95b8df
PA
1052 resume_parent);
1053
1054 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
6c95b8df
PA
1055 }
1056 }
1057}
1058
eb6c553b 1059/* Enum strings for "set|show follow-exec-mode". */
6c95b8df
PA
1060
1061static const char follow_exec_mode_new[] = "new";
1062static const char follow_exec_mode_same[] = "same";
40478521 1063static const char *const follow_exec_mode_names[] =
6c95b8df
PA
1064{
1065 follow_exec_mode_new,
1066 follow_exec_mode_same,
1067 NULL,
1068};
1069
1070static const char *follow_exec_mode_string = follow_exec_mode_same;
1071static void
1072show_follow_exec_mode_string (struct ui_file *file, int from_tty,
1073 struct cmd_list_element *c, const char *value)
1074{
1075 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
1076}
1077
ecf45d2c 1078/* EXEC_FILE_TARGET is assumed to be non-NULL. */
1adeb98a 1079
c906108c 1080static void
4ca51187 1081follow_exec (ptid_t ptid, const char *exec_file_target)
c906108c 1082{
6c95b8df 1083 struct inferior *inf = current_inferior ();
e99b03dc 1084 int pid = ptid.pid ();
94585166 1085 ptid_t process_ptid;
7a292a7a 1086
65d2b333
PW
1087 /* Switch terminal for any messages produced e.g. by
1088 breakpoint_re_set. */
1089 target_terminal::ours_for_output ();
1090
c906108c
SS
1091 /* This is an exec event that we actually wish to pay attention to.
1092 Refresh our symbol table to the newly exec'd program, remove any
1093 momentary bp's, etc.
1094
1095 If there are breakpoints, they aren't really inserted now,
1096 since the exec() transformed our inferior into a fresh set
1097 of instructions.
1098
1099 We want to preserve symbolic breakpoints on the list, since
1100 we have hopes that they can be reset after the new a.out's
1101 symbol table is read.
1102
1103 However, any "raw" breakpoints must be removed from the list
1104 (e.g., the solib bp's), since their address is probably invalid
1105 now.
1106
1107 And, we DON'T want to call delete_breakpoints() here, since
1108 that may write the bp's "shadow contents" (the instruction
     value that was overwritten with a TRAP instruction).  Since
1777feb0 1110 we now have a new a.out, those shadow contents aren't valid. */
6c95b8df
PA
1111
1112 mark_breakpoints_out ();
1113
95e50b27
PA
1114 /* The target reports the exec event to the main thread, even if
1115 some other thread does the exec, and even if the main thread was
1116 stopped or already gone. We may still have non-leader threads of
1117 the process on our list. E.g., on targets that don't have thread
1118 exit events (like remote); or on native Linux in non-stop mode if
1119 there were only two threads in the inferior and the non-leader
1120 one is the one that execs (and nothing forces an update of the
1121 thread list up to here). When debugging remotely, it's best to
1122 avoid extra traffic, when possible, so avoid syncing the thread
1123 list with the target, and instead go ahead and delete all threads
1124 of the process but one that reported the event. Note this must
1125 be done before calling update_breakpoints_after_exec, as
1126 otherwise clearing the threads' resources would reference stale
1127 thread breakpoints -- it may have been one of these threads that
1128 stepped across the exec. We could just clear their stepping
1129 states, but as long as we're iterating, might as well delete
1130 them. Deleting them now rather than at the next user-visible
1131 stop provides a nicer sequence of events for user and MI
1132 notifications. */
08036331 1133 for (thread_info *th : all_threads_safe ())
d7e15655 1134 if (th->ptid.pid () == pid && th->ptid != ptid)
00431a78 1135 delete_thread (th);
95e50b27
PA
1136
1137 /* We also need to clear any left over stale state for the
1138 leader/event thread. E.g., if there was any step-resume
1139 breakpoint or similar, it's gone now. We cannot truly
1140 step-to-next statement through an exec(). */
08036331 1141 thread_info *th = inferior_thread ();
8358c15c 1142 th->control.step_resume_breakpoint = NULL;
186c406b 1143 th->control.exception_resume_breakpoint = NULL;
34b7e8a6 1144 th->control.single_step_breakpoints = NULL;
16c381f0
JK
1145 th->control.step_range_start = 0;
1146 th->control.step_range_end = 0;
c906108c 1147
95e50b27
PA
1148 /* The user may have had the main thread held stopped in the
1149 previous image (e.g., schedlock on, or non-stop). Release
1150 it now. */
a75724bc
PA
1151 th->stop_requested = 0;
1152
95e50b27
PA
1153 update_breakpoints_after_exec ();
1154
1777feb0 1155 /* What is this a.out's name? */
f2907e49 1156 process_ptid = ptid_t (pid);
6c95b8df 1157 printf_unfiltered (_("%s is executing new program: %s\n"),
a068643d 1158 target_pid_to_str (process_ptid).c_str (),
ecf45d2c 1159 exec_file_target);
c906108c
SS
1160
1161 /* We've followed the inferior through an exec. Therefore, the
1777feb0 1162 inferior has essentially been killed & reborn. */
7a292a7a 1163
6ca15a4b 1164 breakpoint_init_inferior (inf_execd);
e85a822c 1165
797bc1cb
TT
1166 gdb::unique_xmalloc_ptr<char> exec_file_host
1167 = exec_file_find (exec_file_target, NULL);
ff862be4 1168
ecf45d2c
SL
1169 /* If we were unable to map the executable target pathname onto a host
1170 pathname, tell the user that. Otherwise GDB's subsequent behavior
1171 is confusing. Maybe it would even be better to stop at this point
1172 so that the user can specify a file manually before continuing. */
1173 if (exec_file_host == NULL)
1174 warning (_("Could not load symbols for executable %s.\n"
1175 "Do you need \"set sysroot\"?"),
1176 exec_file_target);
c906108c 1177
cce9b6bf
PA
1178 /* Reset the shared library package. This ensures that we get a
1179 shlib event when the child reaches "_start", at which point the
1180 dld will have had a chance to initialize the child. */
1181 /* Also, loading a symbol file below may trigger symbol lookups, and
1182 we don't want those to be satisfied by the libraries of the
1183 previous incarnation of this process. */
1184 no_shared_libraries (NULL, 0);
1185
6c95b8df
PA
1186 if (follow_exec_mode_string == follow_exec_mode_new)
1187 {
6c95b8df
PA
1188 /* The user wants to keep the old inferior and program spaces
1189 around. Create a new fresh one, and switch to it. */
1190
35ed81d4
SM
1191 /* Do exit processing for the original inferior before setting the new
1192 inferior's pid. Having two inferiors with the same pid would confuse
1193 find_inferior_p(t)id. Transfer the terminal state and info from the
1194 old to the new inferior. */
1195 inf = add_inferior_with_spaces ();
1196 swap_terminal_info (inf, current_inferior ());
057302ce 1197 exit_inferior_silent (current_inferior ());
17d8546e 1198
94585166 1199 inf->pid = pid;
ecf45d2c 1200 target_follow_exec (inf, exec_file_target);
6c95b8df
PA
1201
1202 set_current_inferior (inf);
94585166 1203 set_current_program_space (inf->pspace);
c4c17fb0 1204 add_thread (ptid);
6c95b8df 1205 }
9107fc8d
PA
1206 else
1207 {
1208 /* The old description may no longer be fit for the new image.
1209 E.g, a 64-bit process exec'ed a 32-bit process. Clear the
1210 old description; we'll read a new one below. No need to do
1211 this on "follow-exec-mode new", as the old inferior stays
1212 around (its description is later cleared/refetched on
1213 restart). */
1214 target_clear_description ();
1215 }
6c95b8df
PA
1216
1217 gdb_assert (current_program_space == inf->pspace);
1218
ecf45d2c
SL
1219 /* Attempt to open the exec file. SYMFILE_DEFER_BP_RESET is used
1220 because the proper displacement for a PIE (Position Independent
1221 Executable) main symbol file will only be computed by
1222 solib_create_inferior_hook below. breakpoint_re_set would fail
1223 to insert the breakpoints with the zero displacement. */
797bc1cb 1224 try_open_exec_file (exec_file_host.get (), inf, SYMFILE_DEFER_BP_RESET);
c906108c 1225
9107fc8d
PA
1226 /* If the target can specify a description, read it. Must do this
1227 after flipping to the new executable (because the target supplied
1228 description must be compatible with the executable's
1229 architecture, and the old executable may e.g., be 32-bit, while
1230 the new one 64-bit), and before anything involving memory or
1231 registers. */
1232 target_find_description ();
1233
268a4a75 1234 solib_create_inferior_hook (0);
c906108c 1235
4efc6507
DE
1236 jit_inferior_created_hook ();
1237
c1e56572
JK
1238 breakpoint_re_set ();
1239
c906108c
SS
1240 /* Reinsert all breakpoints. (Those which were symbolic have
1241 been reset to the proper address in the new a.out, thanks
1777feb0 1242 to symbol_file_command...). */
c906108c
SS
1243 insert_breakpoints ();
1244
1245 /* The next resume of this inferior should bring it to the shlib
1246 startup breakpoints. (If the user had also set bp's on
1247 "main" from the old (parent) process, then they'll auto-
1777feb0 1248 matically get reset there in the new process.). */
c906108c
SS
1249}
1250
/* The queue of threads that need to do a step-over operation to get
   past e.g., a breakpoint.  What technique is used to step over the
   breakpoint/watchpoint does not matter -- all threads end up in the
   same queue, to maintain rough temporal order of execution, in order
   to avoid starvation, otherwise, we could e.g., find ourselves
   constantly stepping the same couple threads past their breakpoints
   over and over, if the single-step finishes fast enough.  */
struct thread_info *step_over_queue_head;
1259
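/* Illustrative sketch, not part of the original file: the intent of
   the queue declared above.  Threads are appended at the tail and
   serviced from the head, so step-overs happen in rough arrival order
   and no thread is starved.  The node type and fields here are
   hypothetical; the real chaining lives in the thread module.  */
#if 0
struct example_node
{
  struct example_node *next;
};

static struct example_node *example_head, *example_tail;

static void
example_enqueue (struct example_node *n)
{
  n->next = NULL;
  if (example_tail != NULL)
    example_tail->next = n;
  else
    example_head = n;
  example_tail = n;
}

static struct example_node *
example_dequeue (void)
{
  struct example_node *n = example_head;

  if (n != NULL)
    {
      example_head = n->next;
      if (example_head == NULL)
	example_tail = NULL;
    }
  return n;
}
#endif
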
6c4cfb24
PA
1260/* Bit flags indicating what the thread needs to step over. */
1261
8d297bbf 1262enum step_over_what_flag
6c4cfb24
PA
1263 {
1264 /* Step over a breakpoint. */
1265 STEP_OVER_BREAKPOINT = 1,
1266
1267 /* Step past a non-continuable watchpoint, in order to let the
1268 instruction execute so we can evaluate the watchpoint
1269 expression. */
1270 STEP_OVER_WATCHPOINT = 2
1271 };
8d297bbf 1272DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag, step_over_what);
6c4cfb24 1273
963f9c80 1274/* Info about an instruction that is being stepped over. */
31e77af2
PA
1275
1276struct step_over_info
1277{
963f9c80
PA
1278 /* If we're stepping past a breakpoint, this is the address space
1279 and address of the instruction the breakpoint is set at. We'll
1280 skip inserting all breakpoints here. Valid iff ASPACE is
1281 non-NULL. */
8b86c959 1282 const address_space *aspace;
31e77af2 1283 CORE_ADDR address;
963f9c80
PA
1284
1285 /* The instruction being stepped over triggers a nonsteppable
1286 watchpoint. If true, we'll skip inserting watchpoints. */
1287 int nonsteppable_watchpoint_p;
21edc42f
YQ
1288
1289 /* The thread's global number. */
1290 int thread;
31e77af2
PA
1291};
1292
1293/* The step-over info of the location that is being stepped over.
1294
1295 Note that with async/breakpoint always-inserted mode, a user might
1296 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
1297 being stepped over. As setting a new breakpoint inserts all
1298 breakpoints, we need to make sure the breakpoint being stepped over
1299 isn't inserted then. We do that by only clearing the step-over
1300 info when the step-over is actually finished (or aborted).
1301
1302 Presently GDB can only step over one breakpoint at any given time.
1303 Given threads that can't run code in the same address space as the
1304 breakpoint's can't really miss the breakpoint, GDB could be taught
1305 to step-over at most one breakpoint per address space (so this info
1306 could move to the address space object if/when GDB is extended).
1307 The set of breakpoints being stepped over will normally be much
1308 smaller than the set of all breakpoints, so a flag in the
1309 breakpoint location structure would be wasteful. A separate list
1310 also saves complexity and run-time, as otherwise we'd have to go
1311 through all breakpoint locations clearing their flag whenever we
1312 start a new sequence. Similar considerations weigh against storing
1313 this info in the thread object. Plus, not all step overs actually
1314 have breakpoint locations -- e.g., stepping past a single-step
1315 breakpoint, or stepping to complete a non-continuable
1316 watchpoint. */
1317static struct step_over_info step_over_info;
1318
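/* Illustrative sketch, not part of the original file: the intended
   lifecycle of the record above, using the two accessors defined just
   below.  The info is set when a step-over starts and only cleared
   once the step-over finishes or is aborted, so breakpoint
   (re)insertion in the meantime skips the stepped-over location.  The
   wrapper function is hypothetical.  */
#if 0
static void
example_step_over_lifecycle (const address_space *aspace, CORE_ADDR pc,
			     int thread_global_num)
{
  /* About to step the thread past a breakpoint at PC.  */
  set_step_over_info (aspace, pc, 0 /* nonsteppable_watchpoint_p */,
		      thread_global_num);

  /* ... single-step or displaced-step the thread here ...  */

  /* Finished (or aborted): breakpoints may be inserted at PC again.  */
  clear_step_over_info ();
}
#endif
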
1319/* Record the address of the breakpoint/instruction we're currently
ce0db137
DE
1320 stepping over.
1321 N.B. We record the aspace and address now, instead of say just the thread,
1322 because when we need the info later the thread may be running. */
31e77af2
PA
1323
1324static void
8b86c959 1325set_step_over_info (const address_space *aspace, CORE_ADDR address,
21edc42f
YQ
1326 int nonsteppable_watchpoint_p,
1327 int thread)
31e77af2
PA
1328{
1329 step_over_info.aspace = aspace;
1330 step_over_info.address = address;
963f9c80 1331 step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
21edc42f 1332 step_over_info.thread = thread;
31e77af2
PA
1333}
1334
/* Called when we're no longer stepping over a breakpoint / an
   instruction, so all breakpoints are free to be (re)inserted.  */
1337
1338static void
1339clear_step_over_info (void)
1340{
372316f1
PA
1341 if (debug_infrun)
1342 fprintf_unfiltered (gdb_stdlog,
1343 "infrun: clear_step_over_info\n");
31e77af2
PA
1344 step_over_info.aspace = NULL;
1345 step_over_info.address = 0;
963f9c80 1346 step_over_info.nonsteppable_watchpoint_p = 0;
21edc42f 1347 step_over_info.thread = -1;
31e77af2
PA
1348}
1349
7f89fd65 1350/* See infrun.h. */
31e77af2
PA
1351
1352int
1353stepping_past_instruction_at (struct address_space *aspace,
1354 CORE_ADDR address)
1355{
1356 return (step_over_info.aspace != NULL
1357 && breakpoint_address_match (aspace, address,
1358 step_over_info.aspace,
1359 step_over_info.address));
1360}
1361
963f9c80
PA
1362/* See infrun.h. */
1363
21edc42f
YQ
1364int
1365thread_is_stepping_over_breakpoint (int thread)
1366{
1367 return (step_over_info.thread != -1
1368 && thread == step_over_info.thread);
1369}
1370
1371/* See infrun.h. */
1372
963f9c80
PA
1373int
1374stepping_past_nonsteppable_watchpoint (void)
1375{
1376 return step_over_info.nonsteppable_watchpoint_p;
1377}
1378
6cc83d2a
PA
1379/* Returns true if step-over info is valid. */
1380
1381static int
1382step_over_info_valid_p (void)
1383{
963f9c80
PA
1384 return (step_over_info.aspace != NULL
1385 || stepping_past_nonsteppable_watchpoint ());
6cc83d2a
PA
1386}
1387

/* Displaced stepping.  */

/* In non-stop debugging mode, we must take special care to manage
   breakpoints properly; in particular, the traditional strategy for
   stepping a thread past a breakpoint it has hit is unsuitable.
   'Displaced stepping' is a tactic for stepping one thread past a
   breakpoint it has hit while ensuring that other threads running
   concurrently will hit the breakpoint as they should.

   The traditional way to step a thread T off a breakpoint in a
   multi-threaded program in all-stop mode is as follows:

   a0) Initially, all threads are stopped, and breakpoints are not
       inserted.
   a1) We single-step T, leaving breakpoints uninserted.
   a2) We insert breakpoints, and resume all threads.

   In non-stop debugging, however, this strategy is unsuitable: we
   don't want to have to stop all threads in the system in order to
   continue or step T past a breakpoint.  Instead, we use displaced
   stepping:

   n0) Initially, T is stopped, other threads are running, and
       breakpoints are inserted.
   n1) We copy the instruction "under" the breakpoint to a separate
       location, outside the main code stream, making any adjustments
       to the instruction, register, and memory state as directed by
       T's architecture.
   n2) We single-step T over the instruction at its new location.
   n3) We adjust the resulting register and memory state as directed
       by T's architecture.  This includes resetting T's PC to point
       back into the main instruction stream.
   n4) We resume T.

   This approach depends on the following gdbarch methods:

   - gdbarch_max_insn_length and gdbarch_displaced_step_location
     indicate where to copy the instruction, and how much space must
     be reserved there.  We use these in step n1.

   - gdbarch_displaced_step_copy_insn copies an instruction to a new
     address, and makes any necessary adjustments to the instruction,
     register contents, and memory.  We use this in step n1.

   - gdbarch_displaced_step_fixup adjusts registers and memory after
     we have successfully single-stepped the instruction, to yield the
     same effect the instruction would have had if we had executed it
     at its original address.  We use this in step n3.

   The gdbarch_displaced_step_copy_insn and
   gdbarch_displaced_step_fixup functions must be written so that
   copying an instruction with gdbarch_displaced_step_copy_insn,
   single-stepping across the copied instruction, and then applying
   gdbarch_displaced_step_fixup should have the same effects on the
   thread's memory and registers as stepping the instruction in place
   would have.  Exactly which responsibilities fall to the copy and
   which fall to the fixup is up to the author of those functions.

   See the comments in gdbarch.sh for details.

   Note that displaced stepping and software single-step cannot
   currently be used in combination, although with some care I think
   they could be made to.  Software single-step works by placing
   breakpoints on all possible subsequent instructions; if the
   displaced instruction is a PC-relative jump, those breakpoints
   could fall in very strange places --- on pages that aren't
   executable, or at addresses that are not proper instruction
   boundaries.  (We do generally let other threads run while we wait
   to hit the software single-step breakpoint, and they might
   encounter such a corrupted instruction.)  One way to work around
   this would be to have gdbarch_displaced_step_copy_insn fully
   simulate the effect of PC-relative instructions (and return NULL)
   on architectures that use software single-stepping.

   In non-stop mode, we can have independent and simultaneous step
   requests, so more than one thread may need to simultaneously step
   over a breakpoint.  The current implementation assumes there is
   only one scratch space per process.  In this case, we have to
   serialize access to the scratch space.  If thread A wants to step
   over a breakpoint, but we are currently waiting for some other
   thread to complete a displaced step, we leave thread A stopped and
   place it in the displaced_step_request_queue.  Whenever a displaced
   step finishes, we pick the next thread in the queue and start a new
   displaced step operation on it.  See displaced_step_prepare and
   displaced_step_fixup for details.  */

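/* Illustrative sketch, not part of the original file: the n1-n4
   sequence described above with all bookkeeping elided.  The helper
   names are hypothetical; in GDB the work is split across
   displaced_step_prepare, the resume/wait machinery and
   displaced_step_fixup, using the gdbarch methods listed above.  */
#if 0
static void
example_displaced_step (struct thread_info *tp, CORE_ADDR pc,
			CORE_ADDR scratch)
{
  /* n1: copy the instruction at PC to the scratch area, letting the
     architecture adjust it as needed.  */
  example_copy_insn (tp, pc, scratch);

  /* n2: single-step the copy; other threads keep running with all
     breakpoints inserted.  */
  example_single_step (tp, scratch);

  /* n3: fix up registers and memory, and point the PC back into the
     main instruction stream.  */
  example_fixup (tp, pc, scratch);

  /* n4: resume the thread normally.  */
  example_resume (tp);
}
#endif
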
cfba9872
SM
1475/* Default destructor for displaced_step_closure. */
1476
1477displaced_step_closure::~displaced_step_closure () = default;
1478
fc1cf338
PA
1479/* Get the displaced stepping state of process PID. */
1480
39a36629 1481static displaced_step_inferior_state *
00431a78 1482get_displaced_stepping_state (inferior *inf)
fc1cf338 1483{
d20172fc 1484 return &inf->displaced_step_state;
fc1cf338
PA
1485}
1486
372316f1
PA
1487/* Returns true if any inferior has a thread doing a displaced
1488 step. */
1489
39a36629
SM
1490static bool
1491displaced_step_in_progress_any_inferior ()
372316f1 1492{
d20172fc 1493 for (inferior *i : all_inferiors ())
39a36629 1494 {
d20172fc 1495 if (i->displaced_step_state.step_thread != nullptr)
39a36629
SM
1496 return true;
1497 }
372316f1 1498
39a36629 1499 return false;
372316f1
PA
1500}
1501
c0987663
YQ
1502/* Return true if the thread represented by THREAD is doing a
1503 displaced step. */
1504
1505static int
00431a78 1506displaced_step_in_progress_thread (thread_info *thread)
c0987663 1507{
00431a78 1508 gdb_assert (thread != NULL);
c0987663 1509
d20172fc 1510 return get_displaced_stepping_state (thread->inf)->step_thread == thread;
c0987663
YQ
1511}
1512
8f572e5c
PA
1513/* Return true if inferior INF has a thread doing a displaced step. */
1514
1515static int
00431a78 1516displaced_step_in_progress (inferior *inf)
8f572e5c 1517{
d20172fc 1518 return get_displaced_stepping_state (inf)->step_thread != nullptr;
fc1cf338
PA
1519}
1520
a42244db
YQ
1521/* If the inferior is displaced stepping, and ADDR equals the starting
1522 address of the copy area, return the corresponding displaced_step_closure.
1523 Otherwise, return NULL. */
1524
1525struct displaced_step_closure*
1526get_displaced_step_closure_by_addr (CORE_ADDR addr)
1527{
d20172fc 1528 displaced_step_inferior_state *displaced
00431a78 1529 = get_displaced_stepping_state (current_inferior ());
a42244db
YQ
1530
1531 /* Is ADDR the scratch pad of an in-progress displaced step? */
d20172fc 1532 if (displaced->step_thread != nullptr
00431a78 1533 && displaced->step_copy == addr)
a42244db
YQ
1534 return displaced->step_closure;
1535
1536 return NULL;
1537}
1538
fc1cf338
PA
1539static void
1540infrun_inferior_exit (struct inferior *inf)
1541{
d20172fc 1542 inf->displaced_step_state.reset ();
fc1cf338 1543}
237fc4c9 1544
fff08868
HZ
1545/* If ON, and the architecture supports it, GDB will use displaced
1546 stepping to step over breakpoints. If OFF, or if the architecture
1547 doesn't support it, GDB will instead use the traditional
1548 hold-and-step approach. If AUTO (which is the default), GDB will
1549 decide which technique to use to step over breakpoints depending on
1550 which of all-stop or non-stop mode is active --- displaced stepping
1551 in non-stop mode; hold-and-step in all-stop mode. */
1552
72d0e2c5 1553static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
fff08868 1554
237fc4c9
PA
1555static void
1556show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1557 struct cmd_list_element *c,
1558 const char *value)
1559{
72d0e2c5 1560 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
3e43a32a
MS
1561 fprintf_filtered (file,
1562 _("Debugger's willingness to use displaced stepping "
1563 "to step over breakpoints is %s (currently %s).\n"),
fbea99ea 1564 value, target_is_non_stop_p () ? "on" : "off");
fff08868 1565 else
3e43a32a
MS
1566 fprintf_filtered (file,
1567 _("Debugger's willingness to use displaced stepping "
1568 "to step over breakpoints is %s.\n"), value);
237fc4c9
PA
1569}
1570
fff08868 1571/* Return non-zero if displaced stepping can/should be used to step
3fc8eb30 1572 over breakpoints of thread TP. */
fff08868 1573
237fc4c9 1574static int
3fc8eb30 1575use_displaced_stepping (struct thread_info *tp)
237fc4c9 1576{
00431a78 1577 struct regcache *regcache = get_thread_regcache (tp);
ac7936df 1578 struct gdbarch *gdbarch = regcache->arch ();
d20172fc
SM
1579 displaced_step_inferior_state *displaced_state
1580 = get_displaced_stepping_state (tp->inf);
3fc8eb30 1581
fbea99ea
PA
1582 return (((can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
1583 && target_is_non_stop_p ())
72d0e2c5 1584 || can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
96429cc8 1585 && gdbarch_displaced_step_copy_insn_p (gdbarch)
3fc8eb30 1586 && find_record_target () == NULL
d20172fc 1587 && !displaced_state->failed_before);
237fc4c9
PA
1588}
1589
1590/* Clean out any stray displaced stepping state. */
1591static void
fc1cf338 1592displaced_step_clear (struct displaced_step_inferior_state *displaced)
237fc4c9
PA
1593{
1594 /* Indicate that there is no cleanup pending. */
00431a78 1595 displaced->step_thread = nullptr;
237fc4c9 1596
cfba9872 1597 delete displaced->step_closure;
6d45d4b4 1598 displaced->step_closure = NULL;
237fc4c9
PA
1599}
1600
9799571e
TT
1601/* A cleanup that wraps displaced_step_clear. */
1602using displaced_step_clear_cleanup
1603 = FORWARD_SCOPE_EXIT (displaced_step_clear);
237fc4c9
PA
1604
1605/* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1606void
1607displaced_step_dump_bytes (struct ui_file *file,
1608 const gdb_byte *buf,
1609 size_t len)
1610{
1611 int i;
1612
1613 for (i = 0; i < len; i++)
1614 fprintf_unfiltered (file, "%02x ", buf[i]);
1615 fputs_unfiltered ("\n", file);
1616}
1617
1618/* Prepare to single-step, using displaced stepping.
1619
1620 Note that we cannot use displaced stepping when we have a signal to
1621 deliver. If we have a signal to deliver and an instruction to step
1622 over, then after the step, there will be no indication from the
1623 target whether the thread entered a signal handler or ignored the
1624 signal and stepped over the instruction successfully --- both cases
1625 result in a simple SIGTRAP. In the first case we mustn't do a
1626 fixup, and in the second case we must --- but we can't tell which.
1627 Comments in the code for 'random signals' in handle_inferior_event
1628 explain how we handle this case instead.
1629
1630 Returns 1 if preparing was successful -- this thread is going to be
7f03bd92
PA
1631 stepped now; 0 if displaced stepping this thread got queued; or -1
1632 if this instruction can't be displaced stepped. */
1633
237fc4c9 1634static int
00431a78 1635displaced_step_prepare_throw (thread_info *tp)
237fc4c9 1636{
00431a78 1637 regcache *regcache = get_thread_regcache (tp);
ac7936df 1638 struct gdbarch *gdbarch = regcache->arch ();
8b86c959 1639 const address_space *aspace = regcache->aspace ();
237fc4c9
PA
1640 CORE_ADDR original, copy;
1641 ULONGEST len;
1642 struct displaced_step_closure *closure;
9e529e1d 1643 int status;
237fc4c9
PA
1644
1645 /* We should never reach this function if the architecture does not
1646 support displaced stepping. */
1647 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1648
c2829269
PA
1649 /* Nor if the thread isn't meant to step over a breakpoint. */
1650 gdb_assert (tp->control.trap_expected);
1651
c1e36e3e
PA
1652 /* Disable range stepping while executing in the scratch pad. We
1653 want a single-step even if executing the displaced instruction in
1654 the scratch buffer lands within the stepping range (e.g., a
1655 jump/branch). */
1656 tp->control.may_range_step = 0;
1657
fc1cf338
PA
1658 /* We have to displaced step one thread at a time, as we only have
1659 access to a single scratch space per inferior. */
237fc4c9 1660
d20172fc
SM
1661 displaced_step_inferior_state *displaced
1662 = get_displaced_stepping_state (tp->inf);
fc1cf338 1663
00431a78 1664 if (displaced->step_thread != nullptr)
237fc4c9
PA
1665 {
1666 /* Already waiting for a displaced step to finish. Defer this
1667 request and place in queue. */
237fc4c9
PA
1668
1669 if (debug_displaced)
1670 fprintf_unfiltered (gdb_stdlog,
c2829269 1671 "displaced: deferring step of %s\n",
a068643d 1672 target_pid_to_str (tp->ptid).c_str ());
237fc4c9 1673
c2829269 1674 thread_step_over_chain_enqueue (tp);
237fc4c9
PA
1675 return 0;
1676 }
1677 else
1678 {
1679 if (debug_displaced)
1680 fprintf_unfiltered (gdb_stdlog,
1681 "displaced: stepping %s now\n",
a068643d 1682 target_pid_to_str (tp->ptid).c_str ());
237fc4c9
PA
1683 }
1684
fc1cf338 1685 displaced_step_clear (displaced);
237fc4c9 1686
00431a78
PA
1687 scoped_restore_current_thread restore_thread;
1688
1689 switch_to_thread (tp);
ad53cd71 1690
515630c5 1691 original = regcache_read_pc (regcache);
237fc4c9
PA
1692
1693 copy = gdbarch_displaced_step_location (gdbarch);
1694 len = gdbarch_max_insn_length (gdbarch);
1695
d35ae833
PA
1696 if (breakpoint_in_range_p (aspace, copy, len))
1697 {
1698 /* There's a breakpoint set in the scratch pad location range
1699 (which is usually around the entry point). We'd either
1700 install it before resuming, which would overwrite/corrupt the
1701 scratch pad, or if it was already inserted, this displaced
1702 step would overwrite it. The latter is OK in the sense that
1703 we already assume that no thread is going to execute the code
1704 in the scratch pad range (after initial startup) anyway, but
1705 the former is unacceptable. Simply punt and fall back to
1706 stepping over this breakpoint in-line. */
1707 if (debug_displaced)
1708 {
1709 fprintf_unfiltered (gdb_stdlog,
1710 "displaced: breakpoint set in scratch pad. "
1711 "Stepping over breakpoint in-line instead.\n");
1712 }
1713
d35ae833
PA
1714 return -1;
1715 }
1716
237fc4c9 1717 /* Save the original contents of the copy area. */
d20172fc
SM
1718 displaced->step_saved_copy.resize (len);
1719 status = target_read_memory (copy, displaced->step_saved_copy.data (), len);
9e529e1d
JK
1720 if (status != 0)
1721 throw_error (MEMORY_ERROR,
1722 _("Error accessing memory address %s (%s) for "
1723 "displaced-stepping scratch space."),
1724 paddress (gdbarch, copy), safe_strerror (status));
237fc4c9
PA
1725 if (debug_displaced)
1726 {
5af949e3
UW
1727 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1728 paddress (gdbarch, copy));
fc1cf338 1729 displaced_step_dump_bytes (gdb_stdlog,
d20172fc 1730 displaced->step_saved_copy.data (),
fc1cf338 1731 len);
237fc4c9
PA
1732 };
1733
1734 closure = gdbarch_displaced_step_copy_insn (gdbarch,
ad53cd71 1735 original, copy, regcache);
7f03bd92
PA
1736 if (closure == NULL)
1737 {
1738 /* The architecture doesn't know how, or doesn't want, to displaced
1739 step this instruction or instruction sequence. Fall back to
1740 stepping over the breakpoint in-line. */
7f03bd92
PA
1741 return -1;
1742 }
237fc4c9 1743
9f5a595d
UW
1744 /* Save the information we need to fix things up if the step
1745 succeeds. */
00431a78 1746 displaced->step_thread = tp;
fc1cf338
PA
1747 displaced->step_gdbarch = gdbarch;
1748 displaced->step_closure = closure;
1749 displaced->step_original = original;
1750 displaced->step_copy = copy;
9f5a595d 1751
9799571e
TT
1752 {
1753 displaced_step_clear_cleanup cleanup (displaced);
237fc4c9 1754
9799571e
TT
1755 /* Resume execution at the copy. */
1756 regcache_write_pc (regcache, copy);
237fc4c9 1757
9799571e
TT
1758 cleanup.release ();
1759 }
ad53cd71 1760
237fc4c9 1761 if (debug_displaced)
5af949e3
UW
1762 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1763 paddress (gdbarch, copy));
237fc4c9 1764
237fc4c9
PA
1765 return 1;
1766}
1767
3fc8eb30
PA
1768/* Wrapper for displaced_step_prepare_throw that disables further
1769 attempts at displaced stepping if we get a memory error. */
1770
1771static int
00431a78 1772displaced_step_prepare (thread_info *thread)
3fc8eb30
PA
1773{
1774 int prepared = -1;
1775
a70b8144 1776 try
3fc8eb30 1777 {
00431a78 1778 prepared = displaced_step_prepare_throw (thread);
3fc8eb30 1779 }
230d2906 1780 catch (const gdb_exception_error &ex)
3fc8eb30
PA
1781 {
1782 struct displaced_step_inferior_state *displaced_state;
1783
16b41842
PA
1784 if (ex.error != MEMORY_ERROR
1785 && ex.error != NOT_SUPPORTED_ERROR)
eedc3f4f 1786 throw;
3fc8eb30
PA
1787
1788 if (debug_infrun)
1789 {
1790 fprintf_unfiltered (gdb_stdlog,
1791 "infrun: disabling displaced stepping: %s\n",
3d6e9d23 1792 ex.what ());
3fc8eb30
PA
1793 }
1794
1795 /* Be verbose if "set displaced-stepping" is "on", silent if
1796 "auto". */
1797 if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1798 {
fd7dcb94 1799 warning (_("disabling displaced stepping: %s"),
3d6e9d23 1800 ex.what ());
3fc8eb30
PA
1801 }
1802
1803 /* Disable further displaced stepping attempts. */
1804 displaced_state
00431a78 1805 = get_displaced_stepping_state (thread->inf);
3fc8eb30
PA
1806 displaced_state->failed_before = 1;
1807 }
3fc8eb30
PA
1808
1809 return prepared;
1810}
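
/* Illustrative only, not part of the build: how a resume path is
   expected to react to the three return values documented above for
   displaced_step_prepare.  This mirrors the real handling in resume_1
   further below; the helper name is made up for the example.  */
#if 0
static void
example_handle_prepare_result (thread_info *tp)
{
  int prepared = displaced_step_prepare (tp);

  if (prepared == 0)
    {
      /* Another thread owns the scratch pad; TP has been queued and
	 will be resumed once the in-flight displaced step finishes.  */
    }
  else if (prepared < 0)
    {
      /* This instruction can't be displaced stepped; fall back to an
	 in-line step over the breakpoint.  */
    }
  else
    {
      /* TP's PC now points at the scratch pad; single-step the thread
	 from there and call displaced_step_fixup when it reports the
	 step back.  */
    }
}
#endif
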
1811
237fc4c9 1812static void
3e43a32a
MS
1813write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1814 const gdb_byte *myaddr, int len)
237fc4c9 1815{
2989a365 1816 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
abbb1732 1817
237fc4c9
PA
1818 inferior_ptid = ptid;
1819 write_memory (memaddr, myaddr, len);
237fc4c9
PA
1820}
1821
e2d96639
YQ
1822/* Restore the contents of the copy area for thread PTID. */
1823
1824static void
1825displaced_step_restore (struct displaced_step_inferior_state *displaced,
1826 ptid_t ptid)
1827{
1828 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1829
1830 write_memory_ptid (ptid, displaced->step_copy,
d20172fc 1831 displaced->step_saved_copy.data (), len);
e2d96639
YQ
1832 if (debug_displaced)
1833 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
a068643d 1834 target_pid_to_str (ptid).c_str (),
e2d96639
YQ
1835 paddress (displaced->step_gdbarch,
1836 displaced->step_copy));
1837}
1838
372316f1
PA
1839/* If we displaced stepped an instruction successfully, adjust
1840 registers and memory to yield the same effect the instruction would
1841 have had if we had executed it at its original address, and return
1842 1. If the instruction didn't complete, relocate the PC and return
1843 -1. If the thread wasn't displaced stepping, return 0. */
1844
1845static int
00431a78 1846displaced_step_fixup (thread_info *event_thread, enum gdb_signal signal)
237fc4c9 1847{
fc1cf338 1848 struct displaced_step_inferior_state *displaced
00431a78 1849 = get_displaced_stepping_state (event_thread->inf);
372316f1 1850 int ret;
fc1cf338 1851
00431a78
PA
1852 /* Was this event for the thread we displaced? */
1853 if (displaced->step_thread != event_thread)
372316f1 1854 return 0;
237fc4c9 1855
9799571e 1856 displaced_step_clear_cleanup cleanup (displaced);
237fc4c9 1857
00431a78 1858 displaced_step_restore (displaced, displaced->step_thread->ptid);
237fc4c9 1859
cb71640d
PA
1860 /* Fixup may need to read memory/registers. Switch to the thread
1861 that we're fixing up. Also, target_stopped_by_watchpoint checks
1862 the current thread. */
00431a78 1863 switch_to_thread (event_thread);
cb71640d 1864
237fc4c9 1865 /* Did the instruction complete successfully? */
cb71640d
PA
1866 if (signal == GDB_SIGNAL_TRAP
1867 && !(target_stopped_by_watchpoint ()
1868 && (gdbarch_have_nonsteppable_watchpoint (displaced->step_gdbarch)
1869 || target_have_steppable_watchpoint)))
237fc4c9
PA
1870 {
1871 /* Fix up the resulting state. */
fc1cf338
PA
1872 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1873 displaced->step_closure,
1874 displaced->step_original,
1875 displaced->step_copy,
00431a78 1876 get_thread_regcache (displaced->step_thread));
372316f1 1877 ret = 1;
237fc4c9
PA
1878 }
1879 else
1880 {
1881 /* Since the instruction didn't complete, all we can do is
1882 relocate the PC. */
00431a78 1883 struct regcache *regcache = get_thread_regcache (event_thread);
515630c5 1884 CORE_ADDR pc = regcache_read_pc (regcache);
abbb1732 1885
fc1cf338 1886 pc = displaced->step_original + (pc - displaced->step_copy);
515630c5 1887 regcache_write_pc (regcache, pc);
372316f1 1888 ret = -1;
237fc4c9
PA
1889 }
1890
372316f1 1891 return ret;
c2829269 1892}
1c5cfe86 1893
4d9d9d04
PA
1894/* Data to be passed around while handling an event. This data is
1895 discarded between events. */
1896struct execution_control_state
1897{
1898 ptid_t ptid;
1899 /* The thread that got the event, if this was a thread event; NULL
1900 otherwise. */
1901 struct thread_info *event_thread;
1902
1903 struct target_waitstatus ws;
1904 int stop_func_filled_in;
1905 CORE_ADDR stop_func_start;
1906 CORE_ADDR stop_func_end;
1907 const char *stop_func_name;
1908 int wait_some_more;
1909
1910 /* True if the event thread hit the single-step breakpoint of
1911 another thread. Thus the event doesn't cause a stop; the thread
1912 needs to be single-stepped past the single-step breakpoint before
1913 we can switch back to the original stepping thread. */
1914 int hit_singlestep_breakpoint;
1915};
1916
1917/* Clear ECS and set it to point at TP. */
c2829269
PA
1918
1919static void
4d9d9d04
PA
1920reset_ecs (struct execution_control_state *ecs, struct thread_info *tp)
1921{
1922 memset (ecs, 0, sizeof (*ecs));
1923 ecs->event_thread = tp;
1924 ecs->ptid = tp->ptid;
1925}
1926
1927static void keep_going_pass_signal (struct execution_control_state *ecs);
1928static void prepare_to_wait (struct execution_control_state *ecs);
2ac7589c 1929static int keep_going_stepped_thread (struct thread_info *tp);
8d297bbf 1930static step_over_what thread_still_needs_step_over (struct thread_info *tp);
4d9d9d04
PA
1931
1932/* Are there any pending step-over requests? If so, run all we can
1933 now and return true. Otherwise, return false. */
1934
1935static int
c2829269
PA
1936start_step_over (void)
1937{
1938 struct thread_info *tp, *next;
1939
372316f1
PA
1940 /* Don't start a new step-over if we already have an in-line
1941 step-over operation ongoing. */
1942 if (step_over_info_valid_p ())
1943 return 0;
1944
c2829269 1945 for (tp = step_over_queue_head; tp != NULL; tp = next)
237fc4c9 1946 {
4d9d9d04
PA
1947 struct execution_control_state ecss;
1948 struct execution_control_state *ecs = &ecss;
8d297bbf 1949 step_over_what step_what;
372316f1 1950 int must_be_in_line;
c2829269 1951
c65d6b55
PA
1952 gdb_assert (!tp->stop_requested);
1953
c2829269 1954 next = thread_step_over_chain_next (tp);
237fc4c9 1955
c2829269
PA
1956 /* If this inferior already has a displaced step in process,
1957 don't start a new one. */
00431a78 1958 if (displaced_step_in_progress (tp->inf))
c2829269
PA
1959 continue;
1960
372316f1
PA
1961 step_what = thread_still_needs_step_over (tp);
1962 must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT)
1963 || ((step_what & STEP_OVER_BREAKPOINT)
3fc8eb30 1964 && !use_displaced_stepping (tp)));
372316f1
PA
1965
1966 /* We currently stop all threads of all processes to step-over
1967 in-line. If we need to start a new in-line step-over, let
1968 any pending displaced steps finish first. */
1969 if (must_be_in_line && displaced_step_in_progress_any_inferior ())
1970 return 0;
1971
c2829269
PA
1972 thread_step_over_chain_remove (tp);
1973
1974 if (step_over_queue_head == NULL)
1975 {
1976 if (debug_infrun)
1977 fprintf_unfiltered (gdb_stdlog,
1978 "infrun: step-over queue now empty\n");
1979 }
1980
372316f1
PA
1981 if (tp->control.trap_expected
1982 || tp->resumed
1983 || tp->executing)
ad53cd71 1984 {
4d9d9d04
PA
1985 internal_error (__FILE__, __LINE__,
1986 "[%s] has inconsistent state: "
372316f1 1987 "trap_expected=%d, resumed=%d, executing=%d\n",
a068643d 1988 target_pid_to_str (tp->ptid).c_str (),
4d9d9d04 1989 tp->control.trap_expected,
372316f1 1990 tp->resumed,
4d9d9d04 1991 tp->executing);
ad53cd71 1992 }
1c5cfe86 1993
4d9d9d04
PA
1994 if (debug_infrun)
1995 fprintf_unfiltered (gdb_stdlog,
1996 "infrun: resuming [%s] for step-over\n",
a068643d 1997 target_pid_to_str (tp->ptid).c_str ());
4d9d9d04
PA
1998
1999 /* keep_going_pass_signal skips the step-over if the breakpoint
2000 is no longer inserted. In all-stop, we want to keep looking
2001 for a thread that needs a step-over instead of resuming TP,
2002 because we wouldn't be able to resume anything else until the
2003 target stops again. In non-stop, the resume always resumes
2004 only TP, so it's OK to let the thread resume freely. */
fbea99ea 2005 if (!target_is_non_stop_p () && !step_what)
4d9d9d04 2006 continue;
8550d3b3 2007
00431a78 2008 switch_to_thread (tp);
4d9d9d04
PA
2009 reset_ecs (ecs, tp);
2010 keep_going_pass_signal (ecs);
1c5cfe86 2011
4d9d9d04
PA
2012 if (!ecs->wait_some_more)
2013 error (_("Command aborted."));
1c5cfe86 2014
372316f1
PA
2015 gdb_assert (tp->resumed);
2016
2017 /* If we started a new in-line step-over, we're done. */
2018 if (step_over_info_valid_p ())
2019 {
2020 gdb_assert (tp->control.trap_expected);
2021 return 1;
2022 }
2023
fbea99ea 2024 if (!target_is_non_stop_p ())
4d9d9d04
PA
2025 {
2026 /* On all-stop, shouldn't have resumed unless we needed a
2027 step over. */
2028 gdb_assert (tp->control.trap_expected
2029 || tp->step_after_step_resume_breakpoint);
2030
2031 /* With remote targets (at least), in all-stop, we can't
2032 issue any further remote commands until the program stops
2033 again. */
2034 return 1;
1c5cfe86 2035 }
c2829269 2036
4d9d9d04
PA
2037 /* Either the thread no longer needed a step-over, or a new
2038 displaced stepping sequence started. Even in the latter
2039 case, continue looking. Maybe we can also start another
2040 displaced step on a thread of another process. */
237fc4c9 2041 }
4d9d9d04
PA
2042
2043 return 0;
237fc4c9
PA
2044}
2045
5231c1fd
PA
2046/* Update global variables holding ptids to hold NEW_PTID if they were
2047 holding OLD_PTID. */
2048static void
2049infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
2050{
d7e15655 2051 if (inferior_ptid == old_ptid)
5231c1fd 2052 inferior_ptid = new_ptid;
5231c1fd
PA
2053}
2054
237fc4c9 2055\f
c906108c 2056
53904c9e
AC
2057static const char schedlock_off[] = "off";
2058static const char schedlock_on[] = "on";
2059static const char schedlock_step[] = "step";
f2665db5 2060static const char schedlock_replay[] = "replay";
40478521 2061static const char *const scheduler_enums[] = {
ef346e04
AC
2062 schedlock_off,
2063 schedlock_on,
2064 schedlock_step,
f2665db5 2065 schedlock_replay,
ef346e04
AC
2066 NULL
2067};
f2665db5 2068static const char *scheduler_mode = schedlock_replay;
920d2a44
AC
2069static void
2070show_scheduler_mode (struct ui_file *file, int from_tty,
2071 struct cmd_list_element *c, const char *value)
2072{
3e43a32a
MS
2073 fprintf_filtered (file,
2074 _("Mode for locking scheduler "
2075 "during execution is \"%s\".\n"),
920d2a44
AC
2076 value);
2077}
c906108c
SS
2078
2079static void
eb4c3f4a 2080set_schedlock_func (const char *args, int from_tty, struct cmd_list_element *c)
c906108c 2081{
eefe576e
AC
2082 if (!target_can_lock_scheduler)
2083 {
2084 scheduler_mode = schedlock_off;
2085 error (_("Target '%s' cannot support this command."), target_shortname);
2086 }
c906108c
SS
2087}
2088
d4db2f36
PA
2089/* True if execution commands resume all threads of all processes by
2090 default; otherwise, resume only threads of the current inferior
2091 process. */
2092int sched_multi = 0;
2093
2facfe5c
DD
2094/* Try to setup for software single stepping over the specified location.
2095 Return 1 if target_resume() should use hardware single step.
2096
2097 GDBARCH the current gdbarch.
2098 PC the location to step over. */
2099
2100static int
2101maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
2102{
2103 int hw_step = 1;
2104
f02253f1 2105 if (execution_direction == EXEC_FORWARD
93f9a11f
YQ
2106 && gdbarch_software_single_step_p (gdbarch))
2107 hw_step = !insert_single_step_breakpoints (gdbarch);
2108
2facfe5c
DD
2109 return hw_step;
2110}
c906108c 2111
f3263aa4
PA
2112/* See infrun.h. */
2113
09cee04b
PA
2114ptid_t
2115user_visible_resume_ptid (int step)
2116{
f3263aa4 2117 ptid_t resume_ptid;
09cee04b 2118
09cee04b
PA
2119 if (non_stop)
2120 {
2121 /* With non-stop mode on, threads are always handled
2122 individually. */
2123 resume_ptid = inferior_ptid;
2124 }
2125 else if ((scheduler_mode == schedlock_on)
03d46957 2126 || (scheduler_mode == schedlock_step && step))
09cee04b 2127 {
f3263aa4
PA
2128 /* User-settable 'scheduler' mode requires solo thread
2129 resume. */
09cee04b
PA
2130 resume_ptid = inferior_ptid;
2131 }
f2665db5
MM
2132 else if ((scheduler_mode == schedlock_replay)
2133 && target_record_will_replay (minus_one_ptid, execution_direction))
2134 {
2135 /* User-settable 'scheduler' mode requires solo thread resume in replay
2136 mode. */
2137 resume_ptid = inferior_ptid;
2138 }
f3263aa4
PA
2139 else if (!sched_multi && target_supports_multi_process ())
2140 {
2141 /* Resume all threads of the current process (and none of other
2142 processes). */
e99b03dc 2143 resume_ptid = ptid_t (inferior_ptid.pid ());
f3263aa4
PA
2144 }
2145 else
2146 {
2147 /* Resume all threads of all processes. */
2148 resume_ptid = RESUME_ALL;
2149 }
09cee04b
PA
2150
2151 return resume_ptid;
2152}
2153
fbea99ea
PA
2154/* Return a ptid representing the set of threads that we will resume,
2155 from the perspective of the target, assuming run control handling
2156 does not require leaving some threads stopped (e.g., stepping past
2157 breakpoint). USER_STEP indicates whether we're about to start the
2158 target for a stepping command. */
2159
2160static ptid_t
2161internal_resume_ptid (int user_step)
2162{
2163 /* In non-stop, we always control threads individually. Note that
2164 the target may always work in non-stop mode even with "set
2165 non-stop off", in which case user_visible_resume_ptid could
2166 return a wildcard ptid. */
2167 if (target_is_non_stop_p ())
2168 return inferior_ptid;
2169 else
2170 return user_visible_resume_ptid (user_step);
2171}
2172
64ce06e4
PA
2173/* Wrapper for target_resume, that handles infrun-specific
2174 bookkeeping. */
2175
2176static void
2177do_target_resume (ptid_t resume_ptid, int step, enum gdb_signal sig)
2178{
2179 struct thread_info *tp = inferior_thread ();
2180
c65d6b55
PA
2181 gdb_assert (!tp->stop_requested);
2182
64ce06e4 2183 /* Install inferior's terminal modes. */
223ffa71 2184 target_terminal::inferior ();
64ce06e4
PA
2185
2186 /* Avoid confusing the next resume, if the next stop/resume
2187 happens to apply to another thread. */
2188 tp->suspend.stop_signal = GDB_SIGNAL_0;
2189
8f572e5c
PA
2190 /* Advise target which signals may be handled silently.
2191
2192 If we have removed breakpoints because we are stepping over one
2193 in-line (in any thread), we need to receive all signals to avoid
2194 accidentally skipping a breakpoint during execution of a signal
2195 handler.
2196
2197 Likewise if we're displaced stepping, otherwise a trap for a
2198 breakpoint in a signal handler might be confused with the
2199 displaced step finishing. We don't make the displaced_step_fixup
2200 step distinguish the cases instead, because:
2201
2202 - a backtrace while stopped in the signal handler would show the
2203 scratch pad as frame older than the signal handler, instead of
2204 the real mainline code.
2205
2206 - when the thread is later resumed, the signal handler would
2207 return to the scratch pad area, which would no longer be
2208 valid. */
2209 if (step_over_info_valid_p ()
00431a78 2210 || displaced_step_in_progress (tp->inf))
adc6a863 2211 target_pass_signals ({});
64ce06e4 2212 else
adc6a863 2213 target_pass_signals (signal_pass);
64ce06e4
PA
2214
2215 target_resume (resume_ptid, step, sig);
85ad3aaf
PA
2216
2217 target_commit_resume ();
64ce06e4
PA
2218}
2219
d930703d 2220/* Resume the inferior. SIG is the signal to give the inferior
71d378ae
PA
2221 (GDB_SIGNAL_0 for none). Note: don't call this directly; instead
2222 call 'resume', which handles exceptions. */
c906108c 2223
71d378ae
PA
2224static void
2225resume_1 (enum gdb_signal sig)
c906108c 2226{
515630c5 2227 struct regcache *regcache = get_current_regcache ();
ac7936df 2228 struct gdbarch *gdbarch = regcache->arch ();
4e1c45ea 2229 struct thread_info *tp = inferior_thread ();
515630c5 2230 CORE_ADDR pc = regcache_read_pc (regcache);
8b86c959 2231 const address_space *aspace = regcache->aspace ();
b0f16a3e 2232 ptid_t resume_ptid;
856e7dd6
PA
2233 /* This represents the user's step vs continue request. When
2234 deciding whether "set scheduler-locking step" applies, it's the
2235 user's intention that counts. */
2236 const int user_step = tp->control.stepping_command;
64ce06e4
PA
2237 /* This represents what we'll actually request the target to do.
2238 This can decay from a step to a continue, if e.g., we need to
2239 implement single-stepping with breakpoints (software
2240 single-step). */
6b403daa 2241 int step;
c7e8a53c 2242
c65d6b55 2243 gdb_assert (!tp->stop_requested);
c2829269
PA
2244 gdb_assert (!thread_is_in_step_over_chain (tp));
2245
372316f1
PA
2246 if (tp->suspend.waitstatus_pending_p)
2247 {
2248 if (debug_infrun)
2249 {
23fdd69e
SM
2250 std::string statstr
2251 = target_waitstatus_to_string (&tp->suspend.waitstatus);
372316f1 2252
372316f1 2253 fprintf_unfiltered (gdb_stdlog,
23fdd69e
SM
2254 "infrun: resume: thread %s has pending wait "
2255 "status %s (currently_stepping=%d).\n",
a068643d
TT
2256 target_pid_to_str (tp->ptid).c_str (),
2257 statstr.c_str (),
372316f1 2258 currently_stepping (tp));
372316f1
PA
2259 }
2260
2261 tp->resumed = 1;
2262
2263 /* FIXME: What should we do if we are supposed to resume this
2264 thread with a signal? Maybe we should maintain a queue of
2265 pending signals to deliver. */
2266 if (sig != GDB_SIGNAL_0)
2267 {
fd7dcb94 2268 warning (_("Couldn't deliver signal %s to %s."),
a068643d
TT
2269 gdb_signal_to_name (sig),
2270 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
2271 }
2272
2273 tp->suspend.stop_signal = GDB_SIGNAL_0;
372316f1
PA
2274
2275 if (target_can_async_p ())
9516f85a
AB
2276 {
2277 target_async (1);
2278 /* Tell the event loop we have an event to process. */
2279 mark_async_event_handler (infrun_async_inferior_event_token);
2280 }
372316f1
PA
2281 return;
2282 }
2283
2284 tp->stepped_breakpoint = 0;
2285
6b403daa
PA
2286 /* Depends on stepped_breakpoint. */
2287 step = currently_stepping (tp);
2288
74609e71
YQ
2289 if (current_inferior ()->waiting_for_vfork_done)
2290 {
48f9886d
PA
2291 /* Don't try to single-step a vfork parent that is waiting for
2292 the child to get out of the shared memory region (by exec'ing
2293 or exiting). This is particularly important on software
2294 single-step archs, as the child process would trip on the
2295 software single step breakpoint inserted for the parent
2296 process. Since the parent will not actually execute any
2297 instruction until the child is out of the shared region (such
2298 are vfork's semantics), it is safe to simply continue it.
2299 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2300 the parent, and tell it to `keep_going', which automatically
2301 re-sets it to stepping. */
74609e71
YQ
2302 if (debug_infrun)
2303 fprintf_unfiltered (gdb_stdlog,
2304 "infrun: resume : clear step\n");
a09dd441 2305 step = 0;
74609e71
YQ
2306 }
2307
527159b7 2308 if (debug_infrun)
237fc4c9 2309 fprintf_unfiltered (gdb_stdlog,
c9737c08 2310 "infrun: resume (step=%d, signal=%s), "
0d9a9a5f 2311 "trap_expected=%d, current thread [%s] at %s\n",
c9737c08
PA
2312 step, gdb_signal_to_symbol_string (sig),
2313 tp->control.trap_expected,
a068643d 2314 target_pid_to_str (inferior_ptid).c_str (),
0d9a9a5f 2315 paddress (gdbarch, pc));
c906108c 2316
c2c6d25f
JM
2317 /* Normally, by the time we reach `resume', the breakpoints are either
2318 removed or inserted, as appropriate. The exception is if we're sitting
2319 at a permanent breakpoint; we need to step over it, but permanent
2320 breakpoints can't be removed. So we have to test for it here. */
6c95b8df 2321 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
6d350bb5 2322 {
af48d08f
PA
2323 if (sig != GDB_SIGNAL_0)
2324 {
2325 /* We have a signal to pass to the inferior. The resume
2326 may, or may not take us to the signal handler. If this
2327 is a step, we'll need to stop in the signal handler, if
2328 there's one, (if the target supports stepping into
2329 handlers), or in the next mainline instruction, if
2330 there's no handler. If this is a continue, we need to be
2331 sure to run the handler with all breakpoints inserted.
2332 In all cases, set a breakpoint at the current address
2333 (where the handler returns to), and once that breakpoint
2334 is hit, resume skipping the permanent breakpoint. If
2335 that breakpoint isn't hit, then we've stepped into the
2336 signal handler (or hit some other event). We'll delete
2337 the step-resume breakpoint then. */
2338
2339 if (debug_infrun)
2340 fprintf_unfiltered (gdb_stdlog,
2341 "infrun: resume: skipping permanent breakpoint, "
2342 "deliver signal first\n");
2343
2344 clear_step_over_info ();
2345 tp->control.trap_expected = 0;
2346
2347 if (tp->control.step_resume_breakpoint == NULL)
2348 {
2349 /* Set a "high-priority" step-resume, as we don't want
2350 user breakpoints at PC to trigger (again) when this
2351 hits. */
2352 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2353 gdb_assert (tp->control.step_resume_breakpoint->loc->permanent);
2354
2355 tp->step_after_step_resume_breakpoint = step;
2356 }
2357
2358 insert_breakpoints ();
2359 }
2360 else
2361 {
2362 /* There's no signal to pass, we can go ahead and skip the
2363 permanent breakpoint manually. */
2364 if (debug_infrun)
2365 fprintf_unfiltered (gdb_stdlog,
2366 "infrun: resume: skipping permanent breakpoint\n");
2367 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
2368 /* Update pc to reflect the new address from which we will
2369 execute instructions. */
2370 pc = regcache_read_pc (regcache);
2371
2372 if (step)
2373 {
2374 /* We've already advanced the PC, so the stepping part
2375 is done. Now we need to arrange for a trap to be
2376 reported to handle_inferior_event. Set a breakpoint
2377 at the current PC, and run to it. Don't update
2378 prev_pc, because if we end in
44a1ee51
PA
2379 switch_back_to_stepped_thread, we want the "expected
2380 thread advanced also" branch to be taken. IOW, we
2381 don't want this thread to step further from PC
af48d08f 2382 (overstep). */
1ac806b8 2383 gdb_assert (!step_over_info_valid_p ());
af48d08f
PA
2384 insert_single_step_breakpoint (gdbarch, aspace, pc);
2385 insert_breakpoints ();
2386
fbea99ea 2387 resume_ptid = internal_resume_ptid (user_step);
1ac806b8 2388 do_target_resume (resume_ptid, 0, GDB_SIGNAL_0);
372316f1 2389 tp->resumed = 1;
af48d08f
PA
2390 return;
2391 }
2392 }
6d350bb5 2393 }
c2c6d25f 2394
c1e36e3e
PA
2395 /* If we have a breakpoint to step over, make sure to do a single
2396 step only. Same if we have software watchpoints. */
2397 if (tp->control.trap_expected || bpstat_should_step ())
2398 tp->control.may_range_step = 0;
2399
237fc4c9
PA
2400 /* If enabled, step over breakpoints by executing a copy of the
2401 instruction at a different address.
2402
2403 We can't use displaced stepping when we have a signal to deliver;
2404 the comments for displaced_step_prepare explain why. The
2406 comments in handle_inferior_event for dealing with 'random
74609e71
YQ
2406 signals' explain what we do instead.
2407
2408 We can't use displaced stepping when we are waiting for vfork_done
2409 event, displaced stepping breaks the vfork child similarly as single
2410 step software breakpoint. */
3fc8eb30
PA
2411 if (tp->control.trap_expected
2412 && use_displaced_stepping (tp)
cb71640d 2413 && !step_over_info_valid_p ()
a493e3e2 2414 && sig == GDB_SIGNAL_0
74609e71 2415 && !current_inferior ()->waiting_for_vfork_done)
237fc4c9 2416 {
00431a78 2417 int prepared = displaced_step_prepare (tp);
fc1cf338 2418
3fc8eb30 2419 if (prepared == 0)
d56b7306 2420 {
4d9d9d04
PA
2421 if (debug_infrun)
2422 fprintf_unfiltered (gdb_stdlog,
2423 "Got placed in step-over queue\n");
2424
2425 tp->control.trap_expected = 0;
d56b7306
VP
2426 return;
2427 }
3fc8eb30
PA
2428 else if (prepared < 0)
2429 {
2430 /* Fall back to stepping over the breakpoint in-line. */
2431
2432 if (target_is_non_stop_p ())
2433 stop_all_threads ();
2434
a01bda52 2435 set_step_over_info (regcache->aspace (),
21edc42f 2436 regcache_read_pc (regcache), 0, tp->global_num);
3fc8eb30
PA
2437
2438 step = maybe_software_singlestep (gdbarch, pc);
2439
2440 insert_breakpoints ();
2441 }
2442 else if (prepared > 0)
2443 {
2444 struct displaced_step_inferior_state *displaced;
99e40580 2445
3fc8eb30
PA
2446 /* Update pc to reflect the new address from which we will
2447 execute instructions due to displaced stepping. */
00431a78 2448 pc = regcache_read_pc (get_thread_regcache (tp));
ca7781d2 2449
00431a78 2450 displaced = get_displaced_stepping_state (tp->inf);
3fc8eb30
PA
2451 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
2452 displaced->step_closure);
2453 }
237fc4c9
PA
2454 }
2455
2facfe5c 2456 /* Do we need to do it the hard way, w/temp breakpoints? */
99e40580 2457 else if (step)
2facfe5c 2458 step = maybe_software_singlestep (gdbarch, pc);
c906108c 2459
30852783
UW
2460 /* Currently, our software single-step implementation leads to different
2461 results than hardware single-stepping in one situation: when stepping
2462 into delivering a signal which has an associated signal handler,
2463 hardware single-step will stop at the first instruction of the handler,
2464 while software single-step will simply skip execution of the handler.
2465
2466 For now, this difference in behavior is accepted since there is no
2467 easy way to actually implement single-stepping into a signal handler
2468 without kernel support.
2469
2470 However, there is one scenario where this difference leads to follow-on
2471 problems: if we're stepping off a breakpoint by removing all breakpoints
2472 and then single-stepping. In this case, the software single-step
2473 behavior means that even if there is a *breakpoint* in the signal
2474 handler, GDB still would not stop.
2475
2476 Fortunately, we can at least fix this particular issue. We detect
2477 here the case where we are about to deliver a signal while software
2478 single-stepping with breakpoints removed. In this situation, we
2479 revert the decisions to remove all breakpoints and insert single-
2480 step breakpoints, and instead we install a step-resume breakpoint
2481 at the current address, deliver the signal without stepping, and
2482 once we arrive back at the step-resume breakpoint, actually step
2483 over the breakpoint we originally wanted to step over. */
34b7e8a6 2484 if (thread_has_single_step_breakpoints_set (tp)
6cc83d2a
PA
2485 && sig != GDB_SIGNAL_0
2486 && step_over_info_valid_p ())
30852783
UW
2487 {
2488 /* If we have nested signals or a pending signal is delivered
2489 immediately after a handler returns, we might already have
2490 a step-resume breakpoint set on the earlier handler. We cannot
2491 set another step-resume breakpoint; just continue on until the
2492 original breakpoint is hit. */
2493 if (tp->control.step_resume_breakpoint == NULL)
2494 {
2c03e5be 2495 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
30852783
UW
2496 tp->step_after_step_resume_breakpoint = 1;
2497 }
2498
34b7e8a6 2499 delete_single_step_breakpoints (tp);
30852783 2500
31e77af2 2501 clear_step_over_info ();
30852783 2502 tp->control.trap_expected = 0;
31e77af2
PA
2503
2504 insert_breakpoints ();
30852783
UW
2505 }
2506
b0f16a3e
SM
2507 /* If STEP is set, it's a request to use hardware stepping
2508 facilities. But in that case, we should never
2509 use singlestep breakpoint. */
34b7e8a6 2510 gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));
dfcd3bfb 2511
fbea99ea 2512 /* Decide the set of threads to ask the target to resume. */
1946c4cc 2513 if (tp->control.trap_expected)
b0f16a3e
SM
2514 {
2515 /* We're allowing a thread to run past a breakpoint it has
1946c4cc
YQ
2516 hit, either by single-stepping the thread with the breakpoint
2517 removed, or by displaced stepping, with the breakpoint inserted.
2518 In the former case, we need to single-step only this thread,
2519 and keep others stopped, as they can miss this breakpoint if
2520 allowed to run. That's not really a problem for displaced
2521 stepping, but, we still keep other threads stopped, in case
2522 another thread is also stopped for a breakpoint waiting for
2523 its turn in the displaced stepping queue. */
b0f16a3e
SM
2524 resume_ptid = inferior_ptid;
2525 }
fbea99ea
PA
2526 else
2527 resume_ptid = internal_resume_ptid (user_step);
d4db2f36 2528
7f5ef605
PA
2529 if (execution_direction != EXEC_REVERSE
2530 && step && breakpoint_inserted_here_p (aspace, pc))
b0f16a3e 2531 {
372316f1
PA
2532 /* There are two cases where we currently need to step a
2533 breakpoint instruction when we have a signal to deliver:
2534
2535 - See handle_signal_stop where we handle random signals that
2536 could take us out of the stepping range. Normally, in
2537 that case we end up continuing (instead of stepping) over the
7f5ef605
PA
2538 signal handler with a breakpoint at PC, but there are cases
2539 where we should _always_ single-step, even if we have a
2540 step-resume breakpoint, like when a software watchpoint is
2541 set. Assuming single-stepping and delivering a signal at the
2542 same time would take us to the signal handler, then we could
2543 have removed the breakpoint at PC to step over it. However,
2544 some hardware step targets (like e.g., Mac OS) can't step
2545 into signal handlers, and for those, we need to leave the
2546 breakpoint at PC inserted, as otherwise if the handler
2547 recurses and executes PC again, it'll miss the breakpoint.
2548 So we leave the breakpoint inserted anyway, but we need to
2549 record that we tried to step a breakpoint instruction, so
372316f1
PA
2550 that adjust_pc_after_break doesn't end up confused.
2551
2552 - In non-stop if we insert a breakpoint (e.g., a step-resume)
2553 in one thread after another thread that was stepping had been
2554 momentarily paused for a step-over. When we re-resume the
2555 stepping thread, it may be resumed from that address with a
2556 breakpoint that hasn't trapped yet. Seen with
2557 gdb.threads/non-stop-fair-events.exp, on targets that don't
2558 do displaced stepping. */
2559
2560 if (debug_infrun)
2561 fprintf_unfiltered (gdb_stdlog,
2562 "infrun: resume: [%s] stepped breakpoint\n",
a068643d 2563 target_pid_to_str (tp->ptid).c_str ());
7f5ef605
PA
2564
2565 tp->stepped_breakpoint = 1;
2566
b0f16a3e
SM
2567 /* Most targets can step a breakpoint instruction, thus
2568 executing it normally. But if this one cannot, just
2569 continue and we will hit it anyway. */
7f5ef605 2570 if (gdbarch_cannot_step_breakpoint (gdbarch))
b0f16a3e
SM
2571 step = 0;
2572 }
ef5cf84e 2573
b0f16a3e 2574 if (debug_displaced
cb71640d 2575 && tp->control.trap_expected
3fc8eb30 2576 && use_displaced_stepping (tp)
cb71640d 2577 && !step_over_info_valid_p ())
b0f16a3e 2578 {
00431a78 2579 struct regcache *resume_regcache = get_thread_regcache (tp);
ac7936df 2580 struct gdbarch *resume_gdbarch = resume_regcache->arch ();
b0f16a3e
SM
2581 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
2582 gdb_byte buf[4];
2583
2584 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
2585 paddress (resume_gdbarch, actual_pc));
2586 read_memory (actual_pc, buf, sizeof (buf));
2587 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
2588 }
237fc4c9 2589
b0f16a3e
SM
2590 if (tp->control.may_range_step)
2591 {
2592 /* If we're resuming a thread with the PC out of the step
2593 range, then we're doing some nested/finer run control
2594 operation, like stepping the thread out of the dynamic
2595 linker or the displaced stepping scratch pad. We
2596 shouldn't have allowed a range step then. */
2597 gdb_assert (pc_in_thread_step_range (pc, tp));
2598 }
c1e36e3e 2599
64ce06e4 2600 do_target_resume (resume_ptid, step, sig);
372316f1 2601 tp->resumed = 1;
c906108c 2602}
71d378ae
PA
2603
2604/* Resume the inferior. SIG is the signal to give the inferior
2605 (GDB_SIGNAL_0 for none). This is a wrapper around 'resume_1' that
2606 rolls back state on error. */
2607
aff4e175 2608static void
71d378ae
PA
2609resume (gdb_signal sig)
2610{
a70b8144 2611 try
71d378ae
PA
2612 {
2613 resume_1 (sig);
2614 }
230d2906 2615 catch (const gdb_exception &ex)
71d378ae
PA
2616 {
2617 /* If resuming is being aborted for any reason, delete any
2618 single-step breakpoint resume_1 may have created, to avoid
2619 confusing the following resumption, and to avoid leaving
2620 single-step breakpoints perturbing other threads, in case
2621 we're running in non-stop mode. */
2622 if (inferior_ptid != null_ptid)
2623 delete_single_step_breakpoints (inferior_thread ());
eedc3f4f 2624 throw;
71d378ae 2625 }
71d378ae
PA
2626}
2627
c906108c 2628\f
237fc4c9 2629/* Proceeding. */
c906108c 2630
4c2f2a79
PA
2631/* See infrun.h. */
2632
2633/* Counter that tracks the number of user-visible stops. This can be used
2634 to tell whether a command has proceeded the inferior past the
2635 current location. This allows e.g., inferior function calls in
2636 breakpoint commands to not interrupt the command list. When the
2637 call finishes successfully, the inferior is standing at the same
2638 breakpoint as if nothing happened (and so we don't call
2639 normal_stop). */
2640static ULONGEST current_stop_id;
2641
2642/* See infrun.h. */
2643
2644ULONGEST
2645get_stop_id (void)
2646{
2647 return current_stop_id;
2648}
2649
2650/* Called when we report a user visible stop. */
2651
2652static void
2653new_stop_id (void)
2654{
2655 current_stop_id++;
2656}
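
/* Illustrative only, not part of the build: a hypothetical caller can
   compare stop IDs around an operation that may run the inferior to
   tell whether a user-visible stop happened in between.  The function
   name below is made up for the example.  */
#if 0
static int
example_inferior_was_proceeded (void)
{
  ULONGEST stop_id_before = get_stop_id ();

  /* ... something that may proceed and re-stop the inferior, e.g. an
     inferior function call made by a breakpoint command ...  */

  /* A different stop ID means a new user-visible stop was reported,
     so the caller would typically abort the command list it was
     processing.  */
  return get_stop_id () != stop_id_before;
}
#endif
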
2657
c906108c
SS
2658/* Clear out all variables saying what to do when inferior is continued.
2659 First do this, then set the ones you want, then call `proceed'. */
2660
a7212384
UW
2661static void
2662clear_proceed_status_thread (struct thread_info *tp)
c906108c 2663{
a7212384
UW
2664 if (debug_infrun)
2665 fprintf_unfiltered (gdb_stdlog,
2666 "infrun: clear_proceed_status_thread (%s)\n",
a068643d 2667 target_pid_to_str (tp->ptid).c_str ());
d6b48e9c 2668
372316f1
PA
2669 /* If we're starting a new sequence, then the previous finished
2670 single-step is no longer relevant. */
2671 if (tp->suspend.waitstatus_pending_p)
2672 {
2673 if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
2674 {
2675 if (debug_infrun)
2676 fprintf_unfiltered (gdb_stdlog,
2677 "infrun: clear_proceed_status: pending "
2678 "event of %s was a finished step. "
2679 "Discarding.\n",
a068643d 2680 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
2681
2682 tp->suspend.waitstatus_pending_p = 0;
2683 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
2684 }
2685 else if (debug_infrun)
2686 {
23fdd69e
SM
2687 std::string statstr
2688 = target_waitstatus_to_string (&tp->suspend.waitstatus);
372316f1 2689
372316f1
PA
2690 fprintf_unfiltered (gdb_stdlog,
2691 "infrun: clear_proceed_status_thread: thread %s "
2692 "has pending wait status %s "
2693 "(currently_stepping=%d).\n",
a068643d
TT
2694 target_pid_to_str (tp->ptid).c_str (),
2695 statstr.c_str (),
372316f1 2696 currently_stepping (tp));
372316f1
PA
2697 }
2698 }
2699
70509625
PA
2700 /* If this signal should not be seen by program, give it zero.
2701 Used for debugging signals. */
2702 if (!signal_pass_state (tp->suspend.stop_signal))
2703 tp->suspend.stop_signal = GDB_SIGNAL_0;
2704
46e3ed7f 2705 delete tp->thread_fsm;
243a9253
PA
2706 tp->thread_fsm = NULL;
2707
16c381f0
JK
2708 tp->control.trap_expected = 0;
2709 tp->control.step_range_start = 0;
2710 tp->control.step_range_end = 0;
c1e36e3e 2711 tp->control.may_range_step = 0;
16c381f0
JK
2712 tp->control.step_frame_id = null_frame_id;
2713 tp->control.step_stack_frame_id = null_frame_id;
2714 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
885eeb5b 2715 tp->control.step_start_function = NULL;
a7212384 2716 tp->stop_requested = 0;
4e1c45ea 2717
16c381f0 2718 tp->control.stop_step = 0;
32400beb 2719
16c381f0 2720 tp->control.proceed_to_finish = 0;
414c69f7 2721
856e7dd6 2722 tp->control.stepping_command = 0;
17b2616c 2723
a7212384 2724 /* Discard any remaining commands or status from previous stop. */
16c381f0 2725 bpstat_clear (&tp->control.stop_bpstat);
a7212384 2726}
32400beb 2727
a7212384 2728void
70509625 2729clear_proceed_status (int step)
a7212384 2730{
f2665db5
MM
2731 /* With scheduler-locking replay, stop replaying other threads if we're
2732 not replaying the user-visible resume ptid.
2733
2734 This is a convenience feature to not require the user to explicitly
2735 stop replaying the other threads. We're assuming that the user's
2736 intent is to resume tracing the recorded process. */
2737 if (!non_stop && scheduler_mode == schedlock_replay
2738 && target_record_is_replaying (minus_one_ptid)
2739 && !target_record_will_replay (user_visible_resume_ptid (step),
2740 execution_direction))
2741 target_record_stop_replaying ();
2742
08036331 2743 if (!non_stop && inferior_ptid != null_ptid)
6c95b8df 2744 {
08036331 2745 ptid_t resume_ptid = user_visible_resume_ptid (step);
70509625
PA
2746
2747 /* In all-stop mode, delete the per-thread status of all threads
2748 we're about to resume, implicitly and explicitly. */
08036331
PA
2749 for (thread_info *tp : all_non_exited_threads (resume_ptid))
2750 clear_proceed_status_thread (tp);
6c95b8df
PA
2751 }
2752
d7e15655 2753 if (inferior_ptid != null_ptid)
a7212384
UW
2754 {
2755 struct inferior *inferior;
2756
2757 if (non_stop)
2758 {
6c95b8df
PA
2759 /* If in non-stop mode, only delete the per-thread status of
2760 the current thread. */
a7212384
UW
2761 clear_proceed_status_thread (inferior_thread ());
2762 }
6c95b8df 2763
d6b48e9c 2764 inferior = current_inferior ();
16c381f0 2765 inferior->control.stop_soon = NO_STOP_QUIETLY;
4e1c45ea
PA
2766 }
2767
76727919 2768 gdb::observers::about_to_proceed.notify ();
c906108c
SS
2769}
2770
99619bea
PA
2771/* Returns true if TP is still stopped at a breakpoint that needs
2772 stepping-over in order to make progress. If the breakpoint is gone
2773 meanwhile, we can skip the whole step-over dance. */
ea67f13b
DJ
2774
2775static int
6c4cfb24 2776thread_still_needs_step_over_bp (struct thread_info *tp)
99619bea
PA
2777{
2778 if (tp->stepping_over_breakpoint)
2779 {
00431a78 2780 struct regcache *regcache = get_thread_regcache (tp);
99619bea 2781
a01bda52 2782 if (breakpoint_here_p (regcache->aspace (),
af48d08f
PA
2783 regcache_read_pc (regcache))
2784 == ordinary_breakpoint_here)
99619bea
PA
2785 return 1;
2786
2787 tp->stepping_over_breakpoint = 0;
2788 }
2789
2790 return 0;
2791}
2792
6c4cfb24
PA
2793/* Check whether thread TP still needs to start a step-over in order
2794 to make progress when resumed. Returns a bitwise OR of enum
2795 step_over_what bits, indicating what needs to be stepped over. */
2796
8d297bbf 2797static step_over_what
6c4cfb24
PA
2798thread_still_needs_step_over (struct thread_info *tp)
2799{
8d297bbf 2800 step_over_what what = 0;
6c4cfb24
PA
2801
2802 if (thread_still_needs_step_over_bp (tp))
2803 what |= STEP_OVER_BREAKPOINT;
2804
2805 if (tp->stepping_over_watchpoint
2806 && !target_have_steppable_watchpoint)
2807 what |= STEP_OVER_WATCHPOINT;
2808
2809 return what;
2810}
2811
483805cf
PA
2812/* Returns true if scheduler locking applies to TP, the thread we're
2813 about to resume (possibly with a step/next-like command). */
2814
2815static int
856e7dd6 2816schedlock_applies (struct thread_info *tp)
483805cf
PA
2817{
2818 return (scheduler_mode == schedlock_on
2819 || (scheduler_mode == schedlock_step
f2665db5
MM
2820 && tp->control.stepping_command)
2821 || (scheduler_mode == schedlock_replay
2822 && target_record_will_replay (minus_one_ptid,
2823 execution_direction)));
483805cf
PA
2824}
2825
c906108c
SS
2826/* Basic routine for continuing the program in various fashions.
2827
2828 ADDR is the address to resume at, or -1 for resume where stopped.
aff4e175
AB
2829 SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
2830 or GDB_SIGNAL_DEFAULT to act according to how it stopped.
c906108c
SS
2831
2832 You should call clear_proceed_status before calling proceed. */
2833
2834void
64ce06e4 2835proceed (CORE_ADDR addr, enum gdb_signal siggnal)
c906108c 2836{
e58b0e63
PA
2837 struct regcache *regcache;
2838 struct gdbarch *gdbarch;
e58b0e63 2839 CORE_ADDR pc;
4d9d9d04
PA
2840 ptid_t resume_ptid;
2841 struct execution_control_state ecss;
2842 struct execution_control_state *ecs = &ecss;
4d9d9d04 2843 int started;
c906108c 2844
e58b0e63
PA
2845 /* If we're stopped at a fork/vfork, follow the branch set by the
2846 "set follow-fork-mode" command; otherwise, we'll just proceed
2847 resuming the current thread. */
2848 if (!follow_fork ())
2849 {
2850 /* The target for some reason decided not to resume. */
2851 normal_stop ();
f148b27e
PA
2852 if (target_can_async_p ())
2853 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
e58b0e63
PA
2854 return;
2855 }
2856
842951eb
PA
2857 /* We'll update this if & when we switch to a new thread. */
2858 previous_inferior_ptid = inferior_ptid;
2859
e58b0e63 2860 regcache = get_current_regcache ();
ac7936df 2861 gdbarch = regcache->arch ();
8b86c959
YQ
2862 const address_space *aspace = regcache->aspace ();
2863
e58b0e63 2864 pc = regcache_read_pc (regcache);
08036331 2865 thread_info *cur_thr = inferior_thread ();
e58b0e63 2866
99619bea 2867 /* Fill in with reasonable starting values. */
08036331 2868 init_thread_stepping_state (cur_thr);
99619bea 2869
08036331 2870 gdb_assert (!thread_is_in_step_over_chain (cur_thr));
c2829269 2871
2acceee2 2872 if (addr == (CORE_ADDR) -1)
c906108c 2873 {
08036331 2874 if (pc == cur_thr->suspend.stop_pc
af48d08f 2875 && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
b2175913 2876 && execution_direction != EXEC_REVERSE)
3352ef37
AC
2877 /* There is a breakpoint at the address we will resume at,
2878 step one instruction before inserting breakpoints so that
2879 we do not stop right away (and report a second hit at this
b2175913
MS
2880 breakpoint).
2881
2882 Note, we don't do this in reverse, because we won't
2883 actually be executing the breakpoint insn anyway.
2884 We'll be (un-)executing the previous instruction. */
08036331 2885 cur_thr->stepping_over_breakpoint = 1;
515630c5
UW
2886 else if (gdbarch_single_step_through_delay_p (gdbarch)
2887 && gdbarch_single_step_through_delay (gdbarch,
2888 get_current_frame ()))
3352ef37
AC
2889 /* We stepped onto an instruction that needs to be stepped
2890 again before re-inserting the breakpoint, do so. */
08036331 2891 cur_thr->stepping_over_breakpoint = 1;
c906108c
SS
2892 }
2893 else
2894 {
515630c5 2895 regcache_write_pc (regcache, addr);
c906108c
SS
2896 }
2897
70509625 2898 if (siggnal != GDB_SIGNAL_DEFAULT)
08036331 2899 cur_thr->suspend.stop_signal = siggnal;
70509625 2900
08036331 2901 resume_ptid = user_visible_resume_ptid (cur_thr->control.stepping_command);
4d9d9d04
PA
2902
2903 /* If an exception is thrown from this point on, make sure to
2904 propagate GDB's knowledge of the executing state to the
2905 frontend/user running state. */
731f534f 2906 scoped_finish_thread_state finish_state (resume_ptid);
4d9d9d04
PA
2907
2908 /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
2909 threads (e.g., we might need to set threads stepping over
2910 breakpoints first), from the user/frontend's point of view, all
2911 threads in RESUME_PTID are now running. Unless we're calling an
2912 inferior function, as in that case we pretend the inferior
2913 doesn't run at all. */
08036331 2914 if (!cur_thr->control.in_infcall)
4d9d9d04 2915 set_running (resume_ptid, 1);
17b2616c 2916
527159b7 2917 if (debug_infrun)
8a9de0e4 2918 fprintf_unfiltered (gdb_stdlog,
64ce06e4 2919 "infrun: proceed (addr=%s, signal=%s)\n",
c9737c08 2920 paddress (gdbarch, addr),
64ce06e4 2921 gdb_signal_to_symbol_string (siggnal));
527159b7 2922
4d9d9d04
PA
2923 annotate_starting ();
2924
2925 /* Make sure that output from GDB appears before output from the
2926 inferior. */
2927 gdb_flush (gdb_stdout);
2928
d930703d
PA
2929 /* Since we've marked the inferior running, give it the terminal. A
2930 QUIT/Ctrl-C from here on is forwarded to the target (which can
2931 still detect attempts to unblock a stuck connection with repeated
2932 Ctrl-C from within target_pass_ctrlc). */
2933 target_terminal::inferior ();
2934
4d9d9d04
PA
2935 /* In a multi-threaded task we may select another thread and
2936 then continue or step.
2937
2938 But if a thread that we're resuming had stopped at a breakpoint,
2939 it will immediately cause another breakpoint stop without any
2940 execution (i.e. it will report a breakpoint hit incorrectly). So
2941 we must step over it first.
2942
2943 Look for threads other than the current one that reported a
2944 breakpoint hit and haven't been resumed yet since. */
2945
2946 /* If scheduler locking applies, we can avoid iterating over all
2947 threads. */
08036331 2948 if (!non_stop && !schedlock_applies (cur_thr))
94cc34af 2949 {
08036331
PA
2950 for (thread_info *tp : all_non_exited_threads (resume_ptid))
2951 {
4d9d9d04
PA
2952 /* Ignore the current thread here. It's handled
2953 afterwards. */
08036331 2954 if (tp == cur_thr)
4d9d9d04 2955 continue;
c906108c 2956
4d9d9d04
PA
2957 if (!thread_still_needs_step_over (tp))
2958 continue;
2959
2960 gdb_assert (!thread_is_in_step_over_chain (tp));
c906108c 2961
99619bea
PA
2962 if (debug_infrun)
2963 fprintf_unfiltered (gdb_stdlog,
2964 "infrun: need to step-over [%s] first\n",
a068643d 2965 target_pid_to_str (tp->ptid).c_str ());
99619bea 2966
4d9d9d04 2967 thread_step_over_chain_enqueue (tp);
2adfaa28 2968 }
30852783
UW
2969 }
2970
4d9d9d04
PA
2971 /* Enqueue the current thread last, so that we move all other
2972 threads over their breakpoints first. */
08036331
PA
2973 if (cur_thr->stepping_over_breakpoint)
2974 thread_step_over_chain_enqueue (cur_thr);
30852783 2975
4d9d9d04
PA
2976 /* If the thread isn't started, we'll still need to set its prev_pc,
2977 so that switch_back_to_stepped_thread knows the thread hasn't
2978 advanced. Must do this before resuming any thread, as in
2979 all-stop/remote, once we resume we can't send any other packet
2980 until the target stops again. */
08036331 2981 cur_thr->prev_pc = regcache_read_pc (regcache);
99619bea 2982
a9bc57b9
TT
2983 {
2984 scoped_restore save_defer_tc = make_scoped_defer_target_commit_resume ();
85ad3aaf 2985
a9bc57b9 2986 started = start_step_over ();
c906108c 2987
a9bc57b9
TT
2988 if (step_over_info_valid_p ())
2989 {
2990 /* Either this thread started a new in-line step over, or some
2991 other thread was already doing one. In either case, don't
2992 resume anything else until the step-over is finished. */
2993 }
2994 else if (started && !target_is_non_stop_p ())
2995 {
2996 /* A new displaced stepping sequence was started. In all-stop,
2997 we can't talk to the target anymore until it next stops. */
2998 }
2999 else if (!non_stop && target_is_non_stop_p ())
3000 {
3001 /* In all-stop, but the target is always in non-stop mode.
3002 Start all other threads that are implicitly resumed too. */
08036331 3003 for (thread_info *tp : all_non_exited_threads (resume_ptid))
fbea99ea 3004 {
fbea99ea
PA
3005 if (tp->resumed)
3006 {
3007 if (debug_infrun)
3008 fprintf_unfiltered (gdb_stdlog,
3009 "infrun: proceed: [%s] resumed\n",
a068643d 3010 target_pid_to_str (tp->ptid).c_str ());
fbea99ea
PA
3011 gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
3012 continue;
3013 }
3014
3015 if (thread_is_in_step_over_chain (tp))
3016 {
3017 if (debug_infrun)
3018 fprintf_unfiltered (gdb_stdlog,
3019 "infrun: proceed: [%s] needs step-over\n",
a068643d 3020 target_pid_to_str (tp->ptid).c_str ());
fbea99ea
PA
3021 continue;
3022 }
3023
3024 if (debug_infrun)
3025 fprintf_unfiltered (gdb_stdlog,
3026 "infrun: proceed: resuming %s\n",
a068643d 3027 target_pid_to_str (tp->ptid).c_str ());
fbea99ea
PA
3028
3029 reset_ecs (ecs, tp);
00431a78 3030 switch_to_thread (tp);
fbea99ea
PA
3031 keep_going_pass_signal (ecs);
3032 if (!ecs->wait_some_more)
fd7dcb94 3033 error (_("Command aborted."));
fbea99ea 3034 }
a9bc57b9 3035 }
08036331 3036 else if (!cur_thr->resumed && !thread_is_in_step_over_chain (cur_thr))
a9bc57b9
TT
3037 {
3038 /* The thread wasn't started, and isn't queued, run it now. */
08036331
PA
3039 reset_ecs (ecs, cur_thr);
3040 switch_to_thread (cur_thr);
a9bc57b9
TT
3041 keep_going_pass_signal (ecs);
3042 if (!ecs->wait_some_more)
3043 error (_("Command aborted."));
3044 }
3045 }
c906108c 3046
85ad3aaf
PA
3047 target_commit_resume ();
3048
731f534f 3049 finish_state.release ();
c906108c 3050
0b333c5e
PA
3051 /* Tell the event loop to wait for it to stop. If the target
3052 supports asynchronous execution, it'll do this from within
3053 target_resume. */
362646f5 3054 if (!target_can_async_p ())
0b333c5e 3055 mark_async_event_handler (infrun_async_inferior_event_token);
c906108c 3056}
c906108c
SS
3057\f
3058
3059/* Start remote-debugging of a machine over a serial link. */
96baa820 3060
c906108c 3061void
8621d6a9 3062start_remote (int from_tty)
c906108c 3063{
d6b48e9c 3064 struct inferior *inferior;
d6b48e9c
PA
3065
3066 inferior = current_inferior ();
16c381f0 3067 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
43ff13b4 3068
1777feb0 3069 /* Always go on waiting for the target, regardless of the mode. */
6426a772 3070 /* FIXME: cagney/1999-09-23: At present it isn't possible to
7e73cedf 3071 indicate to wait_for_inferior that a target should timeout if
6426a772
JM
3072 nothing is returned (instead of just blocking). Because of this,
3073 targets expecting an immediate response need to, internally, set
3074 things up so that the target_wait() is forced to eventually
1777feb0 3075 timeout. */
6426a772
JM
3076 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
3077 differentiate to its caller what the state of the target is after
3078 the initial open has been performed. Here we're assuming that
3079 the target has stopped. It should be possible to eventually have
3080 target_open() return to the caller an indication that the target
3081 is currently running and GDB state should be set to the same as
1777feb0 3082 for an async run. */
e4c8541f 3083 wait_for_inferior ();
8621d6a9
DJ
3084
3085 /* Now that the inferior has stopped, do any bookkeeping like
3086 loading shared libraries. We want to do this before normal_stop,
3087 so that the displayed frame is up to date. */
8b88a78e 3088 post_create_inferior (current_top_target (), from_tty);
8621d6a9 3089
6426a772 3090 normal_stop ();
c906108c
SS
3091}
3092
3093/* Initialize static vars when a new inferior begins. */
3094
3095void
96baa820 3096init_wait_for_inferior (void)
c906108c
SS
3097{
3098 /* These are meaningless until the first time through wait_for_inferior. */
c906108c 3099
c906108c
SS
3100 breakpoint_init_inferior (inf_starting);
3101
70509625 3102 clear_proceed_status (0);
9f976b41 3103
ca005067 3104 target_last_wait_ptid = minus_one_ptid;
237fc4c9 3105
842951eb 3106 previous_inferior_ptid = inferior_ptid;
c906108c 3107}
237fc4c9 3108
c906108c 3109\f
488f131b 3110
ec9499be 3111static void handle_inferior_event (struct execution_control_state *ecs);
cd0fc7c3 3112
568d6575
UW
3113static void handle_step_into_function (struct gdbarch *gdbarch,
3114 struct execution_control_state *ecs);
3115static void handle_step_into_function_backward (struct gdbarch *gdbarch,
3116 struct execution_control_state *ecs);
4f5d7f63 3117static void handle_signal_stop (struct execution_control_state *ecs);
186c406b 3118static void check_exception_resume (struct execution_control_state *,
28106bc2 3119 struct frame_info *);
611c83ae 3120
bdc36728 3121static void end_stepping_range (struct execution_control_state *ecs);
22bcd14b 3122static void stop_waiting (struct execution_control_state *ecs);
d4f3574e 3123static void keep_going (struct execution_control_state *ecs);
94c57d6a 3124static void process_event_stop_test (struct execution_control_state *ecs);
c447ac0b 3125static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
104c1213 3126
252fbfc8
PA
3127/* This function is attached as a "thread_stop_requested" observer.
3128 Cleanup local state that assumed the PTID was to be resumed, and
3129 report the stop to the frontend. */
3130
2c0b251b 3131static void
252fbfc8
PA
3132infrun_thread_stop_requested (ptid_t ptid)
3133{
c65d6b55
PA
3134 /* PTID was requested to stop. If the thread was already stopped,
3135 but the user/frontend doesn't know about that yet (e.g., the
3136 thread had been temporarily paused for some step-over), set up
3137 for reporting the stop now. */
08036331
PA
3138 for (thread_info *tp : all_threads (ptid))
3139 {
3140 if (tp->state != THREAD_RUNNING)
3141 continue;
3142 if (tp->executing)
3143 continue;
c65d6b55 3144
08036331
PA
3145 /* Remove matching threads from the step-over queue, so
3146 start_step_over doesn't try to resume them
3147 automatically. */
3148 if (thread_is_in_step_over_chain (tp))
3149 thread_step_over_chain_remove (tp);
c65d6b55 3150
08036331
PA
3151 /* If the thread is stopped, but the user/frontend doesn't
3152 know about that yet, queue a pending event, as if the
3153 thread had just stopped now. Unless the thread already had
3154 a pending event. */
3155 if (!tp->suspend.waitstatus_pending_p)
3156 {
3157 tp->suspend.waitstatus_pending_p = 1;
3158 tp->suspend.waitstatus.kind = TARGET_WAITKIND_STOPPED;
3159 tp->suspend.waitstatus.value.sig = GDB_SIGNAL_0;
3160 }
c65d6b55 3161
08036331
PA
3162 /* Clear the inline-frame state, since we're re-processing the
3163 stop. */
3164 clear_inline_frame_state (tp->ptid);
c65d6b55 3165
08036331
PA
3166 /* If this thread was paused because some other thread was
3167 doing an inline-step over, let that finish first. Once
3168 that happens, we'll restart all threads and consume pending
3169 stop events then. */
3170 if (step_over_info_valid_p ())
3171 continue;
3172
3173 /* Otherwise we can process the (new) pending event now. Set
3174 it so this pending event is considered by
3175 do_target_wait. */
3176 tp->resumed = 1;
3177 }
252fbfc8
PA
3178}
3179
a07daef3
PA
3180static void
3181infrun_thread_thread_exit (struct thread_info *tp, int silent)
3182{
d7e15655 3183 if (target_last_wait_ptid == tp->ptid)
a07daef3
PA
3184 nullify_last_target_wait_ptid ();
3185}
3186
0cbcdb96
PA
3187/* Delete the step resume, single-step and longjmp/exception resume
3188 breakpoints of TP. */
4e1c45ea 3189
0cbcdb96
PA
3190static void
3191delete_thread_infrun_breakpoints (struct thread_info *tp)
4e1c45ea 3192{
0cbcdb96
PA
3193 delete_step_resume_breakpoint (tp);
3194 delete_exception_resume_breakpoint (tp);
34b7e8a6 3195 delete_single_step_breakpoints (tp);
4e1c45ea
PA
3196}
3197
0cbcdb96
PA
3198/* If the target still has execution, call FUNC for each thread that
3199 just stopped. In all-stop, that's all the non-exited threads; in
3200 non-stop, that's the current thread, only. */
3201
3202typedef void (*for_each_just_stopped_thread_callback_func)
3203 (struct thread_info *tp);
4e1c45ea
PA
3204
3205static void
0cbcdb96 3206for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
4e1c45ea 3207{
d7e15655 3208 if (!target_has_execution || inferior_ptid == null_ptid)
4e1c45ea
PA
3209 return;
3210
fbea99ea 3211 if (target_is_non_stop_p ())
4e1c45ea 3212 {
0cbcdb96
PA
3213 /* If in non-stop mode, only the current thread stopped. */
3214 func (inferior_thread ());
4e1c45ea
PA
3215 }
3216 else
0cbcdb96 3217 {
0cbcdb96 3218 /* In all-stop mode, all threads have stopped. */
08036331
PA
3219 for (thread_info *tp : all_non_exited_threads ())
3220 func (tp);
0cbcdb96
PA
3221 }
3222}
3223
3224/* Delete the step resume and longjmp/exception resume breakpoints of
3225 the threads that just stopped. */
3226
3227static void
3228delete_just_stopped_threads_infrun_breakpoints (void)
3229{
3230 for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
34b7e8a6
PA
3231}
3232
3233/* Delete the single-step breakpoints of the threads that just
3234 stopped. */
7c16b83e 3235
34b7e8a6
PA
3236static void
3237delete_just_stopped_threads_single_step_breakpoints (void)
3238{
3239 for_each_just_stopped_thread (delete_single_step_breakpoints);
4e1c45ea
PA
3240}
3241
221e1a37 3242/* See infrun.h. */
223698f8 3243
221e1a37 3244void
223698f8
DE
3245print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
3246 const struct target_waitstatus *ws)
3247{
23fdd69e 3248 std::string status_string = target_waitstatus_to_string (ws);
d7e74731 3249 string_file stb;
223698f8
DE
3250
3251 /* The text is split over several lines because it was getting too long.
3252 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
3253 output as a unit; we want only one timestamp printed if debug_timestamp
3254 is set. */
3255
d7e74731 3256 stb.printf ("infrun: target_wait (%d.%ld.%ld",
e99b03dc 3257 waiton_ptid.pid (),
e38504b3 3258 waiton_ptid.lwp (),
cc6bcb54 3259 waiton_ptid.tid ());
e99b03dc 3260 if (waiton_ptid.pid () != -1)
a068643d 3261 stb.printf (" [%s]", target_pid_to_str (waiton_ptid).c_str ());
d7e74731
PA
3262 stb.printf (", status) =\n");
3263 stb.printf ("infrun: %d.%ld.%ld [%s],\n",
e99b03dc 3264 result_ptid.pid (),
e38504b3 3265 result_ptid.lwp (),
cc6bcb54 3266 result_ptid.tid (),
a068643d 3267 target_pid_to_str (result_ptid).c_str ());
23fdd69e 3268 stb.printf ("infrun: %s\n", status_string.c_str ());
223698f8
DE
3269
3270 /* This uses %s in part to handle %'s in the text, but also to avoid
3271 a gcc error: the format attribute requires a string literal. */
d7e74731 3272 fprintf_unfiltered (gdb_stdlog, "%s", stb.c_str ());
223698f8
DE
3273}
3274
372316f1
PA
3275/* Select a thread at random, out of those which are resumed and have
3276 had events. */
3277
3278static struct thread_info *
3279random_pending_event_thread (ptid_t waiton_ptid)
3280{
372316f1 3281 int num_events = 0;
08036331
PA
3282
3283 auto has_event = [] (thread_info *tp)
3284 {
3285 return (tp->resumed
3286 && tp->suspend.waitstatus_pending_p);
3287 };
372316f1
PA
3288
3289 /* First see how many events we have. Count only resumed threads
3290 that have an event pending. */
08036331
PA
3291 for (thread_info *tp : all_non_exited_threads (waiton_ptid))
3292 if (has_event (tp))
372316f1
PA
3293 num_events++;
3294
3295 if (num_events == 0)
3296 return NULL;
3297
3298 /* Now randomly pick a thread out of those that have had events. */
08036331
PA
3299 int random_selector = (int) ((num_events * (double) rand ())
3300 / (RAND_MAX + 1.0));
372316f1
PA
3301
3302 if (debug_infrun && num_events > 1)
3303 fprintf_unfiltered (gdb_stdlog,
3304 "infrun: Found %d events, selecting #%d\n",
3305 num_events, random_selector);
3306
3307 /* Select the Nth thread that has had an event. */
08036331
PA
3308 for (thread_info *tp : all_non_exited_threads (waiton_ptid))
3309 if (has_event (tp))
372316f1 3310 if (random_selector-- == 0)
08036331 3311 return tp;
372316f1 3312
08036331 3313 gdb_assert_not_reached ("event thread not found");
372316f1
PA
3314}
3315
3316/* Wrapper for target_wait that first checks whether threads have
3317 pending statuses to report before actually asking the target for
3318 more events. */
3319
3320static ptid_t
3321do_target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
3322{
3323 ptid_t event_ptid;
3324 struct thread_info *tp;
3325
3326 /* First check if there is a resumed thread with a wait status
3327 pending. */
d7e15655 3328 if (ptid == minus_one_ptid || ptid.is_pid ())
372316f1
PA
3329 {
3330 tp = random_pending_event_thread (ptid);
3331 }
3332 else
3333 {
3334 if (debug_infrun)
3335 fprintf_unfiltered (gdb_stdlog,
3336 "infrun: Waiting for specific thread %s.\n",
a068643d 3337 target_pid_to_str (ptid).c_str ());
372316f1
PA
3338
3339 /* We have a specific thread to check. */
3340 tp = find_thread_ptid (ptid);
3341 gdb_assert (tp != NULL);
3342 if (!tp->suspend.waitstatus_pending_p)
3343 tp = NULL;
3344 }
3345
3346 if (tp != NULL
3347 && (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3348 || tp->suspend.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
3349 {
00431a78 3350 struct regcache *regcache = get_thread_regcache (tp);
ac7936df 3351 struct gdbarch *gdbarch = regcache->arch ();
372316f1
PA
3352 CORE_ADDR pc;
3353 int discard = 0;
3354
3355 pc = regcache_read_pc (regcache);
3356
3357 if (pc != tp->suspend.stop_pc)
3358 {
3359 if (debug_infrun)
3360 fprintf_unfiltered (gdb_stdlog,
3361 "infrun: PC of %s changed. was=%s, now=%s\n",
a068643d 3362 target_pid_to_str (tp->ptid).c_str (),
defd2172 3363 paddress (gdbarch, tp->suspend.stop_pc),
372316f1
PA
3364 paddress (gdbarch, pc));
3365 discard = 1;
3366 }
a01bda52 3367 else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
372316f1
PA
3368 {
3369 if (debug_infrun)
3370 fprintf_unfiltered (gdb_stdlog,
3371 "infrun: previous breakpoint of %s, at %s gone\n",
a068643d 3372 target_pid_to_str (tp->ptid).c_str (),
372316f1
PA
3373 paddress (gdbarch, pc));
3374
3375 discard = 1;
3376 }
3377
3378 if (discard)
3379 {
3380 if (debug_infrun)
3381 fprintf_unfiltered (gdb_stdlog,
3382 "infrun: pending event of %s cancelled.\n",
a068643d 3383 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
3384
3385 tp->suspend.waitstatus.kind = TARGET_WAITKIND_SPURIOUS;
3386 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
3387 }
3388 }
3389
3390 if (tp != NULL)
3391 {
3392 if (debug_infrun)
3393 {
23fdd69e
SM
3394 std::string statstr
3395 = target_waitstatus_to_string (&tp->suspend.waitstatus);
372316f1 3396
372316f1
PA
3397 fprintf_unfiltered (gdb_stdlog,
3398 "infrun: Using pending wait status %s for %s.\n",
23fdd69e 3399 statstr.c_str (),
a068643d 3400 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
3401 }
3402
3403 /* Now that we've selected our final event LWP, un-adjust its PC
3404 if it was a software breakpoint (and the target doesn't
3405 always adjust the PC itself). */
3406 if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3407 && !target_supports_stopped_by_sw_breakpoint ())
3408 {
3409 struct regcache *regcache;
3410 struct gdbarch *gdbarch;
3411 int decr_pc;
3412
00431a78 3413 regcache = get_thread_regcache (tp);
ac7936df 3414 gdbarch = regcache->arch ();
372316f1
PA
3415
3416 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
3417 if (decr_pc != 0)
3418 {
3419 CORE_ADDR pc;
3420
3421 pc = regcache_read_pc (regcache);
3422 regcache_write_pc (regcache, pc + decr_pc);
3423 }
3424 }
3425
3426 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
3427 *status = tp->suspend.waitstatus;
3428 tp->suspend.waitstatus_pending_p = 0;
3429
3430 /* Wake up the event loop again, until all pending events are
3431 processed. */
3432 if (target_is_async_p ())
3433 mark_async_event_handler (infrun_async_inferior_event_token);
3434 return tp->ptid;
3435 }
3436
3437 /* But if we don't find one, we'll have to wait. */
3438
3439 if (deprecated_target_wait_hook)
3440 event_ptid = deprecated_target_wait_hook (ptid, status, options);
3441 else
3442 event_ptid = target_wait (ptid, status, options);
3443
3444 return event_ptid;
3445}
3446
24291992
PA
3447/* Prepare and stabilize the inferior for detaching it. E.g.,
3448 detaching while a thread is displaced stepping is a recipe for
3449 crashing it, as nothing would readjust the PC out of the scratch
3450 pad. */
3451
3452void
3453prepare_for_detach (void)
3454{
3455 struct inferior *inf = current_inferior ();
f2907e49 3456 ptid_t pid_ptid = ptid_t (inf->pid);
24291992 3457
00431a78 3458 displaced_step_inferior_state *displaced = get_displaced_stepping_state (inf);
24291992
PA
3459
3460 /* Is any thread of this process displaced stepping? If not,
3461 there's nothing else to do. */
d20172fc 3462 if (displaced->step_thread == nullptr)
24291992
PA
3463 return;
3464
3465 if (debug_infrun)
3466 fprintf_unfiltered (gdb_stdlog,
3467 "displaced-stepping in-process while detaching");
3468
9bcb1f16 3469 scoped_restore restore_detaching = make_scoped_restore (&inf->detaching, true);
24291992 3470
00431a78 3471 while (displaced->step_thread != nullptr)
24291992 3472 {
24291992
PA
3473 struct execution_control_state ecss;
3474 struct execution_control_state *ecs;
3475
3476 ecs = &ecss;
3477 memset (ecs, 0, sizeof (*ecs));
3478
3479 overlay_cache_invalid = 1;
f15cb84a
YQ
3480 /* Flush target cache before starting to handle each event.
3481 Target was running and cache could be stale. This is just a
3482 heuristic. Running threads may modify target memory, but we
3483 don't get any event. */
3484 target_dcache_invalidate ();
24291992 3485
372316f1 3486 ecs->ptid = do_target_wait (pid_ptid, &ecs->ws, 0);
24291992
PA
3487
3488 if (debug_infrun)
3489 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
3490
3491 /* If an error happens while handling the event, propagate GDB's
3492 knowledge of the executing state to the frontend/user running
3493 state. */
731f534f 3494 scoped_finish_thread_state finish_state (minus_one_ptid);
24291992
PA
3495
3496 /* Now figure out what to do with the result. */
3497 handle_inferior_event (ecs);
3498
3499 /* No error, don't finish the state yet. */
731f534f 3500 finish_state.release ();
24291992
PA
3501
3502 /* Breakpoints and watchpoints are not installed on the target
3503 at this point, and signals are passed directly to the
3504 inferior, so this must mean the process is gone. */
3505 if (!ecs->wait_some_more)
3506 {
9bcb1f16 3507 restore_detaching.release ();
24291992
PA
3508 error (_("Program exited while detaching"));
3509 }
3510 }
3511
9bcb1f16 3512 restore_detaching.release ();
24291992
PA
3513}
3514
cd0fc7c3 3515/* Wait for control to return from inferior to debugger.
ae123ec6 3516
cd0fc7c3
SS
3517 If inferior gets a signal, we may decide to start it up again
3518 instead of returning. That is why there is a loop in this function.
3519 When this function actually returns it means the inferior
3520 should be left stopped and GDB should read more commands. */
3521
3522void
e4c8541f 3523wait_for_inferior (void)
cd0fc7c3 3524{
527159b7 3525 if (debug_infrun)
ae123ec6 3526 fprintf_unfiltered
e4c8541f 3527 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
527159b7 3528
4c41382a 3529 SCOPE_EXIT { delete_just_stopped_threads_infrun_breakpoints (); };
cd0fc7c3 3530
e6f5c25b
PA
3531 /* If an error happens while handling the event, propagate GDB's
3532 knowledge of the executing state to the frontend/user running
3533 state. */
731f534f 3534 scoped_finish_thread_state finish_state (minus_one_ptid);
e6f5c25b 3535
c906108c
SS
3536 while (1)
3537 {
ae25568b
PA
3538 struct execution_control_state ecss;
3539 struct execution_control_state *ecs = &ecss;
963f9c80 3540 ptid_t waiton_ptid = minus_one_ptid;
29f49a6a 3541
ae25568b
PA
3542 memset (ecs, 0, sizeof (*ecs));
3543
ec9499be 3544 overlay_cache_invalid = 1;
ec9499be 3545
f15cb84a
YQ
3546 /* Flush target cache before starting to handle each event.
3547 Target was running and cache could be stale. This is just a
3548 heuristic. Running threads may modify target memory, but we
3549 don't get any event. */
3550 target_dcache_invalidate ();
3551
372316f1 3552 ecs->ptid = do_target_wait (waiton_ptid, &ecs->ws, 0);
c906108c 3553
f00150c9 3554 if (debug_infrun)
223698f8 3555 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
f00150c9 3556
cd0fc7c3
SS
3557 /* Now figure out what to do with the result. */
3558 handle_inferior_event (ecs);
c906108c 3559
cd0fc7c3
SS
3560 if (!ecs->wait_some_more)
3561 break;
3562 }
4e1c45ea 3563
e6f5c25b 3564 /* No error, don't finish the state yet. */
731f534f 3565 finish_state.release ();
cd0fc7c3 3566}
c906108c 3567
d3d4baed
PA
3568/* Cleanup that reinstalls the readline callback handler, if the
3569 target is running in the background. If while handling the target
3570 event something triggered a secondary prompt, like e.g., a
3571 pagination prompt, we'll have removed the callback handler (see
3572 gdb_readline_wrapper_line). Need to do this as we go back to the
3573 event loop, ready to process further input. Note this has no
3574 effect if the handler hasn't actually been removed, because calling
3575 rl_callback_handler_install resets the line buffer, thus losing
3576 input. */
3577
3578static void
d238133d 3579reinstall_readline_callback_handler_cleanup ()
d3d4baed 3580{
3b12939d
PA
3581 struct ui *ui = current_ui;
3582
3583 if (!ui->async)
6c400b59
PA
3584 {
3585 /* We're not going back to the top level event loop yet. Don't
3586 install the readline callback, as it'd prep the terminal,
3587 readline-style (raw, noecho) (e.g., --batch). We'll install
3588 it the next time the prompt is displayed, when we're ready
3589 for input. */
3590 return;
3591 }
3592
3b12939d 3593 if (ui->command_editing && ui->prompt_state != PROMPT_BLOCKED)
d3d4baed
PA
3594 gdb_rl_callback_handler_reinstall ();
3595}
3596
243a9253
PA
3597/* Clean up the FSMs of threads that are now stopped. In non-stop,
3598 that's just the event thread. In all-stop, that's all threads. */
3599
3600static void
3601clean_up_just_stopped_threads_fsms (struct execution_control_state *ecs)
3602{
08036331
PA
3603 if (ecs->event_thread != NULL
3604 && ecs->event_thread->thread_fsm != NULL)
46e3ed7f 3605 ecs->event_thread->thread_fsm->clean_up (ecs->event_thread);
243a9253
PA
3606
3607 if (!non_stop)
3608 {
08036331 3609 for (thread_info *thr : all_non_exited_threads ())
243a9253
PA
3610 {
3611 if (thr->thread_fsm == NULL)
3612 continue;
3613 if (thr == ecs->event_thread)
3614 continue;
3615
00431a78 3616 switch_to_thread (thr);
46e3ed7f 3617 thr->thread_fsm->clean_up (thr);
243a9253
PA
3618 }
3619
3620 if (ecs->event_thread != NULL)
00431a78 3621 switch_to_thread (ecs->event_thread);
243a9253
PA
3622 }
3623}
3624
3b12939d
PA
3625/* Helper for all_uis_check_sync_execution_done that works on the
3626 current UI. */
3627
3628static void
3629check_curr_ui_sync_execution_done (void)
3630{
3631 struct ui *ui = current_ui;
3632
3633 if (ui->prompt_state == PROMPT_NEEDED
3634 && ui->async
3635 && !gdb_in_secondary_prompt_p (ui))
3636 {
223ffa71 3637 target_terminal::ours ();
76727919 3638 gdb::observers::sync_execution_done.notify ();
3eb7562a 3639 ui_register_input_event_handler (ui);
3b12939d
PA
3640 }
3641}
3642
3643/* See infrun.h. */
3644
3645void
3646all_uis_check_sync_execution_done (void)
3647{
0e454242 3648 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
3649 {
3650 check_curr_ui_sync_execution_done ();
3651 }
3652}
3653
a8836c93
PA
3654/* See infrun.h. */
3655
3656void
3657all_uis_on_sync_execution_starting (void)
3658{
0e454242 3659 SWITCH_THRU_ALL_UIS ()
a8836c93
PA
3660 {
3661 if (current_ui->prompt_state == PROMPT_NEEDED)
3662 async_disable_stdin ();
3663 }
3664}
3665
1777feb0 3666/* Asynchronous version of wait_for_inferior. It is called by the
43ff13b4 3667 event loop whenever a change of state is detected on the file
1777feb0
MS
3668 descriptor corresponding to the target. It can be called more than
3669 once to complete a single execution command. In such cases we need
3670 to keep the state in a global variable ECSS. If it is the last time
a474d7c2
PA
3671 that this function is called for a single execution command, then
3672 report to the user that the inferior has stopped, and do the
1777feb0 3673 necessary cleanups. */
43ff13b4
JM
3674
3675void
fba45db2 3676fetch_inferior_event (void *client_data)
43ff13b4 3677{
0d1e5fa7 3678 struct execution_control_state ecss;
a474d7c2 3679 struct execution_control_state *ecs = &ecss;
0f641c01 3680 int cmd_done = 0;
963f9c80 3681 ptid_t waiton_ptid = minus_one_ptid;
43ff13b4 3682
0d1e5fa7
PA
3683 memset (ecs, 0, sizeof (*ecs));
3684
c61db772
PA
3685 /* Events are always processed with the main UI as current UI. This
3686 way, warnings, debug output, etc. are always consistently sent to
3687 the main console. */
4b6749b9 3688 scoped_restore save_ui = make_scoped_restore (&current_ui, main_ui);
c61db772 3689
d3d4baed 3690 /* End up with readline processing input, if necessary. */
d238133d
TT
3691 {
3692 SCOPE_EXIT { reinstall_readline_callback_handler_cleanup (); };
3693
3694 /* We're handling a live event, so make sure we're doing live
3695 debugging. If we're looking at traceframes while the target is
3696 running, we're going to need to get back to that mode after
3697 handling the event. */
3698 gdb::optional<scoped_restore_current_traceframe> maybe_restore_traceframe;
3699 if (non_stop)
3700 {
3701 maybe_restore_traceframe.emplace ();
3702 set_current_traceframe (-1);
3703 }
43ff13b4 3704
d238133d
TT
3705 gdb::optional<scoped_restore_current_thread> maybe_restore_thread;
3706
3707 if (non_stop)
3708 /* In non-stop mode, the user/frontend should not notice a thread
3709 switch due to internal events. Make sure we revert to the
3710 user selected thread and frame after handling the event and
3711 running any breakpoint commands. */
3712 maybe_restore_thread.emplace ();
3713
3714 overlay_cache_invalid = 1;
3715 /* Flush target cache before starting to handle each event. Target
3716 was running and cache could be stale. This is just a heuristic.
3717 Running threads may modify target memory, but we don't get any
3718 event. */
3719 target_dcache_invalidate ();
3720
3721 scoped_restore save_exec_dir
3722 = make_scoped_restore (&execution_direction,
3723 target_execution_direction ());
3724
3725 ecs->ptid = do_target_wait (waiton_ptid, &ecs->ws,
3726 target_can_async_p () ? TARGET_WNOHANG : 0);
3727
3728 if (debug_infrun)
3729 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
3730
3731 /* If an error happens while handling the event, propagate GDB's
3732 knowledge of the executing state to the frontend/user running
3733 state. */
3734 ptid_t finish_ptid = !target_is_non_stop_p () ? minus_one_ptid : ecs->ptid;
3735 scoped_finish_thread_state finish_state (finish_ptid);
3736
979a0d13 3737 /* These get executed before the scoped_restore_current_thread above, so
d238133d
TT
3738 that they still apply to the thread which has thrown the exception. */
3739 auto defer_bpstat_clear
3740 = make_scope_exit (bpstat_clear_actions);
3741 auto defer_delete_threads
3742 = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints);
3743
3744 /* Now figure out what to do with the result. */
3745 handle_inferior_event (ecs);
3746
3747 if (!ecs->wait_some_more)
3748 {
3749 struct inferior *inf = find_inferior_ptid (ecs->ptid);
3750 int should_stop = 1;
3751 struct thread_info *thr = ecs->event_thread;
d6b48e9c 3752
d238133d 3753 delete_just_stopped_threads_infrun_breakpoints ();
f107f563 3754
d238133d
TT
3755 if (thr != NULL)
3756 {
3757 struct thread_fsm *thread_fsm = thr->thread_fsm;
243a9253 3758
d238133d 3759 if (thread_fsm != NULL)
46e3ed7f 3760 should_stop = thread_fsm->should_stop (thr);
d238133d 3761 }
243a9253 3762
d238133d
TT
3763 if (!should_stop)
3764 {
3765 keep_going (ecs);
3766 }
3767 else
3768 {
46e3ed7f 3769 bool should_notify_stop = true;
d238133d 3770 int proceeded = 0;
1840d81a 3771
d238133d 3772 clean_up_just_stopped_threads_fsms (ecs);
243a9253 3773
d238133d 3774 if (thr != NULL && thr->thread_fsm != NULL)
46e3ed7f 3775 should_notify_stop = thr->thread_fsm->should_notify_stop ();
388a7084 3776
d238133d
TT
3777 if (should_notify_stop)
3778 {
3779 /* We may not find an inferior if this was a process exit. */
3780 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
3781 proceeded = normal_stop ();
3782 }
243a9253 3783
d238133d
TT
3784 if (!proceeded)
3785 {
3786 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
3787 cmd_done = 1;
3788 }
3789 }
3790 }
4f8d22e3 3791
d238133d
TT
3792 defer_delete_threads.release ();
3793 defer_bpstat_clear.release ();
29f49a6a 3794
d238133d
TT
3795 /* No error, don't finish the thread states yet. */
3796 finish_state.release ();
731f534f 3797
d238133d
TT
3798 /* This scope is used to ensure that readline callbacks are
3799 reinstalled here. */
3800 }
4f8d22e3 3801
3b12939d
PA
3802 /* If a UI was in sync execution mode, and now isn't, restore its
3803 prompt (a synchronous execution command has finished, and we're
3804 ready for input). */
3805 all_uis_check_sync_execution_done ();
0f641c01
PA
3806
3807 if (cmd_done
0f641c01 3808 && exec_done_display_p
00431a78
PA
3809 && (inferior_ptid == null_ptid
3810 || inferior_thread ()->state != THREAD_RUNNING))
0f641c01 3811 printf_unfiltered (_("completed.\n"));
43ff13b4
JM
3812}
3813
edb3359d
DJ
3814/* Record the frame and location we're currently stepping through. */
3815void
3816set_step_info (struct frame_info *frame, struct symtab_and_line sal)
3817{
3818 struct thread_info *tp = inferior_thread ();
3819
16c381f0
JK
3820 tp->control.step_frame_id = get_frame_id (frame);
3821 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
edb3359d
DJ
3822
3823 tp->current_symtab = sal.symtab;
3824 tp->current_line = sal.line;
3825}
3826
0d1e5fa7
PA
3827/* Clear context switchable stepping state. */
3828
3829void
4e1c45ea 3830init_thread_stepping_state (struct thread_info *tss)
0d1e5fa7 3831{
7f5ef605 3832 tss->stepped_breakpoint = 0;
0d1e5fa7 3833 tss->stepping_over_breakpoint = 0;
963f9c80 3834 tss->stepping_over_watchpoint = 0;
0d1e5fa7 3835 tss->step_after_step_resume_breakpoint = 0;
cd0fc7c3
SS
3836}
3837
c32c64b7
DE
3838/* Set the cached copy of the last ptid/waitstatus. */
3839
6efcd9a8 3840void
c32c64b7
DE
3841set_last_target_status (ptid_t ptid, struct target_waitstatus status)
3842{
3843 target_last_wait_ptid = ptid;
3844 target_last_waitstatus = status;
3845}
3846
e02bc4cc 3847/* Return the cached copy of the last pid/waitstatus returned by
9a4105ab
AC
3848 target_wait()/deprecated_target_wait_hook(). The data is actually
3849 cached by handle_inferior_event(), which gets called immediately
3850 after target_wait()/deprecated_target_wait_hook(). */
e02bc4cc
DS
3851
3852void
488f131b 3853get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
e02bc4cc 3854{
39f77062 3855 *ptidp = target_last_wait_ptid;
e02bc4cc
DS
3856 *status = target_last_waitstatus;
3857}
3858
ac264b3b
MS
3859void
3860nullify_last_target_wait_ptid (void)
3861{
3862 target_last_wait_ptid = minus_one_ptid;
3863}
3864
dcf4fbde 3865/* Switch thread contexts. */
dd80620e
MS
3866
3867static void
00431a78 3868context_switch (execution_control_state *ecs)
dd80620e 3869{
00431a78
PA
3870 if (debug_infrun
3871 && ecs->ptid != inferior_ptid
3872 && ecs->event_thread != inferior_thread ())
fd48f117
DJ
3873 {
3874 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
a068643d 3875 target_pid_to_str (inferior_ptid).c_str ());
fd48f117 3876 fprintf_unfiltered (gdb_stdlog, "to %s\n",
a068643d 3877 target_pid_to_str (ecs->ptid).c_str ());
fd48f117
DJ
3878 }
3879
00431a78 3880 switch_to_thread (ecs->event_thread);
dd80620e
MS
3881}
3882
d8dd4d5f
PA
3883/* If the target can't tell whether we've hit breakpoints
3884 (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
3885 check whether that could have been caused by a breakpoint. If so,
3886 adjust the PC, per gdbarch_decr_pc_after_break. */
3887
4fa8626c 3888static void
d8dd4d5f
PA
3889adjust_pc_after_break (struct thread_info *thread,
3890 struct target_waitstatus *ws)
4fa8626c 3891{
24a73cce
UW
3892 struct regcache *regcache;
3893 struct gdbarch *gdbarch;
118e6252 3894 CORE_ADDR breakpoint_pc, decr_pc;
4fa8626c 3895
4fa8626c
DJ
3896 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
3897 we aren't, just return.
9709f61c
DJ
3898
3899 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
b798847d
UW
3900 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
3901 implemented by software breakpoints should be handled through the normal
3902 breakpoint layer.
8fb3e588 3903
4fa8626c
DJ
3904 NOTE drow/2004-01-31: On some targets, breakpoints may generate
3905 different signals (SIGILL or SIGEMT for instance), but it is less
3906 clear where the PC is pointing afterwards. It may not match
b798847d
UW
3907 gdbarch_decr_pc_after_break. I don't know any specific target that
3908 generates these signals at breakpoints (the code has been in GDB since at
3909 least 1992) so I can not guess how to handle them here.
8fb3e588 3910
e6cf7916
UW
3911 In earlier versions of GDB, a target with
3912 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
b798847d
UW
3913 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
3914 target with both of these set in GDB history, and it seems unlikely to be
3915 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
4fa8626c 3916
d8dd4d5f 3917 if (ws->kind != TARGET_WAITKIND_STOPPED)
4fa8626c
DJ
3918 return;
3919
d8dd4d5f 3920 if (ws->value.sig != GDB_SIGNAL_TRAP)
4fa8626c
DJ
3921 return;
3922
4058b839
PA
3923 /* In reverse execution, when a breakpoint is hit, the instruction
3924 under it has already been de-executed. The reported PC always
3925 points at the breakpoint address, so adjusting it further would
3926 be wrong. E.g., consider this case on a decr_pc_after_break == 1
3927 architecture:
3928
3929 B1 0x08000000 : INSN1
3930 B2 0x08000001 : INSN2
3931 0x08000002 : INSN3
3932 PC -> 0x08000003 : INSN4
3933
3934 Say you're stopped at 0x08000003 as above. Reverse continuing
3935 from that point should hit B2 as below. Reading the PC when the
3936 SIGTRAP is reported should read 0x08000001 and INSN2 should have
3937 been de-executed already.
3938
3939 B1 0x08000000 : INSN1
3940 B2 PC -> 0x08000001 : INSN2
3941 0x08000002 : INSN3
3942 0x08000003 : INSN4
3943
3944 We can't apply the same logic as for forward execution, because
3945 we would wrongly adjust the PC to 0x08000000, since there's a
3946 breakpoint at PC - 1. We'd then report a hit on B1, although
3947 INSN1 hadn't been de-executed yet. Doing nothing is the correct
3948 behaviour. */
3949 if (execution_direction == EXEC_REVERSE)
3950 return;
3951
1cf4d951
PA
3952 /* If the target can tell whether the thread hit a SW breakpoint,
3953 trust it. Targets that can tell also adjust the PC
3954 themselves. */
3955 if (target_supports_stopped_by_sw_breakpoint ())
3956 return;
3957
3958 /* Note that relying on whether a breakpoint is planted in memory to
3959 determine this can fail. E.g., the breakpoint could have been
3960 removed since. Or the thread could have been told to step an
3961 instruction the size of a breakpoint instruction, and only
3962 _after_ was a breakpoint inserted at its address. */
3963
24a73cce
UW
3964 /* If this target does not decrement the PC after breakpoints, then
3965 we have nothing to do. */
00431a78 3966 regcache = get_thread_regcache (thread);
ac7936df 3967 gdbarch = regcache->arch ();
118e6252 3968
527a273a 3969 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
118e6252 3970 if (decr_pc == 0)
24a73cce
UW
3971 return;
3972
8b86c959 3973 const address_space *aspace = regcache->aspace ();
6c95b8df 3974
8aad930b
AC
3975 /* Find the location where (if we've hit a breakpoint) the
3976 breakpoint would be. */
118e6252 3977 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
8aad930b 3978
1cf4d951
PA
3979 /* If the target can't tell whether a software breakpoint triggered,
3980 fallback to figuring it out based on breakpoints we think were
3981 inserted in the target, and on whether the thread was stepped or
3982 continued. */
3983
1c5cfe86
PA
3984 /* Check whether there actually is a software breakpoint inserted at
3985 that location.
3986
3987 If in non-stop mode, a race condition is possible where we've
3988 removed a breakpoint, but stop events for that breakpoint were
3989 already queued and arrive later. To suppress those spurious
3990 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
1cf4d951
PA
3991 and retire them after a number of stop events are reported. Note
3992 this is a heuristic and can thus get confused. The real fix is
3993 to get the "stopped by SW BP and needs adjustment" info out of
3994 the target/kernel (and thus never reach here; see above). */
6c95b8df 3995 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
fbea99ea
PA
3996 || (target_is_non_stop_p ()
3997 && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
8aad930b 3998 {
07036511 3999 gdb::optional<scoped_restore_tmpl<int>> restore_operation_disable;
abbb1732 4000
8213266a 4001 if (record_full_is_used ())
07036511
TT
4002 restore_operation_disable.emplace
4003 (record_full_gdb_operation_disable_set ());
96429cc8 4004
1c0fdd0e
UW
4005 /* When using hardware single-step, a SIGTRAP is reported for both
4006 a completed single-step and a software breakpoint. Need to
4007 differentiate between the two, as the latter needs adjusting
4008 but the former does not.
4009
4010 The SIGTRAP can be due to a completed hardware single-step only if
4011 - we didn't insert software single-step breakpoints
1c0fdd0e
UW
4012 - this thread is currently being stepped
4013
4014 If any of these events did not occur, we must have stopped due
4015 to hitting a software breakpoint, and have to back up to the
4016 breakpoint address.
4017
4018 As a special case, we could have hardware single-stepped a
4019 software breakpoint. In this case (prev_pc == breakpoint_pc),
4020 we also need to back up to the breakpoint address. */
4021
d8dd4d5f
PA
4022 if (thread_has_single_step_breakpoints_set (thread)
4023 || !currently_stepping (thread)
4024 || (thread->stepped_breakpoint
4025 && thread->prev_pc == breakpoint_pc))
515630c5 4026 regcache_write_pc (regcache, breakpoint_pc);
8aad930b 4027 }
4fa8626c
DJ
4028}
4029
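/* A descriptive note on the helper below (reading of its loop): it
   returns non-zero if the frame identified by STEP_FRAME_ID is found
   among FRAME's callers while walking outwards through inline frames
   only (the walk stops at the first non-inline caller, after checking
   it).  In other words, FRAME was stepped into from that frame via
   inlined code.  */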
edb3359d
DJ
4030static int
4031stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
4032{
4033 for (frame = get_prev_frame (frame);
4034 frame != NULL;
4035 frame = get_prev_frame (frame))
4036 {
4037 if (frame_id_eq (get_frame_id (frame), step_frame_id))
4038 return 1;
4039 if (get_frame_type (frame) != INLINE_FRAME)
4040 break;
4041 }
4042
4043 return 0;
4044}
4045
c65d6b55
PA
4046/* If the event thread has the stop requested flag set, pretend it
4047 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
4048 target_stop). */
4049
4050static bool
4051handle_stop_requested (struct execution_control_state *ecs)
4052{
4053 if (ecs->event_thread->stop_requested)
4054 {
4055 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
4056 ecs->ws.value.sig = GDB_SIGNAL_0;
4057 handle_signal_stop (ecs);
4058 return true;
4059 }
4060 return false;
4061}
4062
a96d9b2e
SDJ
4063/* Auxiliary function that handles syscall entry/return events.
4064 It returns 1 if the inferior should keep going (and GDB
4065 should ignore the event), or 0 if the event deserves to be
4066 processed. */
ca2163eb 4067
a96d9b2e 4068static int
ca2163eb 4069handle_syscall_event (struct execution_control_state *ecs)
a96d9b2e 4070{
ca2163eb 4071 struct regcache *regcache;
ca2163eb
PA
4072 int syscall_number;
4073
00431a78 4074 context_switch (ecs);
ca2163eb 4075
00431a78 4076 regcache = get_thread_regcache (ecs->event_thread);
f90263c1 4077 syscall_number = ecs->ws.value.syscall_number;
f2ffa92b 4078 ecs->event_thread->suspend.stop_pc = regcache_read_pc (regcache);
ca2163eb 4079
a96d9b2e
SDJ
4080 if (catch_syscall_enabled () > 0
4081 && catching_syscall_number (syscall_number) > 0)
4082 {
4083 if (debug_infrun)
4084 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
4085 syscall_number);
a96d9b2e 4086
16c381f0 4087 ecs->event_thread->control.stop_bpstat
a01bda52 4088 = bpstat_stop_status (regcache->aspace (),
f2ffa92b
PA
4089 ecs->event_thread->suspend.stop_pc,
4090 ecs->event_thread, &ecs->ws);
ab04a2af 4091
c65d6b55
PA
4092 if (handle_stop_requested (ecs))
4093 return 0;
4094
ce12b012 4095 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
ca2163eb
PA
4096 {
4097 /* Catchpoint hit. */
ca2163eb
PA
4098 return 0;
4099 }
a96d9b2e 4100 }
ca2163eb 4101
c65d6b55
PA
4102 if (handle_stop_requested (ecs))
4103 return 0;
4104
ca2163eb 4105 /* If no catchpoint triggered for this, then keep going. */
ca2163eb
PA
4106 keep_going (ecs);
4107 return 1;
a96d9b2e
SDJ
4108}
4109
7e324e48
GB
4110/* Lazily fill in the execution_control_state's stop_func_* fields. */
4111
4112static void
4113fill_in_stop_func (struct gdbarch *gdbarch,
4114 struct execution_control_state *ecs)
4115{
4116 if (!ecs->stop_func_filled_in)
4117 {
98a617f8
KB
4118 const block *block;
4119
7e324e48
GB
4120 /* Don't care about return value; stop_func_start and stop_func_name
4121 will both be 0 if it doesn't work. */
98a617f8
KB
4122 find_pc_partial_function (ecs->event_thread->suspend.stop_pc,
4123 &ecs->stop_func_name,
4124 &ecs->stop_func_start,
4125 &ecs->stop_func_end,
4126 &block);
4127
4128 /* The call to find_pc_partial_function, above, will set
4129 stop_func_start and stop_func_end to the start and end
4130 of the range containing the stop pc. If this range
4131 contains the entry pc for the block (which is always the
4132 case for contiguous blocks), advance stop_func_start past
4133 the function's start offset and entrypoint. Note that
4134 stop_func_start is NOT advanced when in a range of a
4135 non-contiguous block that does not contain the entry pc. */
4136 if (block != nullptr
4137 && ecs->stop_func_start <= BLOCK_ENTRY_PC (block)
4138 && BLOCK_ENTRY_PC (block) < ecs->stop_func_end)
4139 {
4140 ecs->stop_func_start
4141 += gdbarch_deprecated_function_start_offset (gdbarch);
4142
4143 if (gdbarch_skip_entrypoint_p (gdbarch))
4144 ecs->stop_func_start
4145 = gdbarch_skip_entrypoint (gdbarch, ecs->stop_func_start);
4146 }
591a12a1 4147
7e324e48
GB
4148 ecs->stop_func_filled_in = 1;
4149 }
4150}
4151
4f5d7f63 4152
00431a78 4153/* Return the STOP_SOON field of the inferior pointed at by ECS. */
4f5d7f63
PA
4154
4155static enum stop_kind
00431a78 4156get_inferior_stop_soon (execution_control_state *ecs)
4f5d7f63 4157{
00431a78 4158 struct inferior *inf = find_inferior_ptid (ecs->ptid);
4f5d7f63
PA
4159
4160 gdb_assert (inf != NULL);
4161 return inf->control.stop_soon;
4162}
4163
372316f1
PA
4164/* Wait for one event. Store the resulting waitstatus in WS, and
4165 return the event ptid. */
4166
4167static ptid_t
4168wait_one (struct target_waitstatus *ws)
4169{
4170 ptid_t event_ptid;
4171 ptid_t wait_ptid = minus_one_ptid;
4172
4173 overlay_cache_invalid = 1;
4174
4175 /* Flush target cache before starting to handle each event.
4176 Target was running and cache could be stale. This is just a
4177 heuristic. Running threads may modify target memory, but we
4178 don't get any event. */
4179 target_dcache_invalidate ();
4180
4181 if (deprecated_target_wait_hook)
4182 event_ptid = deprecated_target_wait_hook (wait_ptid, ws, 0);
4183 else
4184 event_ptid = target_wait (wait_ptid, ws, 0);
4185
4186 if (debug_infrun)
4187 print_target_wait_results (wait_ptid, event_ptid, ws);
4188
4189 return event_ptid;
4190}
4191
4192/* Generate a wrapper for target_stopped_by_REASON that works on PTID
4193 instead of the current thread. */
4194#define THREAD_STOPPED_BY(REASON) \
4195static int \
4196thread_stopped_by_ ## REASON (ptid_t ptid) \
4197{ \
2989a365 4198 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid); \
372316f1
PA
4199 inferior_ptid = ptid; \
4200 \
2989a365 4201 return target_stopped_by_ ## REASON (); \
372316f1
PA
4202}
4203
4204/* Generate thread_stopped_by_watchpoint. */
4205THREAD_STOPPED_BY (watchpoint)
4206/* Generate thread_stopped_by_sw_breakpoint. */
4207THREAD_STOPPED_BY (sw_breakpoint)
4208/* Generate thread_stopped_by_hw_breakpoint. */
4209THREAD_STOPPED_BY (hw_breakpoint)
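/* For illustration, THREAD_STOPPED_BY (watchpoint) above expands to a
   helper equivalent to:

     static int
     thread_stopped_by_watchpoint (ptid_t ptid)
     {
       scoped_restore save_inferior_ptid
         = make_scoped_restore (&inferior_ptid);
       inferior_ptid = ptid;

       return target_stopped_by_watchpoint ();
     }

   i.e. it temporarily switches inferior_ptid to PTID and then asks
   the target for the corresponding stop reason.  */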
4210
372316f1
PA
4211/* Save the thread's event and stop reason to process it later. */
4212
4213static void
4214save_waitstatus (struct thread_info *tp, struct target_waitstatus *ws)
4215{
372316f1
PA
4216 if (debug_infrun)
4217 {
23fdd69e 4218 std::string statstr = target_waitstatus_to_string (ws);
372316f1 4219
372316f1
PA
4220 fprintf_unfiltered (gdb_stdlog,
4221 "infrun: saving status %s for %d.%ld.%ld\n",
23fdd69e 4222 statstr.c_str (),
e99b03dc 4223 tp->ptid.pid (),
e38504b3 4224 tp->ptid.lwp (),
cc6bcb54 4225 tp->ptid.tid ());
372316f1
PA
4226 }
4227
4228 /* Record for later. */
4229 tp->suspend.waitstatus = *ws;
4230 tp->suspend.waitstatus_pending_p = 1;
4231
00431a78 4232 struct regcache *regcache = get_thread_regcache (tp);
8b86c959 4233 const address_space *aspace = regcache->aspace ();
372316f1
PA
4234
4235 if (ws->kind == TARGET_WAITKIND_STOPPED
4236 && ws->value.sig == GDB_SIGNAL_TRAP)
4237 {
4238 CORE_ADDR pc = regcache_read_pc (regcache);
4239
4240 adjust_pc_after_break (tp, &tp->suspend.waitstatus);
4241
4242 if (thread_stopped_by_watchpoint (tp->ptid))
4243 {
4244 tp->suspend.stop_reason
4245 = TARGET_STOPPED_BY_WATCHPOINT;
4246 }
4247 else if (target_supports_stopped_by_sw_breakpoint ()
4248 && thread_stopped_by_sw_breakpoint (tp->ptid))
4249 {
4250 tp->suspend.stop_reason
4251 = TARGET_STOPPED_BY_SW_BREAKPOINT;
4252 }
4253 else if (target_supports_stopped_by_hw_breakpoint ()
4254 && thread_stopped_by_hw_breakpoint (tp->ptid))
4255 {
4256 tp->suspend.stop_reason
4257 = TARGET_STOPPED_BY_HW_BREAKPOINT;
4258 }
4259 else if (!target_supports_stopped_by_hw_breakpoint ()
4260 && hardware_breakpoint_inserted_here_p (aspace,
4261 pc))
4262 {
4263 tp->suspend.stop_reason
4264 = TARGET_STOPPED_BY_HW_BREAKPOINT;
4265 }
4266 else if (!target_supports_stopped_by_sw_breakpoint ()
4267 && software_breakpoint_inserted_here_p (aspace,
4268 pc))
4269 {
4270 tp->suspend.stop_reason
4271 = TARGET_STOPPED_BY_SW_BREAKPOINT;
4272 }
4273 else if (!thread_has_single_step_breakpoints_set (tp)
4274 && currently_stepping (tp))
4275 {
4276 tp->suspend.stop_reason
4277 = TARGET_STOPPED_BY_SINGLE_STEP;
4278 }
4279 }
4280}
4281
6efcd9a8 4282/* See infrun.h. */
372316f1 4283
6efcd9a8 4284void
372316f1
PA
4285stop_all_threads (void)
4286{
4287 /* We may need multiple passes to discover all threads. */
4288 int pass;
4289 int iterations = 0;
372316f1 4290
fbea99ea 4291 gdb_assert (target_is_non_stop_p ());
372316f1
PA
4292
4293 if (debug_infrun)
4294 fprintf_unfiltered (gdb_stdlog, "infrun: stop_all_threads\n");
4295
00431a78 4296 scoped_restore_current_thread restore_thread;
372316f1 4297
65706a29 4298 target_thread_events (1);
9885e6bb 4299 SCOPE_EXIT { target_thread_events (0); };
65706a29 4300
372316f1
PA
4301 /* Request threads to stop, and then wait for the stops. Because
4302 threads we already know about can spawn more threads while we're
4303 trying to stop them, and we only learn about new threads when we
4304 update the thread list, do this in a loop, and keep iterating
4305 until two passes find no threads that need to be stopped. */
4306 for (pass = 0; pass < 2; pass++, iterations++)
4307 {
4308 if (debug_infrun)
4309 fprintf_unfiltered (gdb_stdlog,
4310 "infrun: stop_all_threads, pass=%d, "
4311 "iterations=%d\n", pass, iterations);
4312 while (1)
4313 {
4314 ptid_t event_ptid;
4315 struct target_waitstatus ws;
4316 int need_wait = 0;
372316f1
PA
4317
4318 update_thread_list ();
4319
4320 /* Go through all threads looking for threads that we need
4321 to tell the target to stop. */
08036331 4322 for (thread_info *t : all_non_exited_threads ())
372316f1
PA
4323 {
4324 if (t->executing)
4325 {
4326 /* If already stopping, don't request a stop again.
4327 We just haven't seen the notification yet. */
4328 if (!t->stop_requested)
4329 {
4330 if (debug_infrun)
4331 fprintf_unfiltered (gdb_stdlog,
4332 "infrun: %s executing, "
4333 "need stop\n",
a068643d 4334 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
4335 target_stop (t->ptid);
4336 t->stop_requested = 1;
4337 }
4338 else
4339 {
4340 if (debug_infrun)
4341 fprintf_unfiltered (gdb_stdlog,
4342 "infrun: %s executing, "
4343 "already stopping\n",
a068643d 4344 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
4345 }
4346
4347 if (t->stop_requested)
4348 need_wait = 1;
4349 }
4350 else
4351 {
4352 if (debug_infrun)
4353 fprintf_unfiltered (gdb_stdlog,
4354 "infrun: %s not executing\n",
a068643d 4355 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
4356
4357 /* The thread may be not executing, but still be
4358 resumed with a pending status to process. */
4359 t->resumed = 0;
4360 }
4361 }
4362
4363 if (!need_wait)
4364 break;
4365
4366 /* If we find new threads on the second iteration, restart
4367 over. We want to see two iterations in a row with all
4368 threads stopped. */
4369 if (pass > 0)
4370 pass = -1;
4371
4372 event_ptid = wait_one (&ws);
c29705b7 4373 if (debug_infrun)
372316f1 4374 {
c29705b7
PW
4375 fprintf_unfiltered (gdb_stdlog,
4376 "infrun: stop_all_threads %s %s\n",
4377 target_waitstatus_to_string (&ws).c_str (),
4378 target_pid_to_str (event_ptid).c_str ());
372316f1 4379 }
372316f1 4380
c29705b7
PW
4381 if (ws.kind == TARGET_WAITKIND_NO_RESUMED
4382 || ws.kind == TARGET_WAITKIND_THREAD_EXITED
4383 || ws.kind == TARGET_WAITKIND_EXITED
4384 || ws.kind == TARGET_WAITKIND_SIGNALLED)
4385 {
4386 /* All resumed threads exited
4387 or one thread/process exited/signalled. */
372316f1
PA
4388 }
4389 else
4390 {
08036331 4391 thread_info *t = find_thread_ptid (event_ptid);
372316f1
PA
4392 if (t == NULL)
4393 t = add_thread (event_ptid);
4394
4395 t->stop_requested = 0;
4396 t->executing = 0;
4397 t->resumed = 0;
4398 t->control.may_range_step = 0;
4399
6efcd9a8
PA
4400 /* This may be the first time we see the inferior report
4401 a stop. */
08036331 4402 inferior *inf = find_inferior_ptid (event_ptid);
6efcd9a8
PA
4403 if (inf->needs_setup)
4404 {
4405 switch_to_thread_no_regs (t);
4406 setup_inferior (0);
4407 }
4408
372316f1
PA
4409 if (ws.kind == TARGET_WAITKIND_STOPPED
4410 && ws.value.sig == GDB_SIGNAL_0)
4411 {
4412 /* We caught the event that we intended to catch, so
4413 there's no event pending. */
4414 t->suspend.waitstatus.kind = TARGET_WAITKIND_IGNORE;
4415 t->suspend.waitstatus_pending_p = 0;
4416
00431a78 4417 if (displaced_step_fixup (t, GDB_SIGNAL_0) < 0)
372316f1
PA
4418 {
4419 /* Add it back to the step-over queue. */
4420 if (debug_infrun)
4421 {
4422 fprintf_unfiltered (gdb_stdlog,
4423 "infrun: displaced-step of %s "
4424 "canceled: adding back to the "
4425 "step-over queue\n",
a068643d 4426 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
4427 }
4428 t->control.trap_expected = 0;
4429 thread_step_over_chain_enqueue (t);
4430 }
4431 }
4432 else
4433 {
4434 enum gdb_signal sig;
4435 struct regcache *regcache;
372316f1
PA
4436
4437 if (debug_infrun)
4438 {
23fdd69e 4439 std::string statstr = target_waitstatus_to_string (&ws);
372316f1 4440
372316f1
PA
4441 fprintf_unfiltered (gdb_stdlog,
4442 "infrun: target_wait %s, saving "
4443 "status for %d.%ld.%ld\n",
23fdd69e 4444 statstr.c_str (),
e99b03dc 4445 t->ptid.pid (),
e38504b3 4446 t->ptid.lwp (),
cc6bcb54 4447 t->ptid.tid ());
372316f1
PA
4448 }
4449
4450 /* Record for later. */
4451 save_waitstatus (t, &ws);
4452
4453 sig = (ws.kind == TARGET_WAITKIND_STOPPED
4454 ? ws.value.sig : GDB_SIGNAL_0);
4455
00431a78 4456 if (displaced_step_fixup (t, sig) < 0)
372316f1
PA
4457 {
4458 /* Add it back to the step-over queue. */
4459 t->control.trap_expected = 0;
4460 thread_step_over_chain_enqueue (t);
4461 }
4462
00431a78 4463 regcache = get_thread_regcache (t);
372316f1
PA
4464 t->suspend.stop_pc = regcache_read_pc (regcache);
4465
4466 if (debug_infrun)
4467 {
4468 fprintf_unfiltered (gdb_stdlog,
4469 "infrun: saved stop_pc=%s for %s "
4470 "(currently_stepping=%d)\n",
4471 paddress (target_gdbarch (),
4472 t->suspend.stop_pc),
a068643d 4473 target_pid_to_str (t->ptid).c_str (),
372316f1
PA
4474 currently_stepping (t));
4475 }
4476 }
4477 }
4478 }
4479 }
4480
372316f1
PA
4481 if (debug_infrun)
4482 fprintf_unfiltered (gdb_stdlog, "infrun: stop_all_threads done\n");
4483}
4484
f4836ba9
PA
4485/* Handle a TARGET_WAITKIND_NO_RESUMED event. */
4486
4487static int
4488handle_no_resumed (struct execution_control_state *ecs)
4489{
3b12939d 4490 if (target_can_async_p ())
f4836ba9 4491 {
3b12939d
PA
4492 struct ui *ui;
4493 int any_sync = 0;
f4836ba9 4494
3b12939d
PA
4495 ALL_UIS (ui)
4496 {
4497 if (ui->prompt_state == PROMPT_BLOCKED)
4498 {
4499 any_sync = 1;
4500 break;
4501 }
4502 }
4503 if (!any_sync)
4504 {
4505 /* There were no unwaited-for children left in the target, but
4506 we're not synchronously waiting for events either. Just
4507 ignore. */
4508
4509 if (debug_infrun)
4510 fprintf_unfiltered (gdb_stdlog,
4511 "infrun: TARGET_WAITKIND_NO_RESUMED "
4512 "(ignoring: bg)\n");
4513 prepare_to_wait (ecs);
4514 return 1;
4515 }
f4836ba9
PA
4516 }
4517
4518 /* Otherwise, if we were running a synchronous execution command, we
4519 may need to cancel it and give the user back the terminal.
4520
4521 In non-stop mode, the target can't tell whether we've already
4522 consumed previous stop events, so it can end up sending us a
4523 no-resumed event like so:
4524
4525 #0 - thread 1 is left stopped
4526
4527 #1 - thread 2 is resumed and hits breakpoint
4528 -> TARGET_WAITKIND_STOPPED
4529
4530 #2 - thread 3 is resumed and exits
4531 this is the last resumed thread, so
4532 -> TARGET_WAITKIND_NO_RESUMED
4533
4534 #3 - gdb processes stop for thread 2 and decides to re-resume
4535 it.
4536
4537 #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
4538 thread 2 is now resumed, so the event should be ignored.
4539
4540 IOW, if the stop for thread 2 doesn't end a foreground command,
4541 then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
4542 event. But it could be that the event meant that thread 2 itself
4543 (or whatever other thread was the last resumed thread) exited.
4544
4545 To address this we refresh the thread list and check whether we
4546 have resumed threads _now_. In the example above, this removes
4547 thread 3 from the thread list. If thread 2 was re-resumed, we
4548 ignore this event. If we find no thread resumed, then we cancel
4549 the synchronous command and show "no unwaited-for " to the user. */
4550 update_thread_list ();
4551
08036331 4552 for (thread_info *thread : all_non_exited_threads ())
f4836ba9
PA
4553 {
4554 if (thread->executing
4555 || thread->suspend.waitstatus_pending_p)
4556 {
4557 /* There were no unwaited-for children left in the target at
4558 some point, but there are now. Just ignore. */
4559 if (debug_infrun)
4560 fprintf_unfiltered (gdb_stdlog,
4561 "infrun: TARGET_WAITKIND_NO_RESUMED "
4562 "(ignoring: found resumed)\n");
4563 prepare_to_wait (ecs);
4564 return 1;
4565 }
4566 }
4567
4568 /* Note however that we may find no resumed thread because the whole
4569 process exited meanwhile (thus updating the thread list results
4570 in an empty thread list). In this case we know we'll be getting
4571 a process exit event shortly. */
08036331 4572 for (inferior *inf : all_inferiors ())
f4836ba9
PA
4573 {
4574 if (inf->pid == 0)
4575 continue;
4576
08036331 4577 thread_info *thread = any_live_thread_of_inferior (inf);
f4836ba9
PA
4578 if (thread == NULL)
4579 {
4580 if (debug_infrun)
4581 fprintf_unfiltered (gdb_stdlog,
4582 "infrun: TARGET_WAITKIND_NO_RESUMED "
4583 "(expect process exit)\n");
4584 prepare_to_wait (ecs);
4585 return 1;
4586 }
4587 }
4588
4589 /* Go ahead and report the event. */
4590 return 0;
4591}
4592
05ba8510
PA
4593/* Given an execution control state that has been freshly filled in by
4594 an event from the inferior, figure out what it means and take
4595 appropriate action.
4596
4597 The alternatives are:
4598
22bcd14b 4599 1) stop_waiting and return; to really stop and return to the
05ba8510
PA
4600 debugger.
4601
4602 2) keep_going and return; to wait for the next event (set
4603 ecs->event_thread->stepping_over_breakpoint to 1 to single step
4604 once). */
c906108c 4605
ec9499be 4606static void
595915c1 4607handle_inferior_event (struct execution_control_state *ecs)
cd0fc7c3 4608{
595915c1
TT
4609 /* Make sure that all temporary struct value objects that were
4610 created during the handling of the event get deleted at the
4611 end. */
4612 scoped_value_mark free_values;
4613
d6b48e9c
PA
4614 enum stop_kind stop_soon;
4615
c29705b7
PW
4616 if (debug_infrun)
4617 fprintf_unfiltered (gdb_stdlog, "infrun: handle_inferior_event %s\n",
4618 target_waitstatus_to_string (&ecs->ws).c_str ());
4619
28736962
PA
4620 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
4621 {
4622 /* We had an event in the inferior, but we are not interested in
4623 handling it at this level. The lower layers have already
4624 done what needs to be done, if anything.
4625
4626 One of the possible circumstances for this is when the
4627 inferior produces output for the console. The inferior has
4628 not stopped, and we are ignoring the event. Another possible
4629 circumstance is any event which the lower level knows will be
4630 reported multiple times without an intervening resume. */
28736962
PA
4631 prepare_to_wait (ecs);
4632 return;
4633 }
4634
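  /* A thread exit event.  There is nothing for this level to do with
     it; just go back to waiting for the next event.  */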
65706a29
PA
4635 if (ecs->ws.kind == TARGET_WAITKIND_THREAD_EXITED)
4636 {
65706a29
PA
4637 prepare_to_wait (ecs);
4638 return;
4639 }
4640
0e5bf2a8 4641 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
f4836ba9
PA
4642 && handle_no_resumed (ecs))
4643 return;
0e5bf2a8 4644
1777feb0 4645 /* Cache the last pid/waitstatus. */
c32c64b7 4646 set_last_target_status (ecs->ptid, ecs->ws);
e02bc4cc 4647
ca005067 4648 /* Always clear state belonging to the previous time we stopped. */
aa7d318d 4649 stop_stack_dummy = STOP_NONE;
ca005067 4650
0e5bf2a8
PA
4651 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
4652 {
4653 /* No unwaited-for children left. IOW, all resumed children
4654 have exited. */
0e5bf2a8 4655 stop_print_frame = 0;
22bcd14b 4656 stop_waiting (ecs);
0e5bf2a8
PA
4657 return;
4658 }
4659
8c90c137 4660 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
64776a0b 4661 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
359f5fe6
PA
4662 {
4663 ecs->event_thread = find_thread_ptid (ecs->ptid);
4664 /* If it's a new thread, add it to the thread database. */
4665 if (ecs->event_thread == NULL)
4666 ecs->event_thread = add_thread (ecs->ptid);
c1e36e3e
PA
4667
4668 /* Disable range stepping. If the next step request could use a
 4669	     range, it will end up re-enabled then.  */
4670 ecs->event_thread->control.may_range_step = 0;
359f5fe6 4671 }
88ed393a
JK
4672
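  /* On targets where the trap is reported with the PC past the
     breakpoint instruction, back the PC up so that it points at the
     breakpoint address itself.  */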
4673 /* Dependent on valid ECS->EVENT_THREAD. */
d8dd4d5f 4674 adjust_pc_after_break (ecs->event_thread, &ecs->ws);
88ed393a
JK
4675
4676 /* Dependent on the current PC value modified by adjust_pc_after_break. */
4677 reinit_frame_cache ();
4678
28736962
PA
4679 breakpoint_retire_moribund ();
4680
2b009048
DJ
4681 /* First, distinguish signals caused by the debugger from signals
4682 that have to do with the program's own actions. Note that
4683 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
4684 on the operating system version. Here we detect when a SIGILL or
4685 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
4686 something similar for SIGSEGV, since a SIGSEGV will be generated
4687 when we're trying to execute a breakpoint instruction on a
4688 non-executable stack. This happens for call dummy breakpoints
4689 for architectures like SPARC that place call dummies on the
4690 stack. */
2b009048 4691 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
a493e3e2
PA
4692 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
4693 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
4694 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
2b009048 4695 {
00431a78 4696 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
de0a0249 4697
a01bda52 4698 if (breakpoint_inserted_here_p (regcache->aspace (),
de0a0249
UW
4699 regcache_read_pc (regcache)))
4700 {
4701 if (debug_infrun)
4702 fprintf_unfiltered (gdb_stdlog,
4703 "infrun: Treating signal as SIGTRAP\n");
a493e3e2 4704 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
de0a0249 4705 }
2b009048
DJ
4706 }
4707
28736962
PA
4708 /* Mark the non-executing threads accordingly. In all-stop, all
4709 threads of all processes are stopped when we get any event
e1316e60 4710 reported. In non-stop mode, only the event thread stops. */
372316f1
PA
4711 {
4712 ptid_t mark_ptid;
4713
fbea99ea 4714 if (!target_is_non_stop_p ())
372316f1
PA
4715 mark_ptid = minus_one_ptid;
4716 else if (ecs->ws.kind == TARGET_WAITKIND_SIGNALLED
4717 || ecs->ws.kind == TARGET_WAITKIND_EXITED)
4718 {
4719 /* If we're handling a process exit in non-stop mode, even
4720 though threads haven't been deleted yet, one would think
4721 that there is nothing to do, as threads of the dead process
4722 will be soon deleted, and threads of any other process were
4723 left running. However, on some targets, threads survive a
4724 process exit event. E.g., for the "checkpoint" command,
4725 when the current checkpoint/fork exits, linux-fork.c
4726 automatically switches to another fork from within
4727 target_mourn_inferior, by associating the same
4728 inferior/thread to another fork. We haven't mourned yet at
4729 this point, but we must mark any threads left in the
4730 process as not-executing so that finish_thread_state marks
4731 them stopped (in the user's perspective) if/when we present
4732 the stop to the user. */
e99b03dc 4733 mark_ptid = ptid_t (ecs->ptid.pid ());
372316f1
PA
4734 }
4735 else
4736 mark_ptid = ecs->ptid;
4737
4738 set_executing (mark_ptid, 0);
4739
4740 /* Likewise the resumed flag. */
4741 set_resumed (mark_ptid, 0);
4742 }
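  /* Dispatch on the kind of event.  Each case below either stops
     (stop_waiting), resumes and waits for more events (keep_going /
     resume + prepare_to_wait), or defers the decision to
     process_event_stop_test or handle_signal_stop.  */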
8c90c137 4743
488f131b
JB
4744 switch (ecs->ws.kind)
4745 {
4746 case TARGET_WAITKIND_LOADED:
00431a78 4747 context_switch (ecs);
b0f4b84b
DJ
4748 /* Ignore gracefully during startup of the inferior, as it might
4749 be the shell which has just loaded some objects, otherwise
4750 add the symbols for the newly loaded objects. Also ignore at
4751 the beginning of an attach or remote session; we will query
4752 the full list of libraries once the connection is
4753 established. */
4f5d7f63 4754
00431a78 4755 stop_soon = get_inferior_stop_soon (ecs);
c0236d92 4756 if (stop_soon == NO_STOP_QUIETLY)
488f131b 4757 {
edcc5120
TT
4758 struct regcache *regcache;
4759
00431a78 4760 regcache = get_thread_regcache (ecs->event_thread);
edcc5120
TT
4761
4762 handle_solib_event ();
4763
4764 ecs->event_thread->control.stop_bpstat
a01bda52 4765 = bpstat_stop_status (regcache->aspace (),
f2ffa92b
PA
4766 ecs->event_thread->suspend.stop_pc,
4767 ecs->event_thread, &ecs->ws);
ab04a2af 4768
c65d6b55
PA
4769 if (handle_stop_requested (ecs))
4770 return;
4771
ce12b012 4772 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
edcc5120
TT
4773 {
4774 /* A catchpoint triggered. */
94c57d6a
PA
4775 process_event_stop_test (ecs);
4776 return;
edcc5120 4777 }
488f131b 4778
b0f4b84b
DJ
4779 /* If requested, stop when the dynamic linker notifies
4780 gdb of events. This allows the user to get control
4781 and place breakpoints in initializer routines for
4782 dynamically loaded objects (among other things). */
a493e3e2 4783 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
b0f4b84b
DJ
4784 if (stop_on_solib_events)
4785 {
55409f9d
DJ
4786 /* Make sure we print "Stopped due to solib-event" in
4787 normal_stop. */
4788 stop_print_frame = 1;
4789
22bcd14b 4790 stop_waiting (ecs);
b0f4b84b
DJ
4791 return;
4792 }
488f131b 4793 }
b0f4b84b
DJ
4794
4795 /* If we are skipping through a shell, or through shared library
4796 loading that we aren't interested in, resume the program. If
5c09a2c5 4797 we're running the program normally, also resume. */
b0f4b84b
DJ
4798 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
4799 {
74960c60
VP
4800 /* Loading of shared libraries might have changed breakpoint
4801 addresses. Make sure new breakpoints are inserted. */
a25a5a45 4802 if (stop_soon == NO_STOP_QUIETLY)
74960c60 4803 insert_breakpoints ();
64ce06e4 4804 resume (GDB_SIGNAL_0);
b0f4b84b
DJ
4805 prepare_to_wait (ecs);
4806 return;
4807 }
4808
5c09a2c5
PA
4809 /* But stop if we're attaching or setting up a remote
4810 connection. */
4811 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
4812 || stop_soon == STOP_QUIETLY_REMOTE)
4813 {
4814 if (debug_infrun)
4815 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
22bcd14b 4816 stop_waiting (ecs);
5c09a2c5
PA
4817 return;
4818 }
4819
4820 internal_error (__FILE__, __LINE__,
4821 _("unhandled stop_soon: %d"), (int) stop_soon);
c5aa993b 4822
488f131b 4823 case TARGET_WAITKIND_SPURIOUS:
c65d6b55
PA
4824 if (handle_stop_requested (ecs))
4825 return;
00431a78 4826 context_switch (ecs);
64ce06e4 4827 resume (GDB_SIGNAL_0);
488f131b
JB
4828 prepare_to_wait (ecs);
4829 return;
c5aa993b 4830
65706a29 4831 case TARGET_WAITKIND_THREAD_CREATED:
c65d6b55
PA
4832 if (handle_stop_requested (ecs))
4833 return;
00431a78 4834 context_switch (ecs);
65706a29
PA
4835 if (!switch_back_to_stepped_thread (ecs))
4836 keep_going (ecs);
4837 return;
4838
488f131b 4839 case TARGET_WAITKIND_EXITED:
940c3c06 4840 case TARGET_WAITKIND_SIGNALLED:
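      /* The inferior is gone (or going).  Make it and its program space
	 current so that the exit handling below (mourning, $_exitcode /
	 $_exitsignal, observers) applies to the right inferior.  */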
fb66883a 4841 inferior_ptid = ecs->ptid;
c9657e70 4842 set_current_inferior (find_inferior_ptid (ecs->ptid));
6c95b8df
PA
4843 set_current_program_space (current_inferior ()->pspace);
4844 handle_vfork_child_exec_or_exit (0);
223ffa71 4845 target_terminal::ours (); /* Must do this before mourn anyway. */
488f131b 4846
0c557179
SDJ
 4847      /* Clear any previous state of convenience variables.  */
4848 clear_exit_convenience_vars ();
4849
940c3c06
PA
4850 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
4851 {
4852 /* Record the exit code in the convenience variable $_exitcode, so
4853 that the user can inspect this again later. */
4854 set_internalvar_integer (lookup_internalvar ("_exitcode"),
4855 (LONGEST) ecs->ws.value.integer);
4856
4857 /* Also record this in the inferior itself. */
4858 current_inferior ()->has_exit_code = 1;
4859 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
8cf64490 4860
98eb56a4
PA
4861 /* Support the --return-child-result option. */
4862 return_child_result_value = ecs->ws.value.integer;
4863
76727919 4864 gdb::observers::exited.notify (ecs->ws.value.integer);
940c3c06
PA
4865 }
4866 else
0c557179 4867 {
00431a78 4868 struct gdbarch *gdbarch = current_inferior ()->gdbarch;
0c557179
SDJ
4869
4870 if (gdbarch_gdb_signal_to_target_p (gdbarch))
4871 {
4872 /* Set the value of the internal variable $_exitsignal,
4873 which holds the signal uncaught by the inferior. */
4874 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
4875 gdbarch_gdb_signal_to_target (gdbarch,
4876 ecs->ws.value.sig));
4877 }
4878 else
4879 {
4880 /* We don't have access to the target's method used for
4881 converting between signal numbers (GDB's internal
4882 representation <-> target's representation).
4883 Therefore, we cannot do a good job at displaying this
4884 information to the user. It's better to just warn
4885 her about it (if infrun debugging is enabled), and
4886 give up. */
4887 if (debug_infrun)
4888 fprintf_filtered (gdb_stdlog, _("\
4889Cannot fill $_exitsignal with the correct signal number.\n"));
4890 }
4891
76727919 4892 gdb::observers::signal_exited.notify (ecs->ws.value.sig);
0c557179 4893 }
8cf64490 4894
488f131b 4895 gdb_flush (gdb_stdout);
bc1e6c81 4896 target_mourn_inferior (inferior_ptid);
488f131b 4897 stop_print_frame = 0;
22bcd14b 4898 stop_waiting (ecs);
488f131b 4899 return;
c5aa993b 4900
488f131b 4901 /* The following are the only cases in which we keep going;
1777feb0 4902 the above cases end in a continue or goto. */
488f131b 4903 case TARGET_WAITKIND_FORKED:
deb3b17b 4904 case TARGET_WAITKIND_VFORKED:
e2d96639
YQ
4905 /* Check whether the inferior is displaced stepping. */
4906 {
00431a78 4907 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
ac7936df 4908 struct gdbarch *gdbarch = regcache->arch ();
e2d96639
YQ
4909
 4910	/* Check whether thread ecs->ptid is in the middle of a
 4911	   displaced step.  */
00431a78 4912 if (displaced_step_in_progress_thread (ecs->event_thread))
e2d96639
YQ
4913 {
4914 struct inferior *parent_inf
c9657e70 4915 = find_inferior_ptid (ecs->ptid);
e2d96639
YQ
4916 struct regcache *child_regcache;
4917 CORE_ADDR parent_pc;
4918
4919 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
 4920	       indicating that the displaced stepping of the syscall instruction
4921 has been done. Perform cleanup for parent process here. Note
4922 that this operation also cleans up the child process for vfork,
4923 because their pages are shared. */
00431a78 4924 displaced_step_fixup (ecs->event_thread, GDB_SIGNAL_TRAP);
c2829269
PA
4925 /* Start a new step-over in another thread if there's one
4926 that needs it. */
4927 start_step_over ();
e2d96639
YQ
4928
4929 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
4930 {
c0987663 4931 struct displaced_step_inferior_state *displaced
00431a78 4932 = get_displaced_stepping_state (parent_inf);
c0987663 4933
e2d96639
YQ
4934 /* Restore scratch pad for child process. */
4935 displaced_step_restore (displaced, ecs->ws.value.related_pid);
4936 }
4937
4938 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
4939 the child's PC is also within the scratchpad. Set the child's PC
4940 to the parent's PC value, which has already been fixed up.
4941 FIXME: we use the parent's aspace here, although we're touching
4942 the child, because the child hasn't been added to the inferior
4943 list yet at this point. */
4944
4945 child_regcache
4946 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
4947 gdbarch,
4948 parent_inf->aspace);
4949 /* Read PC value of parent process. */
4950 parent_pc = regcache_read_pc (regcache);
4951
4952 if (debug_displaced)
4953 fprintf_unfiltered (gdb_stdlog,
4954 "displaced: write child pc from %s to %s\n",
4955 paddress (gdbarch,
4956 regcache_read_pc (child_regcache)),
4957 paddress (gdbarch, parent_pc));
4958
4959 regcache_write_pc (child_regcache, parent_pc);
4960 }
4961 }
4962
00431a78 4963 context_switch (ecs);
5a2901d9 4964
b242c3c2
PA
4965 /* Immediately detach breakpoints from the child before there's
4966 any chance of letting the user delete breakpoints from the
4967 breakpoint lists. If we don't do this early, it's easy to
 4968	 leave left-over traps in the child, viz: "break foo; catch
4969 fork; c; <fork>; del; c; <child calls foo>". We only follow
4970 the fork on the last `continue', and by that time the
4971 breakpoint at "foo" is long gone from the breakpoint table.
4972 If we vforked, then we don't need to unpatch here, since both
4973 parent and child are sharing the same memory pages; we'll
4974 need to unpatch at follow/detach time instead to be certain
4975 that new breakpoints added between catchpoint hit time and
4976 vfork follow are detached. */
4977 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
4978 {
b242c3c2
PA
4979 /* This won't actually modify the breakpoint list, but will
4980 physically remove the breakpoints from the child. */
d80ee84f 4981 detach_breakpoints (ecs->ws.value.related_pid);
b242c3c2
PA
4982 }
4983
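      /* Remove any single-step breakpoints installed for the threads
	 that just stopped.  */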
34b7e8a6 4984 delete_just_stopped_threads_single_step_breakpoints ();
d03285ec 4985
e58b0e63
PA
4986 /* In case the event is caught by a catchpoint, remember that
4987 the event is to be followed at the next resume of the thread,
4988 and not immediately. */
4989 ecs->event_thread->pending_follow = ecs->ws;
4990
f2ffa92b
PA
4991 ecs->event_thread->suspend.stop_pc
4992 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
675bf4cb 4993
16c381f0 4994 ecs->event_thread->control.stop_bpstat
a01bda52 4995 = bpstat_stop_status (get_current_regcache ()->aspace (),
f2ffa92b
PA
4996 ecs->event_thread->suspend.stop_pc,
4997 ecs->event_thread, &ecs->ws);
675bf4cb 4998
c65d6b55
PA
4999 if (handle_stop_requested (ecs))
5000 return;
5001
ce12b012
PA
5002 /* If no catchpoint triggered for this, then keep going. Note
 5003	 that we're interested in knowing whether the bpstat actually causes a
5004 stop, not just if it may explain the signal. Software
5005 watchpoints, for example, always appear in the bpstat. */
5006 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
04e68871 5007 {
e58b0e63 5008 int should_resume;
3e43a32a
MS
5009 int follow_child
5010 = (follow_fork_mode_string == follow_fork_mode_child);
e58b0e63 5011
a493e3e2 5012 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
e58b0e63
PA
5013
5014 should_resume = follow_fork ();
5015
00431a78
PA
5016 thread_info *parent = ecs->event_thread;
5017 thread_info *child = find_thread_ptid (ecs->ws.value.related_pid);
6c95b8df 5018
a2077e25
PA
5019 /* At this point, the parent is marked running, and the
5020 child is marked stopped. */
5021
5022 /* If not resuming the parent, mark it stopped. */
5023 if (follow_child && !detach_fork && !non_stop && !sched_multi)
00431a78 5024 parent->set_running (false);
a2077e25
PA
5025
5026 /* If resuming the child, mark it running. */
5027 if (follow_child || (!detach_fork && (non_stop || sched_multi)))
00431a78 5028 child->set_running (true);
a2077e25 5029
6c95b8df 5030 /* In non-stop mode, also resume the other branch. */
fbea99ea
PA
5031 if (!detach_fork && (non_stop
5032 || (sched_multi && target_is_non_stop_p ())))
6c95b8df
PA
5033 {
5034 if (follow_child)
5035 switch_to_thread (parent);
5036 else
5037 switch_to_thread (child);
5038
5039 ecs->event_thread = inferior_thread ();
5040 ecs->ptid = inferior_ptid;
5041 keep_going (ecs);
5042 }
5043
5044 if (follow_child)
5045 switch_to_thread (child);
5046 else
5047 switch_to_thread (parent);
5048
e58b0e63
PA
5049 ecs->event_thread = inferior_thread ();
5050 ecs->ptid = inferior_ptid;
5051
5052 if (should_resume)
5053 keep_going (ecs);
5054 else
22bcd14b 5055 stop_waiting (ecs);
04e68871
DJ
5056 return;
5057 }
94c57d6a
PA
5058 process_event_stop_test (ecs);
5059 return;
488f131b 5060
6c95b8df
PA
5061 case TARGET_WAITKIND_VFORK_DONE:
5062 /* Done with the shared memory region. Re-insert breakpoints in
5063 the parent, and keep going. */
5064
00431a78 5065 context_switch (ecs);
6c95b8df
PA
5066
5067 current_inferior ()->waiting_for_vfork_done = 0;
56710373 5068 current_inferior ()->pspace->breakpoints_not_allowed = 0;
c65d6b55
PA
5069
5070 if (handle_stop_requested (ecs))
5071 return;
5072
6c95b8df
PA
5073 /* This also takes care of reinserting breakpoints in the
5074 previously locked inferior. */
5075 keep_going (ecs);
5076 return;
5077
488f131b 5078 case TARGET_WAITKIND_EXECD:
488f131b 5079
cbd2b4e3
PA
5080 /* Note we can't read registers yet (the stop_pc), because we
5081 don't yet know the inferior's post-exec architecture.
5082 'stop_pc' is explicitly read below instead. */
00431a78 5083 switch_to_thread_no_regs (ecs->event_thread);
5a2901d9 5084
6c95b8df
PA
5085 /* Do whatever is necessary to the parent branch of the vfork. */
5086 handle_vfork_child_exec_or_exit (1);
5087
795e548f
PA
5088 /* This causes the eventpoints and symbol table to be reset.
5089 Must do this now, before trying to determine whether to
5090 stop. */
71b43ef8 5091 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
795e548f 5092
17d8546e
DB
5093 /* In follow_exec we may have deleted the original thread and
5094 created a new one. Make sure that the event thread is the
5095 execd thread for that case (this is a nop otherwise). */
5096 ecs->event_thread = inferior_thread ();
5097
f2ffa92b
PA
5098 ecs->event_thread->suspend.stop_pc
5099 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
ecdc3a72 5100
16c381f0 5101 ecs->event_thread->control.stop_bpstat
a01bda52 5102 = bpstat_stop_status (get_current_regcache ()->aspace (),
f2ffa92b
PA
5103 ecs->event_thread->suspend.stop_pc,
5104 ecs->event_thread, &ecs->ws);
795e548f 5105
71b43ef8
PA
5106 /* Note that this may be referenced from inside
5107 bpstat_stop_status above, through inferior_has_execd. */
5108 xfree (ecs->ws.value.execd_pathname);
5109 ecs->ws.value.execd_pathname = NULL;
5110
c65d6b55
PA
5111 if (handle_stop_requested (ecs))
5112 return;
5113
04e68871 5114 /* If no catchpoint triggered for this, then keep going. */
ce12b012 5115 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
04e68871 5116 {
a493e3e2 5117 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
04e68871
DJ
5118 keep_going (ecs);
5119 return;
5120 }
94c57d6a
PA
5121 process_event_stop_test (ecs);
5122 return;
488f131b 5123
b4dc5ffa
MK
5124 /* Be careful not to try to gather much state about a thread
5125 that's in a syscall. It's frequently a losing proposition. */
488f131b 5126 case TARGET_WAITKIND_SYSCALL_ENTRY:
1777feb0 5127 /* Getting the current syscall number. */
94c57d6a
PA
5128 if (handle_syscall_event (ecs) == 0)
5129 process_event_stop_test (ecs);
5130 return;
c906108c 5131
488f131b
JB
5132 /* Before examining the threads further, step this thread to
5133 get it entirely out of the syscall. (We get notice of the
5134 event when the thread is just on the verge of exiting a
5135 syscall. Stepping one instruction seems to get it back
b4dc5ffa 5136 into user code.) */
488f131b 5137 case TARGET_WAITKIND_SYSCALL_RETURN:
94c57d6a
PA
5138 if (handle_syscall_event (ecs) == 0)
5139 process_event_stop_test (ecs);
5140 return;
c906108c 5141
488f131b 5142 case TARGET_WAITKIND_STOPPED:
4f5d7f63
PA
5143 handle_signal_stop (ecs);
5144 return;
c906108c 5145
b2175913
MS
5146 case TARGET_WAITKIND_NO_HISTORY:
5147 /* Reverse execution: target ran out of history info. */
eab402df 5148
d1988021 5149 /* Switch to the stopped thread. */
00431a78 5150 context_switch (ecs);
d1988021
MM
5151 if (debug_infrun)
5152 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
5153
34b7e8a6 5154 delete_just_stopped_threads_single_step_breakpoints ();
f2ffa92b
PA
5155 ecs->event_thread->suspend.stop_pc
5156 = regcache_read_pc (get_thread_regcache (inferior_thread ()));
c65d6b55
PA
5157
5158 if (handle_stop_requested (ecs))
5159 return;
5160
76727919 5161 gdb::observers::no_history.notify ();
22bcd14b 5162 stop_waiting (ecs);
b2175913 5163 return;
488f131b 5164 }
4f5d7f63
PA
5165}
5166
372316f1
PA
5167/* Restart threads back to what they were trying to do back when we
5168 paused them for an in-line step-over. The EVENT_THREAD thread is
5169 ignored. */
4d9d9d04
PA
5170
5171static void
372316f1
PA
5172restart_threads (struct thread_info *event_thread)
5173{
372316f1
PA
5174 /* In case the instruction just stepped spawned a new thread. */
5175 update_thread_list ();
5176
08036331 5177 for (thread_info *tp : all_non_exited_threads ())
372316f1
PA
5178 {
5179 if (tp == event_thread)
5180 {
5181 if (debug_infrun)
5182 fprintf_unfiltered (gdb_stdlog,
5183 "infrun: restart threads: "
5184 "[%s] is event thread\n",
a068643d 5185 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5186 continue;
5187 }
5188
5189 if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
5190 {
5191 if (debug_infrun)
5192 fprintf_unfiltered (gdb_stdlog,
5193 "infrun: restart threads: "
5194 "[%s] not meant to be running\n",
a068643d 5195 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5196 continue;
5197 }
5198
5199 if (tp->resumed)
5200 {
5201 if (debug_infrun)
5202 fprintf_unfiltered (gdb_stdlog,
5203 "infrun: restart threads: [%s] resumed\n",
a068643d 5204 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5205 gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
5206 continue;
5207 }
5208
5209 if (thread_is_in_step_over_chain (tp))
5210 {
5211 if (debug_infrun)
5212 fprintf_unfiltered (gdb_stdlog,
5213 "infrun: restart threads: "
5214 "[%s] needs step-over\n",
a068643d 5215 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5216 gdb_assert (!tp->resumed);
5217 continue;
5218 }
5219
5220
5221 if (tp->suspend.waitstatus_pending_p)
5222 {
5223 if (debug_infrun)
5224 fprintf_unfiltered (gdb_stdlog,
5225 "infrun: restart threads: "
5226 "[%s] has pending status\n",
a068643d 5227 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5228 tp->resumed = 1;
5229 continue;
5230 }
5231
c65d6b55
PA
5232 gdb_assert (!tp->stop_requested);
5233
372316f1
PA
5234 /* If some thread needs to start a step-over at this point, it
5235 should still be in the step-over queue, and thus skipped
5236 above. */
5237 if (thread_still_needs_step_over (tp))
5238 {
5239 internal_error (__FILE__, __LINE__,
5240 "thread [%s] needs a step-over, but not in "
5241 "step-over queue\n",
a068643d 5242 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5243 }
5244
5245 if (currently_stepping (tp))
5246 {
5247 if (debug_infrun)
5248 fprintf_unfiltered (gdb_stdlog,
5249 "infrun: restart threads: [%s] was stepping\n",
a068643d 5250 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5251 keep_going_stepped_thread (tp);
5252 }
5253 else
5254 {
5255 struct execution_control_state ecss;
5256 struct execution_control_state *ecs = &ecss;
5257
5258 if (debug_infrun)
5259 fprintf_unfiltered (gdb_stdlog,
5260 "infrun: restart threads: [%s] continuing\n",
a068643d 5261 target_pid_to_str (tp->ptid).c_str ());
372316f1 5262 reset_ecs (ecs, tp);
00431a78 5263 switch_to_thread (tp);
372316f1
PA
5264 keep_going_pass_signal (ecs);
5265 }
5266 }
5267}
5268
5269/* Callback for iterate_over_threads. Find a resumed thread that has
5270 a pending waitstatus. */
5271
5272static int
5273resumed_thread_with_pending_status (struct thread_info *tp,
5274 void *arg)
5275{
5276 return (tp->resumed
5277 && tp->suspend.waitstatus_pending_p);
5278}
5279
5280/* Called when we get an event that may finish an in-line or
5281 out-of-line (displaced stepping) step-over started previously.
5282 Return true if the event is processed and we should go back to the
5283 event loop; false if the caller should continue processing the
5284 event. */
5285
5286static int
4d9d9d04
PA
5287finish_step_over (struct execution_control_state *ecs)
5288{
372316f1
PA
5289 int had_step_over_info;
5290
00431a78 5291 displaced_step_fixup (ecs->event_thread,
4d9d9d04
PA
5292 ecs->event_thread->suspend.stop_signal);
5293
372316f1
PA
5294 had_step_over_info = step_over_info_valid_p ();
5295
5296 if (had_step_over_info)
4d9d9d04
PA
5297 {
5298 /* If we're stepping over a breakpoint with all threads locked,
5299 then only the thread that was stepped should be reporting
5300 back an event. */
5301 gdb_assert (ecs->event_thread->control.trap_expected);
5302
c65d6b55 5303 clear_step_over_info ();
4d9d9d04
PA
5304 }
5305
fbea99ea 5306 if (!target_is_non_stop_p ())
372316f1 5307 return 0;
4d9d9d04
PA
5308
5309 /* Start a new step-over in another thread if there's one that
5310 needs it. */
5311 start_step_over ();
372316f1
PA
5312
5313 /* If we were stepping over a breakpoint before, and haven't started
5314 a new in-line step-over sequence, then restart all other threads
5315 (except the event thread). We can't do this in all-stop, as then
5316 e.g., we wouldn't be able to issue any other remote packet until
5317 these other threads stop. */
5318 if (had_step_over_info && !step_over_info_valid_p ())
5319 {
5320 struct thread_info *pending;
5321
5322 /* If we only have threads with pending statuses, the restart
5323 below won't restart any thread and so nothing re-inserts the
5324 breakpoint we just stepped over. But we need it inserted
5325 when we later process the pending events, otherwise if
5326 another thread has a pending event for this breakpoint too,
5327 we'd discard its event (because the breakpoint that
5328 originally caused the event was no longer inserted). */
00431a78 5329 context_switch (ecs);
372316f1
PA
5330 insert_breakpoints ();
5331
5332 restart_threads (ecs->event_thread);
5333
5334 /* If we have events pending, go through handle_inferior_event
5335 again, picking up a pending event at random. This avoids
5336 thread starvation. */
5337
5338 /* But not if we just stepped over a watchpoint in order to let
5339 the instruction execute so we can evaluate its expression.
5340 The set of watchpoints that triggered is recorded in the
5341 breakpoint objects themselves (see bp->watchpoint_triggered).
5342 If we processed another event first, that other event could
5343 clobber this info. */
5344 if (ecs->event_thread->stepping_over_watchpoint)
5345 return 0;
5346
5347 pending = iterate_over_threads (resumed_thread_with_pending_status,
5348 NULL);
5349 if (pending != NULL)
5350 {
5351 struct thread_info *tp = ecs->event_thread;
5352 struct regcache *regcache;
5353
5354 if (debug_infrun)
5355 {
5356 fprintf_unfiltered (gdb_stdlog,
5357 "infrun: found resumed threads with "
5358 "pending events, saving status\n");
5359 }
5360
5361 gdb_assert (pending != tp);
5362
5363 /* Record the event thread's event for later. */
5364 save_waitstatus (tp, &ecs->ws);
5365 /* This was cleared early, by handle_inferior_event. Set it
5366 so this pending event is considered by
5367 do_target_wait. */
5368 tp->resumed = 1;
5369
5370 gdb_assert (!tp->executing);
5371
00431a78 5372 regcache = get_thread_regcache (tp);
372316f1
PA
5373 tp->suspend.stop_pc = regcache_read_pc (regcache);
5374
5375 if (debug_infrun)
5376 {
5377 fprintf_unfiltered (gdb_stdlog,
5378 "infrun: saved stop_pc=%s for %s "
5379 "(currently_stepping=%d)\n",
5380 paddress (target_gdbarch (),
5381 tp->suspend.stop_pc),
a068643d 5382 target_pid_to_str (tp->ptid).c_str (),
372316f1
PA
5383 currently_stepping (tp));
5384 }
5385
5386 /* This in-line step-over finished; clear this so we won't
5387 start a new one. This is what handle_signal_stop would
5388 do, if we returned false. */
5389 tp->stepping_over_breakpoint = 0;
5390
5391 /* Wake up the event loop again. */
5392 mark_async_event_handler (infrun_async_inferior_event_token);
5393
5394 prepare_to_wait (ecs);
5395 return 1;
5396 }
5397 }
5398
5399 return 0;
4d9d9d04
PA
5400}
5401
4f5d7f63
PA
5402/* Come here when the program has stopped with a signal. */
5403
5404static void
5405handle_signal_stop (struct execution_control_state *ecs)
5406{
5407 struct frame_info *frame;
5408 struct gdbarch *gdbarch;
5409 int stopped_by_watchpoint;
5410 enum stop_kind stop_soon;
5411 int random_signal;
c906108c 5412
f0407826
DE
5413 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
5414
c65d6b55
PA
5415 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
5416
f0407826
DE
5417 /* Do we need to clean up the state of a thread that has
5418 completed a displaced single-step? (Doing so usually affects
5419 the PC, so do it here, before we set stop_pc.) */
372316f1
PA
5420 if (finish_step_over (ecs))
5421 return;
f0407826
DE
5422
5423 /* If we either finished a single-step or hit a breakpoint, but
5424 the user wanted this thread to be stopped, pretend we got a
5425 SIG0 (generic unsignaled stop). */
5426 if (ecs->event_thread->stop_requested
5427 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
5428 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
237fc4c9 5429
f2ffa92b
PA
5430 ecs->event_thread->suspend.stop_pc
5431 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
488f131b 5432
527159b7 5433 if (debug_infrun)
237fc4c9 5434 {
00431a78 5435 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
b926417a 5436 struct gdbarch *reg_gdbarch = regcache->arch ();
2989a365 5437 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
7f82dfc7
JK
5438
5439 inferior_ptid = ecs->ptid;
5af949e3
UW
5440
5441 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
b926417a 5442 paddress (reg_gdbarch,
f2ffa92b 5443 ecs->event_thread->suspend.stop_pc));
d92524f1 5444 if (target_stopped_by_watchpoint ())
237fc4c9
PA
5445 {
5446 CORE_ADDR addr;
abbb1732 5447
237fc4c9
PA
5448 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
5449
8b88a78e 5450 if (target_stopped_data_address (current_top_target (), &addr))
237fc4c9 5451 fprintf_unfiltered (gdb_stdlog,
5af949e3 5452 "infrun: stopped data address = %s\n",
b926417a 5453 paddress (reg_gdbarch, addr));
237fc4c9
PA
5454 else
5455 fprintf_unfiltered (gdb_stdlog,
5456 "infrun: (no data address available)\n");
5457 }
5458 }
527159b7 5459
36fa8042
PA
 5460	  /* This originates from start_remote(), start_inferior() and
 5461	     the shared library hook functions.  */
00431a78 5462 stop_soon = get_inferior_stop_soon (ecs);
36fa8042
PA
5463 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
5464 {
00431a78 5465 context_switch (ecs);
36fa8042
PA
5466 if (debug_infrun)
5467 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
5468 stop_print_frame = 1;
22bcd14b 5469 stop_waiting (ecs);
36fa8042
PA
5470 return;
5471 }
5472
36fa8042
PA
5473 /* This originates from attach_command(). We need to overwrite
5474 the stop_signal here, because some kernels don't ignore a
5475 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
5476 See more comments in inferior.h. On the other hand, if we
5477 get a non-SIGSTOP, report it to the user - assume the backend
5478 will handle the SIGSTOP if it should show up later.
5479
5480 Also consider that the attach is complete when we see a
5481 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
5482 target extended-remote report it instead of a SIGSTOP
5483 (e.g. gdbserver). We already rely on SIGTRAP being our
5484 signal, so this is no exception.
5485
5486 Also consider that the attach is complete when we see a
5487 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
5488 the target to stop all threads of the inferior, in case the
5489 low level attach operation doesn't stop them implicitly. If
5490 they weren't stopped implicitly, then the stub will report a
5491 GDB_SIGNAL_0, meaning: stopped for no particular reason
5492 other than GDB's request. */
5493 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
5494 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
5495 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5496 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
5497 {
5498 stop_print_frame = 1;
22bcd14b 5499 stop_waiting (ecs);
36fa8042
PA
5500 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5501 return;
5502 }
5503
488f131b 5504 /* See if something interesting happened to the non-current thread. If
b40c7d58 5505 so, then switch to that thread. */
d7e15655 5506 if (ecs->ptid != inferior_ptid)
488f131b 5507 {
527159b7 5508 if (debug_infrun)
8a9de0e4 5509 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
527159b7 5510
00431a78 5511 context_switch (ecs);
c5aa993b 5512
9a4105ab 5513 if (deprecated_context_hook)
00431a78 5514 deprecated_context_hook (ecs->event_thread->global_num);
488f131b 5515 }
c906108c 5516
568d6575
UW
5517 /* At this point, get hold of the now-current thread's frame. */
5518 frame = get_current_frame ();
5519 gdbarch = get_frame_arch (frame);
5520
2adfaa28 5521 /* Pull the single step breakpoints out of the target. */
af48d08f 5522 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
488f131b 5523 {
af48d08f 5524 struct regcache *regcache;
af48d08f 5525 CORE_ADDR pc;
2adfaa28 5526
00431a78 5527 regcache = get_thread_regcache (ecs->event_thread);
8b86c959
YQ
5528 const address_space *aspace = regcache->aspace ();
5529
af48d08f 5530 pc = regcache_read_pc (regcache);
34b7e8a6 5531
af48d08f
PA
5532 /* However, before doing so, if this single-step breakpoint was
5533 actually for another thread, set this thread up for moving
5534 past it. */
5535 if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
5536 aspace, pc))
5537 {
5538 if (single_step_breakpoint_inserted_here_p (aspace, pc))
2adfaa28
PA
5539 {
5540 if (debug_infrun)
5541 {
5542 fprintf_unfiltered (gdb_stdlog,
af48d08f 5543 "infrun: [%s] hit another thread's "
34b7e8a6 5544 "single-step breakpoint\n",
a068643d 5545 target_pid_to_str (ecs->ptid).c_str ());
2adfaa28 5546 }
af48d08f
PA
5547 ecs->hit_singlestep_breakpoint = 1;
5548 }
5549 }
5550 else
5551 {
5552 if (debug_infrun)
5553 {
5554 fprintf_unfiltered (gdb_stdlog,
5555 "infrun: [%s] hit its "
5556 "single-step breakpoint\n",
a068643d 5557 target_pid_to_str (ecs->ptid).c_str ());
2adfaa28
PA
5558 }
5559 }
488f131b 5560 }
af48d08f 5561 delete_just_stopped_threads_single_step_breakpoints ();
c906108c 5562
963f9c80
PA
5563 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5564 && ecs->event_thread->control.trap_expected
5565 && ecs->event_thread->stepping_over_watchpoint)
d983da9c
DJ
5566 stopped_by_watchpoint = 0;
5567 else
5568 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
5569
5570 /* If necessary, step over this watchpoint. We'll be back to display
5571 it in a moment. */
5572 if (stopped_by_watchpoint
d92524f1 5573 && (target_have_steppable_watchpoint
568d6575 5574 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
488f131b 5575 {
488f131b
JB
5576 /* At this point, we are stopped at an instruction which has
5577 attempted to write to a piece of memory under control of
5578 a watchpoint. The instruction hasn't actually executed
5579 yet. If we were to evaluate the watchpoint expression
5580 now, we would get the old value, and therefore no change
5581 would seem to have occurred.
5582
5583 In order to make watchpoints work `right', we really need
5584 to complete the memory write, and then evaluate the
d983da9c
DJ
5585 watchpoint expression. We do this by single-stepping the
5586 target.
5587
7f89fd65 5588 It may not be necessary to disable the watchpoint to step over
d983da9c
DJ
5589 it. For example, the PA can (with some kernel cooperation)
5590 single step over a watchpoint without disabling the watchpoint.
5591
5592 It is far more common to need to disable a watchpoint to step
5593 the inferior over it. If we have non-steppable watchpoints,
5594 we must disable the current watchpoint; it's simplest to
963f9c80
PA
5595 disable all watchpoints.
5596
5597 Any breakpoint at PC must also be stepped over -- if there's
5598 one, it will have already triggered before the watchpoint
5599 triggered, and we either already reported it to the user, or
5600 it didn't cause a stop and we called keep_going. In either
5601 case, if there was a breakpoint at PC, we must be trying to
5602 step past it. */
5603 ecs->event_thread->stepping_over_watchpoint = 1;
5604 keep_going (ecs);
488f131b
JB
5605 return;
5606 }
5607
4e1c45ea 5608 ecs->event_thread->stepping_over_breakpoint = 0;
963f9c80 5609 ecs->event_thread->stepping_over_watchpoint = 0;
16c381f0
JK
5610 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
5611 ecs->event_thread->control.stop_step = 0;
488f131b 5612 stop_print_frame = 1;
488f131b 5613 stopped_by_random_signal = 0;
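  /* Breakpoint chain at the stop PC.  It is built early only if we end
     up calling skip_inline_frames below, and is then reused by the
     bpstat_stop_status call further down.  */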
ddfe970e 5614 bpstat stop_chain = NULL;
488f131b 5615
edb3359d
DJ
5616 /* Hide inlined functions starting here, unless we just performed stepi or
5617 nexti. After stepi and nexti, always show the innermost frame (not any
5618 inline function call sites). */
16c381f0 5619 if (ecs->event_thread->control.step_range_end != 1)
0574c78f 5620 {
00431a78
PA
5621 const address_space *aspace
5622 = get_thread_regcache (ecs->event_thread)->aspace ();
0574c78f
GB
5623
5624 /* skip_inline_frames is expensive, so we avoid it if we can
5625 determine that the address is one where functions cannot have
5626 been inlined. This improves performance with inferiors that
5627 load a lot of shared libraries, because the solib event
5628 breakpoint is defined as the address of a function (i.e. not
5629 inline). Note that we have to check the previous PC as well
5630 as the current one to catch cases when we have just
5631 single-stepped off a breakpoint prior to reinstating it.
5632 Note that we're assuming that the code we single-step to is
5633 not inline, but that's not definitive: there's nothing
5634 preventing the event breakpoint function from containing
5635 inlined code, and the single-step ending up there. If the
5636 user had set a breakpoint on that inlined code, the missing
5637 skip_inline_frames call would break things. Fortunately
5638 that's an extremely unlikely scenario. */
f2ffa92b
PA
5639 if (!pc_at_non_inline_function (aspace,
5640 ecs->event_thread->suspend.stop_pc,
5641 &ecs->ws)
a210c238
MR
5642 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5643 && ecs->event_thread->control.trap_expected
5644 && pc_at_non_inline_function (aspace,
5645 ecs->event_thread->prev_pc,
09ac7c10 5646 &ecs->ws)))
1c5a993e 5647 {
f2ffa92b
PA
5648 stop_chain = build_bpstat_chain (aspace,
5649 ecs->event_thread->suspend.stop_pc,
5650 &ecs->ws);
00431a78 5651 skip_inline_frames (ecs->event_thread, stop_chain);
1c5a993e
MR
5652
5653 /* Re-fetch current thread's frame in case that invalidated
5654 the frame cache. */
5655 frame = get_current_frame ();
5656 gdbarch = get_frame_arch (frame);
5657 }
0574c78f 5658 }
edb3359d 5659
a493e3e2 5660 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
16c381f0 5661 && ecs->event_thread->control.trap_expected
568d6575 5662 && gdbarch_single_step_through_delay_p (gdbarch)
4e1c45ea 5663 && currently_stepping (ecs->event_thread))
3352ef37 5664 {
b50d7442 5665 /* We're trying to step off a breakpoint. Turns out that we're
3352ef37 5666 also on an instruction that needs to be stepped multiple
1777feb0 5667	 times before it has fully executed.  E.g., architectures
3352ef37
AC
5668 with a delay slot. It needs to be stepped twice, once for
5669 the instruction and once for the delay slot. */
5670 int step_through_delay
568d6575 5671 = gdbarch_single_step_through_delay (gdbarch, frame);
abbb1732 5672
527159b7 5673 if (debug_infrun && step_through_delay)
8a9de0e4 5674 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
16c381f0
JK
5675 if (ecs->event_thread->control.step_range_end == 0
5676 && step_through_delay)
3352ef37
AC
5677 {
5678 /* The user issued a continue when stopped at a breakpoint.
5679 Set up for another trap and get out of here. */
4e1c45ea 5680 ecs->event_thread->stepping_over_breakpoint = 1;
3352ef37
AC
5681 keep_going (ecs);
5682 return;
5683 }
5684 else if (step_through_delay)
5685 {
5686 /* The user issued a step when stopped at a breakpoint.
5687 Maybe we should stop, maybe we should not - the delay
5688 slot *might* correspond to a line of source. In any
ca67fcb8
VP
5689 case, don't decide that here, just set
5690 ecs->stepping_over_breakpoint, making sure we
5691 single-step again before breakpoints are re-inserted. */
4e1c45ea 5692 ecs->event_thread->stepping_over_breakpoint = 1;
3352ef37
AC
5693 }
5694 }
5695
ab04a2af
TT
5696 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
5697 handles this event. */
5698 ecs->event_thread->control.stop_bpstat
a01bda52 5699 = bpstat_stop_status (get_current_regcache ()->aspace (),
f2ffa92b
PA
5700 ecs->event_thread->suspend.stop_pc,
5701 ecs->event_thread, &ecs->ws, stop_chain);
db82e815 5702
ab04a2af
TT
 5703   /* Set this again following the bpstat computation, in case a
 5704      breakpoint condition called a function.  */
5705 stop_print_frame = 1;
73dd234f 5706
ab04a2af
TT
5707 /* This is where we handle "moribund" watchpoints. Unlike
 5708     software breakpoint traps, hardware watchpoint traps are
5709 always distinguishable from random traps. If no high-level
5710 watchpoint is associated with the reported stop data address
5711 anymore, then the bpstat does not explain the signal ---
5712 simply make sure to ignore it if `stopped_by_watchpoint' is
5713 set. */
5714
5715 if (debug_infrun
5716 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
47591c29 5717 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
427cd150 5718 GDB_SIGNAL_TRAP)
ab04a2af
TT
5719 && stopped_by_watchpoint)
5720 fprintf_unfiltered (gdb_stdlog,
5721 "infrun: no user watchpoint explains "
5722 "watchpoint SIGTRAP, ignoring\n");
73dd234f 5723
bac7d97b 5724 /* NOTE: cagney/2003-03-29: These checks for a random signal
ab04a2af
TT
5725 at one stage in the past included checks for an inferior
5726 function call's call dummy's return breakpoint. The original
5727 comment, that went with the test, read:
03cebad2 5728
ab04a2af
TT
5729 ``End of a stack dummy. Some systems (e.g. Sony news) give
5730 another signal besides SIGTRAP, so check here as well as
5731 above.''
73dd234f 5732
ab04a2af
TT
 5733     If someone ever tries to get call dummies on a
5734 non-executable stack to work (where the target would stop
5735 with something like a SIGSEGV), then those tests might need
5736 to be re-instated. Given, however, that the tests were only
5737 enabled when momentary breakpoints were not being used, I
5738 suspect that it won't be the case.
488f131b 5739
ab04a2af
TT
5740 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
5741 be necessary for call dummies on a non-executable stack on
5742 SPARC. */
488f131b 5743
bac7d97b 5744 /* See if the breakpoints module can explain the signal. */
47591c29
PA
5745 random_signal
5746 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
5747 ecs->event_thread->suspend.stop_signal);
bac7d97b 5748
1cf4d951
PA
5749 /* Maybe this was a trap for a software breakpoint that has since
5750 been removed. */
5751 if (random_signal && target_stopped_by_sw_breakpoint ())
5752 {
f2ffa92b
PA
5753 if (program_breakpoint_here_p (gdbarch,
5754 ecs->event_thread->suspend.stop_pc))
1cf4d951
PA
5755 {
5756 struct regcache *regcache;
5757 int decr_pc;
5758
5759 /* Re-adjust PC to what the program would see if GDB was not
5760 debugging it. */
00431a78 5761 regcache = get_thread_regcache (ecs->event_thread);
527a273a 5762 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
1cf4d951
PA
5763 if (decr_pc != 0)
5764 {
07036511
TT
5765 gdb::optional<scoped_restore_tmpl<int>>
5766 restore_operation_disable;
1cf4d951
PA
5767
5768 if (record_full_is_used ())
07036511
TT
5769 restore_operation_disable.emplace
5770 (record_full_gdb_operation_disable_set ());
1cf4d951 5771
f2ffa92b
PA
5772 regcache_write_pc (regcache,
5773 ecs->event_thread->suspend.stop_pc + decr_pc);
1cf4d951
PA
5774 }
5775 }
5776 else
5777 {
5778 /* A delayed software breakpoint event. Ignore the trap. */
5779 if (debug_infrun)
5780 fprintf_unfiltered (gdb_stdlog,
5781 "infrun: delayed software breakpoint "
5782 "trap, ignoring\n");
5783 random_signal = 0;
5784 }
5785 }
5786
5787 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
5788 has since been removed. */
5789 if (random_signal && target_stopped_by_hw_breakpoint ())
5790 {
5791 /* A delayed hardware breakpoint event. Ignore the trap. */
5792 if (debug_infrun)
5793 fprintf_unfiltered (gdb_stdlog,
5794 "infrun: delayed hardware breakpoint/watchpoint "
5795 "trap, ignoring\n");
5796 random_signal = 0;
5797 }
5798
bac7d97b
PA
5799 /* If not, perhaps stepping/nexting can. */
5800 if (random_signal)
5801 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5802 && currently_stepping (ecs->event_thread));
ab04a2af 5803
2adfaa28
PA
5804 /* Perhaps the thread hit a single-step breakpoint of _another_
5805 thread. Single-step breakpoints are transparent to the
5806 breakpoints module. */
5807 if (random_signal)
5808 random_signal = !ecs->hit_singlestep_breakpoint;
5809
bac7d97b
PA
5810 /* No? Perhaps we got a moribund watchpoint. */
5811 if (random_signal)
5812 random_signal = !stopped_by_watchpoint;
ab04a2af 5813
c65d6b55
PA
5814 /* Always stop if the user explicitly requested this thread to
5815 remain stopped. */
5816 if (ecs->event_thread->stop_requested)
5817 {
5818 random_signal = 1;
5819 if (debug_infrun)
5820 fprintf_unfiltered (gdb_stdlog, "infrun: user-requested stop\n");
5821 }
5822
488f131b
JB
5823 /* For the program's own signals, act according to
5824 the signal handling tables. */
5825
ce12b012 5826 if (random_signal)
488f131b
JB
5827 {
5828 /* Signal not for debugging purposes. */
c9657e70 5829 struct inferior *inf = find_inferior_ptid (ecs->ptid);
c9737c08 5830 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
488f131b 5831
527159b7 5832 if (debug_infrun)
c9737c08
PA
5833 fprintf_unfiltered (gdb_stdlog, "infrun: random signal (%s)\n",
5834 gdb_signal_to_symbol_string (stop_signal));
527159b7 5835
488f131b
JB
5836 stopped_by_random_signal = 1;
5837
252fbfc8
PA
5838 /* Always stop on signals if we're either just gaining control
5839 of the program, or the user explicitly requested this thread
5840 to remain stopped. */
d6b48e9c 5841 if (stop_soon != NO_STOP_QUIETLY
252fbfc8 5842 || ecs->event_thread->stop_requested
24291992 5843 || (!inf->detaching
16c381f0 5844 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
488f131b 5845 {
22bcd14b 5846 stop_waiting (ecs);
488f131b
JB
5847 return;
5848 }
b57bacec
PA
5849
5850 /* Notify observers the signal has "handle print" set. Note we
5851 returned early above if stopping; normal_stop handles the
5852 printing in that case. */
5853 if (signal_print[ecs->event_thread->suspend.stop_signal])
5854 {
5855 /* The signal table tells us to print about this signal. */
223ffa71 5856 target_terminal::ours_for_output ();
76727919 5857 gdb::observers::signal_received.notify (ecs->event_thread->suspend.stop_signal);
223ffa71 5858 target_terminal::inferior ();
b57bacec 5859 }
488f131b
JB
5860
5861 /* Clear the signal if it should not be passed. */
16c381f0 5862 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
a493e3e2 5863 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
488f131b 5864
f2ffa92b 5865 if (ecs->event_thread->prev_pc == ecs->event_thread->suspend.stop_pc
16c381f0 5866 && ecs->event_thread->control.trap_expected
8358c15c 5867 && ecs->event_thread->control.step_resume_breakpoint == NULL)
68f53502
AC
5868 {
5869 /* We were just starting a new sequence, attempting to
5870 single-step off of a breakpoint and expecting a SIGTRAP.
237fc4c9 5871 Instead this signal arrives. This signal will take us out
68f53502
AC
5872 of the stepping range so GDB needs to remember to, when
5873 the signal handler returns, resume stepping off that
5874 breakpoint. */
5875 /* To simplify things, "continue" is forced to use the same
5876 code paths as single-step - set a breakpoint at the
5877 signal return address and then, once hit, step off that
5878 breakpoint. */
237fc4c9
PA
5879 if (debug_infrun)
5880 fprintf_unfiltered (gdb_stdlog,
5881 "infrun: signal arrived while stepping over "
5882 "breakpoint\n");
d3169d93 5883
2c03e5be 5884 insert_hp_step_resume_breakpoint_at_frame (frame);
4e1c45ea 5885 ecs->event_thread->step_after_step_resume_breakpoint = 1;
2455069d
UW
5886 /* Reset trap_expected to ensure breakpoints are re-inserted. */
5887 ecs->event_thread->control.trap_expected = 0;
d137e6dc
PA
5888
5889 /* If we were nexting/stepping some other thread, switch to
5890 it, so that we don't continue it, losing control. */
5891 if (!switch_back_to_stepped_thread (ecs))
5892 keep_going (ecs);
9d799f85 5893 return;
68f53502 5894 }
9d799f85 5895
e5f8a7cc 5896 if (ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
f2ffa92b
PA
5897 && (pc_in_thread_step_range (ecs->event_thread->suspend.stop_pc,
5898 ecs->event_thread)
e5f8a7cc 5899 || ecs->event_thread->control.step_range_end == 1)
edb3359d 5900 && frame_id_eq (get_stack_frame_id (frame),
16c381f0 5901 ecs->event_thread->control.step_stack_frame_id)
8358c15c 5902 && ecs->event_thread->control.step_resume_breakpoint == NULL)
d303a6c7
AC
5903 {
5904 /* The inferior is about to take a signal that will take it
5905 out of the single step range. Set a breakpoint at the
5906 current PC (which is presumably where the signal handler
5907 will eventually return) and then allow the inferior to
5908 run free.
5909
5910 Note that this is only needed for a signal delivered
5911 while in the single-step range. Nested signals aren't a
5912 problem as they eventually all return. */
237fc4c9
PA
5913 if (debug_infrun)
5914 fprintf_unfiltered (gdb_stdlog,
5915 "infrun: signal may take us out of "
5916 "single-step range\n");
5917
372316f1 5918 clear_step_over_info ();
2c03e5be 5919 insert_hp_step_resume_breakpoint_at_frame (frame);
e5f8a7cc 5920 ecs->event_thread->step_after_step_resume_breakpoint = 1;
2455069d
UW
5921 /* Reset trap_expected to ensure breakpoints are re-inserted. */
5922 ecs->event_thread->control.trap_expected = 0;
9d799f85
AC
5923 keep_going (ecs);
5924 return;
d303a6c7 5925 }
9d799f85
AC
5926
 5927      /* Note: step_resume_breakpoint may be non-NULL.  This occurs
5928 when either there's a nested signal, or when there's a
5929 pending signal enabled just as the signal handler returns
5930 (leaving the inferior at the step-resume-breakpoint without
5931 actually executing it). Either way continue until the
5932 breakpoint is really hit. */
c447ac0b
PA
5933
5934 if (!switch_back_to_stepped_thread (ecs))
5935 {
5936 if (debug_infrun)
5937 fprintf_unfiltered (gdb_stdlog,
5938 "infrun: random signal, keep going\n");
5939
5940 keep_going (ecs);
5941 }
5942 return;
488f131b 5943 }
94c57d6a
PA
5944
5945 process_event_stop_test (ecs);
5946}
5947
5948/* Come here when we've got some debug event / signal we can explain
5949 (IOW, not a random signal), and test whether it should cause a
5950 stop, or whether we should resume the inferior (transparently).
5951 E.g., could be a breakpoint whose condition evaluates false; we
5952 could be still stepping within the line; etc. */
5953
5954static void
5955process_event_stop_test (struct execution_control_state *ecs)
5956{
5957 struct symtab_and_line stop_pc_sal;
5958 struct frame_info *frame;
5959 struct gdbarch *gdbarch;
cdaa5b73
PA
5960 CORE_ADDR jmp_buf_pc;
5961 struct bpstat_what what;
94c57d6a 5962
cdaa5b73 5963 /* Handle cases caused by hitting a breakpoint. */
611c83ae 5964
cdaa5b73
PA
5965 frame = get_current_frame ();
5966 gdbarch = get_frame_arch (frame);
fcf3daef 5967
cdaa5b73 5968 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
611c83ae 5969
cdaa5b73
PA
5970 if (what.call_dummy)
5971 {
5972 stop_stack_dummy = what.call_dummy;
5973 }
186c406b 5974
243a9253
PA
5975 /* A few breakpoint types have callbacks associated (e.g.,
5976 bp_jit_event). Run them now. */
5977 bpstat_run_callbacks (ecs->event_thread->control.stop_bpstat);
5978
cdaa5b73
PA
5979 /* If we hit an internal event that triggers symbol changes, the
5980 current frame will be invalidated within bpstat_what (e.g., if we
5981 hit an internal solib event). Re-fetch it. */
5982 frame = get_current_frame ();
5983 gdbarch = get_frame_arch (frame);
e2e4d78b 5984
cdaa5b73
PA
5985 switch (what.main_action)
5986 {
5987 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
5988 /* If we hit the breakpoint at longjmp while stepping, we
5989 install a momentary breakpoint at the target of the
5990 jmp_buf. */
186c406b 5991
cdaa5b73
PA
5992 if (debug_infrun)
5993 fprintf_unfiltered (gdb_stdlog,
5994 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
186c406b 5995
cdaa5b73 5996 ecs->event_thread->stepping_over_breakpoint = 1;
611c83ae 5997
cdaa5b73
PA
5998 if (what.is_longjmp)
5999 {
6000 struct value *arg_value;
6001
6002 /* If we set the longjmp breakpoint via a SystemTap probe,
6003 then use it to extract the arguments. The destination PC
6004 is the third argument to the probe. */
6005 arg_value = probe_safe_evaluate_at_pc (frame, 2);
6006 if (arg_value)
8fa0c4f8
AA
6007 {
6008 jmp_buf_pc = value_as_address (arg_value);
6009 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
6010 }
cdaa5b73
PA
6011 else if (!gdbarch_get_longjmp_target_p (gdbarch)
6012 || !gdbarch_get_longjmp_target (gdbarch,
6013 frame, &jmp_buf_pc))
e2e4d78b 6014 {
cdaa5b73
PA
6015 if (debug_infrun)
6016 fprintf_unfiltered (gdb_stdlog,
6017 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
6018 "(!gdbarch_get_longjmp_target)\n");
6019 keep_going (ecs);
6020 return;
e2e4d78b 6021 }
e2e4d78b 6022
cdaa5b73
PA
6023 /* Insert a breakpoint at resume address. */
6024 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
6025 }
6026 else
6027 check_exception_resume (ecs, frame);
6028 keep_going (ecs);
6029 return;
e81a37f7 6030
cdaa5b73
PA
6031 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
6032 {
6033 struct frame_info *init_frame;
e81a37f7 6034
cdaa5b73 6035 /* There are several cases to consider.
c906108c 6036
cdaa5b73
PA
6037 1. The initiating frame no longer exists. In this case we
6038 must stop, because the exception or longjmp has gone too
6039 far.
2c03e5be 6040
cdaa5b73
PA
6041 2. The initiating frame exists, and is the same as the
6042 current frame. We stop, because the exception or longjmp
6043 has been caught.
2c03e5be 6044
cdaa5b73
PA
6045 3. The initiating frame exists and is different from the
6046 current frame. This means the exception or longjmp has
6047 been caught beneath the initiating frame, so keep going.
c906108c 6048
cdaa5b73
PA
6049 4. longjmp breakpoint has been placed just to protect
6050 against stale dummy frames and user is not interested in
6051 stopping around longjmps. */
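	/* The checks below map onto these cases as follows: case 4 is
	   detected by an invalid initiating frame ID; case 1 by
	   frame_find_by_id failing to find the initiating frame; and
	   cases 2 and 3 by comparing the current frame's ID against
	   the initiating frame's ID.  */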
c5aa993b 6052
cdaa5b73
PA
6053 if (debug_infrun)
6054 fprintf_unfiltered (gdb_stdlog,
6055 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
c5aa993b 6056
cdaa5b73
PA
6057 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
6058 != NULL);
6059 delete_exception_resume_breakpoint (ecs->event_thread);
c5aa993b 6060
cdaa5b73
PA
6061 if (what.is_longjmp)
6062 {
b67a2c6f 6063 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
c5aa993b 6064
cdaa5b73 6065 if (!frame_id_p (ecs->event_thread->initiating_frame))
e5ef252a 6066 {
cdaa5b73
PA
6067 /* Case 4. */
6068 keep_going (ecs);
6069 return;
e5ef252a 6070 }
cdaa5b73 6071 }
c5aa993b 6072
cdaa5b73 6073 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
527159b7 6074
cdaa5b73
PA
6075 if (init_frame)
6076 {
6077 struct frame_id current_id
6078 = get_frame_id (get_current_frame ());
6079 if (frame_id_eq (current_id,
6080 ecs->event_thread->initiating_frame))
6081 {
6082 /* Case 2. Fall through. */
6083 }
6084 else
6085 {
6086 /* Case 3. */
6087 keep_going (ecs);
6088 return;
6089 }
68f53502 6090 }
488f131b 6091
cdaa5b73
PA
6092 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
6093 exists. */
6094 delete_step_resume_breakpoint (ecs->event_thread);
e5ef252a 6095
bdc36728 6096 end_stepping_range (ecs);
cdaa5b73
PA
6097 }
6098 return;
e5ef252a 6099
cdaa5b73
PA
6100 case BPSTAT_WHAT_SINGLE:
6101 if (debug_infrun)
6102 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
6103 ecs->event_thread->stepping_over_breakpoint = 1;
6104 /* Still need to check other stuff, at least the case where we
6105 are stepping and step out of the right range. */
6106 break;
e5ef252a 6107
cdaa5b73
PA
6108 case BPSTAT_WHAT_STEP_RESUME:
6109 if (debug_infrun)
6110 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
e5ef252a 6111
cdaa5b73
PA
6112 delete_step_resume_breakpoint (ecs->event_thread);
6113 if (ecs->event_thread->control.proceed_to_finish
6114 && execution_direction == EXEC_REVERSE)
6115 {
6116 struct thread_info *tp = ecs->event_thread;
6117
6118 /* We are finishing a function in reverse, and just hit the
6119 step-resume breakpoint at the start address of the
6120 function, and we're almost there -- just need to back up
6121 by one more single-step, which should take us back to the
6122 function call. */
6123 tp->control.step_range_start = tp->control.step_range_end = 1;
6124 keep_going (ecs);
e5ef252a 6125 return;
cdaa5b73
PA
6126 }
6127 fill_in_stop_func (gdbarch, ecs);
f2ffa92b 6128 if (ecs->event_thread->suspend.stop_pc == ecs->stop_func_start
cdaa5b73
PA
6129 && execution_direction == EXEC_REVERSE)
6130 {
6131 /* We are stepping over a function call in reverse, and just
6132 hit the step-resume breakpoint at the start address of
6133 the function. Go back to single-stepping, which should
6134 take us back to the function call. */
6135 ecs->event_thread->stepping_over_breakpoint = 1;
6136 keep_going (ecs);
6137 return;
6138 }
6139 break;
e5ef252a 6140
cdaa5b73
PA
6141 case BPSTAT_WHAT_STOP_NOISY:
6142 if (debug_infrun)
6143 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
6144 stop_print_frame = 1;
e5ef252a 6145
99619bea
PA
 6146 /* Assume the thread stopped for a breakpoint. We'll still check
6147 whether a/the breakpoint is there when the thread is next
6148 resumed. */
6149 ecs->event_thread->stepping_over_breakpoint = 1;
e5ef252a 6150
22bcd14b 6151 stop_waiting (ecs);
cdaa5b73 6152 return;
e5ef252a 6153
cdaa5b73
PA
6154 case BPSTAT_WHAT_STOP_SILENT:
6155 if (debug_infrun)
6156 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
6157 stop_print_frame = 0;
e5ef252a 6158
99619bea
PA
 6159 /* Assume the thread stopped for a breakpoint. We'll still check
6160 whether a/the breakpoint is there when the thread is next
6161 resumed. */
6162 ecs->event_thread->stepping_over_breakpoint = 1;
22bcd14b 6163 stop_waiting (ecs);
cdaa5b73
PA
6164 return;
6165
6166 case BPSTAT_WHAT_HP_STEP_RESUME:
6167 if (debug_infrun)
6168 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
6169
6170 delete_step_resume_breakpoint (ecs->event_thread);
6171 if (ecs->event_thread->step_after_step_resume_breakpoint)
6172 {
6173 /* Back when the step-resume breakpoint was inserted, we
6174 were trying to single-step off a breakpoint. Go back to
6175 doing that. */
6176 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6177 ecs->event_thread->stepping_over_breakpoint = 1;
6178 keep_going (ecs);
6179 return;
e5ef252a 6180 }
cdaa5b73
PA
6181 break;
6182
6183 case BPSTAT_WHAT_KEEP_CHECKING:
6184 break;
e5ef252a 6185 }
c906108c 6186
af48d08f
PA
6187 /* If we stepped a permanent breakpoint and we had a high priority
6188 step-resume breakpoint for the address we stepped, but we didn't
6189 hit it, then we must have stepped into the signal handler. The
6190 step-resume was only necessary to catch the case of _not_
6191 stepping into the handler, so delete it, and fall through to
6192 checking whether the step finished. */
6193 if (ecs->event_thread->stepped_breakpoint)
6194 {
6195 struct breakpoint *sr_bp
6196 = ecs->event_thread->control.step_resume_breakpoint;
6197
8d707a12
PA
6198 if (sr_bp != NULL
6199 && sr_bp->loc->permanent
af48d08f
PA
6200 && sr_bp->type == bp_hp_step_resume
6201 && sr_bp->loc->address == ecs->event_thread->prev_pc)
6202 {
6203 if (debug_infrun)
6204 fprintf_unfiltered (gdb_stdlog,
6205 "infrun: stepped permanent breakpoint, stopped in "
6206 "handler\n");
6207 delete_step_resume_breakpoint (ecs->event_thread);
6208 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6209 }
6210 }
6211
cdaa5b73
PA
6212 /* We come here if we hit a breakpoint but should not stop for it.
6213 Possibly we also were stepping and should stop for that. So fall
6214 through and test for stepping. But, if not stepping, do not
6215 stop. */
c906108c 6216
a7212384
UW
6217 /* In all-stop mode, if we're currently stepping but have stopped in
6218 some other thread, we need to switch back to the stepped thread. */
c447ac0b
PA
6219 if (switch_back_to_stepped_thread (ecs))
6220 return;
776f04fa 6221
8358c15c 6222 if (ecs->event_thread->control.step_resume_breakpoint)
488f131b 6223 {
527159b7 6224 if (debug_infrun)
d3169d93
DJ
6225 fprintf_unfiltered (gdb_stdlog,
6226 "infrun: step-resume breakpoint is inserted\n");
527159b7 6227
488f131b
JB
6228 /* Having a step-resume breakpoint overrides anything
6229 else having to do with stepping commands until
6230 that breakpoint is reached. */
488f131b
JB
6231 keep_going (ecs);
6232 return;
6233 }
c5aa993b 6234
16c381f0 6235 if (ecs->event_thread->control.step_range_end == 0)
488f131b 6236 {
527159b7 6237 if (debug_infrun)
8a9de0e4 6238 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
488f131b 6239 /* Likewise if we aren't even stepping. */
488f131b
JB
6240 keep_going (ecs);
6241 return;
6242 }
c5aa993b 6243
4b7703ad
JB
6244 /* Re-fetch current thread's frame in case the code above caused
6245 the frame cache to be re-initialized, making our FRAME variable
6246 a dangling pointer. */
6247 frame = get_current_frame ();
628fe4e4 6248 gdbarch = get_frame_arch (frame);
7e324e48 6249 fill_in_stop_func (gdbarch, ecs);
4b7703ad 6250
488f131b 6251 /* If stepping through a line, keep going if still within it.
c906108c 6252
488f131b
JB
6253 Note that step_range_end is the address of the first instruction
6254 beyond the step range, and NOT the address of the last instruction
31410e84
MS
6255 within it!
6256
6257 Note also that during reverse execution, we may be stepping
6258 through a function epilogue and therefore must detect when
6259 the current-frame changes in the middle of a line. */
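  /* In other words: keep going while the stop PC is still within
     [step_range_start, step_range_end), and, when executing in
     reverse, while we are still in the frame in which the step was
     started.  */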
6260
f2ffa92b
PA
6261 if (pc_in_thread_step_range (ecs->event_thread->suspend.stop_pc,
6262 ecs->event_thread)
31410e84 6263 && (execution_direction != EXEC_REVERSE
388a8562 6264 || frame_id_eq (get_frame_id (frame),
16c381f0 6265 ecs->event_thread->control.step_frame_id)))
488f131b 6266 {
527159b7 6267 if (debug_infrun)
5af949e3
UW
6268 fprintf_unfiltered
6269 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
16c381f0
JK
6270 paddress (gdbarch, ecs->event_thread->control.step_range_start),
6271 paddress (gdbarch, ecs->event_thread->control.step_range_end));
b2175913 6272
c1e36e3e
PA
6273 /* Tentatively re-enable range stepping; `resume' disables it if
6274 necessary (e.g., if we're stepping over a breakpoint or we
6275 have software watchpoints). */
6276 ecs->event_thread->control.may_range_step = 1;
6277
b2175913
MS
6278 /* When stepping backward, stop at beginning of line range
6279 (unless it's the function entry point, in which case
6280 keep going back to the call point). */
f2ffa92b 6281 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
16c381f0 6282 if (stop_pc == ecs->event_thread->control.step_range_start
b2175913
MS
6283 && stop_pc != ecs->stop_func_start
6284 && execution_direction == EXEC_REVERSE)
bdc36728 6285 end_stepping_range (ecs);
b2175913
MS
6286 else
6287 keep_going (ecs);
6288
488f131b
JB
6289 return;
6290 }
c5aa993b 6291
488f131b 6292 /* We stepped out of the stepping range. */
c906108c 6293
488f131b 6294 /* If we are stepping at the source level and entered the runtime
388a8562
MS
6295 loader dynamic symbol resolution code...
6296
6297 EXEC_FORWARD: we keep on single stepping until we exit the run
6298 time loader code and reach the callee's address.
6299
6300 EXEC_REVERSE: we've already executed the callee (backward), and
6301 the runtime loader code is handled just like any other
6302 undebuggable function call. Now we need only keep stepping
6303 backward through the trampoline code, and that's handled further
6304 down, so there is nothing for us to do here. */
6305
6306 if (execution_direction != EXEC_REVERSE
16c381f0 6307 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
f2ffa92b 6308 && in_solib_dynsym_resolve_code (ecs->event_thread->suspend.stop_pc))
488f131b 6309 {
4c8c40e6 6310 CORE_ADDR pc_after_resolver =
f2ffa92b
PA
6311 gdbarch_skip_solib_resolver (gdbarch,
6312 ecs->event_thread->suspend.stop_pc);
c906108c 6313
527159b7 6314 if (debug_infrun)
3e43a32a
MS
6315 fprintf_unfiltered (gdb_stdlog,
6316 "infrun: stepped into dynsym resolve code\n");
527159b7 6317
488f131b
JB
6318 if (pc_after_resolver)
6319 {
6320 /* Set up a step-resume breakpoint at the address
6321 indicated by SKIP_SOLIB_RESOLVER. */
51abb421 6322 symtab_and_line sr_sal;
488f131b 6323 sr_sal.pc = pc_after_resolver;
6c95b8df 6324 sr_sal.pspace = get_frame_program_space (frame);
488f131b 6325
a6d9a66e
UW
6326 insert_step_resume_breakpoint_at_sal (gdbarch,
6327 sr_sal, null_frame_id);
c5aa993b 6328 }
c906108c 6329
488f131b
JB
6330 keep_going (ecs);
6331 return;
6332 }
c906108c 6333
1d509aa6
MM
6334 /* Step through an indirect branch thunk. */
6335 if (ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
f2ffa92b
PA
6336 && gdbarch_in_indirect_branch_thunk (gdbarch,
6337 ecs->event_thread->suspend.stop_pc))
1d509aa6
MM
6338 {
6339 if (debug_infrun)
6340 fprintf_unfiltered (gdb_stdlog,
6341 "infrun: stepped into indirect branch thunk\n");
6342 keep_going (ecs);
6343 return;
6344 }
6345
16c381f0
JK
6346 if (ecs->event_thread->control.step_range_end != 1
6347 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
6348 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
568d6575 6349 && get_frame_type (frame) == SIGTRAMP_FRAME)
488f131b 6350 {
527159b7 6351 if (debug_infrun)
3e43a32a
MS
6352 fprintf_unfiltered (gdb_stdlog,
6353 "infrun: stepped into signal trampoline\n");
42edda50 6354 /* The inferior, while doing a "step" or "next", has ended up in
8fb3e588
AC
6355 a signal trampoline (either by a signal being delivered or by
6356 the signal handler returning). Just single-step until the
6357 inferior leaves the trampoline (either by calling the handler
6358 or returning). */
488f131b
JB
6359 keep_going (ecs);
6360 return;
6361 }
c906108c 6362
14132e89
MR
6363 /* If we're in the return path from a shared library trampoline,
6364 we want to proceed through the trampoline when stepping. */
6365 /* macro/2012-04-25: This needs to come before the subroutine
6366 call check below as on some targets return trampolines look
6367 like subroutine calls (MIPS16 return thunks). */
6368 if (gdbarch_in_solib_return_trampoline (gdbarch,
f2ffa92b
PA
6369 ecs->event_thread->suspend.stop_pc,
6370 ecs->stop_func_name)
14132e89
MR
6371 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
6372 {
6373 /* Determine where this trampoline returns. */
f2ffa92b
PA
6374 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
6375 CORE_ADDR real_stop_pc
6376 = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
14132e89
MR
6377
6378 if (debug_infrun)
6379 fprintf_unfiltered (gdb_stdlog,
6380 "infrun: stepped into solib return tramp\n");
6381
6382 /* Only proceed through if we know where it's going. */
6383 if (real_stop_pc)
6384 {
6385 /* And put the step-breakpoint there and go until there. */
51abb421 6386 symtab_and_line sr_sal;
14132e89
MR
6387 sr_sal.pc = real_stop_pc;
6388 sr_sal.section = find_pc_overlay (sr_sal.pc);
6389 sr_sal.pspace = get_frame_program_space (frame);
6390
6391 /* Do not specify what the fp should be when we stop since
6392 on some machines the prologue is where the new fp value
6393 is established. */
6394 insert_step_resume_breakpoint_at_sal (gdbarch,
6395 sr_sal, null_frame_id);
6396
6397 /* Restart without fiddling with the step ranges or
6398 other state. */
6399 keep_going (ecs);
6400 return;
6401 }
6402 }
6403
c17eaafe
DJ
6404 /* Check for subroutine calls. The check for the current frame
6405 equalling the step ID is not necessary - the check of the
6406 previous frame's ID is sufficient - but it is a common case and
6407 cheaper than checking the previous frame's ID.
14e60db5
DJ
6408
6409 NOTE: frame_id_eq will never report two invalid frame IDs as
6410 being equal, so to get into this block, both the current and
6411 previous frame must have valid frame IDs. */
005ca36a
JB
6412 /* The outer_frame_id check is a heuristic to detect stepping
6413 through startup code. If we step over an instruction which
6414 sets the stack pointer from an invalid value to a valid value,
6415 we may detect that as a subroutine call from the mythical
6416 "outermost" function. This could be fixed by marking
6417 outermost frames as !stack_p,code_p,special_p. Then the
6418 initial outermost frame, before sp was valid, would
ce6cca6d 6419 have code_addr == &_start. See the comment in frame_id_eq
005ca36a 6420 for more. */
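  /* Spelled out, the condition below is: the current frame's stack
     ID differs from the one we started stepping in, the caller of
     the current frame *is* the frame we started stepping in, and
     either that frame was not the mythical outermost frame or we
     ended up in a different function than the one the step started
     in; in short, we stepped into a subroutine call.  */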
edb3359d 6421 if (!frame_id_eq (get_stack_frame_id (frame),
16c381f0 6422 ecs->event_thread->control.step_stack_frame_id)
005ca36a 6423 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
16c381f0
JK
6424 ecs->event_thread->control.step_stack_frame_id)
6425 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
005ca36a 6426 outer_frame_id)
885eeb5b 6427 || (ecs->event_thread->control.step_start_function
f2ffa92b 6428 != find_pc_function (ecs->event_thread->suspend.stop_pc)))))
488f131b 6429 {
f2ffa92b 6430 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
95918acb 6431 CORE_ADDR real_stop_pc;
8fb3e588 6432
527159b7 6433 if (debug_infrun)
8a9de0e4 6434 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
527159b7 6435
b7a084be 6436 if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
95918acb
AC
6437 {
6438 /* I presume that step_over_calls is only 0 when we're
6439 supposed to be stepping at the assembly language level
6440 ("stepi"). Just stop. */
388a8562 6441 /* And this works the same backward as frontward. MVS */
bdc36728 6442 end_stepping_range (ecs);
95918acb
AC
6443 return;
6444 }
8fb3e588 6445
388a8562
MS
6446 /* Reverse stepping through solib trampolines. */
6447
6448 if (execution_direction == EXEC_REVERSE
16c381f0 6449 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
388a8562
MS
6450 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
6451 || (ecs->stop_func_start == 0
6452 && in_solib_dynsym_resolve_code (stop_pc))))
6453 {
6454 /* Any solib trampoline code can be handled in reverse
6455 by simply continuing to single-step. We have already
6456 executed the solib function (backwards), and a few
6457 steps will take us back through the trampoline to the
6458 caller. */
6459 keep_going (ecs);
6460 return;
6461 }
6462
16c381f0 6463 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
8567c30f 6464 {
b2175913
MS
6465 /* We're doing a "next".
6466
6467 Normal (forward) execution: set a breakpoint at the
6468 callee's return address (the address at which the caller
6469 will resume).
6470
 6471 Reverse (backward) execution: set the step-resume
6472 breakpoint at the start of the function that we just
6473 stepped into (backwards), and continue to there. When we
6130d0b7 6474 get there, we'll need to single-step back to the caller. */
b2175913
MS
6475
6476 if (execution_direction == EXEC_REVERSE)
6477 {
acf9414f
JK
6478 /* If we're already at the start of the function, we've either
6479 just stepped backward into a single instruction function,
6480 or stepped back out of a signal handler to the first instruction
6481 of the function. Just keep going, which will single-step back
6482 to the caller. */
58c48e72 6483 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
acf9414f 6484 {
acf9414f 6485 /* Normal function call return (static or dynamic). */
51abb421 6486 symtab_and_line sr_sal;
acf9414f
JK
6487 sr_sal.pc = ecs->stop_func_start;
6488 sr_sal.pspace = get_frame_program_space (frame);
6489 insert_step_resume_breakpoint_at_sal (gdbarch,
6490 sr_sal, null_frame_id);
6491 }
b2175913
MS
6492 }
6493 else
568d6575 6494 insert_step_resume_breakpoint_at_caller (frame);
b2175913 6495
8567c30f
AC
6496 keep_going (ecs);
6497 return;
6498 }
a53c66de 6499
95918acb 6500 /* If we are in a function call trampoline (a stub between the
8fb3e588
AC
6501 calling routine and the real function), locate the real
6502 function. That's what tells us (a) whether we want to step
6503 into it at all, and (b) what prologue we want to run to the
6504 end of, if we do step into it. */
568d6575 6505 real_stop_pc = skip_language_trampoline (frame, stop_pc);
95918acb 6506 if (real_stop_pc == 0)
568d6575 6507 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
95918acb
AC
6508 if (real_stop_pc != 0)
6509 ecs->stop_func_start = real_stop_pc;
8fb3e588 6510
db5f024e 6511 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
1b2bfbb9 6512 {
51abb421 6513 symtab_and_line sr_sal;
1b2bfbb9 6514 sr_sal.pc = ecs->stop_func_start;
6c95b8df 6515 sr_sal.pspace = get_frame_program_space (frame);
1b2bfbb9 6516
a6d9a66e
UW
6517 insert_step_resume_breakpoint_at_sal (gdbarch,
6518 sr_sal, null_frame_id);
8fb3e588
AC
6519 keep_going (ecs);
6520 return;
1b2bfbb9
RC
6521 }
6522
95918acb 6523 /* If we have line number information for the function we are
1bfeeb0f
JL
6524 thinking of stepping into and the function isn't on the skip
6525 list, step into it.
95918acb 6526
8fb3e588
AC
6527 If there are several symtabs at that PC (e.g. with include
 6528 files), we just want to know whether *any* of them have line
6529 numbers. find_pc_line handles this. */
95918acb
AC
6530 {
6531 struct symtab_and_line tmp_sal;
8fb3e588 6532
95918acb 6533 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
2b914b52 6534 if (tmp_sal.line != 0
85817405 6535 && !function_name_is_marked_for_skip (ecs->stop_func_name,
de7985c3 6536 tmp_sal))
95918acb 6537 {
b2175913 6538 if (execution_direction == EXEC_REVERSE)
568d6575 6539 handle_step_into_function_backward (gdbarch, ecs);
b2175913 6540 else
568d6575 6541 handle_step_into_function (gdbarch, ecs);
95918acb
AC
6542 return;
6543 }
6544 }
6545
6546 /* If we have no line number and the step-stop-if-no-debug is
8fb3e588
AC
6547 set, we stop the step so that the user has a chance to switch
 6548 to assembly mode. */
16c381f0 6549 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
078130d0 6550 && step_stop_if_no_debug)
95918acb 6551 {
bdc36728 6552 end_stepping_range (ecs);
95918acb
AC
6553 return;
6554 }
6555
b2175913
MS
6556 if (execution_direction == EXEC_REVERSE)
6557 {
acf9414f
JK
6558 /* If we're already at the start of the function, we've either just
6559 stepped backward into a single instruction function without line
6560 number info, or stepped back out of a signal handler to the first
6561 instruction of the function without line number info. Just keep
6562 going, which will single-step back to the caller. */
6563 if (ecs->stop_func_start != stop_pc)
6564 {
6565 /* Set a breakpoint at callee's start address.
6566 From there we can step once and be back in the caller. */
51abb421 6567 symtab_and_line sr_sal;
acf9414f
JK
6568 sr_sal.pc = ecs->stop_func_start;
6569 sr_sal.pspace = get_frame_program_space (frame);
6570 insert_step_resume_breakpoint_at_sal (gdbarch,
6571 sr_sal, null_frame_id);
6572 }
b2175913
MS
6573 }
6574 else
6575 /* Set a breakpoint at callee's return address (the address
6576 at which the caller will resume). */
568d6575 6577 insert_step_resume_breakpoint_at_caller (frame);
b2175913 6578
95918acb 6579 keep_going (ecs);
488f131b 6580 return;
488f131b 6581 }
c906108c 6582
fdd654f3
MS
6583 /* Reverse stepping through solib trampolines. */
6584
6585 if (execution_direction == EXEC_REVERSE
16c381f0 6586 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
fdd654f3 6587 {
f2ffa92b
PA
6588 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
6589
fdd654f3
MS
6590 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
6591 || (ecs->stop_func_start == 0
6592 && in_solib_dynsym_resolve_code (stop_pc)))
6593 {
6594 /* Any solib trampoline code can be handled in reverse
6595 by simply continuing to single-step. We have already
6596 executed the solib function (backwards), and a few
6597 steps will take us back through the trampoline to the
6598 caller. */
6599 keep_going (ecs);
6600 return;
6601 }
6602 else if (in_solib_dynsym_resolve_code (stop_pc))
6603 {
6604 /* Stepped backward into the solib dynsym resolver.
6605 Set a breakpoint at its start and continue, then
6606 one more step will take us out. */
51abb421 6607 symtab_and_line sr_sal;
fdd654f3 6608 sr_sal.pc = ecs->stop_func_start;
9d1807c3 6609 sr_sal.pspace = get_frame_program_space (frame);
fdd654f3
MS
6610 insert_step_resume_breakpoint_at_sal (gdbarch,
6611 sr_sal, null_frame_id);
6612 keep_going (ecs);
6613 return;
6614 }
6615 }
6616
f2ffa92b 6617 stop_pc_sal = find_pc_line (ecs->event_thread->suspend.stop_pc, 0);
7ed0fe66 6618
1b2bfbb9
RC
6619 /* NOTE: tausq/2004-05-24: This if block used to be done before all
 6620 the trampoline processing logic; however, there are some trampolines
6621 that have no names, so we should do trampoline handling first. */
16c381f0 6622 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7ed0fe66 6623 && ecs->stop_func_name == NULL
2afb61aa 6624 && stop_pc_sal.line == 0)
1b2bfbb9 6625 {
527159b7 6626 if (debug_infrun)
3e43a32a
MS
6627 fprintf_unfiltered (gdb_stdlog,
6628 "infrun: stepped into undebuggable function\n");
527159b7 6629
1b2bfbb9 6630 /* The inferior just stepped into, or returned to, an
7ed0fe66
DJ
6631 undebuggable function (where there is no debugging information
6632 and no line number corresponding to the address where the
1b2bfbb9
RC
6633 inferior stopped). Since we want to skip this kind of code,
6634 we keep going until the inferior returns from this
14e60db5
DJ
6635 function - unless the user has asked us not to (via
6636 set step-mode) or we no longer know how to get back
6637 to the call site. */
6638 if (step_stop_if_no_debug
c7ce8faa 6639 || !frame_id_p (frame_unwind_caller_id (frame)))
1b2bfbb9
RC
6640 {
6641 /* If we have no line number and the step-stop-if-no-debug
6642 is set, we stop the step so that the user has a chance to
 6643 switch to assembly mode. */
bdc36728 6644 end_stepping_range (ecs);
1b2bfbb9
RC
6645 return;
6646 }
6647 else
6648 {
6649 /* Set a breakpoint at callee's return address (the address
6650 at which the caller will resume). */
568d6575 6651 insert_step_resume_breakpoint_at_caller (frame);
1b2bfbb9
RC
6652 keep_going (ecs);
6653 return;
6654 }
6655 }
6656
16c381f0 6657 if (ecs->event_thread->control.step_range_end == 1)
1b2bfbb9
RC
6658 {
6659 /* It is stepi or nexti. We always want to stop stepping after
6660 one instruction. */
527159b7 6661 if (debug_infrun)
8a9de0e4 6662 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
bdc36728 6663 end_stepping_range (ecs);
1b2bfbb9
RC
6664 return;
6665 }
6666
2afb61aa 6667 if (stop_pc_sal.line == 0)
488f131b
JB
6668 {
 6669 /* We have no line number information. That means we should stop
6670 stepping (does this always happen right after one instruction,
6671 when we do "s" in a function with no line numbers,
6672 or can this happen as a result of a return or longjmp?). */
527159b7 6673 if (debug_infrun)
8a9de0e4 6674 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
bdc36728 6675 end_stepping_range (ecs);
488f131b
JB
6676 return;
6677 }
c906108c 6678
edb3359d
DJ
6679 /* Look for "calls" to inlined functions, part one. If the inline
6680 frame machinery detected some skipped call sites, we have entered
6681 a new inline function. */
6682
6683 if (frame_id_eq (get_frame_id (get_current_frame ()),
16c381f0 6684 ecs->event_thread->control.step_frame_id)
00431a78 6685 && inline_skipped_frames (ecs->event_thread))
edb3359d 6686 {
edb3359d
DJ
6687 if (debug_infrun)
6688 fprintf_unfiltered (gdb_stdlog,
6689 "infrun: stepped into inlined function\n");
6690
51abb421 6691 symtab_and_line call_sal = find_frame_sal (get_current_frame ());
edb3359d 6692
16c381f0 6693 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
edb3359d
DJ
6694 {
6695 /* For "step", we're going to stop. But if the call site
6696 for this inlined function is on the same source line as
6697 we were previously stepping, go down into the function
6698 first. Otherwise stop at the call site. */
6699
6700 if (call_sal.line == ecs->event_thread->current_line
6701 && call_sal.symtab == ecs->event_thread->current_symtab)
00431a78 6702 step_into_inline_frame (ecs->event_thread);
edb3359d 6703
bdc36728 6704 end_stepping_range (ecs);
edb3359d
DJ
6705 return;
6706 }
6707 else
6708 {
6709 /* For "next", we should stop at the call site if it is on a
6710 different source line. Otherwise continue through the
6711 inlined function. */
6712 if (call_sal.line == ecs->event_thread->current_line
6713 && call_sal.symtab == ecs->event_thread->current_symtab)
6714 keep_going (ecs);
6715 else
bdc36728 6716 end_stepping_range (ecs);
edb3359d
DJ
6717 return;
6718 }
6719 }
6720
6721 /* Look for "calls" to inlined functions, part two. If we are still
6722 in the same real function we were stepping through, but we have
6723 to go further up to find the exact frame ID, we are stepping
6724 through a more inlined call beyond its call site. */
6725
6726 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
6727 && !frame_id_eq (get_frame_id (get_current_frame ()),
16c381f0 6728 ecs->event_thread->control.step_frame_id)
edb3359d 6729 && stepped_in_from (get_current_frame (),
16c381f0 6730 ecs->event_thread->control.step_frame_id))
edb3359d
DJ
6731 {
6732 if (debug_infrun)
6733 fprintf_unfiltered (gdb_stdlog,
6734 "infrun: stepping through inlined function\n");
6735
16c381f0 6736 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
edb3359d
DJ
6737 keep_going (ecs);
6738 else
bdc36728 6739 end_stepping_range (ecs);
edb3359d
DJ
6740 return;
6741 }
6742
f2ffa92b 6743 if ((ecs->event_thread->suspend.stop_pc == stop_pc_sal.pc)
4e1c45ea
PA
6744 && (ecs->event_thread->current_line != stop_pc_sal.line
6745 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
488f131b
JB
6746 {
6747 /* We are at the start of a different line. So stop. Note that
6748 we don't stop if we step into the middle of a different line.
6749 That is said to make things like for (;;) statements work
6750 better. */
527159b7 6751 if (debug_infrun)
3e43a32a
MS
6752 fprintf_unfiltered (gdb_stdlog,
6753 "infrun: stepped to a different line\n");
bdc36728 6754 end_stepping_range (ecs);
488f131b
JB
6755 return;
6756 }
c906108c 6757
488f131b 6758 /* We aren't done stepping.
c906108c 6759
488f131b
JB
6760 Optimize by setting the stepping range to the line.
6761 (We might not be in the original line, but if we entered a
6762 new line in mid-statement, we continue stepping. This makes
6763 things like for(;;) statements work better.) */
c906108c 6764
16c381f0
JK
6765 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
6766 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
c1e36e3e 6767 ecs->event_thread->control.may_range_step = 1;
edb3359d 6768 set_step_info (frame, stop_pc_sal);
488f131b 6769
527159b7 6770 if (debug_infrun)
8a9de0e4 6771 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
488f131b 6772 keep_going (ecs);
104c1213
JM
6773}
6774
c447ac0b
PA
6775/* In all-stop mode, if we're currently stepping but have stopped in
6776 some other thread, we may need to switch back to the stepped
 6777 thread. Returns true if we set the inferior running, false if we left
6778 it stopped (and the event needs further processing). */
6779
6780static int
6781switch_back_to_stepped_thread (struct execution_control_state *ecs)
6782{
fbea99ea 6783 if (!target_is_non_stop_p ())
c447ac0b 6784 {
99619bea
PA
6785 struct thread_info *stepping_thread;
6786
6787 /* If any thread is blocked on some internal breakpoint, and we
6788 simply need to step over that breakpoint to get it going
6789 again, do that first. */
6790
6791 /* However, if we see an event for the stepping thread, then we
6792 know all other threads have been moved past their breakpoints
6793 already. Let the caller check whether the step is finished,
6794 etc., before deciding to move it past a breakpoint. */
6795 if (ecs->event_thread->control.step_range_end != 0)
6796 return 0;
6797
6798 /* Check if the current thread is blocked on an incomplete
6799 step-over, interrupted by a random signal. */
6800 if (ecs->event_thread->control.trap_expected
6801 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
c447ac0b 6802 {
99619bea
PA
6803 if (debug_infrun)
6804 {
6805 fprintf_unfiltered (gdb_stdlog,
6806 "infrun: need to finish step-over of [%s]\n",
a068643d 6807 target_pid_to_str (ecs->event_thread->ptid).c_str ());
99619bea
PA
6808 }
6809 keep_going (ecs);
6810 return 1;
6811 }
2adfaa28 6812
99619bea
PA
6813 /* Check if the current thread is blocked by a single-step
6814 breakpoint of another thread. */
6815 if (ecs->hit_singlestep_breakpoint)
6816 {
6817 if (debug_infrun)
6818 {
6819 fprintf_unfiltered (gdb_stdlog,
6820 "infrun: need to step [%s] over single-step "
6821 "breakpoint\n",
a068643d 6822 target_pid_to_str (ecs->ptid).c_str ());
99619bea
PA
6823 }
6824 keep_going (ecs);
6825 return 1;
6826 }
6827
4d9d9d04
PA
6828 /* If this thread needs yet another step-over (e.g., stepping
6829 through a delay slot), do it first before moving on to
6830 another thread. */
6831 if (thread_still_needs_step_over (ecs->event_thread))
6832 {
6833 if (debug_infrun)
6834 {
6835 fprintf_unfiltered (gdb_stdlog,
6836 "infrun: thread [%s] still needs step-over\n",
a068643d 6837 target_pid_to_str (ecs->event_thread->ptid).c_str ());
4d9d9d04
PA
6838 }
6839 keep_going (ecs);
6840 return 1;
6841 }
70509625 6842
483805cf
PA
6843 /* If scheduler locking applies even if not stepping, there's no
6844 need to walk over threads. Above we've checked whether the
 6845 current thread is stepping. If some thread other than the
 6846 event thread is stepping, then it must be that scheduler
6847 locking is not in effect. */
856e7dd6 6848 if (schedlock_applies (ecs->event_thread))
483805cf
PA
6849 return 0;
6850
4d9d9d04
PA
6851 /* Otherwise, we no longer expect a trap in the current thread.
6852 Clear the trap_expected flag before switching back -- this is
6853 what keep_going does as well, if we call it. */
6854 ecs->event_thread->control.trap_expected = 0;
6855
6856 /* Likewise, clear the signal if it should not be passed. */
6857 if (!signal_program[ecs->event_thread->suspend.stop_signal])
6858 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
6859
6860 /* Do all pending step-overs before actually proceeding with
483805cf 6861 step/next/etc. */
4d9d9d04
PA
6862 if (start_step_over ())
6863 {
6864 prepare_to_wait (ecs);
6865 return 1;
6866 }
6867
6868 /* Look for the stepping/nexting thread. */
483805cf 6869 stepping_thread = NULL;
4d9d9d04 6870
08036331 6871 for (thread_info *tp : all_non_exited_threads ())
483805cf 6872 {
fbea99ea
PA
6873 /* Ignore threads of processes the caller is not
6874 resuming. */
483805cf 6875 if (!sched_multi
e99b03dc 6876 && tp->ptid.pid () != ecs->ptid.pid ())
483805cf
PA
6877 continue;
6878
6879 /* When stepping over a breakpoint, we lock all threads
6880 except the one that needs to move past the breakpoint.
6881 If a non-event thread has this set, the "incomplete
6882 step-over" check above should have caught it earlier. */
372316f1
PA
6883 if (tp->control.trap_expected)
6884 {
6885 internal_error (__FILE__, __LINE__,
6886 "[%s] has inconsistent state: "
6887 "trap_expected=%d\n",
a068643d 6888 target_pid_to_str (tp->ptid).c_str (),
372316f1
PA
6889 tp->control.trap_expected);
6890 }
483805cf
PA
6891
6892 /* Did we find the stepping thread? */
6893 if (tp->control.step_range_end)
6894 {
 6895 /* Yep. There should only be one, though. */
6896 gdb_assert (stepping_thread == NULL);
6897
6898 /* The event thread is handled at the top, before we
6899 enter this loop. */
6900 gdb_assert (tp != ecs->event_thread);
6901
6902 /* If some thread other than the event thread is
6903 stepping, then scheduler locking can't be in effect,
6904 otherwise we wouldn't have resumed the current event
6905 thread in the first place. */
856e7dd6 6906 gdb_assert (!schedlock_applies (tp));
483805cf
PA
6907
6908 stepping_thread = tp;
6909 }
99619bea
PA
6910 }
6911
483805cf 6912 if (stepping_thread != NULL)
99619bea 6913 {
c447ac0b
PA
6914 if (debug_infrun)
6915 fprintf_unfiltered (gdb_stdlog,
6916 "infrun: switching back to stepped thread\n");
6917
2ac7589c
PA
6918 if (keep_going_stepped_thread (stepping_thread))
6919 {
6920 prepare_to_wait (ecs);
6921 return 1;
6922 }
6923 }
6924 }
2adfaa28 6925
2ac7589c
PA
6926 return 0;
6927}
2adfaa28 6928
2ac7589c
PA
6929/* Set a previously stepped thread back to stepping. Returns true on
6930 success, false if the resume is not possible (e.g., the thread
6931 vanished). */
6932
6933static int
6934keep_going_stepped_thread (struct thread_info *tp)
6935{
6936 struct frame_info *frame;
2ac7589c
PA
6937 struct execution_control_state ecss;
6938 struct execution_control_state *ecs = &ecss;
2adfaa28 6939
2ac7589c
PA
6940 /* If the stepping thread exited, then don't try to switch back and
6941 resume it, which could fail in several different ways depending
6942 on the target. Instead, just keep going.
2adfaa28 6943
2ac7589c
PA
6944 We can find a stepping dead thread in the thread list in two
6945 cases:
2adfaa28 6946
2ac7589c
PA
6947 - The target supports thread exit events, and when the target
6948 tries to delete the thread from the thread list, inferior_ptid
6949 pointed at the exiting thread. In such case, calling
6950 delete_thread does not really remove the thread from the list;
6951 instead, the thread is left listed, with 'exited' state.
64ce06e4 6952
2ac7589c
PA
6953 - The target's debug interface does not support thread exit
6954 events, and so we have no idea whatsoever if the previously
6955 stepping thread is still alive. For that reason, we need to
6956 synchronously query the target now. */
2adfaa28 6957
00431a78 6958 if (tp->state == THREAD_EXITED || !target_thread_alive (tp->ptid))
2ac7589c
PA
6959 {
6960 if (debug_infrun)
6961 fprintf_unfiltered (gdb_stdlog,
6962 "infrun: not resuming previously "
6963 "stepped thread, it has vanished\n");
6964
00431a78 6965 delete_thread (tp);
2ac7589c 6966 return 0;
c447ac0b 6967 }
2ac7589c
PA
6968
6969 if (debug_infrun)
6970 fprintf_unfiltered (gdb_stdlog,
6971 "infrun: resuming previously stepped thread\n");
6972
6973 reset_ecs (ecs, tp);
00431a78 6974 switch_to_thread (tp);
2ac7589c 6975
f2ffa92b 6976 tp->suspend.stop_pc = regcache_read_pc (get_thread_regcache (tp));
2ac7589c 6977 frame = get_current_frame ();
2ac7589c
PA
6978
6979 /* If the PC of the thread we were trying to single-step has
6980 changed, then that thread has trapped or been signaled, but the
6981 event has not been reported to GDB yet. Re-poll the target
6982 looking for this particular thread's event (i.e. temporarily
6983 enable schedlock) by:
6984
6985 - setting a break at the current PC
6986 - resuming that particular thread, only (by setting trap
6987 expected)
6988
6989 This prevents us continuously moving the single-step breakpoint
6990 forward, one instruction at a time, overstepping. */
6991
f2ffa92b 6992 if (tp->suspend.stop_pc != tp->prev_pc)
2ac7589c
PA
6993 {
6994 ptid_t resume_ptid;
6995
6996 if (debug_infrun)
6997 fprintf_unfiltered (gdb_stdlog,
6998 "infrun: expected thread advanced also (%s -> %s)\n",
6999 paddress (target_gdbarch (), tp->prev_pc),
f2ffa92b 7000 paddress (target_gdbarch (), tp->suspend.stop_pc));
2ac7589c
PA
7001
7002 /* Clear the info of the previous step-over, as it's no longer
7003 valid (if the thread was trying to step over a breakpoint, it
7004 has already succeeded). It's what keep_going would do too,
7005 if we called it. Do this before trying to insert the sss
7006 breakpoint, otherwise if we were previously trying to step
7007 over this exact address in another thread, the breakpoint is
7008 skipped. */
7009 clear_step_over_info ();
7010 tp->control.trap_expected = 0;
7011
7012 insert_single_step_breakpoint (get_frame_arch (frame),
7013 get_frame_address_space (frame),
f2ffa92b 7014 tp->suspend.stop_pc);
2ac7589c 7015
372316f1 7016 tp->resumed = 1;
fbea99ea 7017 resume_ptid = internal_resume_ptid (tp->control.stepping_command);
2ac7589c
PA
7018 do_target_resume (resume_ptid, 0, GDB_SIGNAL_0);
7019 }
7020 else
7021 {
7022 if (debug_infrun)
7023 fprintf_unfiltered (gdb_stdlog,
7024 "infrun: expected thread still hasn't advanced\n");
7025
7026 keep_going_pass_signal (ecs);
7027 }
7028 return 1;
c447ac0b
PA
7029}
7030
8b061563
PA
7031/* Is thread TP in the middle of (software or hardware)
7032 single-stepping? (Note the result of this function must never be
7033 passed directly as target_resume's STEP parameter.) */
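/* (Presumably because the STEP argument actually handed to
   target_resume is computed later, when the thread is resumed, and
   depends on things like pending step-overs; this predicate only
   reports that the thread is logically in the middle of a step.)  */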
104c1213 7034
a289b8f6 7035static int
b3444185 7036currently_stepping (struct thread_info *tp)
a7212384 7037{
8358c15c
JK
7038 return ((tp->control.step_range_end
7039 && tp->control.step_resume_breakpoint == NULL)
7040 || tp->control.trap_expected
af48d08f 7041 || tp->stepped_breakpoint
8358c15c 7042 || bpstat_should_step ());
a7212384
UW
7043}
7044
b2175913
MS
7045/* Inferior has stepped into a subroutine call with source code that
 7046 we should not step over. Step to the first line of code in
7047 it. */
c2c6d25f
JM
7048
7049static void
568d6575
UW
7050handle_step_into_function (struct gdbarch *gdbarch,
7051 struct execution_control_state *ecs)
c2c6d25f 7052{
7e324e48
GB
7053 fill_in_stop_func (gdbarch, ecs);
7054
f2ffa92b
PA
7055 compunit_symtab *cust
7056 = find_pc_compunit_symtab (ecs->event_thread->suspend.stop_pc);
43f3e411 7057 if (cust != NULL && compunit_language (cust) != language_asm)
46a62268
YQ
7058 ecs->stop_func_start
7059 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
c2c6d25f 7060
51abb421 7061 symtab_and_line stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
c2c6d25f
JM
7062 /* Use the step_resume_break to step until the end of the prologue,
7063 even if that involves jumps (as it seems to on the vax under
7064 4.2). */
7065 /* If the prologue ends in the middle of a source line, continue to
7066 the end of that source line (if it is still within the function).
7067 Otherwise, just go to end of prologue. */
2afb61aa
PA
7068 if (stop_func_sal.end
7069 && stop_func_sal.pc != ecs->stop_func_start
7070 && stop_func_sal.end < ecs->stop_func_end)
7071 ecs->stop_func_start = stop_func_sal.end;
c2c6d25f 7072
2dbd5e30
KB
7073 /* Architectures which require breakpoint adjustment might not be able
7074 to place a breakpoint at the computed address. If so, the test
7075 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
7076 ecs->stop_func_start to an address at which a breakpoint may be
7077 legitimately placed.
8fb3e588 7078
2dbd5e30
KB
7079 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
7080 made, GDB will enter an infinite loop when stepping through
7081 optimized code consisting of VLIW instructions which contain
7082 subinstructions corresponding to different source lines. On
7083 FR-V, it's not permitted to place a breakpoint on any but the
7084 first subinstruction of a VLIW instruction. When a breakpoint is
7085 set, GDB will adjust the breakpoint address to the beginning of
7086 the VLIW instruction. Thus, we need to make the corresponding
7087 adjustment here when computing the stop address. */
8fb3e588 7088
568d6575 7089 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
2dbd5e30
KB
7090 {
7091 ecs->stop_func_start
568d6575 7092 = gdbarch_adjust_breakpoint_address (gdbarch,
8fb3e588 7093 ecs->stop_func_start);
2dbd5e30
KB
7094 }
7095
f2ffa92b 7096 if (ecs->stop_func_start == ecs->event_thread->suspend.stop_pc)
c2c6d25f
JM
7097 {
7098 /* We are already there: stop now. */
bdc36728 7099 end_stepping_range (ecs);
c2c6d25f
JM
7100 return;
7101 }
7102 else
7103 {
7104 /* Put the step-breakpoint there and go until there. */
51abb421 7105 symtab_and_line sr_sal;
c2c6d25f
JM
7106 sr_sal.pc = ecs->stop_func_start;
7107 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
6c95b8df 7108 sr_sal.pspace = get_frame_program_space (get_current_frame ());
44cbf7b5 7109
c2c6d25f 7110 /* Do not specify what the fp should be when we stop since on
488f131b
JB
7111 some machines the prologue is where the new fp value is
7112 established. */
a6d9a66e 7113 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
c2c6d25f
JM
7114
7115 /* And make sure stepping stops right away then. */
16c381f0
JK
7116 ecs->event_thread->control.step_range_end
7117 = ecs->event_thread->control.step_range_start;
c2c6d25f
JM
7118 }
7119 keep_going (ecs);
7120}
d4f3574e 7121
b2175913
MS
7122/* Inferior has stepped backward into a subroutine call with source
 7123 code that we should not step over. Step to the beginning of the
7124 last line of code in it. */
7125
7126static void
568d6575
UW
7127handle_step_into_function_backward (struct gdbarch *gdbarch,
7128 struct execution_control_state *ecs)
b2175913 7129{
43f3e411 7130 struct compunit_symtab *cust;
167e4384 7131 struct symtab_and_line stop_func_sal;
b2175913 7132
7e324e48
GB
7133 fill_in_stop_func (gdbarch, ecs);
7134
f2ffa92b 7135 cust = find_pc_compunit_symtab (ecs->event_thread->suspend.stop_pc);
43f3e411 7136 if (cust != NULL && compunit_language (cust) != language_asm)
46a62268
YQ
7137 ecs->stop_func_start
7138 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
b2175913 7139
f2ffa92b 7140 stop_func_sal = find_pc_line (ecs->event_thread->suspend.stop_pc, 0);
b2175913
MS
7141
7142 /* OK, we're just going to keep stepping here. */
f2ffa92b 7143 if (stop_func_sal.pc == ecs->event_thread->suspend.stop_pc)
b2175913
MS
7144 {
7145 /* We're there already. Just stop stepping now. */
bdc36728 7146 end_stepping_range (ecs);
b2175913
MS
7147 }
7148 else
7149 {
7150 /* Else just reset the step range and keep going.
7151 No step-resume breakpoint, they don't work for
7152 epilogues, which can have multiple entry paths. */
16c381f0
JK
7153 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
7154 ecs->event_thread->control.step_range_end = stop_func_sal.end;
b2175913
MS
7155 keep_going (ecs);
7156 }
7157 return;
7158}
7159
d3169d93 7160/* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
44cbf7b5
AC
 7161 This is used both to skip over functions and to skip over code. */
7162
7163static void
2c03e5be
PA
7164insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
7165 struct symtab_and_line sr_sal,
7166 struct frame_id sr_id,
7167 enum bptype sr_type)
44cbf7b5 7168{
611c83ae
PA
7169 /* There should never be more than one step-resume or longjmp-resume
7170 breakpoint per thread, so we should never be setting a new
44cbf7b5 7171 step_resume_breakpoint when one is already active. */
8358c15c 7172 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
2c03e5be 7173 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
d3169d93
DJ
7174
7175 if (debug_infrun)
7176 fprintf_unfiltered (gdb_stdlog,
5af949e3
UW
7177 "infrun: inserting step-resume breakpoint at %s\n",
7178 paddress (gdbarch, sr_sal.pc));
d3169d93 7179
8358c15c 7180 inferior_thread ()->control.step_resume_breakpoint
454dafbd 7181 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type).release ();
2c03e5be
PA
7182}
7183
9da8c2a0 7184void
2c03e5be
PA
7185insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
7186 struct symtab_and_line sr_sal,
7187 struct frame_id sr_id)
7188{
7189 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
7190 sr_sal, sr_id,
7191 bp_step_resume);
44cbf7b5 7192}
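/* A minimal usage sketch, mirroring the dynsym-resolver case in
   process_event_stop_test above; the SAL is given a PC and a program
   space, and null_frame_id is passed when no specific frame ID is
   wanted for the breakpoint:

     symtab_and_line sr_sal;
     sr_sal.pc = pc_after_resolver;
     sr_sal.pspace = get_frame_program_space (frame);
     insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
   */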
7ce450bd 7193
2c03e5be
PA
7194/* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
7195 This is used to skip a potential signal handler.
7ce450bd 7196
14e60db5
DJ
7197 This is called with the interrupted function's frame. The signal
7198 handler, when it returns, will resume the interrupted function at
7199 RETURN_FRAME.pc. */
d303a6c7
AC
7200
7201static void
2c03e5be 7202insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
d303a6c7 7203{
f4c1edd8 7204 gdb_assert (return_frame != NULL);
d303a6c7 7205
51abb421
PA
7206 struct gdbarch *gdbarch = get_frame_arch (return_frame);
7207
7208 symtab_and_line sr_sal;
568d6575 7209 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
d303a6c7 7210 sr_sal.section = find_pc_overlay (sr_sal.pc);
6c95b8df 7211 sr_sal.pspace = get_frame_program_space (return_frame);
d303a6c7 7212
2c03e5be
PA
7213 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
7214 get_stack_frame_id (return_frame),
7215 bp_hp_step_resume);
d303a6c7
AC
7216}
7217
2c03e5be
PA
7218/* Insert a "step-resume breakpoint" at the previous frame's PC. This
7219 is used to skip a function after stepping into it (for "next" or if
7220 the called function has no debugging information).
14e60db5
DJ
7221
7222 The current function has almost always been reached by single
7223 stepping a call or return instruction. NEXT_FRAME belongs to the
7224 current function, and the breakpoint will be set at the caller's
7225 resume address.
7226
7227 This is a separate function rather than reusing
2c03e5be 7228 insert_hp_step_resume_breakpoint_at_frame in order to avoid
14e60db5 7229 get_prev_frame, which may stop prematurely (see the implementation
c7ce8faa 7230 of frame_unwind_caller_id for an example). */
14e60db5
DJ
7231
7232static void
7233insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
7234{
14e60db5
DJ
7235 /* We shouldn't have gotten here if we don't know where the call site
7236 is. */
c7ce8faa 7237 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
14e60db5 7238
51abb421 7239 struct gdbarch *gdbarch = frame_unwind_caller_arch (next_frame);
14e60db5 7240
51abb421 7241 symtab_and_line sr_sal;
c7ce8faa
DJ
7242 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
7243 frame_unwind_caller_pc (next_frame));
14e60db5 7244 sr_sal.section = find_pc_overlay (sr_sal.pc);
6c95b8df 7245 sr_sal.pspace = frame_unwind_program_space (next_frame);
14e60db5 7246
a6d9a66e 7247 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
c7ce8faa 7248 frame_unwind_caller_id (next_frame));
14e60db5
DJ
7249}
7250
611c83ae
PA
7251/* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
7252 new breakpoint at the target of a jmp_buf. The handling of
7253 longjmp-resume uses the same mechanisms used for handling
7254 "step-resume" breakpoints. */
7255
7256static void
a6d9a66e 7257insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
611c83ae 7258{
e81a37f7
TT
7259 /* There should never be more than one longjmp-resume breakpoint per
7260 thread, so we should never be setting a new
611c83ae 7261 longjmp_resume_breakpoint when one is already active. */
e81a37f7 7262 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
611c83ae
PA
7263
7264 if (debug_infrun)
7265 fprintf_unfiltered (gdb_stdlog,
5af949e3
UW
7266 "infrun: inserting longjmp-resume breakpoint at %s\n",
7267 paddress (gdbarch, pc));
611c83ae 7268
e81a37f7 7269 inferior_thread ()->control.exception_resume_breakpoint =
454dafbd 7270 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume).release ();
611c83ae
PA
7271}
7272
186c406b
TT
7273/* Insert an exception resume breakpoint. TP is the thread throwing
7274 the exception. The block B is the block of the unwinder debug hook
7275 function. FRAME is the frame corresponding to the call to this
7276 function. SYM is the symbol of the function argument holding the
7277 target PC of the exception. */
7278
7279static void
7280insert_exception_resume_breakpoint (struct thread_info *tp,
3977b71f 7281 const struct block *b,
186c406b
TT
7282 struct frame_info *frame,
7283 struct symbol *sym)
7284{
a70b8144 7285 try
186c406b 7286 {
63e43d3a 7287 struct block_symbol vsym;
186c406b
TT
7288 struct value *value;
7289 CORE_ADDR handler;
7290 struct breakpoint *bp;
7291
de63c46b
PA
7292 vsym = lookup_symbol_search_name (SYMBOL_SEARCH_NAME (sym),
7293 b, VAR_DOMAIN);
63e43d3a 7294 value = read_var_value (vsym.symbol, vsym.block, frame);
186c406b
TT
7295 /* If the value was optimized out, revert to the old behavior. */
7296 if (! value_optimized_out (value))
7297 {
7298 handler = value_as_address (value);
7299
7300 if (debug_infrun)
7301 fprintf_unfiltered (gdb_stdlog,
7302 "infrun: exception resume at %lx\n",
7303 (unsigned long) handler);
7304
7305 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
454dafbd
TT
7306 handler,
7307 bp_exception_resume).release ();
c70a6932
JK
7308
7309 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
7310 frame = NULL;
7311
5d5658a1 7312 bp->thread = tp->global_num;
186c406b
TT
7313 inferior_thread ()->control.exception_resume_breakpoint = bp;
7314 }
7315 }
230d2906 7316 catch (const gdb_exception_error &e)
492d29ea
PA
7317 {
7318 /* We want to ignore errors here. */
7319 }
186c406b
TT
7320}
7321
28106bc2
SDJ
7322/* A helper for check_exception_resume that sets an
7323 exception-breakpoint based on a SystemTap probe. */
7324
7325static void
7326insert_exception_resume_from_probe (struct thread_info *tp,
729662a5 7327 const struct bound_probe *probe,
28106bc2
SDJ
7328 struct frame_info *frame)
7329{
7330 struct value *arg_value;
7331 CORE_ADDR handler;
7332 struct breakpoint *bp;
7333
7334 arg_value = probe_safe_evaluate_at_pc (frame, 1);
7335 if (!arg_value)
7336 return;
7337
7338 handler = value_as_address (arg_value);
7339
7340 if (debug_infrun)
7341 fprintf_unfiltered (gdb_stdlog,
7342 "infrun: exception resume at %s\n",
6bac7473 7343 paddress (get_objfile_arch (probe->objfile),
28106bc2
SDJ
7344 handler));
7345
7346 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
454dafbd 7347 handler, bp_exception_resume).release ();
5d5658a1 7348 bp->thread = tp->global_num;
28106bc2
SDJ
7349 inferior_thread ()->control.exception_resume_breakpoint = bp;
7350}
7351
186c406b
TT
7352/* This is called when an exception has been intercepted. Check to
7353 see whether the exception's destination is of interest, and if so,
7354 set an exception resume breakpoint there. */
7355
7356static void
7357check_exception_resume (struct execution_control_state *ecs,
28106bc2 7358 struct frame_info *frame)
186c406b 7359{
729662a5 7360 struct bound_probe probe;
28106bc2
SDJ
7361 struct symbol *func;
7362
7363 /* First see if this exception unwinding breakpoint was set via a
7364 SystemTap probe point. If so, the probe has two arguments: the
7365 CFA and the HANDLER. We ignore the CFA, extract the handler, and
7366 set a breakpoint there. */
6bac7473 7367 probe = find_probe_by_pc (get_frame_pc (frame));
935676c9 7368 if (probe.prob)
28106bc2 7369 {
729662a5 7370 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
28106bc2
SDJ
7371 return;
7372 }
7373
7374 func = get_frame_function (frame);
7375 if (!func)
7376 return;
186c406b 7377
a70b8144 7378 try
186c406b 7379 {
3977b71f 7380 const struct block *b;
8157b174 7381 struct block_iterator iter;
186c406b
TT
7382 struct symbol *sym;
7383 int argno = 0;
7384
7385 /* The exception breakpoint is a thread-specific breakpoint on
7386 the unwinder's debug hook, declared as:
7387
7388 void _Unwind_DebugHook (void *cfa, void *handler);
7389
7390 The CFA argument indicates the frame to which control is
7391 about to be transferred. HANDLER is the destination PC.
7392
7393 We ignore the CFA and set a temporary breakpoint at HANDLER.
7394 This is not extremely efficient but it avoids issues in gdb
7395 with computing the DWARF CFA, and it also works even in weird
7396 cases such as throwing an exception from inside a signal
7397 handler. */
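      /* Concretely, the loop below skips everything that is not a
	 formal argument, skips the first argument it finds (the CFA),
	 and passes the second (the handler) to
	 insert_exception_resume_breakpoint.  */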
7398
7399 b = SYMBOL_BLOCK_VALUE (func);
7400 ALL_BLOCK_SYMBOLS (b, iter, sym)
7401 {
7402 if (!SYMBOL_IS_ARGUMENT (sym))
7403 continue;
7404
7405 if (argno == 0)
7406 ++argno;
7407 else
7408 {
7409 insert_exception_resume_breakpoint (ecs->event_thread,
7410 b, frame, sym);
7411 break;
7412 }
7413 }
7414 }
230d2906 7415 catch (const gdb_exception_error &e)
492d29ea
PA
7416 {
7417 }
186c406b
TT
7418}
7419
104c1213 7420static void
22bcd14b 7421stop_waiting (struct execution_control_state *ecs)
104c1213 7422{
527159b7 7423 if (debug_infrun)
22bcd14b 7424 fprintf_unfiltered (gdb_stdlog, "infrun: stop_waiting\n");
527159b7 7425
cd0fc7c3
SS
7426 /* Let callers know we don't want to wait for the inferior anymore. */
7427 ecs->wait_some_more = 0;
fbea99ea
PA
7428
7429 /* If all-stop, but the target is always in non-stop mode, stop all
7430 threads now that we're presenting the stop to the user. */
7431 if (!non_stop && target_is_non_stop_p ())
7432 stop_all_threads ();
cd0fc7c3
SS
7433}
7434
4d9d9d04
PA
7435/* Like keep_going, but passes the signal to the inferior, even if the
7436 signal is set to nopass. */
d4f3574e
SS
7437
7438static void
4d9d9d04 7439keep_going_pass_signal (struct execution_control_state *ecs)
d4f3574e 7440{
d7e15655 7441 gdb_assert (ecs->event_thread->ptid == inferior_ptid);
372316f1 7442 gdb_assert (!ecs->event_thread->resumed);
4d9d9d04 7443
d4f3574e 7444 /* Save the pc before execution, to compare with pc after stop. */
fb14de7b 7445 ecs->event_thread->prev_pc
00431a78 7446 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
d4f3574e 7447
4d9d9d04 7448 if (ecs->event_thread->control.trap_expected)
d4f3574e 7449 {
4d9d9d04
PA
7450 struct thread_info *tp = ecs->event_thread;
7451
7452 if (debug_infrun)
7453 fprintf_unfiltered (gdb_stdlog,
7454 "infrun: %s has trap_expected set, "
7455 "resuming to collect trap\n",
a068643d 7456 target_pid_to_str (tp->ptid).c_str ());
4d9d9d04 7457
a9ba6bae
PA
7458 /* We haven't yet gotten our trap, and either: intercepted a
7459 non-signal event (e.g., a fork); or took a signal which we
7460 are supposed to pass through to the inferior. Simply
7461 continue. */
64ce06e4 7462 resume (ecs->event_thread->suspend.stop_signal);
d4f3574e 7463 }
372316f1
PA
7464 else if (step_over_info_valid_p ())
7465 {
7466 /* Another thread is stepping over a breakpoint in-line. If
7467 this thread needs a step-over too, queue the request. In
7468 either case, this resume must be deferred for later. */
7469 struct thread_info *tp = ecs->event_thread;
7470
7471 if (ecs->hit_singlestep_breakpoint
7472 || thread_still_needs_step_over (tp))
7473 {
7474 if (debug_infrun)
7475 fprintf_unfiltered (gdb_stdlog,
7476 "infrun: step-over already in progress: "
7477 "step-over for %s deferred\n",
a068643d 7478 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
7479 thread_step_over_chain_enqueue (tp);
7480 }
7481 else
7482 {
7483 if (debug_infrun)
7484 fprintf_unfiltered (gdb_stdlog,
7485 "infrun: step-over in progress: "
7486 "resume of %s deferred\n",
a068643d 7487 target_pid_to_str (tp->ptid).c_str ());
372316f1 7488 }
372316f1 7489 }
d4f3574e
SS
7490 else
7491 {
31e77af2 7492 struct regcache *regcache = get_current_regcache ();
963f9c80
PA
7493 int remove_bp;
7494 int remove_wps;
8d297bbf 7495 step_over_what step_what;
31e77af2 7496
d4f3574e 7497 /* Either the trap was not expected, but we are continuing
a9ba6bae
PA
7498 anyway (if we got a signal, the user asked it be passed to
7499 the child)
7500 -- or --
7501 We got our expected trap, but decided we should resume from
7502 it.
d4f3574e 7503
a9ba6bae 7504 We're going to run this baby now!
d4f3574e 7505
c36b740a
VP
7506 Note that insert_breakpoints won't try to re-insert
7507 already inserted breakpoints. Therefore, we don't
7508 care if breakpoints were already inserted, or not. */
a9ba6bae 7509
31e77af2
PA
7510 /* If we need to step over a breakpoint, and we're not using
7511 displaced stepping to do so, insert all breakpoints
7512 (watchpoints, etc.) but the one we're stepping over, step one
7513 instruction, and then re-insert the breakpoint when that step
7514 is finished. */
963f9c80 7515
6c4cfb24
PA
7516 step_what = thread_still_needs_step_over (ecs->event_thread);
7517
963f9c80 7518 remove_bp = (ecs->hit_singlestep_breakpoint
6c4cfb24
PA
7519 || (step_what & STEP_OVER_BREAKPOINT));
7520 remove_wps = (step_what & STEP_OVER_WATCHPOINT);
963f9c80 7521
cb71640d
PA
7522 /* We can't use displaced stepping if we need to step past a
7523 watchpoint. The instruction copied to the scratch pad would
7524 still trigger the watchpoint. */
7525 if (remove_bp
3fc8eb30 7526 && (remove_wps || !use_displaced_stepping (ecs->event_thread)))
45e8c884 7527 {
a01bda52 7528 set_step_over_info (regcache->aspace (),
21edc42f
YQ
7529 regcache_read_pc (regcache), remove_wps,
7530 ecs->event_thread->global_num);
45e8c884 7531 }
963f9c80 7532 else if (remove_wps)
21edc42f 7533 set_step_over_info (NULL, 0, remove_wps, -1);
372316f1
PA
7534
7535 /* If we now need to do an in-line step-over, we need to stop
7536 all other threads. Note this must be done before
7537 insert_breakpoints below, because that removes the breakpoint
7538 we're about to step over, otherwise other threads could miss
7539 it. */
fbea99ea 7540 if (step_over_info_valid_p () && target_is_non_stop_p ())
372316f1 7541 stop_all_threads ();
abbb1732 7542
31e77af2 7543 /* Stop stepping if inserting breakpoints fails. */
a70b8144 7544 try
31e77af2
PA
7545 {
7546 insert_breakpoints ();
7547 }
230d2906 7548 catch (const gdb_exception_error &e)
31e77af2
PA
7549 {
7550 exception_print (gdb_stderr, e);
22bcd14b 7551 stop_waiting (ecs);
bdf2a94a 7552 clear_step_over_info ();
31e77af2 7553 return;
d4f3574e
SS
7554 }
7555
963f9c80 7556 ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);
d4f3574e 7557
64ce06e4 7558 resume (ecs->event_thread->suspend.stop_signal);
d4f3574e
SS
7559 }
7560
488f131b 7561 prepare_to_wait (ecs);
d4f3574e
SS
7562}
7563
4d9d9d04
PA
7564/* Called when we should continue running the inferior, because the
7565 current event doesn't cause a user visible stop. This does the
7566 resuming part; waiting for the next event is done elsewhere. */
7567
7568static void
7569keep_going (struct execution_control_state *ecs)
7570{
7571 if (ecs->event_thread->control.trap_expected
7572 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
7573 ecs->event_thread->control.trap_expected = 0;
7574
7575 if (!signal_program[ecs->event_thread->suspend.stop_signal])
7576 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
7577 keep_going_pass_signal (ecs);
7578}
7579
104c1213
JM
7580/* This function normally comes after a resume, before
7581 handle_inferior_event exits. It takes care of any last bits of
7582 housekeeping, and sets the all-important wait_some_more flag. */
cd0fc7c3 7583
104c1213
JM
7584static void
7585prepare_to_wait (struct execution_control_state *ecs)
cd0fc7c3 7586{
527159b7 7587 if (debug_infrun)
8a9de0e4 7588 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
104c1213 7589
104c1213 7590 ecs->wait_some_more = 1;
0b333c5e
PA
7591
7592 if (!target_is_async_p ())
7593 mark_infrun_async_event_handler ();
c906108c 7594}
11cf8741 7595
fd664c91 7596/* We are done with the step range of a step/next/si/ni command.
b57bacec 7597 Called once for each n of a "step n" operation. */
fd664c91
PA
7598
7599static void
bdc36728 7600end_stepping_range (struct execution_control_state *ecs)
fd664c91 7601{
bdc36728 7602 ecs->event_thread->control.stop_step = 1;
bdc36728 7603 stop_waiting (ecs);
fd664c91
PA
7604}
7605
33d62d64
JK
7606/* Several print_*_reason functions to print why the inferior has stopped.
7607 We always print something when the inferior exits, or receives a signal.
7608 The rest of the cases are dealt with later on in normal_stop and
7609 print_it_typical. Ideally there should be a call to one of these
7610 print_*_reason functions from handle_inferior_event each time
22bcd14b 7611 stop_waiting is called.
33d62d64 7612
fd664c91
PA
7613 Note that we don't call these directly, instead we delegate that to
7614 the interpreters, through observers. Interpreters then call these
7615 with whatever uiout is right. */
33d62d64 7616
fd664c91
PA
7617void
7618print_end_stepping_range_reason (struct ui_out *uiout)
33d62d64 7619{
fd664c91 7620 /* For CLI-like interpreters, print nothing. */
33d62d64 7621
112e8700 7622 if (uiout->is_mi_like_p ())
fd664c91 7623 {
112e8700 7624 uiout->field_string ("reason",
fd664c91
PA
7625 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
7626 }
7627}
33d62d64 7628
fd664c91
PA
7629void
7630print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
11cf8741 7631{
33d62d64 7632 annotate_signalled ();
112e8700
SM
7633 if (uiout->is_mi_like_p ())
7634 uiout->field_string
7635 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
7636 uiout->text ("\nProgram terminated with signal ");
33d62d64 7637 annotate_signal_name ();
112e8700 7638 uiout->field_string ("signal-name",
2ea28649 7639 gdb_signal_to_name (siggnal));
33d62d64 7640 annotate_signal_name_end ();
112e8700 7641 uiout->text (", ");
33d62d64 7642 annotate_signal_string ();
112e8700 7643 uiout->field_string ("signal-meaning",
2ea28649 7644 gdb_signal_to_string (siggnal));
33d62d64 7645 annotate_signal_string_end ();
112e8700
SM
7646 uiout->text (".\n");
7647 uiout->text ("The program no longer exists.\n");
33d62d64
JK
7648}
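/* Illustrative note (not part of the original source): for SIGSEGV the
   CLI text assembled above reads:

     Program terminated with signal SIGSEGV, Segmentation fault.
     The program no longer exists.

   MI front ends instead key off the "reason" field emitted first.  */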
7649
fd664c91
PA
7650void
7651print_exited_reason (struct ui_out *uiout, int exitstatus)
33d62d64 7652{
fda326dd 7653 struct inferior *inf = current_inferior ();
a068643d 7654 std::string pidstr = target_pid_to_str (ptid_t (inf->pid));
fda326dd 7655
33d62d64
JK
7656 annotate_exited (exitstatus);
7657 if (exitstatus)
7658 {
112e8700
SM
7659 if (uiout->is_mi_like_p ())
7660 uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED));
7661 uiout->text ("[Inferior ");
7662 uiout->text (plongest (inf->num));
7663 uiout->text (" (");
a068643d 7664 uiout->text (pidstr.c_str ());
112e8700
SM
7665 uiout->text (") exited with code ");
7666 uiout->field_fmt ("exit-code", "0%o", (unsigned int) exitstatus);
7667 uiout->text ("]\n");
33d62d64
JK
7668 }
7669 else
11cf8741 7670 {
112e8700
SM
7671 if (uiout->is_mi_like_p ())
7672 uiout->field_string
7673 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
7674 uiout->text ("[Inferior ");
7675 uiout->text (plongest (inf->num));
7676 uiout->text (" (");
a068643d 7677 uiout->text (pidstr.c_str ());
112e8700 7678 uiout->text (") exited normally]\n");
33d62d64 7679 }
33d62d64
JK
7680}
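/* Illustrative note (not part of the original source): the output built
   above looks like "[Inferior 1 (process 4242) exited normally]" or, for
   a nonzero status, "[Inferior 1 (process 4242) exited with code 01]";
   the exit code is printed in octal by the field_fmt call.  */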
7681
012b3a21
WT
7682/* Some targets/architectures can do extra processing/display of
7683 segmentation faults. E.g., Intel MPX boundary faults.
7684 Call the architecture dependent function to handle the fault. */
7685
7686static void
7687handle_segmentation_fault (struct ui_out *uiout)
7688{
7689 struct regcache *regcache = get_current_regcache ();
ac7936df 7690 struct gdbarch *gdbarch = regcache->arch ();
012b3a21
WT
7691
7692 if (gdbarch_handle_segmentation_fault_p (gdbarch))
7693 gdbarch_handle_segmentation_fault (gdbarch, uiout);
7694}
7695
fd664c91
PA
7696void
7697print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
33d62d64 7698{
f303dbd6
PA
7699 struct thread_info *thr = inferior_thread ();
7700
33d62d64
JK
7701 annotate_signal ();
7702
112e8700 7703 if (uiout->is_mi_like_p ())
f303dbd6
PA
7704 ;
7705 else if (show_thread_that_caused_stop ())
33d62d64 7706 {
f303dbd6 7707 const char *name;
33d62d64 7708
112e8700 7709 uiout->text ("\nThread ");
33eca680 7710 uiout->field_string ("thread-id", print_thread_id (thr));
f303dbd6
PA
7711
7712 name = thr->name != NULL ? thr->name : target_thread_name (thr);
7713 if (name != NULL)
7714 {
112e8700 7715 uiout->text (" \"");
33eca680 7716 uiout->field_string ("name", name);
112e8700 7717 uiout->text ("\"");
f303dbd6 7718 }
33d62d64 7719 }
f303dbd6 7720 else
112e8700 7721 uiout->text ("\nProgram");
f303dbd6 7722
112e8700
SM
7723 if (siggnal == GDB_SIGNAL_0 && !uiout->is_mi_like_p ())
7724 uiout->text (" stopped");
33d62d64
JK
7725 else
7726 {
112e8700 7727 uiout->text (" received signal ");
8b93c638 7728 annotate_signal_name ();
112e8700
SM
7729 if (uiout->is_mi_like_p ())
7730 uiout->field_string
7731 ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
7732 uiout->field_string ("signal-name", gdb_signal_to_name (siggnal));
8b93c638 7733 annotate_signal_name_end ();
112e8700 7734 uiout->text (", ");
8b93c638 7735 annotate_signal_string ();
112e8700 7736 uiout->field_string ("signal-meaning", gdb_signal_to_string (siggnal));
012b3a21
WT
7737
7738 if (siggnal == GDB_SIGNAL_SEGV)
7739 handle_segmentation_fault (uiout);
7740
8b93c638 7741 annotate_signal_string_end ();
33d62d64 7742 }
112e8700 7743 uiout->text (".\n");
33d62d64 7744}
252fbfc8 7745
fd664c91
PA
7746void
7747print_no_history_reason (struct ui_out *uiout)
33d62d64 7748{
112e8700 7749 uiout->text ("\nNo more reverse-execution history.\n");
11cf8741 7750}
43ff13b4 7751
0c7e1a46
PA
7752/* Print current location without a level number, if we have changed
7753 functions or hit a breakpoint. Print source line if we have one.
7754 bpstat_print contains the logic deciding in detail what to print,
7755 based on the event(s) that just occurred. */
7756
243a9253
PA
7757static void
7758print_stop_location (struct target_waitstatus *ws)
0c7e1a46
PA
7759{
7760 int bpstat_ret;
f486487f 7761 enum print_what source_flag;
0c7e1a46
PA
7762 int do_frame_printing = 1;
7763 struct thread_info *tp = inferior_thread ();
7764
7765 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
7766 switch (bpstat_ret)
7767 {
7768 case PRINT_UNKNOWN:
7769 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
7770 should) carry around the function and does (or should) use
7771 that when doing a frame comparison. */
7772 if (tp->control.stop_step
7773 && frame_id_eq (tp->control.step_frame_id,
7774 get_frame_id (get_current_frame ()))
f2ffa92b
PA
7775 && (tp->control.step_start_function
7776 == find_pc_function (tp->suspend.stop_pc)))
0c7e1a46
PA
7777 {
7778 /* Finished step, just print source line. */
7779 source_flag = SRC_LINE;
7780 }
7781 else
7782 {
7783 /* Print location and source line. */
7784 source_flag = SRC_AND_LOC;
7785 }
7786 break;
7787 case PRINT_SRC_AND_LOC:
7788 /* Print location and source line. */
7789 source_flag = SRC_AND_LOC;
7790 break;
7791 case PRINT_SRC_ONLY:
7792 source_flag = SRC_LINE;
7793 break;
7794 case PRINT_NOTHING:
7795 /* Something bogus. */
7796 source_flag = SRC_LINE;
7797 do_frame_printing = 0;
7798 break;
7799 default:
7800 internal_error (__FILE__, __LINE__, _("Unknown value."));
7801 }
7802
7803 /* The behavior of this routine with respect to the source
7804 flag is:
7805 SRC_LINE: Print only source line
7806 LOCATION: Print only location
7807 SRC_AND_LOC: Print location and source line. */
7808 if (do_frame_printing)
7809 print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
243a9253
PA
7810}
7811
243a9253
PA
7812/* See infrun.h. */
7813
7814void
4c7d57e7 7815print_stop_event (struct ui_out *uiout, bool displays)
243a9253 7816{
243a9253
PA
7817 struct target_waitstatus last;
7818 ptid_t last_ptid;
7819 struct thread_info *tp;
7820
7821 get_last_target_status (&last_ptid, &last);
7822
67ad9399
TT
7823 {
7824 scoped_restore save_uiout = make_scoped_restore (&current_uiout, uiout);
0c7e1a46 7825
67ad9399 7826 print_stop_location (&last);
243a9253 7827
67ad9399 7828 /* Display the auto-display expressions. */
4c7d57e7
TT
7829 if (displays)
7830 do_displays ();
67ad9399 7831 }
243a9253
PA
7832
7833 tp = inferior_thread ();
7834 if (tp->thread_fsm != NULL
46e3ed7f 7835 && tp->thread_fsm->finished_p ())
243a9253
PA
7836 {
7837 struct return_value_info *rv;
7838
46e3ed7f 7839 rv = tp->thread_fsm->return_value ();
243a9253
PA
7840 if (rv != NULL)
7841 print_return_value (uiout, rv);
7842 }
0c7e1a46
PA
7843}
7844
388a7084
PA
7845/* See infrun.h. */
7846
7847void
7848maybe_remove_breakpoints (void)
7849{
7850 if (!breakpoints_should_be_inserted_now () && target_has_execution)
7851 {
7852 if (remove_breakpoints ())
7853 {
223ffa71 7854 target_terminal::ours_for_output ();
388a7084
PA
7855 printf_filtered (_("Cannot remove breakpoints because "
7856 "program is no longer writable.\nFurther "
7857 "execution is probably impossible.\n"));
7858 }
7859 }
7860}
7861
4c2f2a79
PA
7862/* The execution context that just caused a normal stop. */
7863
7864struct stop_context
7865{
2d844eaf
TT
7866 stop_context ();
7867 ~stop_context ();
7868
7869 DISABLE_COPY_AND_ASSIGN (stop_context);
7870
7871 bool changed () const;
7872
4c2f2a79
PA
7873 /* The stop ID. */
7874 ULONGEST stop_id;
c906108c 7875
4c2f2a79 7876 /* The event PTID. */
c906108c 7877
4c2f2a79
PA
7878 ptid_t ptid;
7879
7880 /* If stopped for a thread event, this is the thread that caused the
7881 stop. */
7882 struct thread_info *thread;
7883
7884 /* The inferior that caused the stop. */
7885 int inf_num;
7886};
7887
2d844eaf 7888/* Initializes a new stop context. If stopped for a thread event, this
4c2f2a79
PA
7889 takes a strong reference to the thread. */
7890
2d844eaf 7891stop_context::stop_context ()
4c2f2a79 7892{
2d844eaf
TT
7893 stop_id = get_stop_id ();
7894 ptid = inferior_ptid;
7895 inf_num = current_inferior ()->num;
4c2f2a79 7896
d7e15655 7897 if (inferior_ptid != null_ptid)
4c2f2a79
PA
7898 {
7899 /* Take a strong reference so that the thread can't be deleted
7900 yet. */
2d844eaf
TT
7901 thread = inferior_thread ();
7902 thread->incref ();
4c2f2a79
PA
7903 }
7904 else
2d844eaf 7905 thread = NULL;
4c2f2a79
PA
7906}
7907
7908/* Destroy a stop context, releasing the strong reference to the
7909 thread, if any, that was taken by the constructor. */
7910
2d844eaf 7911stop_context::~stop_context ()
4c2f2a79 7912{
2d844eaf
TT
7913 if (thread != NULL)
7914 thread->decref ();
4c2f2a79
PA
7915}
7916
7917/* Return true if the current context no longer matches the saved stop
7918 context. */
7919
2d844eaf
TT
7920bool
7921stop_context::changed () const
7922{
7923 if (ptid != inferior_ptid)
7924 return true;
7925 if (inf_num != current_inferior ()->num)
7926 return true;
7927 if (thread != NULL && thread->state != THREAD_STOPPED)
7928 return true;
7929 if (get_stop_id () != stop_id)
7930 return true;
7931 return false;
4c2f2a79
PA
7932}
7933
7934/* See infrun.h. */
7935
7936int
96baa820 7937normal_stop (void)
c906108c 7938{
73b65bb0
DJ
7939 struct target_waitstatus last;
7940 ptid_t last_ptid;
7941
7942 get_last_target_status (&last_ptid, &last);
7943
4c2f2a79
PA
7944 new_stop_id ();
7945
29f49a6a
PA
7946 /* If an exception is thrown from this point on, make sure to
7947 propagate GDB's knowledge of the executing state to the
7948 frontend/user running state. A QUIT is an easy exception to see
7949 here, so do this before any filtered output. */
731f534f
PA
7950
7951 gdb::optional<scoped_finish_thread_state> maybe_finish_thread_state;
7952
c35b1492 7953 if (!non_stop)
731f534f 7954 maybe_finish_thread_state.emplace (minus_one_ptid);
e1316e60
PA
7955 else if (last.kind == TARGET_WAITKIND_SIGNALLED
7956 || last.kind == TARGET_WAITKIND_EXITED)
7957 {
7958 /* On some targets, we may still have live threads in the
7959 inferior when we get a process exit event. E.g., for
7960 "checkpoint", when the current checkpoint/fork exits,
7961 linux-fork.c automatically switches to another fork from
7962 within target_mourn_inferior. */
731f534f
PA
7963 if (inferior_ptid != null_ptid)
7964 maybe_finish_thread_state.emplace (ptid_t (inferior_ptid.pid ()));
e1316e60
PA
7965 }
7966 else if (last.kind != TARGET_WAITKIND_NO_RESUMED)
731f534f 7967 maybe_finish_thread_state.emplace (inferior_ptid);
29f49a6a 7968
b57bacec
PA
7969 /* As we're presenting a stop, and potentially removing breakpoints,
7970 update the thread list so we can tell whether there are threads
7971 running on the target. With target remote, for example, we can
7972 only learn about new threads when we explicitly update the thread
7973 list. Do this before notifying the interpreters about signal
7974 stops, end of stepping ranges, etc., so that the "new thread"
7975 output is emitted before e.g., "Program received signal FOO",
7976 instead of after. */
7977 update_thread_list ();
7978
7979 if (last.kind == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
76727919 7980 gdb::observers::signal_received.notify (inferior_thread ()->suspend.stop_signal);
b57bacec 7981
c906108c
SS
7982 /* As with the notification of thread events, we want to delay
7983 notifying the user that we've switched thread context until
7984 the inferior actually stops.
7985
73b65bb0
DJ
7986 There's no point in saying anything if the inferior has exited.
7987 Note that SIGNALLED here means "exited with a signal", not
b65dc60b
PA
7988 "received a signal".
7989
7990 Also skip saying anything in non-stop mode. In that mode, as we
7991 don't want GDB to switch threads behind the user's back, to avoid
7992 races where the user is typing a command to apply to thread x,
7993 but GDB switches to thread y before the user finishes entering
7994 the command, fetch_inferior_event installs a cleanup to restore
7995 the current thread back to the thread the user had selected right
7996 after this event is handled, so we're not really switching, only
7997 informing of a stop. */
4f8d22e3 7998 if (!non_stop
731f534f 7999 && previous_inferior_ptid != inferior_ptid
73b65bb0
DJ
8000 && target_has_execution
8001 && last.kind != TARGET_WAITKIND_SIGNALLED
0e5bf2a8
PA
8002 && last.kind != TARGET_WAITKIND_EXITED
8003 && last.kind != TARGET_WAITKIND_NO_RESUMED)
c906108c 8004 {
0e454242 8005 SWITCH_THRU_ALL_UIS ()
3b12939d 8006 {
223ffa71 8007 target_terminal::ours_for_output ();
3b12939d 8008 printf_filtered (_("[Switching to %s]\n"),
a068643d 8009 target_pid_to_str (inferior_ptid).c_str ());
3b12939d
PA
8010 annotate_thread_changed ();
8011 }
39f77062 8012 previous_inferior_ptid = inferior_ptid;
c906108c 8013 }
c906108c 8014
0e5bf2a8
PA
8015 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
8016 {
0e454242 8017 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
8018 if (current_ui->prompt_state == PROMPT_BLOCKED)
8019 {
223ffa71 8020 target_terminal::ours_for_output ();
3b12939d
PA
8021 printf_filtered (_("No unwaited-for children left.\n"));
8022 }
0e5bf2a8
PA
8023 }
8024
b57bacec 8025 /* Note: this depends on the update_thread_list call above. */
388a7084 8026 maybe_remove_breakpoints ();
c906108c 8027
c906108c
SS
8028 /* If an auto-display called a function and that got a signal,
8029 delete that auto-display to avoid an infinite recursion. */
8030
8031 if (stopped_by_random_signal)
8032 disable_current_display ();
8033
0e454242 8034 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
8035 {
8036 async_enable_stdin ();
8037 }
c906108c 8038
388a7084 8039 /* Let the user/frontend see the threads as stopped. */
731f534f 8040 maybe_finish_thread_state.reset ();
388a7084
PA
8041
8042 /* Select innermost stack frame - i.e., current frame is frame 0,
8043 and current location is based on that. Handle the case where the
8044 dummy call is returning after being stopped. E.g. the dummy call
8045 previously hit a breakpoint. (If the dummy call returns
8046 normally, we won't reach here.) Do this before the stop hook is
8047 run, so that it doesn't get to see the temporary dummy frame,
8048 which is not where we'll present the stop. */
8049 if (has_stack_frames ())
8050 {
8051 if (stop_stack_dummy == STOP_STACK_DUMMY)
8052 {
8053 /* Pop the empty frame that contains the stack dummy. This
8054 also restores inferior state prior to the call (struct
8055 infcall_suspend_state). */
8056 struct frame_info *frame = get_current_frame ();
8057
8058 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
8059 frame_pop (frame);
8060 /* frame_pop calls reinit_frame_cache as the last thing it
8061 does which means there's now no selected frame. */
8062 }
8063
8064 select_frame (get_current_frame ());
8065
8066 /* Set the current source location. */
8067 set_current_sal_from_frame (get_current_frame ());
8068 }
dd7e2d2b
PA
8069
8070 /* Look up the hook_stop and run it (CLI internally handles problem
8071 of stop_command's pre-hook not existing). */
4c2f2a79
PA
8072 if (stop_command != NULL)
8073 {
2d844eaf 8074 stop_context saved_context;
4c2f2a79 8075
a70b8144 8076 try
bf469271
PA
8077 {
8078 execute_cmd_pre_hook (stop_command);
8079 }
230d2906 8080 catch (const gdb_exception &ex)
bf469271
PA
8081 {
8082 exception_fprintf (gdb_stderr, ex,
8083 "Error while running hook_stop:\n");
8084 }
4c2f2a79
PA
8085
8086 /* If the stop hook resumes the target, then there's no point in
8087 trying to notify about the previous stop; its context is
8088 gone. Likewise if the command switches thread or inferior --
8089 the observers would print a stop for the wrong
8090 thread/inferior. */
2d844eaf
TT
8091 if (saved_context.changed ())
8092 return 1;
4c2f2a79 8093 }
dd7e2d2b 8094
388a7084
PA
8095 /* Notify observers about the stop. This is where the interpreters
8096 print the stop event. */
d7e15655 8097 if (inferior_ptid != null_ptid)
76727919 8098 gdb::observers::normal_stop.notify (inferior_thread ()->control.stop_bpstat,
388a7084
PA
8099 stop_print_frame);
8100 else
76727919 8101 gdb::observers::normal_stop.notify (NULL, stop_print_frame);
347bddb7 8102
243a9253
PA
8103 annotate_stopped ();
8104
48844aa6
PA
8105 if (target_has_execution)
8106 {
8107 if (last.kind != TARGET_WAITKIND_SIGNALLED
fe726667
PA
8108 && last.kind != TARGET_WAITKIND_EXITED
8109 && last.kind != TARGET_WAITKIND_NO_RESUMED)
48844aa6
PA
8110 /* Delete the breakpoint we stopped at, if it wants to be deleted.
8111 Delete any breakpoint that is to be deleted at the next stop. */
16c381f0 8112 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
94cc34af 8113 }
6c95b8df
PA
8114
8115 /* Try to get rid of automatically added inferiors that are no
8116 longer needed. Keeping those around slows down things linearly.
8117 Note that this never removes the current inferior. */
8118 prune_inferiors ();
4c2f2a79
PA
8119
8120 return 0;
c906108c 8121}
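/* Aside (illustrative, not part of the original source): the stop hook
   consulted by normal_stop above is the user-defined "hook-stop", e.g.:

     (gdb) define hook-stop
     > info registers
     > end

   The hook runs before the stop is presented to interpreters, and if it
   resumes the target or switches thread/inferior, the stop notification
   is skipped, which is what the stop_context check implements.  */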
c906108c 8122\f
c5aa993b 8123int
96baa820 8124signal_stop_state (int signo)
c906108c 8125{
d6b48e9c 8126 return signal_stop[signo];
c906108c
SS
8127}
8128
c5aa993b 8129int
96baa820 8130signal_print_state (int signo)
c906108c
SS
8131{
8132 return signal_print[signo];
8133}
8134
c5aa993b 8135int
96baa820 8136signal_pass_state (int signo)
c906108c
SS
8137{
8138 return signal_program[signo];
8139}
8140
2455069d
UW
8141static void
8142signal_cache_update (int signo)
8143{
8144 if (signo == -1)
8145 {
a493e3e2 8146 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
2455069d
UW
8147 signal_cache_update (signo);
8148
8149 return;
8150 }
8151
8152 signal_pass[signo] = (signal_stop[signo] == 0
8153 && signal_print[signo] == 0
ab04a2af
TT
8154 && signal_program[signo] == 1
8155 && signal_catch[signo] == 0);
2455069d
UW
8156}
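/* Worked example (illustrative, not part of the original source): after
   "handle SIGUSR1 nostop noprint pass" the per-signal tables hold
   signal_stop = 0, signal_print = 0, signal_program = 1 and
   signal_catch = 0 for SIGUSR1, so the cached signal_pass entry
   computed above becomes 1 and the target may deliver the signal
   straight to the inferior.  */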
8157
488f131b 8158int
7bda5e4a 8159signal_stop_update (int signo, int state)
d4f3574e
SS
8160{
8161 int ret = signal_stop[signo];
abbb1732 8162
d4f3574e 8163 signal_stop[signo] = state;
2455069d 8164 signal_cache_update (signo);
d4f3574e
SS
8165 return ret;
8166}
8167
488f131b 8168int
7bda5e4a 8169signal_print_update (int signo, int state)
d4f3574e
SS
8170{
8171 int ret = signal_print[signo];
abbb1732 8172
d4f3574e 8173 signal_print[signo] = state;
2455069d 8174 signal_cache_update (signo);
d4f3574e
SS
8175 return ret;
8176}
8177
488f131b 8178int
7bda5e4a 8179signal_pass_update (int signo, int state)
d4f3574e
SS
8180{
8181 int ret = signal_program[signo];
abbb1732 8182
d4f3574e 8183 signal_program[signo] = state;
2455069d 8184 signal_cache_update (signo);
d4f3574e
SS
8185 return ret;
8186}
8187
ab04a2af
TT
8188/* Update the global 'signal_catch' from INFO and notify the
8189 target. */
8190
8191void
8192signal_catch_update (const unsigned int *info)
8193{
8194 int i;
8195
8196 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
8197 signal_catch[i] = info[i] > 0;
8198 signal_cache_update (-1);
adc6a863 8199 target_pass_signals (signal_pass);
ab04a2af
TT
8200}
8201
c906108c 8202static void
96baa820 8203sig_print_header (void)
c906108c 8204{
3e43a32a
MS
8205 printf_filtered (_("Signal Stop\tPrint\tPass "
8206 "to program\tDescription\n"));
c906108c
SS
8207}
8208
8209static void
2ea28649 8210sig_print_info (enum gdb_signal oursig)
c906108c 8211{
2ea28649 8212 const char *name = gdb_signal_to_name (oursig);
c906108c 8213 int name_padding = 13 - strlen (name);
96baa820 8214
c906108c
SS
8215 if (name_padding <= 0)
8216 name_padding = 0;
8217
8218 printf_filtered ("%s", name);
488f131b 8219 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
c906108c
SS
8220 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
8221 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
8222 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
2ea28649 8223 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
c906108c
SS
8224}
8225
8226/* Specify how various signals in the inferior should be handled. */
8227
8228static void
0b39b52e 8229handle_command (const char *args, int from_tty)
c906108c 8230{
c906108c 8231 int digits, wordlen;
b926417a 8232 int sigfirst, siglast;
2ea28649 8233 enum gdb_signal oursig;
c906108c 8234 int allsigs;
c906108c
SS
8235
8236 if (args == NULL)
8237 {
e2e0b3e5 8238 error_no_arg (_("signal to handle"));
c906108c
SS
8239 }
8240
1777feb0 8241 /* Allocate and zero an array of flags for which signals to handle. */
c906108c 8242
adc6a863
PA
8243 const size_t nsigs = GDB_SIGNAL_LAST;
8244 unsigned char sigs[nsigs] {};
c906108c 8245
1777feb0 8246 /* Break the command line up into args. */
c906108c 8247
773a1edc 8248 gdb_argv built_argv (args);
c906108c
SS
8249
8250 /* Walk through the args, looking for signal numbers, signal names, and
8251 actions. Signal numbers and signal names may be interspersed with
8252 actions, with the actions being performed for all signals cumulatively
1777feb0 8253 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
c906108c 8254
773a1edc 8255 for (char *arg : built_argv)
c906108c 8256 {
773a1edc
TT
8257 wordlen = strlen (arg);
8258 for (digits = 0; isdigit (arg[digits]); digits++)
c906108c
SS
8259 {;
8260 }
8261 allsigs = 0;
8262 sigfirst = siglast = -1;
8263
773a1edc 8264 if (wordlen >= 1 && !strncmp (arg, "all", wordlen))
c906108c
SS
8265 {
8266 /* Apply action to all signals except those used by the
1777feb0 8267 debugger. Silently skip those. */
c906108c
SS
8268 allsigs = 1;
8269 sigfirst = 0;
8270 siglast = nsigs - 1;
8271 }
773a1edc 8272 else if (wordlen >= 1 && !strncmp (arg, "stop", wordlen))
c906108c
SS
8273 {
8274 SET_SIGS (nsigs, sigs, signal_stop);
8275 SET_SIGS (nsigs, sigs, signal_print);
8276 }
773a1edc 8277 else if (wordlen >= 1 && !strncmp (arg, "ignore", wordlen))
c906108c
SS
8278 {
8279 UNSET_SIGS (nsigs, sigs, signal_program);
8280 }
773a1edc 8281 else if (wordlen >= 2 && !strncmp (arg, "print", wordlen))
c906108c
SS
8282 {
8283 SET_SIGS (nsigs, sigs, signal_print);
8284 }
773a1edc 8285 else if (wordlen >= 2 && !strncmp (arg, "pass", wordlen))
c906108c
SS
8286 {
8287 SET_SIGS (nsigs, sigs, signal_program);
8288 }
773a1edc 8289 else if (wordlen >= 3 && !strncmp (arg, "nostop", wordlen))
c906108c
SS
8290 {
8291 UNSET_SIGS (nsigs, sigs, signal_stop);
8292 }
773a1edc 8293 else if (wordlen >= 3 && !strncmp (arg, "noignore", wordlen))
c906108c
SS
8294 {
8295 SET_SIGS (nsigs, sigs, signal_program);
8296 }
773a1edc 8297 else if (wordlen >= 4 && !strncmp (arg, "noprint", wordlen))
c906108c
SS
8298 {
8299 UNSET_SIGS (nsigs, sigs, signal_print);
8300 UNSET_SIGS (nsigs, sigs, signal_stop);
8301 }
773a1edc 8302 else if (wordlen >= 4 && !strncmp (arg, "nopass", wordlen))
c906108c
SS
8303 {
8304 UNSET_SIGS (nsigs, sigs, signal_program);
8305 }
8306 else if (digits > 0)
8307 {
8308 /* It is numeric. The numeric signal refers to our own
8309 internal signal numbering from target.h, not to host/target
8310 signal number. This is a feature; users really should be
8311 using symbolic names anyway, and the common ones like
8312 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
8313
8314 sigfirst = siglast = (int)
773a1edc
TT
8315 gdb_signal_from_command (atoi (arg));
8316 if (arg[digits] == '-')
c906108c
SS
8317 {
8318 siglast = (int)
773a1edc 8319 gdb_signal_from_command (atoi (arg + digits + 1));
c906108c
SS
8320 }
8321 if (sigfirst > siglast)
8322 {
1777feb0 8323 /* Bet he didn't figure we'd think of this case... */
b926417a 8324 std::swap (sigfirst, siglast);
c906108c
SS
8325 }
8326 }
8327 else
8328 {
773a1edc 8329 oursig = gdb_signal_from_name (arg);
a493e3e2 8330 if (oursig != GDB_SIGNAL_UNKNOWN)
c906108c
SS
8331 {
8332 sigfirst = siglast = (int) oursig;
8333 }
8334 else
8335 {
8336 /* Not a number and not a recognized flag word => complain. */
773a1edc 8337 error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg);
c906108c
SS
8338 }
8339 }
8340
8341 /* If any signal numbers or symbol names were found, set flags for
1777feb0 8342 which signals to apply actions to. */
c906108c 8343
b926417a 8344 for (int signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
c906108c 8345 {
2ea28649 8346 switch ((enum gdb_signal) signum)
c906108c 8347 {
a493e3e2
PA
8348 case GDB_SIGNAL_TRAP:
8349 case GDB_SIGNAL_INT:
c906108c
SS
8350 if (!allsigs && !sigs[signum])
8351 {
9e2f0ad4 8352 if (query (_("%s is used by the debugger.\n\
3e43a32a 8353Are you sure you want to change it? "),
2ea28649 8354 gdb_signal_to_name ((enum gdb_signal) signum)))
c906108c
SS
8355 {
8356 sigs[signum] = 1;
8357 }
8358 else
c119e040 8359 printf_unfiltered (_("Not confirmed, unchanged.\n"));
c906108c
SS
8360 }
8361 break;
a493e3e2
PA
8362 case GDB_SIGNAL_0:
8363 case GDB_SIGNAL_DEFAULT:
8364 case GDB_SIGNAL_UNKNOWN:
c906108c
SS
8365 /* Make sure that "all" doesn't print these. */
8366 break;
8367 default:
8368 sigs[signum] = 1;
8369 break;
8370 }
8371 }
c906108c
SS
8372 }
8373
b926417a 8374 for (int signum = 0; signum < nsigs; signum++)
3a031f65
PA
8375 if (sigs[signum])
8376 {
2455069d 8377 signal_cache_update (-1);
adc6a863
PA
8378 target_pass_signals (signal_pass);
8379 target_program_signals (signal_program);
c906108c 8380
3a031f65
PA
8381 if (from_tty)
8382 {
8383 /* Show the results. */
8384 sig_print_header ();
8385 for (; signum < nsigs; signum++)
8386 if (sigs[signum])
aead7601 8387 sig_print_info ((enum gdb_signal) signum);
3a031f65
PA
8388 }
8389
8390 break;
8391 }
c906108c
SS
8392}
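/* Usage sketch (illustrative, not part of the original source):

     (gdb) handle SIGUSR1 nostop noprint pass
     (gdb) handle 5-9 print
     (gdb) handle all nopass

   Signal names, numbers, ranges of the form LOW-HIGH and action
   keywords may be interspersed; the actions accumulate and are applied
   to every signal mentioned, as the parsing loop above implements.  */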
8393
de0bea00
MF
8394/* Complete the "handle" command. */
8395
eb3ff9a5 8396static void
de0bea00 8397handle_completer (struct cmd_list_element *ignore,
eb3ff9a5 8398 completion_tracker &tracker,
6f937416 8399 const char *text, const char *word)
de0bea00 8400{
de0bea00
MF
8401 static const char * const keywords[] =
8402 {
8403 "all",
8404 "stop",
8405 "ignore",
8406 "print",
8407 "pass",
8408 "nostop",
8409 "noignore",
8410 "noprint",
8411 "nopass",
8412 NULL,
8413 };
8414
eb3ff9a5
PA
8415 signal_completer (ignore, tracker, text, word);
8416 complete_on_enum (tracker, keywords, word, word);
de0bea00
MF
8417}
8418
2ea28649
PA
8419enum gdb_signal
8420gdb_signal_from_command (int num)
ed01b82c
PA
8421{
8422 if (num >= 1 && num <= 15)
2ea28649 8423 return (enum gdb_signal) num;
ed01b82c
PA
8424 error (_("Only signals 1-15 are valid as numeric signals.\n\
8425Use \"info signals\" for a list of symbolic signals."));
8426}
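/* Illustrative note (not part of the original source): the 1-15 range
   works because the low GDB_SIGNAL_* values mirror the traditional
   Unix numbering, e.g. gdb_signal_from_command (2) yields
   GDB_SIGNAL_INT and gdb_signal_from_command (11) yields
   GDB_SIGNAL_SEGV.  */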
8427
c906108c
SS
8428/* Print current contents of the tables set by the handle command.
8429 It is possible we should just be printing signals actually used
8430 by the current target (but for things to work right when switching
8431 targets, all signals should be in the signal tables). */
8432
8433static void
1d12d88f 8434info_signals_command (const char *signum_exp, int from_tty)
c906108c 8435{
2ea28649 8436 enum gdb_signal oursig;
abbb1732 8437
c906108c
SS
8438 sig_print_header ();
8439
8440 if (signum_exp)
8441 {
8442 /* First see if this is a symbol name. */
2ea28649 8443 oursig = gdb_signal_from_name (signum_exp);
a493e3e2 8444 if (oursig == GDB_SIGNAL_UNKNOWN)
c906108c
SS
8445 {
8446 /* No, try numeric. */
8447 oursig =
2ea28649 8448 gdb_signal_from_command (parse_and_eval_long (signum_exp));
c906108c
SS
8449 }
8450 sig_print_info (oursig);
8451 return;
8452 }
8453
8454 printf_filtered ("\n");
8455 /* These ugly casts brought to you by the native VAX compiler. */
a493e3e2
PA
8456 for (oursig = GDB_SIGNAL_FIRST;
8457 (int) oursig < (int) GDB_SIGNAL_LAST;
2ea28649 8458 oursig = (enum gdb_signal) ((int) oursig + 1))
c906108c
SS
8459 {
8460 QUIT;
8461
a493e3e2
PA
8462 if (oursig != GDB_SIGNAL_UNKNOWN
8463 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
c906108c
SS
8464 sig_print_info (oursig);
8465 }
8466
3e43a32a
MS
8467 printf_filtered (_("\nUse the \"handle\" command "
8468 "to change these tables.\n"));
c906108c 8469}
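/* Example session (illustrative, not part of the original source;
   column spacing approximated):

     (gdb) info signals SIGINT
     Signal        Stop      Print   Pass to program  Description
     SIGINT        Yes       Yes     No               Interrupt

   With the defaults installed in _initialize_infrun below, SIGINT
   stops and is printed but is not passed to the program.  */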
4aa995e1
PA
8470
8471/* The $_siginfo convenience variable is a bit special. We don't know
8472 for sure the type of the value until we actually have a chance to
7a9dd1b2 8473 fetch the data. The type can change depending on gdbarch, so it is
4aa995e1
PA
8474 also dependent on which thread you have selected. This is handled by:
8475
8476 1. making $_siginfo be an internalvar that creates a new value on
8477 access.
8478
8479 2. making the value of $_siginfo be an lval_computed value. */
8480
8481/* This function implements the lval_computed support for reading a
8482 $_siginfo value. */
8483
8484static void
8485siginfo_value_read (struct value *v)
8486{
8487 LONGEST transferred;
8488
a911d87a
PA
8489 /* If we can access registers, we can also access $_siginfo, and
8490 vice versa. */
8491 validate_registers_access ();
c709acd1 8492
4aa995e1 8493 transferred =
8b88a78e 8494 target_read (current_top_target (), TARGET_OBJECT_SIGNAL_INFO,
4aa995e1
PA
8495 NULL,
8496 value_contents_all_raw (v),
8497 value_offset (v),
8498 TYPE_LENGTH (value_type (v)));
8499
8500 if (transferred != TYPE_LENGTH (value_type (v)))
8501 error (_("Unable to read siginfo"));
8502}
8503
8504/* This function implements the lval_computed support for writing a
8505 $_siginfo value. */
8506
8507static void
8508siginfo_value_write (struct value *v, struct value *fromval)
8509{
8510 LONGEST transferred;
8511
a911d87a
PA
8512 /* If we can access registers, we can also access $_siginfo, and
8513 vice versa. */
8514 validate_registers_access ();
c709acd1 8515
8b88a78e 8516 transferred = target_write (current_top_target (),
4aa995e1
PA
8517 TARGET_OBJECT_SIGNAL_INFO,
8518 NULL,
8519 value_contents_all_raw (fromval),
8520 value_offset (v),
8521 TYPE_LENGTH (value_type (fromval)));
8522
8523 if (transferred != TYPE_LENGTH (value_type (fromval)))
8524 error (_("Unable to write siginfo"));
8525}
8526
c8f2448a 8527static const struct lval_funcs siginfo_value_funcs =
4aa995e1
PA
8528 {
8529 siginfo_value_read,
8530 siginfo_value_write
8531 };
8532
8533/* Return a new value with the correct type for the siginfo object of
78267919
UW
8534 the current thread using architecture GDBARCH. Return a void value
8535 if there's no object available. */
4aa995e1 8536
2c0b251b 8537static struct value *
22d2b532
SDJ
8538siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
8539 void *ignore)
4aa995e1 8540{
4aa995e1 8541 if (target_has_stack
d7e15655 8542 && inferior_ptid != null_ptid
78267919 8543 && gdbarch_get_siginfo_type_p (gdbarch))
4aa995e1 8544 {
78267919 8545 struct type *type = gdbarch_get_siginfo_type (gdbarch);
abbb1732 8546
78267919 8547 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
4aa995e1
PA
8548 }
8549
78267919 8550 return allocate_value (builtin_type (gdbarch)->builtin_void);
4aa995e1
PA
8551}
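/* Usage sketch (illustrative, not part of the original source): on a
   target whose gdbarch defines a siginfo type, the computed value
   returned above lets the user inspect or modify the pending signal
   data, e.g.:

     (gdb) print $_siginfo.si_signo

   The available field names (such as the Linux-style
   $_siginfo._sifields._sigfault.si_addr) depend entirely on
   gdbarch_get_siginfo_type for the current architecture.  */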
8552
c906108c 8553\f
16c381f0
JK
8554/* infcall_suspend_state contains state about the program itself like its
8555 registers and any signal it received when it last stopped.
8556 This state must be restored regardless of how the inferior function call
8557 ends (either successfully, or after it hits a breakpoint or signal)
8558 if the program is to properly continue where it left off. */
8559
6bf78e29 8560class infcall_suspend_state
7a292a7a 8561{
6bf78e29
AB
8562public:
8563 /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
8564 once the inferior function call has finished. */
8565 infcall_suspend_state (struct gdbarch *gdbarch,
8566 const struct thread_info *tp,
8567 struct regcache *regcache)
8568 : m_thread_suspend (tp->suspend),
8569 m_registers (new readonly_detached_regcache (*regcache))
8570 {
8571 gdb::unique_xmalloc_ptr<gdb_byte> siginfo_data;
8572
8573 if (gdbarch_get_siginfo_type_p (gdbarch))
8574 {
8575 struct type *type = gdbarch_get_siginfo_type (gdbarch);
8576 size_t len = TYPE_LENGTH (type);
8577
8578 siginfo_data.reset ((gdb_byte *) xmalloc (len));
8579
8580 if (target_read (current_top_target (), TARGET_OBJECT_SIGNAL_INFO, NULL,
8581 siginfo_data.get (), 0, len) != len)
8582 {
8583 /* Errors ignored. */
8584 siginfo_data.reset (nullptr);
8585 }
8586 }
8587
8588 if (siginfo_data)
8589 {
8590 m_siginfo_gdbarch = gdbarch;
8591 m_siginfo_data = std::move (siginfo_data);
8592 }
8593 }
8594
8595 /* Return a pointer to the stored register state. */
16c381f0 8596
6bf78e29
AB
8597 readonly_detached_regcache *registers () const
8598 {
8599 return m_registers.get ();
8600 }
8601
8602 /* Restores the stored state into GDBARCH, TP, and REGCACHE. */
8603
8604 void restore (struct gdbarch *gdbarch,
8605 struct thread_info *tp,
8606 struct regcache *regcache) const
8607 {
8608 tp->suspend = m_thread_suspend;
8609
8610 if (m_siginfo_gdbarch == gdbarch)
8611 {
8612 struct type *type = gdbarch_get_siginfo_type (gdbarch);
8613
8614 /* Errors ignored. */
8615 target_write (current_top_target (), TARGET_OBJECT_SIGNAL_INFO, NULL,
8616 m_siginfo_data.get (), 0, TYPE_LENGTH (type));
8617 }
8618
8619 /* The inferior can be gone if the user types "print exit(0)"
8620 (and perhaps other times). */
8621 if (target_has_execution)
8622 /* NB: The register write goes through to the target. */
8623 regcache->restore (registers ());
8624 }
8625
8626private:
8627 /* How the current thread stopped before the inferior function call was
8628 executed. */
8629 struct thread_suspend_state m_thread_suspend;
8630
8631 /* The registers before the inferior function call was executed. */
8632 std::unique_ptr<readonly_detached_regcache> m_registers;
1736ad11 8633
35515841 8634 /* Format of SIGINFO_DATA or NULL if it is not present. */
6bf78e29 8635 struct gdbarch *m_siginfo_gdbarch = nullptr;
1736ad11
JK
8636
8637 /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
8638 TYPE_LENGTH (gdbarch_get_siginfo_type ()). For different gdbarch the
8639 content would be invalid. */
6bf78e29 8640 gdb::unique_xmalloc_ptr<gdb_byte> m_siginfo_data;
b89667eb
DE
8641};
8642
cb524840
TT
8643infcall_suspend_state_up
8644save_infcall_suspend_state ()
b89667eb 8645{
b89667eb 8646 struct thread_info *tp = inferior_thread ();
1736ad11 8647 struct regcache *regcache = get_current_regcache ();
ac7936df 8648 struct gdbarch *gdbarch = regcache->arch ();
1736ad11 8649
6bf78e29
AB
8650 infcall_suspend_state_up inf_state
8651 (new struct infcall_suspend_state (gdbarch, tp, regcache));
1736ad11 8652
6bf78e29
AB
8653 /* Having saved the current state, adjust the thread state, discarding
8654 any stop signal information. The stop signal is not useful when
8655 starting an inferior function call, and run_inferior_call will not use
8656 the signal due to its `proceed' call with GDB_SIGNAL_0. */
a493e3e2 8657 tp->suspend.stop_signal = GDB_SIGNAL_0;
35515841 8658
b89667eb
DE
8659 return inf_state;
8660}
8661
8662/* Restore inferior session state to INF_STATE. */
8663
8664void
16c381f0 8665restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
b89667eb
DE
8666{
8667 struct thread_info *tp = inferior_thread ();
1736ad11 8668 struct regcache *regcache = get_current_regcache ();
ac7936df 8669 struct gdbarch *gdbarch = regcache->arch ();
b89667eb 8670
6bf78e29 8671 inf_state->restore (gdbarch, tp, regcache);
16c381f0 8672 discard_infcall_suspend_state (inf_state);
b89667eb
DE
8673}
8674
b89667eb 8675void
16c381f0 8676discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
b89667eb 8677{
dd848631 8678 delete inf_state;
b89667eb
DE
8679}
8680
daf6667d 8681readonly_detached_regcache *
16c381f0 8682get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
b89667eb 8683{
6bf78e29 8684 return inf_state->registers ();
b89667eb
DE
8685}
8686
16c381f0
JK
8687/* infcall_control_state contains state regarding gdb's control of the
8688 inferior itself like stepping control. It also contains session state like
8689 the user's currently selected frame. */
b89667eb 8690
16c381f0 8691struct infcall_control_state
b89667eb 8692{
16c381f0
JK
8693 struct thread_control_state thread_control;
8694 struct inferior_control_state inferior_control;
d82142e2
JK
8695
8696 /* Other fields: */
ee841dd8
TT
8697 enum stop_stack_kind stop_stack_dummy = STOP_NONE;
8698 int stopped_by_random_signal = 0;
7a292a7a 8699
b89667eb 8700 /* ID if the selected frame when the inferior function call was made. */
ee841dd8 8701 struct frame_id selected_frame_id {};
7a292a7a
SS
8702};
8703
c906108c 8704/* Save all of the information associated with the inferior<==>gdb
b89667eb 8705 connection. */
c906108c 8706
cb524840
TT
8707infcall_control_state_up
8708save_infcall_control_state ()
c906108c 8709{
cb524840 8710 infcall_control_state_up inf_status (new struct infcall_control_state);
4e1c45ea 8711 struct thread_info *tp = inferior_thread ();
d6b48e9c 8712 struct inferior *inf = current_inferior ();
7a292a7a 8713
16c381f0
JK
8714 inf_status->thread_control = tp->control;
8715 inf_status->inferior_control = inf->control;
d82142e2 8716
8358c15c 8717 tp->control.step_resume_breakpoint = NULL;
5b79abe7 8718 tp->control.exception_resume_breakpoint = NULL;
8358c15c 8719
16c381f0
JK
8720 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
8721 chain. If caller's caller is walking the chain, they'll be happier if we
8722 hand them back the original chain when restore_infcall_control_state is
8723 called. */
8724 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
d82142e2
JK
8725
8726 /* Other fields: */
8727 inf_status->stop_stack_dummy = stop_stack_dummy;
8728 inf_status->stopped_by_random_signal = stopped_by_random_signal;
c5aa993b 8729
206415a3 8730 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
b89667eb 8731
7a292a7a 8732 return inf_status;
c906108c
SS
8733}
8734
bf469271
PA
8735static void
8736restore_selected_frame (const frame_id &fid)
c906108c 8737{
bf469271 8738 frame_info *frame = frame_find_by_id (fid);
c906108c 8739
aa0cd9c1
AC
8740 /* If FID does not identify a frame in the current stack, the
8741 previously selected frame cannot be restored. */
101dcfbe 8742 if (frame == NULL)
c906108c 8743 {
8a3fe4f8 8744 warning (_("Unable to restore previously selected frame."));
bf469271 8745 return;
c906108c
SS
8746 }
8747
0f7d239c 8748 select_frame (frame);
c906108c
SS
8749}
8750
b89667eb
DE
8751/* Restore inferior session state to INF_STATUS. */
8752
c906108c 8753void
16c381f0 8754restore_infcall_control_state (struct infcall_control_state *inf_status)
c906108c 8755{
4e1c45ea 8756 struct thread_info *tp = inferior_thread ();
d6b48e9c 8757 struct inferior *inf = current_inferior ();
4e1c45ea 8758
8358c15c
JK
8759 if (tp->control.step_resume_breakpoint)
8760 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
8761
5b79abe7
TT
8762 if (tp->control.exception_resume_breakpoint)
8763 tp->control.exception_resume_breakpoint->disposition
8764 = disp_del_at_next_stop;
8765
d82142e2 8766 /* Handle the bpstat_copy of the chain. */
16c381f0 8767 bpstat_clear (&tp->control.stop_bpstat);
d82142e2 8768
16c381f0
JK
8769 tp->control = inf_status->thread_control;
8770 inf->control = inf_status->inferior_control;
d82142e2
JK
8771
8772 /* Other fields: */
8773 stop_stack_dummy = inf_status->stop_stack_dummy;
8774 stopped_by_random_signal = inf_status->stopped_by_random_signal;
c906108c 8775
b89667eb 8776 if (target_has_stack)
c906108c 8777 {
bf469271 8778 /* The point of the try/catch is that if the stack is clobbered,
101dcfbe
AC
8779 walking the stack might encounter a garbage pointer and
8780 error() trying to dereference it. */
a70b8144 8781 try
bf469271
PA
8782 {
8783 restore_selected_frame (inf_status->selected_frame_id);
8784 }
230d2906 8785 catch (const gdb_exception_error &ex)
bf469271
PA
8786 {
8787 exception_fprintf (gdb_stderr, ex,
8788 "Unable to restore previously selected frame:\n");
8789 /* Error in restoring the selected frame. Select the
8790 innermost frame. */
8791 select_frame (get_current_frame ());
8792 }
c906108c 8793 }
c906108c 8794
ee841dd8 8795 delete inf_status;
7a292a7a 8796}
c906108c
SS
8797
8798void
16c381f0 8799discard_infcall_control_state (struct infcall_control_state *inf_status)
7a292a7a 8800{
8358c15c
JK
8801 if (inf_status->thread_control.step_resume_breakpoint)
8802 inf_status->thread_control.step_resume_breakpoint->disposition
8803 = disp_del_at_next_stop;
8804
5b79abe7
TT
8805 if (inf_status->thread_control.exception_resume_breakpoint)
8806 inf_status->thread_control.exception_resume_breakpoint->disposition
8807 = disp_del_at_next_stop;
8808
1777feb0 8809 /* See save_infcall_control_state for info on stop_bpstat. */
16c381f0 8810 bpstat_clear (&inf_status->thread_control.stop_bpstat);
8358c15c 8811
ee841dd8 8812 delete inf_status;
7a292a7a 8813}
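/* Usage sketch (illustrative, not part of the original source; the
   real caller lives in infcall.c and is more involved).  A caller
   brackets an inferior function call roughly like this, where
   CALL_SUCCEEDED stands for whatever condition the caller actually
   tests:

     infcall_control_state_up status = save_infcall_control_state ();
     ... set up the dummy frame and run the call ...
     if (CALL_SUCCEEDED)
       discard_infcall_control_state (status.release ());
     else
       restore_infcall_control_state (status.release ());

   restore_infcall_control_state puts back the stepping state and the
   selected frame before freeing the object; discard_infcall_control_state
   drops the saved state without restoring it.  */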
b89667eb 8814\f
7f89fd65 8815/* See infrun.h. */
0c557179
SDJ
8816
8817void
8818clear_exit_convenience_vars (void)
8819{
8820 clear_internalvar (lookup_internalvar ("_exitsignal"));
8821 clear_internalvar (lookup_internalvar ("_exitcode"));
8822}
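/* Illustrative note (not part of the original source): only one of the
   two variables is meaningful after a given exit, e.g.:

     (gdb) run
     [Inferior 1 (process 4242) exited with code 01]
     (gdb) print $_exitcode
     $1 = 1

   $_exitsignal is set instead when the inferior is terminated by a
   signal.  */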
c5aa993b 8823\f
488f131b 8824
b2175913
MS
8825/* User interface for reverse debugging:
8826 Set exec-direction / show exec-direction commands
8827 (returns an error unless the target supports reverse execution). */
8828
170742de 8829enum exec_direction_kind execution_direction = EXEC_FORWARD;
b2175913
MS
8830static const char exec_forward[] = "forward";
8831static const char exec_reverse[] = "reverse";
8832static const char *exec_direction = exec_forward;
40478521 8833static const char *const exec_direction_names[] = {
b2175913
MS
8834 exec_forward,
8835 exec_reverse,
8836 NULL
8837};
8838
8839static void
eb4c3f4a 8840set_exec_direction_func (const char *args, int from_tty,
b2175913
MS
8841 struct cmd_list_element *cmd)
8842{
8843 if (target_can_execute_reverse)
8844 {
8845 if (!strcmp (exec_direction, exec_forward))
8846 execution_direction = EXEC_FORWARD;
8847 else if (!strcmp (exec_direction, exec_reverse))
8848 execution_direction = EXEC_REVERSE;
8849 }
8bbed405
MS
8850 else
8851 {
8852 exec_direction = exec_forward;
8853 error (_("Target does not support this operation."));
8854 }
b2175913
MS
8855}
8856
8857static void
8858show_exec_direction_func (struct ui_file *out, int from_tty,
8859 struct cmd_list_element *cmd, const char *value)
8860{
8861 switch (execution_direction) {
8862 case EXEC_FORWARD:
8863 fprintf_filtered (out, _("Forward.\n"));
8864 break;
8865 case EXEC_REVERSE:
8866 fprintf_filtered (out, _("Reverse.\n"));
8867 break;
b2175913 8868 default:
d8b34453
PA
8869 internal_error (__FILE__, __LINE__,
8870 _("bogus execution_direction value: %d"),
8871 (int) execution_direction);
b2175913
MS
8872 }
8873}
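/* Usage sketch (illustrative, not part of the original source): on a
   target that can execute in reverse, for instance after enabling
   "record", the direction is flipped with:

     (gdb) record
     (gdb) set exec-direction reverse
     (gdb) continue
     (gdb) set exec-direction forward

   While the direction is reverse, execution commands such as
   "continue" and "step" run the program backwards.  On other targets
   the setter above restores "forward" and errors out with "Target
   does not support this operation."  */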
8874
d4db2f36
PA
8875static void
8876show_schedule_multiple (struct ui_file *file, int from_tty,
8877 struct cmd_list_element *c, const char *value)
8878{
3e43a32a
MS
8879 fprintf_filtered (file, _("Resuming the execution of threads "
8880 "of all processes is %s.\n"), value);
d4db2f36 8881}
ad52ddc6 8882
22d2b532
SDJ
8883/* Implementation of `siginfo' variable. */
8884
8885static const struct internalvar_funcs siginfo_funcs =
8886{
8887 siginfo_make_value,
8888 NULL,
8889 NULL
8890};
8891
372316f1
PA
8892/* Callback for infrun's target events source. This is marked when a
8893 thread has a pending status to process. */
8894
8895static void
8896infrun_async_inferior_event_handler (gdb_client_data data)
8897{
372316f1
PA
8898 inferior_event_handler (INF_REG_EVENT, NULL);
8899}
8900
c906108c 8901void
96baa820 8902_initialize_infrun (void)
c906108c 8903{
de0bea00 8904 struct cmd_list_element *c;
c906108c 8905
372316f1
PA
8906 /* Register extra event sources in the event loop. */
8907 infrun_async_inferior_event_token
8908 = create_async_event_handler (infrun_async_inferior_event_handler, NULL);
8909
11db9430 8910 add_info ("signals", info_signals_command, _("\
1bedd215
AC
8911What debugger does when program gets various signals.\n\
8912Specify a signal as argument to print info on that signal only."));
c906108c
SS
8913 add_info_alias ("handle", "signals", 0);
8914
de0bea00 8915 c = add_com ("handle", class_run, handle_command, _("\
dfbd5e7b 8916Specify how to handle signals.\n\
486c7739 8917Usage: handle SIGNAL [ACTIONS]\n\
c906108c 8918Args are signals and actions to apply to those signals.\n\
dfbd5e7b 8919If no actions are specified, the current settings for the specified signals\n\
486c7739
MF
8920will be displayed instead.\n\
8921\n\
c906108c
SS
8922Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
8923from 1-15 are allowed for compatibility with old versions of GDB.\n\
8924Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
8925The special arg \"all\" is recognized to mean all signals except those\n\
1bedd215 8926used by the debugger, typically SIGTRAP and SIGINT.\n\
486c7739 8927\n\
1bedd215 8928Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
c906108c
SS
8929\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
8930Stop means reenter debugger if this signal happens (implies print).\n\
8931Print means print a message if this signal happens.\n\
8932Pass means let program see this signal; otherwise program doesn't know.\n\
8933Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
dfbd5e7b
PA
8934Pass and Stop may be combined.\n\
8935\n\
8936Multiple signals may be specified. Signal numbers and signal names\n\
8937may be interspersed with actions, with the actions being performed for\n\
8938all signals cumulatively specified."));
de0bea00 8939 set_cmd_completer (c, handle_completer);
486c7739 8940
c906108c 8941 if (!dbx_commands)
1a966eab
AC
8942 stop_command = add_cmd ("stop", class_obscure,
8943 not_just_help_class_command, _("\
8944There is no `stop' command, but you can set a hook on `stop'.\n\
c906108c 8945This allows you to set a list of commands to be run each time execution\n\
1a966eab 8946of the program stops."), &cmdlist);
c906108c 8947
ccce17b0 8948 add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
85c07804
AC
8949Set inferior debugging."), _("\
8950Show inferior debugging."), _("\
8951When non-zero, inferior specific debugging is enabled."),
ccce17b0
YQ
8952 NULL,
8953 show_debug_infrun,
8954 &setdebuglist, &showdebuglist);
527159b7 8955
3e43a32a
MS
8956 add_setshow_boolean_cmd ("displaced", class_maintenance,
8957 &debug_displaced, _("\
237fc4c9
PA
8958Set displaced stepping debugging."), _("\
8959Show displaced stepping debugging."), _("\
8960When non-zero, displaced stepping specific debugging is enabled."),
8961 NULL,
8962 show_debug_displaced,
8963 &setdebuglist, &showdebuglist);
8964
ad52ddc6
PA
8965 add_setshow_boolean_cmd ("non-stop", no_class,
8966 &non_stop_1, _("\
8967Set whether gdb controls the inferior in non-stop mode."), _("\
8968Show whether gdb controls the inferior in non-stop mode."), _("\
8969When debugging a multi-threaded program and this setting is\n\
8970off (the default, also called all-stop mode), when one thread stops\n\
8971(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
8972all other threads in the program while you interact with the thread of\n\
8973interest. When you continue or step a thread, you can allow the other\n\
8974threads to run, or have them remain stopped, but while you inspect any\n\
8975thread's state, all threads stop.\n\
8976\n\
8977In non-stop mode, when one thread stops, other threads can continue\n\
8978to run freely. You'll be able to step each thread independently,\n\
8979leave it stopped or free to run as needed."),
8980 set_non_stop,
8981 show_non_stop,
8982 &setlist,
8983 &showlist);
8984
adc6a863 8985 for (size_t i = 0; i < GDB_SIGNAL_LAST; i++)
c906108c
SS
8986 {
8987 signal_stop[i] = 1;
8988 signal_print[i] = 1;
8989 signal_program[i] = 1;
ab04a2af 8990 signal_catch[i] = 0;
c906108c
SS
8991 }
8992
4d9d9d04
PA
8993 /* Signals caused by debugger's own actions should not be given to
8994 the program afterwards.
8995
8996 Do not deliver GDB_SIGNAL_TRAP by default, except when the user
8997 explicitly specifies that it should be delivered to the target
8998 program. Typically, that would occur when a user is debugging a
8999 target monitor on a simulator: the target monitor sets a
9000 breakpoint; the simulator encounters this breakpoint and halts
9001 the simulation handing control to GDB; GDB, noting that the stop
9002 address doesn't map to any known breakpoint, returns control back
9003 to the simulator; the simulator then delivers the hardware
9004 equivalent of a GDB_SIGNAL_TRAP to the program being
9005 debugged. */
a493e3e2
PA
9006 signal_program[GDB_SIGNAL_TRAP] = 0;
9007 signal_program[GDB_SIGNAL_INT] = 0;
c906108c
SS
9008
9009 /* Signals that are not errors should not normally enter the debugger. */
a493e3e2
PA
9010 signal_stop[GDB_SIGNAL_ALRM] = 0;
9011 signal_print[GDB_SIGNAL_ALRM] = 0;
9012 signal_stop[GDB_SIGNAL_VTALRM] = 0;
9013 signal_print[GDB_SIGNAL_VTALRM] = 0;
9014 signal_stop[GDB_SIGNAL_PROF] = 0;
9015 signal_print[GDB_SIGNAL_PROF] = 0;
9016 signal_stop[GDB_SIGNAL_CHLD] = 0;
9017 signal_print[GDB_SIGNAL_CHLD] = 0;
9018 signal_stop[GDB_SIGNAL_IO] = 0;
9019 signal_print[GDB_SIGNAL_IO] = 0;
9020 signal_stop[GDB_SIGNAL_POLL] = 0;
9021 signal_print[GDB_SIGNAL_POLL] = 0;
9022 signal_stop[GDB_SIGNAL_URG] = 0;
9023 signal_print[GDB_SIGNAL_URG] = 0;
9024 signal_stop[GDB_SIGNAL_WINCH] = 0;
9025 signal_print[GDB_SIGNAL_WINCH] = 0;
9026 signal_stop[GDB_SIGNAL_PRIO] = 0;
9027 signal_print[GDB_SIGNAL_PRIO] = 0;
c906108c 9028
cd0fc7c3
SS
9029 /* These signals are used internally by user-level thread
9030 implementations. (See signal(5) on Solaris.) Like the above
9031 signals, a healthy program receives and handles them as part of
9032 its normal operation. */
a493e3e2
PA
9033 signal_stop[GDB_SIGNAL_LWP] = 0;
9034 signal_print[GDB_SIGNAL_LWP] = 0;
9035 signal_stop[GDB_SIGNAL_WAITING] = 0;
9036 signal_print[GDB_SIGNAL_WAITING] = 0;
9037 signal_stop[GDB_SIGNAL_CANCEL] = 0;
9038 signal_print[GDB_SIGNAL_CANCEL] = 0;
bc7b765a
JB
9039 signal_stop[GDB_SIGNAL_LIBRT] = 0;
9040 signal_print[GDB_SIGNAL_LIBRT] = 0;
cd0fc7c3 9041
2455069d
UW
9042 /* Update cached state. */
9043 signal_cache_update (-1);
9044
  add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
			    &stop_on_solib_events, _("\
Set stopping for shared library events."), _("\
Show stopping for shared library events."), _("\
If nonzero, gdb will give control to the user when the dynamic linker\n\
notifies gdb of shared library events.  The most common event of interest\n\
to the user would be loading/unloading of a new library."),
			    set_stop_on_solib_events,
			    show_stop_on_solib_events,
			    &setlist, &showlist);

  add_setshow_enum_cmd ("follow-fork-mode", class_run,
			follow_fork_mode_kind_names,
			&follow_fork_mode_string, _("\
Set debugger response to a program call of fork or vfork."), _("\
Show debugger response to a program call of fork or vfork."), _("\
A fork or vfork creates a new process.  follow-fork-mode can be:\n\
  parent  - the original process is debugged after a fork\n\
  child   - the new process is debugged after a fork\n\
The unfollowed process will continue to run.\n\
By default, the debugger will follow the parent process."),
			NULL,
			show_follow_fork_mode_string,
			&setlist, &showlist);

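  /* Illustrative usage only: to keep hold of both sides of a fork, the
     two related settings are typically combined, e.g.:

	 (gdb) set follow-fork-mode child
	 (gdb) set detach-on-fork off

     so that the child is followed while the parent is retained as a
     second inferior instead of being detached.  */
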
  add_setshow_enum_cmd ("follow-exec-mode", class_run,
			follow_exec_mode_names,
			&follow_exec_mode_string, _("\
Set debugger response to a program call of exec."), _("\
Show debugger response to a program call of exec."), _("\
An exec call replaces the program image of a process.\n\
\n\
follow-exec-mode can be:\n\
\n\
  new - the debugger creates a new inferior and rebinds the process\n\
to this new inferior.  The program the process was running before\n\
the exec call can be restarted afterwards by restarting the original\n\
inferior.\n\
\n\
  same - the debugger keeps the process bound to the same inferior.\n\
The new executable image replaces the previous executable loaded in\n\
the inferior.  Restarting the inferior after the exec call restarts\n\
the executable the process was running after the exec call.\n\
\n\
By default, the debugger will use the same inferior."),
			NULL,
			show_follow_exec_mode_string,
			&setlist, &showlist);

  add_setshow_enum_cmd ("scheduler-locking", class_run,
			scheduler_enums, &scheduler_mode, _("\
Set mode for locking scheduler during execution."), _("\
Show mode for locking scheduler during execution."), _("\
off    == no locking (threads may preempt at any time)\n\
on     == full locking (no thread except the current thread may run)\n\
          This applies to both normal execution and replay mode.\n\
step   == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
          In this mode, other threads may run during other commands.\n\
          This applies to both normal execution and replay mode.\n\
replay == scheduler locked in replay mode and unlocked during normal execution."),
			set_schedlock_func,	/* traps on target vector */
			show_scheduler_mode,
			&setlist, &showlist);

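  /* Illustrative usage only: to single-step one thread of a
     multi-threaded program without letting the other threads advance:

	 (gdb) set scheduler-locking step
	 (gdb) next  */
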
  add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
Set mode for resuming threads of all processes."), _("\
Show mode for resuming threads of all processes."), _("\
When on, execution commands (such as 'continue' or 'next') resume all\n\
threads of all processes.  When off (which is the default), execution\n\
commands only resume the threads of the current process.  The set of\n\
threads that are resumed is further refined by the scheduler-locking\n\
mode (see help set scheduler-locking)."),
			   NULL,
			   show_schedule_multiple,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
Set mode of the step operation."), _("\
Show mode of the step operation."), _("\
When set, doing a step over a function without debug line information\n\
will stop at the first instruction of that function.  Otherwise, the\n\
function is skipped and the step command stops at a different source line."),
			   NULL,
			   show_step_stop_if_no_debug,
			   &setlist, &showlist);

  add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
				&can_use_displaced_stepping, _("\
Set debugger's willingness to use displaced stepping."), _("\
Show debugger's willingness to use displaced stepping."), _("\
If on, gdb will use displaced stepping to step over breakpoints if it is\n\
supported by the target architecture.  If off, gdb will not use displaced\n\
stepping to step over breakpoints, even if such is supported by the target\n\
architecture.  If auto (which is the default), gdb will use displaced stepping\n\
if the target architecture supports it and non-stop mode is active, but will not\n\
use it in all-stop mode (see help set non-stop)."),
				NULL,
				show_can_use_displaced_stepping,
				&setlist, &showlist);

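  /* Illustrative usage only: displaced stepping mostly matters when
     stepping over a breakpoint while other threads keep running, since
     the breakpoint does not have to be removed from its original
     address while the step is in progress:

	 (gdb) set displaced-stepping on
	 (gdb) show displaced-stepping  */
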
  add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
			&exec_direction, _("Set direction of execution.\n\
Options are 'forward' or 'reverse'."),
			_("Show direction of execution (forward/reverse)."),
			_("Tells gdb whether to execute forward or backward."),
			set_exec_direction_func, show_exec_direction_func,
			&setlist, &showlist);

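  /* Illustrative usage only: reverse execution needs a target that can
     record and replay, for instance the process record target:

	 (gdb) record
	 (gdb) continue
	 (gdb) set exec-direction reverse
	 (gdb) step  */
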
  /* Set/show detach-on-fork: user-settable mode.  */

  add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
Set whether gdb will detach the child of a fork."), _("\
Show whether gdb will detach the child of a fork."), _("\
Tells gdb whether to detach the child of a fork."),
			   NULL, NULL, &setlist, &showlist);

  /* Set/show disable address space randomization mode.  */

  add_setshow_boolean_cmd ("disable-randomization", class_support,
			   &disable_randomization, _("\
Set disabling of debuggee's virtual address space randomization."), _("\
Show disabling of debuggee's virtual address space randomization."), _("\
When this mode is on (which is the default), randomization of the virtual\n\
address space is disabled.  Standalone programs run with the randomization\n\
enabled by default on some platforms."),
			   &set_disable_randomization,
			   &show_disable_randomization,
			   &setlist, &showlist);

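  /* Note (assumption): on GNU/Linux native targets this is believed to be
     implemented by starting the inferior with the ADDR_NO_RANDOMIZE
     personality flag; targets without such support may leave address
     space randomization enabled regardless of the setting.  */
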
  /* ptid initializations */
  inferior_ptid = null_ptid;
  target_last_wait_ptid = minus_one_ptid;

  gdb::observers::thread_ptid_changed.attach (infrun_thread_ptid_changed);
  gdb::observers::thread_stop_requested.attach (infrun_thread_stop_requested);
  gdb::observers::thread_exit.attach (infrun_thread_thread_exit);
  gdb::observers::inferior_exit.attach (infrun_inferior_exit);

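  /* A minimal sketch of the same observer pattern, assuming a
     hypothetical subsystem also wanted to hear about inferior exits;
     "my_subsystem_inferior_exit" is a made-up name, not part of infrun.
     The callback only has to match the observable's parameter list:

	 static void
	 my_subsystem_inferior_exit (struct inferior *inf)
	 {
	   // react to INF going away
	 }

	 gdb::observers::inferior_exit.attach (my_subsystem_inferior_exit);  */
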
  /* Explicitly create without lookup, since that tries to create a
     value with a void typed value, and when we get here, gdbarch
     isn't initialized yet.  At this point, we're quite sure there
     isn't another convenience variable of the same name.  */
  create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);

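  /* Illustrative usage only: once the inferior stops with a signal, the
     convenience variable registered above can be inspected from the CLI
     (on targets that expose siginfo):

	 (gdb) print $_siginfo
	 (gdb) print $_siginfo.si_signo  */
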
  add_setshow_boolean_cmd ("observer", no_class,
			   &observer_mode_1, _("\
Set whether gdb controls the inferior in observer mode."), _("\
Show whether gdb controls the inferior in observer mode."), _("\
In observer mode, GDB can get data from the inferior, but not\n\
affect its execution.  Registers and memory may not be changed,\n\
breakpoints may not be set, and the program cannot be interrupted\n\
or signalled."),
			   set_observer_mode,
			   show_observer_mode,
			   &setlist,
			   &showlist);
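
  /* Illustrative usage only: observer mode is toggled from the CLI with

	 (gdb) set observer on
	 (gdb) show observer  */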
}