]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gdb/infrun.c
gdb: fix typo "breapoint" -> "breakpoint"
[thirdparty/binutils-gdb.git] / gdb / infrun.c
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2020 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "infrun.h"
23 #include <ctype.h>
24 #include "symtab.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "breakpoint.h"
28 #include "gdbcore.h"
29 #include "gdbcmd.h"
30 #include "target.h"
31 #include "target-connection.h"
32 #include "gdbthread.h"
33 #include "annotate.h"
34 #include "symfile.h"
35 #include "top.h"
36 #include "inf-loop.h"
37 #include "regcache.h"
38 #include "value.h"
39 #include "observable.h"
40 #include "language.h"
41 #include "solib.h"
42 #include "main.h"
43 #include "block.h"
44 #include "mi/mi-common.h"
45 #include "event-top.h"
46 #include "record.h"
47 #include "record-full.h"
48 #include "inline-frame.h"
49 #include "jit.h"
50 #include "tracepoint.h"
51 #include "skip.h"
52 #include "probe.h"
53 #include "objfiles.h"
54 #include "completer.h"
55 #include "target-descriptions.h"
56 #include "target-dcache.h"
57 #include "terminal.h"
58 #include "solist.h"
59 #include "gdbsupport/event-loop.h"
60 #include "thread-fsm.h"
61 #include "gdbsupport/enum-flags.h"
62 #include "progspace-and-thread.h"
63 #include "gdbsupport/gdb_optional.h"
64 #include "arch-utils.h"
65 #include "gdbsupport/scope-exit.h"
66 #include "gdbsupport/forward-scope-exit.h"
67 #include "gdbsupport/gdb_select.h"
68 #include <unordered_map>
69 #include "async-event.h"
70 #include "gdbsupport/selftest.h"
71 #include "scoped-mock-context.h"
72 #include "test-target.h"
73
/* Prototypes for local functions.  The definitions appear further
   down in this file.  */

static void sig_print_info (enum gdb_signal);

static void sig_print_header (void);

static void follow_inferior_reset_breakpoints (void);

static int currently_stepping (struct thread_info *tp);

static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);

static void insert_step_resume_breakpoint_at_caller (struct frame_info *);

static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);

static int maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc);

static void resume (gdb_signal sig);

static void wait_for_inferior (inferior *inf);

/* Asynchronous signal handler registered as event loop source for
   when we have pending events ready to be passed to the core.  */
static struct async_event_handler *infrun_async_inferior_event_token;

/* Stores whether infrun_async was previously enabled or disabled.
   Starts off as -1, indicating "never enabled/disabled".  */
static int infrun_is_async = -1;
103
104 /* See infrun.h. */
105
106 void
107 infrun_async (int enable)
108 {
109 if (infrun_is_async != enable)
110 {
111 infrun_is_async = enable;
112
113 if (debug_infrun)
114 fprintf_unfiltered (gdb_stdlog,
115 "infrun: infrun_async(%d)\n",
116 enable);
117
118 if (enable)
119 mark_async_event_handler (infrun_async_inferior_event_token);
120 else
121 clear_async_event_handler (infrun_async_inferior_event_token);
122 }
123 }
124
/* See infrun.h.  */

void
mark_infrun_async_event_handler (void)
{
  /* Flag infrun's async event source so the event loop will invoke
     the inferior-event handler on its next pass.  */
  mark_async_event_handler (infrun_async_inferior_event_token);
}
132
/* When set, stop the 'step' command if we enter a function which has
   no line number information.  The normal behavior is that we step
   over such function.  */
bool step_stop_if_no_debug = false;

/* Show-hook: print the current VALUE of the above setting to FILE.  */
static void
show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
}
143
/* proceed and normal_stop use this to notify the user when the
   inferior stopped in a different thread than it had been running
   in.  */

static ptid_t previous_inferior_ptid;

/* If set (default for legacy reasons), when following a fork, GDB
   will detach from one of the fork branches, child or parent.
   Exactly which branch is detached depends on 'set follow-fork-mode'
   setting.  (Presumably toggled by "set detach-on-fork"; the command
   registration is not visible in this chunk.)  */

static bool detach_fork = true;
156
157 bool debug_displaced = false;
158 static void
159 show_debug_displaced (struct ui_file *file, int from_tty,
160 struct cmd_list_element *c, const char *value)
161 {
162 fprintf_filtered (file, _("Displace stepping debugging is %s.\n"), value);
163 }
164
/* Nonzero enables infrun debug output (checked via `if (debug_infrun)`
   throughout this file).  */
unsigned int debug_infrun = 0;
/* Show-hook: print the current VALUE of the infrun debug flag.  */
static void
show_debug_infrun (struct ui_file *file, int from_tty,
		   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
}
172
173
174 /* Support for disabling address space randomization. */
175
176 bool disable_randomization = true;
177
178 static void
179 show_disable_randomization (struct ui_file *file, int from_tty,
180 struct cmd_list_element *c, const char *value)
181 {
182 if (target_supports_disable_randomization ())
183 fprintf_filtered (file,
184 _("Disabling randomization of debuggee's "
185 "virtual address space is %s.\n"),
186 value);
187 else
188 fputs_filtered (_("Disabling randomization of debuggee's "
189 "virtual address space is unsupported on\n"
190 "this platform.\n"), file);
191 }
192
/* Set-hook for "disable-randomization": reject the change outright on
   targets that cannot honor the setting.  */
static void
set_disable_randomization (const char *args, int from_tty,
			   struct cmd_list_element *c)
{
  if (!target_supports_disable_randomization ())
    error (_("Disabling randomization of debuggee's "
	     "virtual address space is unsupported on\n"
	     "this platform."));
}
202
203 /* User interface for non-stop mode. */
204
205 bool non_stop = false;
206 static bool non_stop_1 = false;
207
208 static void
209 set_non_stop (const char *args, int from_tty,
210 struct cmd_list_element *c)
211 {
212 if (target_has_execution)
213 {
214 non_stop_1 = non_stop;
215 error (_("Cannot change this setting while the inferior is running."));
216 }
217
218 non_stop = non_stop_1;
219 }
220
/* Show-hook: print the current VALUE of the "non-stop" setting.  */
static void
show_non_stop (struct ui_file *file, int from_tty,
	       struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in non-stop mode is %s.\n"),
		    value);
}
229
230 /* "Observer mode" is somewhat like a more extreme version of
231 non-stop, in which all GDB operations that might affect the
232 target's execution have been disabled. */
233
234 bool observer_mode = false;
235 static bool observer_mode_1 = false;
236
237 static void
238 set_observer_mode (const char *args, int from_tty,
239 struct cmd_list_element *c)
240 {
241 if (target_has_execution)
242 {
243 observer_mode_1 = observer_mode;
244 error (_("Cannot change this setting while the inferior is running."));
245 }
246
247 observer_mode = observer_mode_1;
248
249 may_write_registers = !observer_mode;
250 may_write_memory = !observer_mode;
251 may_insert_breakpoints = !observer_mode;
252 may_insert_tracepoints = !observer_mode;
253 /* We can insert fast tracepoints in or out of observer mode,
254 but enable them if we're going into this mode. */
255 if (observer_mode)
256 may_insert_fast_tracepoints = true;
257 may_stop = !observer_mode;
258 update_target_permissions ();
259
260 /* Going *into* observer mode we must force non-stop, then
261 going out we leave it that way. */
262 if (observer_mode)
263 {
264 pagination_enabled = 0;
265 non_stop = non_stop_1 = true;
266 }
267
268 if (from_tty)
269 printf_filtered (_("Observer mode is now %s.\n"),
270 (observer_mode ? "on" : "off"));
271 }
272
/* Show-hook: print the current VALUE of observer mode.  */
static void
show_observer_mode (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Observer mode is %s.\n"), value);
}
279
280 /* This updates the value of observer mode based on changes in
281 permissions. Note that we are deliberately ignoring the values of
282 may-write-registers and may-write-memory, since the user may have
283 reason to enable these during a session, for instance to turn on a
284 debugging-related global. */
285
286 void
287 update_observer_mode (void)
288 {
289 bool newval = (!may_insert_breakpoints
290 && !may_insert_tracepoints
291 && may_insert_fast_tracepoints
292 && !may_stop
293 && non_stop);
294
295 /* Let the user know if things change. */
296 if (newval != observer_mode)
297 printf_filtered (_("Observer mode is now %s.\n"),
298 (newval ? "on" : "off"));
299
300 observer_mode = observer_mode_1 = newval;
301 }
302
/* Tables of how to react to signals; the user sets them.  Each table
   is indexed by gdb_signal number; a nonzero entry enables the
   behavior the table is named after for that signal.  */

static unsigned char signal_stop[GDB_SIGNAL_LAST];
static unsigned char signal_print[GDB_SIGNAL_LAST];
static unsigned char signal_program[GDB_SIGNAL_LAST];

/* Table of signals that are registered with "catch signal".  A
   non-zero entry indicates that the signal is caught by some "catch
   signal" command.  */
static unsigned char signal_catch[GDB_SIGNAL_LAST];

/* Table of signals that the target may silently handle.
   This is automatically determined from the flags above,
   and simply cached here.  */
static unsigned char signal_pass[GDB_SIGNAL_LAST];

/* For each of the first NSIGS entries set in SIGS, set the
   corresponding entry of FLAGS to 1.  */
#define SET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
	(flags)[signum] = 1; \
  } while (0)

/* For each of the first NSIGS entries set in SIGS, clear the
   corresponding entry of FLAGS.  */
#define UNSET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
	(flags)[signum] = 0; \
  } while (0)
334
/* Update the target's copy of SIGNAL_PROGRAM.  The sole purpose of
   this function is to avoid exporting `signal_program'.  */

void
update_signals_program_target (void)
{
  /* Hand the target the full pass/no-pass table.  */
  target_program_signals (signal_program);
}
343
/* Value to pass to target_resume() to cause all threads to resume.  */

#define RESUME_ALL minus_one_ptid

/* Command list pointer for the "stop" placeholder.  */

static struct cmd_list_element *stop_command;

/* Nonzero if we want to give control to the user when we're notified
   of shared library events by the dynamic linker.  Deliberately not
   static: it has external linkage and is read outside this file.  */
int stop_on_solib_events;
355
/* Enable or disable optional shared library event breakpoints
   as appropriate when the above flag is changed.  */

static void
set_stop_on_solib_events (const char *args,
			  int from_tty, struct cmd_list_element *c)
{
  /* The flag itself is presumably written by the set-command
     machinery before this hook runs; we only need to re-sync the
     solib event breakpoints.  */
  update_solib_breakpoints ();
}
365
/* Show-hook: print the current VALUE of "stop-on-solib-events".  */
static void
show_stop_on_solib_events (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
		    value);
}
373
/* Nonzero after stop if current stack frame should be printed.  */

static int stop_print_frame;

/* This is a cached copy of the target/ptid/waitstatus of the last
   event returned by target_wait()/deprecated_target_wait_hook().
   This information is returned by get_last_target_status().  */
static process_stratum_target *target_last_proc_target;
static ptid_t target_last_wait_ptid;
static struct target_waitstatus target_last_waitstatus;

void init_thread_stepping_state (struct thread_info *tss);

/* The valid values of the "follow-fork-mode" setting.  */
static const char follow_fork_mode_child[] = "child";
static const char follow_fork_mode_parent[] = "parent";

/* NULL-terminated table of the valid values above.  */
static const char *const follow_fork_mode_kind_names[] = {
  follow_fork_mode_child,
  follow_fork_mode_parent,
  NULL
};
395
/* Current "follow-fork-mode" value; defaults to following the parent.  */
static const char *follow_fork_mode_string = follow_fork_mode_parent;
/* Show-hook: print the current VALUE of "follow-fork-mode".  */
static void
show_follow_fork_mode_string (struct ui_file *file, int from_tty,
			      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Debugger response to a program "
		      "call of fork or vfork is \"%s\".\n"),
		    value);
}
406 \f
407
/* Handle changes to the inferior list based on the type of fork,
   which process is being followed, and whether the other process
   should be detached.  On entry inferior_ptid must be the ptid of
   the fork parent.  At return inferior_ptid is the ptid of the
   followed inferior.

   Returns true when the caller should NOT resume (either we refused
   here, e.g. to avoid hanging a foreground vfork parent, or the
   target's follow_fork refused); false when resuming is OK — see the
   caller, follow_fork.  */

static bool
follow_fork_inferior (bool follow_child, bool detach_fork)
{
  int has_vforked;
  ptid_t parent_ptid, child_ptid;

  has_vforked = (inferior_thread ()->pending_follow.kind
		 == TARGET_WAITKIND_VFORKED);
  parent_ptid = inferior_ptid;
  child_ptid = inferior_thread ()->pending_follow.value.related_pid;

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && current_ui->prompt_state == PROMPT_BLOCKED
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
	 child execs or exits.  If we don't let the child run, then
	 the parent stays blocked.  If we're telling the parent to run
	 in the foreground, the user will not be able to ctrl-c to get
	 back the terminal, effectively hanging the debug session.  */
      fprintf_filtered (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      return 1;
    }

  if (!follow_child)
    {
      /* Detach new forked process?  */
      if (detach_fork)
	{
	  /* Before detaching from the child, remove all breakpoints
	     from it.  If we forked, then this has already been taken
	     care of by infrun.c.  If we vforked however, any
	     breakpoint inserted in the parent is visible in the
	     child, even those added while stopped in a vfork
	     catchpoint.  This will remove the breakpoints from the
	     parent also, but they'll be reinserted below.  */
	  if (has_vforked)
	    {
	      /* Keep breakpoints list in sync.  */
	      remove_breakpoints_inf (current_inferior ());
	    }

	  if (print_inferior_events)
	    {
	      /* Ensure that we have a process ptid.  */
	      ptid_t process_ptid = ptid_t (child_ptid.pid ());

	      target_terminal::ours_for_output ();
	      fprintf_filtered (gdb_stdlog,
				_("[Detaching after %s from child %s]\n"),
				has_vforked ? "vfork" : "fork",
				target_pid_to_str (process_ptid).c_str ());
	    }
	}
      else
	{
	  /* Staying attached to both branches: register the child as
	     a new inferior that mirrors the parent's setup.  */
	  struct inferior *parent_inf, *child_inf;

	  /* Add process to GDB's tables.  */
	  child_inf = add_inferior (child_ptid.pid ());

	  parent_inf = current_inferior ();
	  child_inf->attach_flag = parent_inf->attach_flag;
	  copy_terminal_info (child_inf, parent_inf);
	  child_inf->gdbarch = parent_inf->gdbarch;
	  copy_inferior_target_desc_info (child_inf, parent_inf);

	  scoped_restore_current_pspace_and_thread restore_pspace_thread;

	  set_current_inferior (child_inf);
	  switch_to_no_thread ();
	  child_inf->symfile_flags = SYMFILE_NO_READ;
	  push_target (parent_inf->process_target ());
	  thread_info *child_thr
	    = add_thread_silent (child_inf->process_target (), child_ptid);

	  /* If this is a vfork child, then the address-space is
	     shared with the parent.  */
	  if (has_vforked)
	    {
	      child_inf->pspace = parent_inf->pspace;
	      child_inf->aspace = parent_inf->aspace;

	      exec_on_vfork ();

	      /* The parent will be frozen until the child is done
		 with the shared region.  Keep track of the
		 parent.  */
	      child_inf->vfork_parent = parent_inf;
	      child_inf->pending_detach = 0;
	      parent_inf->vfork_child = child_inf;
	      parent_inf->pending_detach = 0;

	      /* Now that the inferiors and program spaces are all
		 wired up, we can switch to the child thread (which
		 switches inferior and program space too).  */
	      switch_to_thread (child_thr);
	    }
	  else
	    {
	      child_inf->aspace = new_address_space ();
	      child_inf->pspace = new program_space (child_inf->aspace);
	      child_inf->removable = 1;
	      set_current_program_space (child_inf->pspace);
	      clone_program_space (child_inf->pspace, parent_inf->pspace);

	      /* solib_create_inferior_hook relies on the current
		 thread.  */
	      switch_to_thread (child_thr);

	      /* Let the shared library layer (e.g., solib-svr4) learn
		 about this new process, relocate the cloned exec, pull
		 in shared libraries, and install the solib event
		 breakpoint.  If a "cloned-VM" event was propagated
		 better throughout the core, this wouldn't be
		 required.  */
	      solib_create_inferior_hook (0);
	    }
	}

      if (has_vforked)
	{
	  struct inferior *parent_inf;

	  parent_inf = current_inferior ();

	  /* If we detached from the child, then we have to be careful
	     to not insert breakpoints in the parent until the child
	     is done with the shared memory region.  However, if we're
	     staying attached to the child, then we can and should
	     insert breakpoints, so that we can debug it.  A
	     subsequent child exec or exit is enough to know when does
	     the child stops using the parent's address space.  */
	  parent_inf->waiting_for_vfork_done = detach_fork;
	  parent_inf->pspace->breakpoints_not_allowed = detach_fork;
	}
    }
  else
    {
      /* Follow the child.  */
      struct inferior *parent_inf, *child_inf;
      struct program_space *parent_pspace;

      if (print_inferior_events)
	{
	  std::string parent_pid = target_pid_to_str (parent_ptid);
	  std::string child_pid = target_pid_to_str (child_ptid);

	  target_terminal::ours_for_output ();
	  fprintf_filtered (gdb_stdlog,
			    _("[Attaching after %s %s to child %s]\n"),
			    parent_pid.c_str (),
			    has_vforked ? "vfork" : "fork",
			    child_pid.c_str ());
	}

      /* Add the new inferior first, so that the target_detach below
	 doesn't unpush the target.  */

      child_inf = add_inferior (child_ptid.pid ());

      parent_inf = current_inferior ();
      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);
      child_inf->gdbarch = parent_inf->gdbarch;
      copy_inferior_target_desc_info (child_inf, parent_inf);

      parent_pspace = parent_inf->pspace;

      process_stratum_target *target = parent_inf->process_target ();

      {
	/* Hold a strong reference to the target while (maybe)
	   detaching the parent.  Otherwise detaching could close the
	   target.  */
	auto target_ref = target_ops_ref::new_reference (target);

	/* If we're vforking, we want to hold on to the parent until
	   the child exits or execs.  At child exec or exit time we
	   can remove the old breakpoints from the parent and detach
	   or resume debugging it.  Otherwise, detach the parent now;
	   we'll want to reuse it's program/address spaces, but we
	   can't set them to the child before removing breakpoints
	   from the parent, otherwise, the breakpoints module could
	   decide to remove breakpoints from the wrong process (since
	   they'd be assigned to the same address space).  */

	if (has_vforked)
	  {
	    gdb_assert (child_inf->vfork_parent == NULL);
	    gdb_assert (parent_inf->vfork_child == NULL);
	    child_inf->vfork_parent = parent_inf;
	    child_inf->pending_detach = 0;
	    parent_inf->vfork_child = child_inf;
	    parent_inf->pending_detach = detach_fork;
	    parent_inf->waiting_for_vfork_done = 0;
	  }
	else if (detach_fork)
	  {
	    if (print_inferior_events)
	      {
		/* Ensure that we have a process ptid.  */
		ptid_t process_ptid = ptid_t (parent_ptid.pid ());

		target_terminal::ours_for_output ();
		fprintf_filtered (gdb_stdlog,
				  _("[Detaching after fork from "
				    "parent %s]\n"),
				  target_pid_to_str (process_ptid).c_str ());
	      }

	    target_detach (parent_inf, 0);
	    parent_inf = NULL;
	  }

	/* Note that the detach above makes PARENT_INF dangling.  */

	/* Add the child thread to the appropriate lists, and switch
	   to this new thread, before cloning the program space, and
	   informing the solib layer about this new process.  */

	set_current_inferior (child_inf);
	push_target (target);
      }

      thread_info *child_thr = add_thread_silent (target, child_ptid);

      /* If this is a vfork child, then the address-space is shared
	 with the parent.  If we detached from the parent, then we can
	 reuse the parent's program/address spaces.  */
      if (has_vforked || detach_fork)
	{
	  child_inf->pspace = parent_pspace;
	  child_inf->aspace = child_inf->pspace->aspace;

	  exec_on_vfork ();
	}
      else
	{
	  child_inf->aspace = new_address_space ();
	  child_inf->pspace = new program_space (child_inf->aspace);
	  child_inf->removable = 1;
	  child_inf->symfile_flags = SYMFILE_NO_READ;
	  set_current_program_space (child_inf->pspace);
	  clone_program_space (child_inf->pspace, parent_pspace);

	  /* Let the shared library layer (e.g., solib-svr4) learn
	     about this new process, relocate the cloned exec, pull in
	     shared libraries, and install the solib event breakpoint.
	     If a "cloned-VM" event was propagated better throughout
	     the core, this wouldn't be required.  */
	  solib_create_inferior_hook (0);
	}

      switch_to_thread (child_thr);
    }

  /* Let the target do its own follow-fork bookkeeping.  A nonzero
     result here also means "do not resume".  */
  return target_follow_fork (follow_child, detach_fork);
}
677
/* Tell the target to follow the fork we're stopped at.  Returns true
   if the inferior should be resumed; false, if the target for some
   reason decided it's best not to resume.  */

static bool
follow_fork ()
{
  bool follow_child = (follow_fork_mode_string == follow_fork_mode_child);
  bool should_resume = true;
  struct thread_info *tp;

  /* Copy user stepping state to the new inferior thread.  FIXME: the
     followed fork child thread should have a copy of most of the
     parent thread structure's run control related fields, not just these.
     Initialized to avoid "may be used uninitialized" warnings from gcc.  */
  struct breakpoint *step_resume_breakpoint = NULL;
  struct breakpoint *exception_resume_breakpoint = NULL;
  CORE_ADDR step_range_start = 0;
  CORE_ADDR step_range_end = 0;
  int current_line = 0;
  symtab *current_symtab = NULL;
  struct frame_id step_frame_id = { 0 };
  struct thread_fsm *thread_fsm = NULL;

  if (!non_stop)
    {
      process_stratum_target *wait_target;
      ptid_t wait_ptid;
      struct target_waitstatus wait_status;

      /* Get the last target status returned by target_wait().  */
      get_last_target_status (&wait_target, &wait_ptid, &wait_status);

      /* If not stopped at a fork event, then there's nothing else to
	 do.  */
      if (wait_status.kind != TARGET_WAITKIND_FORKED
	  && wait_status.kind != TARGET_WAITKIND_VFORKED)
	return 1;

      /* Check if we switched over from WAIT_PTID, since the event was
	 reported.  */
      if (wait_ptid != minus_one_ptid
	  && (current_inferior ()->process_target () != wait_target
	      || inferior_ptid != wait_ptid))
	{
	  /* We did.  Switch back to WAIT_PTID thread, to tell the
	     target to follow it (in either direction).  We'll
	     afterwards refuse to resume, and inform the user what
	     happened.  */
	  thread_info *wait_thread = find_thread_ptid (wait_target, wait_ptid);
	  switch_to_thread (wait_thread);
	  should_resume = false;
	}
    }

  tp = inferior_thread ();

  /* If there were any forks/vforks that were caught and are now to be
     followed, then do so now.  */
  switch (tp->pending_follow.kind)
    {
    case TARGET_WAITKIND_FORKED:
    case TARGET_WAITKIND_VFORKED:
      {
	ptid_t parent, child;

	/* If the user did a next/step, etc, over a fork call,
	   preserve the stepping state in the fork child.  */
	if (follow_child && should_resume)
	  {
	    step_resume_breakpoint = clone_momentary_breakpoint
	      (tp->control.step_resume_breakpoint);
	    step_range_start = tp->control.step_range_start;
	    step_range_end = tp->control.step_range_end;
	    current_line = tp->current_line;
	    current_symtab = tp->current_symtab;
	    step_frame_id = tp->control.step_frame_id;
	    exception_resume_breakpoint
	      = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
	    thread_fsm = tp->thread_fsm;

	    /* For now, delete the parent's sr breakpoint, otherwise,
	       parent/child sr breakpoints are considered duplicates,
	       and the child version will not be installed.  Remove
	       this when the breakpoints module becomes aware of
	       inferiors and address spaces.  */
	    delete_step_resume_breakpoint (tp);
	    tp->control.step_range_start = 0;
	    tp->control.step_range_end = 0;
	    tp->control.step_frame_id = null_frame_id;
	    delete_exception_resume_breakpoint (tp);
	    tp->thread_fsm = NULL;
	  }

	parent = inferior_ptid;
	child = tp->pending_follow.value.related_pid;

	process_stratum_target *parent_targ = tp->inf->process_target ();
	/* Set up inferior(s) as specified by the caller, and tell the
	   target to do whatever is necessary to follow either parent
	   or child.  */
	if (follow_fork_inferior (follow_child, detach_fork))
	  {
	    /* Target refused to follow, or there's some other reason
	       we shouldn't resume.  */
	    should_resume = 0;
	  }
	else
	  {
	    /* This pending follow fork event is now handled, one way
	       or another.  The previous selected thread may be gone
	       from the lists by now, but if it is still around, need
	       to clear the pending follow request.  */
	    tp = find_thread_ptid (parent_targ, parent);
	    if (tp)
	      tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;

	    /* This makes sure we don't try to apply the "Switched
	       over from WAIT_PTID" logic above.  */
	    nullify_last_target_wait_ptid ();

	    /* If we followed the child, switch to it...  */
	    if (follow_child)
	      {
		thread_info *child_thr = find_thread_ptid (parent_targ, child);
		switch_to_thread (child_thr);

		/* ... and preserve the stepping state, in case the
		   user was stepping over the fork call.  */
		if (should_resume)
		  {
		    tp = inferior_thread ();
		    tp->control.step_resume_breakpoint
		      = step_resume_breakpoint;
		    tp->control.step_range_start = step_range_start;
		    tp->control.step_range_end = step_range_end;
		    tp->current_line = current_line;
		    tp->current_symtab = current_symtab;
		    tp->control.step_frame_id = step_frame_id;
		    tp->control.exception_resume_breakpoint
		      = exception_resume_breakpoint;
		    tp->thread_fsm = thread_fsm;
		  }
		else
		  {
		    /* If we get here, it was because we're trying to
		       resume from a fork catchpoint, but, the user
		       has switched threads away from the thread that
		       forked.  In that case, the resume command
		       issued is most likely not applicable to the
		       child, so just warn, and refuse to resume.  */
		    warning (_("Not resuming: switched threads "
			       "before following fork child."));
		  }

		/* Reset breakpoints in the child as appropriate.  */
		follow_inferior_reset_breakpoints ();
	      }
	  }
      }
      break;
    case TARGET_WAITKIND_SPURIOUS:
      /* Nothing to follow.  */
      break;
    default:
      internal_error (__FILE__, __LINE__,
		      "Unexpected pending_follow.kind %d\n",
		      tp->pending_follow.kind);
      break;
    }

  return should_resume;
}
851
852 static void
853 follow_inferior_reset_breakpoints (void)
854 {
855 struct thread_info *tp = inferior_thread ();
856
857 /* Was there a step_resume breakpoint? (There was if the user
858 did a "next" at the fork() call.) If so, explicitly reset its
859 thread number. Cloned step_resume breakpoints are disabled on
860 creation, so enable it here now that it is associated with the
861 correct thread.
862
863 step_resumes are a form of bp that are made to be per-thread.
864 Since we created the step_resume bp when the parent process
865 was being debugged, and now are switching to the child process,
866 from the breakpoint package's viewpoint, that's a switch of
867 "threads". We must update the bp's notion of which thread
868 it is for, or it'll be ignored when it triggers. */
869
870 if (tp->control.step_resume_breakpoint)
871 {
872 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
873 tp->control.step_resume_breakpoint->loc->enabled = 1;
874 }
875
876 /* Treat exception_resume breakpoints like step_resume breakpoints. */
877 if (tp->control.exception_resume_breakpoint)
878 {
879 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
880 tp->control.exception_resume_breakpoint->loc->enabled = 1;
881 }
882
883 /* Reinsert all breakpoints in the child. The user may have set
884 breakpoints after catching the fork, in which case those
885 were never set in the child, but only in the parent. This makes
886 sure the inserted breakpoints match the breakpoint list. */
887
888 breakpoint_re_set ();
889 insert_breakpoints ();
890 }
891
892 /* The child has exited or execed: resume threads of the parent the
893 user wanted to be executing. */
894
895 static int
896 proceed_after_vfork_done (struct thread_info *thread,
897 void *arg)
898 {
899 int pid = * (int *) arg;
900
901 if (thread->ptid.pid () == pid
902 && thread->state == THREAD_RUNNING
903 && !thread->executing
904 && !thread->stop_requested
905 && thread->suspend.stop_signal == GDB_SIGNAL_0)
906 {
907 if (debug_infrun)
908 fprintf_unfiltered (gdb_stdlog,
909 "infrun: resuming vfork parent thread %s\n",
910 target_pid_to_str (thread->ptid).c_str ());
911
912 switch_to_thread (thread);
913 clear_proceed_status (0);
914 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
915 }
916
917 return 0;
918 }
919
/* Called whenever we notice an exec or exit event, to handle
   detaching or resuming a vfork parent.  EXEC is nonzero for an exec
   event, zero for an exit event.  Operates on the current inferior
   (the vfork child); does nothing if it has no recorded vfork
   parent.  */

static void
handle_vfork_child_exec_or_exit (int exec)
{
  struct inferior *inf = current_inferior ();

  if (inf->vfork_parent)
    {
      /* PID of the parent to resume afterwards (non-stop only), or -1
	 if the parent should not be resumed.  */
      int resume_parent = -1;

      /* This exec or exit marks the end of the shared memory region
	 between the parent and the child.  Break the bonds.  */
      inferior *vfork_parent = inf->vfork_parent;
      inf->vfork_parent->vfork_child = NULL;
      inf->vfork_parent = NULL;

      /* If the user wanted to detach from the parent, now is the
	 time.  */
      if (vfork_parent->pending_detach)
	{
	  struct program_space *pspace;
	  struct address_space *aspace;

	  /* follow-fork child, detach-on-fork on.  */

	  vfork_parent->pending_detach = 0;

	  scoped_restore_current_pspace_and_thread restore_thread;

	  /* We're letting loose of the parent.  */
	  thread_info *tp = any_live_thread_of_inferior (vfork_parent);
	  switch_to_thread (tp);

	  /* We're about to detach from the parent, which implicitly
	     removes breakpoints from its address space.  There's a
	     catch here: we want to reuse the spaces for the child,
	     but, parent/child are still sharing the pspace at this
	     point, although the exec in reality makes the kernel give
	     the child a fresh set of new pages.  The problem here is
	     that the breakpoints module being unaware of this, would
	     likely chose the child process to write to the parent
	     address space.  Swapping the child temporarily away from
	     the spaces has the desired effect.  Yes, this is "sort
	     of" a hack.  */

	  pspace = inf->pspace;
	  aspace = inf->aspace;
	  inf->aspace = NULL;
	  inf->pspace = NULL;

	  if (print_inferior_events)
	    {
	      std::string pidstr
		= target_pid_to_str (ptid_t (vfork_parent->pid));

	      target_terminal::ours_for_output ();

	      if (exec)
		{
		  fprintf_filtered (gdb_stdlog,
				    _("[Detaching vfork parent %s "
				      "after child exec]\n"), pidstr.c_str ());
		}
	      else
		{
		  fprintf_filtered (gdb_stdlog,
				    _("[Detaching vfork parent %s "
				      "after child exit]\n"), pidstr.c_str ());
		}
	    }

	  target_detach (vfork_parent, 0);

	  /* Put it back.  */
	  inf->pspace = pspace;
	  inf->aspace = aspace;
	}
      else if (exec)
	{
	  /* We're staying attached to the parent, so, really give the
	     child a new address space.  */
	  inf->pspace = new program_space (maybe_new_address_space ());
	  inf->aspace = inf->pspace->aspace;
	  inf->removable = 1;
	  set_current_program_space (inf->pspace);

	  resume_parent = vfork_parent->pid;
	}
      else
	{
	  /* If this is a vfork child exiting, then the pspace and
	     aspaces were shared with the parent.  Since we're
	     reporting the process exit, we'll be mourning all that is
	     found in the address space, and switching to null_ptid,
	     preparing to start a new inferior.  But, since we don't
	     want to clobber the parent's address/program spaces, we
	     go ahead and create a new one for this exiting
	     inferior.  */

	  /* Switch to no-thread while running clone_program_space, so
	     that clone_program_space doesn't want to read the
	     selected frame of a dead process.  */
	  scoped_restore_current_thread restore_thread;
	  switch_to_no_thread ();

	  inf->pspace = new program_space (maybe_new_address_space ());
	  inf->aspace = inf->pspace->aspace;
	  set_current_program_space (inf->pspace);
	  inf->removable = 1;
	  inf->symfile_flags = SYMFILE_NO_READ;
	  clone_program_space (inf->pspace, vfork_parent->pspace);

	  resume_parent = vfork_parent->pid;
	}

      gdb_assert (current_program_space == inf->pspace);

      if (non_stop && resume_parent != -1)
	{
	  /* If the user wanted the parent to be running, let it go
	     free now.  */
	  scoped_restore_current_thread restore_thread;

	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: resuming vfork parent process %d\n",
				resume_parent);

	  iterate_over_threads (proceed_after_vfork_done, &resume_parent);
	}
    }
}
1054
/* Enum strings for "set|show follow-exec-mode".  "new" creates a
   fresh inferior for the post-exec program; "same" (the default)
   reuses the current one.  */

static const char follow_exec_mode_new[] = "new";
static const char follow_exec_mode_same[] = "same";
static const char *const follow_exec_mode_names[] =
{
  follow_exec_mode_new,
  follow_exec_mode_same,
  NULL,
};

/* The currently selected follow-exec mode; always points at one of
   the strings above (compared by pointer identity in follow_exec).  */
static const char *follow_exec_mode_string = follow_exec_mode_same;
1067 static void
1068 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
1069 struct cmd_list_element *c, const char *value)
1070 {
1071 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
1072 }
1073
/* Handle an exec event reported for PTID.  EXEC_FILE_TARGET is the
   path of the new executable as reported by the target, and is
   assumed to be non-NULL.  Refreshes GDB's state -- threads,
   breakpoints, symbols, shared libraries, target description -- to
   match the newly exec'd program, honoring "set follow-exec-mode".  */

static void
follow_exec (ptid_t ptid, const char *exec_file_target)
{
  struct inferior *inf = current_inferior ();
  int pid = ptid.pid ();
  ptid_t process_ptid;

  /* Switch terminal for any messages produced e.g. by
     breakpoint_re_set.  */
  target_terminal::ours_for_output ();

  /* This is an exec event that we actually wish to pay attention to.
     Refresh our symbol table to the newly exec'd program, remove any
     momentary bp's, etc.

     If there are breakpoints, they aren't really inserted now,
     since the exec() transformed our inferior into a fresh set
     of instructions.

     We want to preserve symbolic breakpoints on the list, since
     we have hopes that they can be reset after the new a.out's
     symbol table is read.

     However, any "raw" breakpoints must be removed from the list
     (e.g., the solib bp's), since their address is probably invalid
     now.

     And, we DON'T want to call delete_breakpoints() here, since
     that may write the bp's "shadow contents" (the instruction
     value that was overwritten with a TRAP instruction).  Since
     we now have a new a.out, those shadow contents aren't valid.  */

  mark_breakpoints_out ();

  /* The target reports the exec event to the main thread, even if
     some other thread does the exec, and even if the main thread was
     stopped or already gone.  We may still have non-leader threads of
     the process on our list.  E.g., on targets that don't have thread
     exit events (like remote); or on native Linux in non-stop mode if
     there were only two threads in the inferior and the non-leader
     one is the one that execs (and nothing forces an update of the
     thread list up to here).  When debugging remotely, it's best to
     avoid extra traffic, when possible, so avoid syncing the thread
     list with the target, and instead go ahead and delete all threads
     of the process but one that reported the event.  Note this must
     be done before calling update_breakpoints_after_exec, as
     otherwise clearing the threads' resources would reference stale
     thread breakpoints -- it may have been one of these threads that
     stepped across the exec.  We could just clear their stepping
     states, but as long as we're iterating, might as well delete
     them.  Deleting them now rather than at the next user-visible
     stop provides a nicer sequence of events for user and MI
     notifications.  */
  for (thread_info *th : all_threads_safe ())
    if (th->ptid.pid () == pid && th->ptid != ptid)
      delete_thread (th);

  /* We also need to clear any left over stale state for the
     leader/event thread.  E.g., if there was any step-resume
     breakpoint or similar, it's gone now.  We cannot truly
     step-to-next statement through an exec().  */
  thread_info *th = inferior_thread ();
  th->control.step_resume_breakpoint = NULL;
  th->control.exception_resume_breakpoint = NULL;
  th->control.single_step_breakpoints = NULL;
  th->control.step_range_start = 0;
  th->control.step_range_end = 0;

  /* The user may have had the main thread held stopped in the
     previous image (e.g., schedlock on, or non-stop).  Release
     it now.  */
  th->stop_requested = 0;

  update_breakpoints_after_exec ();

  /* What is this a.out's name?  */
  process_ptid = ptid_t (pid);
  printf_unfiltered (_("%s is executing new program: %s\n"),
		     target_pid_to_str (process_ptid).c_str (),
		     exec_file_target);

  /* We've followed the inferior through an exec.  Therefore, the
     inferior has essentially been killed & reborn.  */

  breakpoint_init_inferior (inf_execd);

  /* Map the target-reported path to a path on the host filesystem
     (may differ when remote debugging with a sysroot).  */
  gdb::unique_xmalloc_ptr<char> exec_file_host
    = exec_file_find (exec_file_target, NULL);

  /* If we were unable to map the executable target pathname onto a host
     pathname, tell the user that.  Otherwise GDB's subsequent behavior
     is confusing.  Maybe it would even be better to stop at this point
     so that the user can specify a file manually before continuing.  */
  if (exec_file_host == NULL)
    warning (_("Could not load symbols for executable %s.\n"
	       "Do you need \"set sysroot\"?"),
	     exec_file_target);

  /* Reset the shared library package.  This ensures that we get a
     shlib event when the child reaches "_start", at which point the
     dld will have had a chance to initialize the child.  */
  /* Also, loading a symbol file below may trigger symbol lookups, and
     we don't want those to be satisfied by the libraries of the
     previous incarnation of this process.  */
  no_shared_libraries (NULL, 0);

  if (follow_exec_mode_string == follow_exec_mode_new)
    {
      /* The user wants to keep the old inferior and program spaces
	 around.  Create a new fresh one, and switch to it.  */

      /* Do exit processing for the original inferior before setting the new
	 inferior's pid.  Having two inferiors with the same pid would confuse
	 find_inferior_p(t)id.  Transfer the terminal state and info from the
	 old to the new inferior.  */
      inf = add_inferior_with_spaces ();
      swap_terminal_info (inf, current_inferior ());
      exit_inferior_silent (current_inferior ());

      inf->pid = pid;
      target_follow_exec (inf, exec_file_target);

      inferior *org_inferior = current_inferior ();
      switch_to_inferior_no_thread (inf);
      push_target (org_inferior->process_target ());
      thread_info *thr = add_thread (inf->process_target (), ptid);
      switch_to_thread (thr);
    }
  else
    {
      /* The old description may no longer be fit for the new image.
	 E.g., a 64-bit process exec'ed a 32-bit process.  Clear the
	 old description; we'll read a new one below.  No need to do
	 this on "follow-exec-mode new", as the old inferior stays
	 around (its description is later cleared/refetched on
	 restart).  */
      target_clear_description ();
    }

  gdb_assert (current_program_space == inf->pspace);

  /* Attempt to open the exec file.  SYMFILE_DEFER_BP_RESET is used
     because the proper displacement for a PIE (Position Independent
     Executable) main symbol file will only be computed by
     solib_create_inferior_hook below.  breakpoint_re_set would fail
     to insert the breakpoints with the zero displacement.  */
  try_open_exec_file (exec_file_host.get (), inf, SYMFILE_DEFER_BP_RESET);

  /* If the target can specify a description, read it.  Must do this
     after flipping to the new executable (because the target supplied
     description must be compatible with the executable's
     architecture, and the old executable may e.g., be 32-bit, while
     the new one 64-bit), and before anything involving memory or
     registers.  */
  target_find_description ();

  solib_create_inferior_hook (0);

  jit_inferior_created_hook ();

  breakpoint_re_set ();

  /* Reinsert all breakpoints.  (Those which were symbolic have
     been reset to the proper address in the new a.out, thanks
     to symbol_file_command...).  */
  insert_breakpoints ();

  /* The next resume of this inferior should bring it to the shlib
     startup breakpoints.  (If the user had also set bp's on
     "main" from the old (parent) process, then they'll auto-
     matically get reset there in the new process.).  */
}
1248
/* The queue of threads that need to do a step-over operation to get
   past e.g., a breakpoint.  What technique is used to step over the
   breakpoint/watchpoint does not matter -- all threads end up in the
   same queue, to maintain rough temporal order of execution, in order
   to avoid starvation, otherwise, we could e.g., find ourselves
   constantly stepping the same couple threads past their breakpoints
   over and over, if the single-step finishes fast enough.  */
struct thread_info *step_over_queue_head;
1257
/* Bit flags indicating what a thread needs to step over.  Combined
   into the step_over_what flags type below, so values must be
   distinct powers of two.  */

enum step_over_what_flag
  {
    /* Step over a breakpoint.  */
    STEP_OVER_BREAKPOINT = 1,

    /* Step past a non-continuable watchpoint, in order to let the
       instruction execute so we can evaluate the watchpoint
       expression.  */
    STEP_OVER_WATCHPOINT = 2
  };
/* Flags type holding any combination of the above.  */
DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag, step_over_what);
1271
/* Info about an instruction that is being stepped over.  */

struct step_over_info
{
  /* If we're stepping past a breakpoint, this is the address space
     and address of the instruction the breakpoint is set at.  We'll
     skip inserting all breakpoints here.  Valid iff ASPACE is
     non-NULL.  */
  const address_space *aspace;
  CORE_ADDR address;

  /* The instruction being stepped over triggers a nonsteppable
     watchpoint.  If true, we'll skip inserting watchpoints.  */
  int nonsteppable_watchpoint_p;

  /* The global number of the thread doing the step-over, or -1 when
     no thread is stepping over anything (see clear_step_over_info).  */
  int thread;
};

/* The step-over info of the location that is being stepped over.

   Note that with async/breakpoint always-inserted mode, a user might
   set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
   being stepped over.  As setting a new breakpoint inserts all
   breakpoints, we need to make sure the breakpoint being stepped over
   isn't inserted then.  We do that by only clearing the step-over
   info when the step-over is actually finished (or aborted).

   Presently GDB can only step over one breakpoint at any given time.
   Given that threads that can't run code in the same address space as
   the breakpoint's can't really miss the breakpoint, GDB could be
   taught to step-over at most one breakpoint per address space (so
   this info could move to the address space object if/when GDB is
   extended).  The set of breakpoints being stepped over will normally
   be much smaller than the set of all breakpoints, so a flag in the
   breakpoint location structure would be wasteful.  A separate list
   also saves complexity and run-time, as otherwise we'd have to go
   through all breakpoint locations clearing their flag whenever we
   start a new sequence.  Similar considerations weigh against storing
   this info in the thread object.  Plus, not all step overs actually
   have breakpoint locations -- e.g., stepping past a single-step
   breakpoint, or stepping to complete a non-continuable
   watchpoint.  */
static struct step_over_info step_over_info;
1316
1317 /* Record the address of the breakpoint/instruction we're currently
1318 stepping over.
1319 N.B. We record the aspace and address now, instead of say just the thread,
1320 because when we need the info later the thread may be running. */
1321
1322 static void
1323 set_step_over_info (const address_space *aspace, CORE_ADDR address,
1324 int nonsteppable_watchpoint_p,
1325 int thread)
1326 {
1327 step_over_info.aspace = aspace;
1328 step_over_info.address = address;
1329 step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
1330 step_over_info.thread = thread;
1331 }
1332
1333 /* Called when we're not longer stepping over a breakpoint / an
1334 instruction, so all breakpoints are free to be (re)inserted. */
1335
1336 static void
1337 clear_step_over_info (void)
1338 {
1339 if (debug_infrun)
1340 fprintf_unfiltered (gdb_stdlog,
1341 "infrun: clear_step_over_info\n");
1342 step_over_info.aspace = NULL;
1343 step_over_info.address = 0;
1344 step_over_info.nonsteppable_watchpoint_p = 0;
1345 step_over_info.thread = -1;
1346 }
1347
1348 /* See infrun.h. */
1349
1350 int
1351 stepping_past_instruction_at (struct address_space *aspace,
1352 CORE_ADDR address)
1353 {
1354 return (step_over_info.aspace != NULL
1355 && breakpoint_address_match (aspace, address,
1356 step_over_info.aspace,
1357 step_over_info.address));
1358 }
1359
1360 /* See infrun.h. */
1361
1362 int
1363 thread_is_stepping_over_breakpoint (int thread)
1364 {
1365 return (step_over_info.thread != -1
1366 && thread == step_over_info.thread);
1367 }
1368
/* See infrun.h.

   Returns nonzero while we are stepping past an instruction that
   triggers a non-continuable watchpoint (watchpoints are skipped
   during the step; see step_over_info).  */

int
stepping_past_nonsteppable_watchpoint (void)
{
  return step_over_info.nonsteppable_watchpoint_p;
}
1376
1377 /* Returns true if step-over info is valid. */
1378
1379 static int
1380 step_over_info_valid_p (void)
1381 {
1382 return (step_over_info.aspace != NULL
1383 || stepping_past_nonsteppable_watchpoint ());
1384 }
1385
1386 \f
1387 /* Displaced stepping. */
1388
1389 /* In non-stop debugging mode, we must take special care to manage
1390 breakpoints properly; in particular, the traditional strategy for
1391 stepping a thread past a breakpoint it has hit is unsuitable.
1392 'Displaced stepping' is a tactic for stepping one thread past a
1393 breakpoint it has hit while ensuring that other threads running
1394 concurrently will hit the breakpoint as they should.
1395
1396 The traditional way to step a thread T off a breakpoint in a
1397 multi-threaded program in all-stop mode is as follows:
1398
1399 a0) Initially, all threads are stopped, and breakpoints are not
1400 inserted.
1401 a1) We single-step T, leaving breakpoints uninserted.
1402 a2) We insert breakpoints, and resume all threads.
1403
1404 In non-stop debugging, however, this strategy is unsuitable: we
1405 don't want to have to stop all threads in the system in order to
1406 continue or step T past a breakpoint. Instead, we use displaced
1407 stepping:
1408
1409 n0) Initially, T is stopped, other threads are running, and
1410 breakpoints are inserted.
1411 n1) We copy the instruction "under" the breakpoint to a separate
1412 location, outside the main code stream, making any adjustments
1413 to the instruction, register, and memory state as directed by
1414 T's architecture.
1415 n2) We single-step T over the instruction at its new location.
1416 n3) We adjust the resulting register and memory state as directed
1417 by T's architecture. This includes resetting T's PC to point
1418 back into the main instruction stream.
1419 n4) We resume T.
1420
1421 This approach depends on the following gdbarch methods:
1422
1423 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1424 indicate where to copy the instruction, and how much space must
1425 be reserved there. We use these in step n1.
1426
   - gdbarch_displaced_step_copy_insn copies an instruction to a new
1428 address, and makes any necessary adjustments to the instruction,
1429 register contents, and memory. We use this in step n1.
1430
1431 - gdbarch_displaced_step_fixup adjusts registers and memory after
1432 we have successfully single-stepped the instruction, to yield the
1433 same effect the instruction would have had if we had executed it
1434 at its original address. We use this in step n3.
1435
1436 The gdbarch_displaced_step_copy_insn and
1437 gdbarch_displaced_step_fixup functions must be written so that
1438 copying an instruction with gdbarch_displaced_step_copy_insn,
1439 single-stepping across the copied instruction, and then applying
   gdbarch_displaced_step_fixup should have the same effects on the
1441 thread's memory and registers as stepping the instruction in place
1442 would have. Exactly which responsibilities fall to the copy and
1443 which fall to the fixup is up to the author of those functions.
1444
1445 See the comments in gdbarch.sh for details.
1446
1447 Note that displaced stepping and software single-step cannot
1448 currently be used in combination, although with some care I think
1449 they could be made to. Software single-step works by placing
1450 breakpoints on all possible subsequent instructions; if the
1451 displaced instruction is a PC-relative jump, those breakpoints
1452 could fall in very strange places --- on pages that aren't
1453 executable, or at addresses that are not proper instruction
1454 boundaries. (We do generally let other threads run while we wait
1455 to hit the software single-step breakpoint, and they might
1456 encounter such a corrupted instruction.) One way to work around
1457 this would be to have gdbarch_displaced_step_copy_insn fully
1458 simulate the effect of PC-relative instructions (and return NULL)
1459 on architectures that use software single-stepping.
1460
1461 In non-stop mode, we can have independent and simultaneous step
1462 requests, so more than one thread may need to simultaneously step
1463 over a breakpoint. The current implementation assumes there is
1464 only one scratch space per process. In this case, we have to
1465 serialize access to the scratch space. If thread A wants to step
1466 over a breakpoint, but we are currently waiting for some other
1467 thread to complete a displaced step, we leave thread A stopped and
1468 place it in the displaced_step_request_queue. Whenever a displaced
1469 step finishes, we pick the next thread in the queue and start a new
1470 displaced step operation on it. See displaced_step_prepare and
1471 displaced_step_fixup for details. */
1472
/* Default destructor for displaced_step_closure.  Defined out of
   line (rather than in the header) so the destructor's code is
   emitted in this translation unit.  */

displaced_step_closure::~displaced_step_closure () = default;
1476
1477 /* Get the displaced stepping state of process PID. */
1478
1479 static displaced_step_inferior_state *
1480 get_displaced_stepping_state (inferior *inf)
1481 {
1482 return &inf->displaced_step_state;
1483 }
1484
1485 /* Returns true if any inferior has a thread doing a displaced
1486 step. */
1487
1488 static bool
1489 displaced_step_in_progress_any_inferior ()
1490 {
1491 for (inferior *i : all_inferiors ())
1492 {
1493 if (i->displaced_step_state.step_thread != nullptr)
1494 return true;
1495 }
1496
1497 return false;
1498 }
1499
1500 /* Return true if thread represented by PTID is doing a displaced
1501 step. */
1502
1503 static int
1504 displaced_step_in_progress_thread (thread_info *thread)
1505 {
1506 gdb_assert (thread != NULL);
1507
1508 return get_displaced_stepping_state (thread->inf)->step_thread == thread;
1509 }
1510
1511 /* Return true if process PID has a thread doing a displaced step. */
1512
1513 static int
1514 displaced_step_in_progress (inferior *inf)
1515 {
1516 return get_displaced_stepping_state (inf)->step_thread != nullptr;
1517 }
1518
1519 /* If inferior is in displaced stepping, and ADDR equals to starting address
1520 of copy area, return corresponding displaced_step_closure. Otherwise,
1521 return NULL. */
1522
1523 struct displaced_step_closure*
1524 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1525 {
1526 displaced_step_inferior_state *displaced
1527 = get_displaced_stepping_state (current_inferior ());
1528
1529 /* If checking the mode of displaced instruction in copy area. */
1530 if (displaced->step_thread != nullptr
1531 && displaced->step_copy == addr)
1532 return displaced->step_closure.get ();
1533
1534 return NULL;
1535 }
1536
/* Called when inferior INF exits; discards any displaced-stepping
   state it may still hold so nothing stale survives into a new
   run.  */

static void
infrun_inferior_exit (struct inferior *inf)
{
  inf->displaced_step_state.reset ();
}
1542
/* Backing variable for "set|show displaced-stepping".

   If ON, and the architecture supports it, GDB will use displaced
   stepping to step over breakpoints.  If OFF, or if the architecture
   doesn't support it, GDB will instead use the traditional
   hold-and-step approach.  If AUTO (which is the default), GDB will
   decide which technique to use to step over breakpoints depending on
   whether the target works in a non-stop way (see use_displaced_stepping).  */

static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1551
1552 static void
1553 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1554 struct cmd_list_element *c,
1555 const char *value)
1556 {
1557 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
1558 fprintf_filtered (file,
1559 _("Debugger's willingness to use displaced stepping "
1560 "to step over breakpoints is %s (currently %s).\n"),
1561 value, target_is_non_stop_p () ? "on" : "off");
1562 else
1563 fprintf_filtered (file,
1564 _("Debugger's willingness to use displaced stepping "
1565 "to step over breakpoints is %s.\n"), value);
1566 }
1567
1568 /* Return true if the gdbarch implements the required methods to use
1569 displaced stepping. */
1570
1571 static bool
1572 gdbarch_supports_displaced_stepping (gdbarch *arch)
1573 {
1574 /* Only check for the presence of step_copy_insn. Other required methods
1575 are checked by the gdbarch validation. */
1576 return gdbarch_displaced_step_copy_insn_p (arch);
1577 }
1578
1579 /* Return non-zero if displaced stepping can/should be used to step
1580 over breakpoints of thread TP. */
1581
1582 static bool
1583 use_displaced_stepping (thread_info *tp)
1584 {
1585 /* If the user disabled it explicitly, don't use displaced stepping. */
1586 if (can_use_displaced_stepping == AUTO_BOOLEAN_FALSE)
1587 return false;
1588
1589 /* If "auto", only use displaced stepping if the target operates in a non-stop
1590 way. */
1591 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
1592 && !target_is_non_stop_p ())
1593 return false;
1594
1595 gdbarch *gdbarch = get_thread_regcache (tp)->arch ();
1596
1597 /* If the architecture doesn't implement displaced stepping, don't use
1598 it. */
1599 if (!gdbarch_supports_displaced_stepping (gdbarch))
1600 return false;
1601
1602 /* If recording, don't use displaced stepping. */
1603 if (find_record_target () != nullptr)
1604 return false;
1605
1606 displaced_step_inferior_state *displaced_state
1607 = get_displaced_stepping_state (tp->inf);
1608
1609 /* If displaced stepping failed before for this inferior, don't bother trying
1610 again. */
1611 if (displaced_state->failed_before)
1612 return false;
1613
1614 return true;
1615 }
1616
/* Simple function wrapper around displaced_step_inferior_state::reset,
   so it can be used with FORWARD_SCOPE_EXIT below.  */

static void
displaced_step_reset (displaced_step_inferior_state *displaced)
{
  displaced->reset ();
}

/* A cleanup that wraps displaced_step_reset.  We use this instead of, say,
   SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()".  */

using displaced_step_reset_cleanup = FORWARD_SCOPE_EXIT (displaced_step_reset);
1629
1630 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1631 void
1632 displaced_step_dump_bytes (struct ui_file *file,
1633 const gdb_byte *buf,
1634 size_t len)
1635 {
1636 int i;
1637
1638 for (i = 0; i < len; i++)
1639 fprintf_unfiltered (file, "%02x ", buf[i]);
1640 fputs_unfiltered ("\n", file);
1641 }
1642
/* Prepare thread TP to single-step, using displaced stepping.  May
   throw (e.g. MEMORY_ERROR when the scratch space can't be read);
   see displaced_step_prepare for the non-throwing wrapper.

   Note that we cannot use displaced stepping when we have a signal to
   deliver.  If we have a signal to deliver and an instruction to step
   over, then after the step, there will be no indication from the
   target whether the thread entered a signal handler or ignored the
   signal and stepped over the instruction successfully --- both cases
   result in a simple SIGTRAP.  In the first case we mustn't do a
   fixup, and in the second case we must --- but we can't tell which.
   Comments in the code for 'random signals' in handle_inferior_event
   explain how we handle this case instead.

   Returns 1 if preparing was successful -- this thread is going to be
   stepped now; 0 if displaced stepping this thread got queued; or -1
   if this instruction can't be displaced stepped.  */

static int
displaced_step_prepare_throw (thread_info *tp)
{
  regcache *regcache = get_thread_regcache (tp);
  struct gdbarch *gdbarch = regcache->arch ();
  const address_space *aspace = regcache->aspace ();
  CORE_ADDR original, copy;
  ULONGEST len;
  int status;

  /* We should never reach this function if the architecture does not
     support displaced stepping.  */
  gdb_assert (gdbarch_supports_displaced_stepping (gdbarch));

  /* Nor if the thread isn't meant to step over a breakpoint.  */
  gdb_assert (tp->control.trap_expected);

  /* Disable range stepping while executing in the scratch pad.  We
     want a single-step even if executing the displaced instruction in
     the scratch buffer lands within the stepping range (e.g., a
     jump/branch).  */
  tp->control.may_range_step = 0;

  /* We have to displaced step one thread at a time, as we only have
     access to a single scratch space per inferior.  */

  displaced_step_inferior_state *displaced
    = get_displaced_stepping_state (tp->inf);

  if (displaced->step_thread != nullptr)
    {
      /* Already waiting for a displaced step to finish.  Defer this
	 request and place in queue.  */

      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog,
			    "displaced: deferring step of %s\n",
			    target_pid_to_str (tp->ptid).c_str ());

      thread_step_over_chain_enqueue (tp);
      return 0;
    }
  else
    {
      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog,
			    "displaced: stepping %s now\n",
			    target_pid_to_str (tp->ptid).c_str ());
    }

  /* Start from a clean displaced-stepping state for this inferior.  */
  displaced_step_reset (displaced);

  scoped_restore_current_thread restore_thread;

  switch_to_thread (tp);

  /* ORIGINAL is the PC of the instruction being stepped over; COPY is
     where in the scratch pad it will be copied to.  */
  original = regcache_read_pc (regcache);

  copy = gdbarch_displaced_step_location (gdbarch);
  len = gdbarch_max_insn_length (gdbarch);

  if (breakpoint_in_range_p (aspace, copy, len))
    {
      /* There's a breakpoint set in the scratch pad location range
	 (which is usually around the entry point).  We'd either
	 install it before resuming, which would overwrite/corrupt the
	 scratch pad, or if it was already inserted, this displaced
	 step would overwrite it.  The latter is OK in the sense that
	 we already assume that no thread is going to execute the code
	 in the scratch pad range (after initial startup) anyway, but
	 the former is unacceptable.  Simply punt and fallback to
	 stepping over this breakpoint in-line.  */
      if (debug_displaced)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "displaced: breakpoint set in scratch pad.  "
			      "Stepping over breakpoint in-line instead.\n");
	}

      return -1;
    }

  /* Save the original contents of the copy area.  */
  displaced->step_saved_copy.resize (len);
  status = target_read_memory (copy, displaced->step_saved_copy.data (), len);
  if (status != 0)
    throw_error (MEMORY_ERROR,
		 _("Error accessing memory address %s (%s) for "
		   "displaced-stepping scratch space."),
		 paddress (gdbarch, copy), safe_strerror (status));
  if (debug_displaced)
    {
      fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
			  paddress (gdbarch, copy));
      displaced_step_dump_bytes (gdb_stdlog,
				 displaced->step_saved_copy.data (),
				 len);
    };

  displaced->step_closure
    = gdbarch_displaced_step_copy_insn (gdbarch, original, copy, regcache);
  if (displaced->step_closure == NULL)
    {
      /* The architecture doesn't know how or want to displaced step
	 this instruction or instruction sequence.  Fallback to
	 stepping over the breakpoint in-line.  */
      return -1;
    }

  /* Save the information we need to fix things up if the step
     succeeds.  */
  displaced->step_thread = tp;
  displaced->step_gdbarch = gdbarch;
  displaced->step_original = original;
  displaced->step_copy = copy;

  {
    /* If writing the PC throws, undo the state recorded above;
       release the cleanup once the write has succeeded.  */
    displaced_step_reset_cleanup cleanup (displaced);

    /* Resume execution at the copy.  */
    regcache_write_pc (regcache, copy);

    cleanup.release ();
  }

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
			paddress (gdbarch, copy));

  return 1;
}
1790
/* Wrapper for displaced_step_prepare_throw that disables further
   attempts at displaced stepping for THREAD's inferior if we get a
   memory error (or NOT_SUPPORTED_ERROR).  Returns the same tri-state
   as displaced_step_prepare_throw: 1 if the thread is being stepped
   now, 0 if the request got queued, -1 if this instruction can't be
   displaced stepped.  */

static int
displaced_step_prepare (thread_info *thread)
{
  int prepared = -1;

  try
    {
      prepared = displaced_step_prepare_throw (thread);
    }
  catch (const gdb_exception_error &ex)
    {
      struct displaced_step_inferior_state *displaced_state;

      /* Anything other than a memory/not-supported error is
	 unexpected; propagate it.  */
      if (ex.error != MEMORY_ERROR
	  && ex.error != NOT_SUPPORTED_ERROR)
	throw;

      if (debug_infrun)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "infrun: disabling displaced stepping: %s\n",
			      ex.what ());
	}

      /* Be verbose if "set displaced-stepping" is "on", silent if
	 "auto".  */
      if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
	{
	  warning (_("disabling displaced stepping: %s"),
		   ex.what ());
	}

      /* Disable further displaced stepping attempts.  */
      displaced_state
	= get_displaced_stepping_state (thread->inf);
      displaced_state->failed_before = 1;
    }

  return prepared;
}
1834
1835 static void
1836 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1837 const gdb_byte *myaddr, int len)
1838 {
1839 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
1840
1841 inferior_ptid = ptid;
1842 write_memory (memaddr, myaddr, len);
1843 }
1844
1845 /* Restore the contents of the copy area for thread PTID. */
1846
1847 static void
1848 displaced_step_restore (struct displaced_step_inferior_state *displaced,
1849 ptid_t ptid)
1850 {
1851 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1852
1853 write_memory_ptid (ptid, displaced->step_copy,
1854 displaced->step_saved_copy.data (), len);
1855 if (debug_displaced)
1856 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
1857 target_pid_to_str (ptid).c_str (),
1858 paddress (displaced->step_gdbarch,
1859 displaced->step_copy));
1860 }
1861
1862 /* If we displaced stepped an instruction successfully, adjust
1863 registers and memory to yield the same effect the instruction would
1864 have had if we had executed it at its original address, and return
1865 1. If the instruction didn't complete, relocate the PC and return
1866 -1. If the thread wasn't displaced stepping, return 0. */
1867
1868 static int
1869 displaced_step_fixup (thread_info *event_thread, enum gdb_signal signal)
1870 {
1871 struct displaced_step_inferior_state *displaced
1872 = get_displaced_stepping_state (event_thread->inf);
1873 int ret;
1874
1875 /* Was this event for the thread we displaced? */
1876 if (displaced->step_thread != event_thread)
1877 return 0;
1878
1879 /* Fixup may need to read memory/registers. Switch to the thread
1880 that we're fixing up. Also, target_stopped_by_watchpoint checks
1881 the current thread, and displaced_step_restore performs ptid-dependent
1882 memory accesses using current_inferior() and current_top_target(). */
1883 switch_to_thread (event_thread);
1884
1885 displaced_step_reset_cleanup cleanup (displaced);
1886
1887 displaced_step_restore (displaced, displaced->step_thread->ptid);
1888
1889 /* Did the instruction complete successfully? */
1890 if (signal == GDB_SIGNAL_TRAP
1891 && !(target_stopped_by_watchpoint ()
1892 && (gdbarch_have_nonsteppable_watchpoint (displaced->step_gdbarch)
1893 || target_have_steppable_watchpoint)))
1894 {
1895 /* Fix up the resulting state. */
1896 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1897 displaced->step_closure.get (),
1898 displaced->step_original,
1899 displaced->step_copy,
1900 get_thread_regcache (displaced->step_thread));
1901 ret = 1;
1902 }
1903 else
1904 {
1905 /* Since the instruction didn't complete, all we can do is
1906 relocate the PC. */
1907 struct regcache *regcache = get_thread_regcache (event_thread);
1908 CORE_ADDR pc = regcache_read_pc (regcache);
1909
1910 pc = displaced->step_original + (pc - displaced->step_copy);
1911 regcache_write_pc (regcache, pc);
1912 ret = -1;
1913 }
1914
1915 return ret;
1916 }
1917
/* Data to be passed around while handling an event.  This data is
   discarded between events.  */
struct execution_control_state
{
  /* The target that reported the event.  */
  process_stratum_target *target;
  /* The ptid the event refers to.  */
  ptid_t ptid;
  /* The thread that got the event, if this was a thread event; NULL
     otherwise.  */
  struct thread_info *event_thread;

  /* The wait status reported for the event.  */
  struct target_waitstatus ws;
  /* Nonzero once the stop_func_* fields below have been filled in
     (presumably by a lazy lookup elsewhere in this file — the filler
     is not visible in this chunk).  */
  int stop_func_filled_in;
  /* NOTE(review): these look like the bounds and name of the function
     containing the stop PC — confirm against the code that sets
     stop_func_filled_in.  */
  CORE_ADDR stop_func_start;
  CORE_ADDR stop_func_end;
  const char *stop_func_name;
  /* Nonzero if event handling should keep waiting for more target
     events instead of stopping (checked e.g. by start_step_over after
     keep_going_pass_signal).  */
  int wait_some_more;

  /* True if the event thread hit the single-step breakpoint of
     another thread.  Thus the event doesn't cause a stop, the thread
     needs to be single-stepped past the single-step breakpoint before
     we can switch back to the original stepping thread.  */
  int hit_singlestep_breakpoint;
};
1941
1942 /* Clear ECS and set it to point at TP. */
1943
1944 static void
1945 reset_ecs (struct execution_control_state *ecs, struct thread_info *tp)
1946 {
1947 memset (ecs, 0, sizeof (*ecs));
1948 ecs->event_thread = tp;
1949 ecs->ptid = tp->ptid;
1950 }
1951
/* Forward declarations of run-control helpers defined further down in
   this file.  */
static void keep_going_pass_signal (struct execution_control_state *ecs);
static void prepare_to_wait (struct execution_control_state *ecs);
static int keep_going_stepped_thread (struct thread_info *tp);
static step_over_what thread_still_needs_step_over (struct thread_info *tp);
1956
/* Are there any pending step-over requests?  If so, run all we can
   now and return true.  Otherwise, return false.  */

static int
start_step_over (void)
{
  struct thread_info *tp, *next;

  /* Don't start a new step-over if we already have an in-line
     step-over operation ongoing.  */
  if (step_over_info_valid_p ())
    return 0;

  for (tp = step_over_queue_head; tp != NULL; tp = next)
    {
      struct execution_control_state ecss;
      struct execution_control_state *ecs = &ecss;
      step_over_what step_what;
      int must_be_in_line;

      gdb_assert (!tp->stop_requested);

      /* Fetch the next queue link now, before TP is possibly removed
	 from the queue below.  */
      next = thread_step_over_chain_next (tp);

      /* If this inferior already has a displaced step in process,
	 don't start a new one.  */
      if (displaced_step_in_progress (tp->inf))
	continue;

      step_what = thread_still_needs_step_over (tp);
      must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT)
			 || ((step_what & STEP_OVER_BREAKPOINT)
			     && !use_displaced_stepping (tp)));

      /* We currently stop all threads of all processes to step-over
	 in-line.  If we need to start a new in-line step-over, let
	 any pending displaced steps finish first.  */
      if (must_be_in_line && displaced_step_in_progress_any_inferior ())
	return 0;

      thread_step_over_chain_remove (tp);

      if (step_over_queue_head == NULL)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: step-over queue now empty\n");
	}

      /* A thread sitting in the step-over queue is expected to be
	 stopped and not yet resumed; anything else is an infrun
	 bookkeeping bug.  */
      if (tp->control.trap_expected
	  || tp->resumed
	  || tp->executing)
	{
	  internal_error (__FILE__, __LINE__,
			  "[%s] has inconsistent state: "
			  "trap_expected=%d, resumed=%d, executing=%d\n",
			  target_pid_to_str (tp->ptid).c_str (),
			  tp->control.trap_expected,
			  tp->resumed,
			  tp->executing);
	}

      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: resuming [%s] for step-over\n",
			    target_pid_to_str (tp->ptid).c_str ());

      /* keep_going_pass_signal skips the step-over if the breakpoint
	 is no longer inserted.  In all-stop, we want to keep looking
	 for a thread that needs a step-over instead of resuming TP,
	 because we wouldn't be able to resume anything else until the
	 target stops again.  In non-stop, the resume always resumes
	 only TP, so it's OK to let the thread resume freely.  */
      if (!target_is_non_stop_p () && !step_what)
	continue;

      switch_to_thread (tp);
      reset_ecs (ecs, tp);
      keep_going_pass_signal (ecs);

      if (!ecs->wait_some_more)
	error (_("Command aborted."));

      gdb_assert (tp->resumed);

      /* If we started a new in-line step-over, we're done.  */
      if (step_over_info_valid_p ())
	{
	  gdb_assert (tp->control.trap_expected);
	  return 1;
	}

      if (!target_is_non_stop_p ())
	{
	  /* On all-stop, shouldn't have resumed unless we needed a
	     step over.  */
	  gdb_assert (tp->control.trap_expected
		      || tp->step_after_step_resume_breakpoint);

	  /* With remote targets (at least), in all-stop, we can't
	     issue any further remote commands until the program stops
	     again.  */
	  return 1;
	}

      /* Either the thread no longer needed a step-over, or a new
	 displaced stepping sequence started.  Even in the latter
	 case, continue looking.  Maybe we can also start another
	 displaced step on a thread of other process. */
    }

  return 0;
}
2070
2071 /* Update global variables holding ptids to hold NEW_PTID if they were
2072 holding OLD_PTID. */
2073 static void
2074 infrun_thread_ptid_changed (process_stratum_target *target,
2075 ptid_t old_ptid, ptid_t new_ptid)
2076 {
2077 if (inferior_ptid == old_ptid
2078 && current_inferior ()->process_target () == target)
2079 inferior_ptid = new_ptid;
2080 }
2081
2082 \f
2083
/* Possible string values of the "scheduler-locking" setting.  */
static const char schedlock_off[] = "off";
static const char schedlock_on[] = "on";
static const char schedlock_step[] = "step";
static const char schedlock_replay[] = "replay";

/* NULL-terminated list of the valid values above, for the setting's
   enum machinery.  */
static const char *const scheduler_enums[] = {
  schedlock_off,
  schedlock_on,
  schedlock_step,
  schedlock_replay,
  NULL
};

/* Current scheduler-locking mode.  Defaults to "replay": lock the
   scheduler only while replaying a recording (see
   user_visible_resume_ptid).  */
static const char *scheduler_mode = schedlock_replay;
2096 static void
2097 show_scheduler_mode (struct ui_file *file, int from_tty,
2098 struct cmd_list_element *c, const char *value)
2099 {
2100 fprintf_filtered (file,
2101 _("Mode for locking scheduler "
2102 "during execution is \"%s\".\n"),
2103 value);
2104 }
2105
2106 static void
2107 set_schedlock_func (const char *args, int from_tty, struct cmd_list_element *c)
2108 {
2109 if (!target_can_lock_scheduler)
2110 {
2111 scheduler_mode = schedlock_off;
2112 error (_("Target '%s' cannot support this command."), target_shortname);
2113 }
2114 }
2115
/* True if execution commands resume all threads of all processes by
   default; otherwise, resume only threads of the current inferior
   process.  NOTE(review): presumably controlled by the "set
   schedule-multiple" command — the setter is not visible in this
   chunk; confirm where this is assigned.  */
bool sched_multi = false;
2120
2121 /* Try to setup for software single stepping over the specified location.
2122 Return 1 if target_resume() should use hardware single step.
2123
2124 GDBARCH the current gdbarch.
2125 PC the location to step over. */
2126
2127 static int
2128 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
2129 {
2130 int hw_step = 1;
2131
2132 if (execution_direction == EXEC_FORWARD
2133 && gdbarch_software_single_step_p (gdbarch))
2134 hw_step = !insert_single_step_breakpoints (gdbarch);
2135
2136 return hw_step;
2137 }
2138
2139 /* See infrun.h. */
2140
2141 ptid_t
2142 user_visible_resume_ptid (int step)
2143 {
2144 ptid_t resume_ptid;
2145
2146 if (non_stop)
2147 {
2148 /* With non-stop mode on, threads are always handled
2149 individually. */
2150 resume_ptid = inferior_ptid;
2151 }
2152 else if ((scheduler_mode == schedlock_on)
2153 || (scheduler_mode == schedlock_step && step))
2154 {
2155 /* User-settable 'scheduler' mode requires solo thread
2156 resume. */
2157 resume_ptid = inferior_ptid;
2158 }
2159 else if ((scheduler_mode == schedlock_replay)
2160 && target_record_will_replay (minus_one_ptid, execution_direction))
2161 {
2162 /* User-settable 'scheduler' mode requires solo thread resume in replay
2163 mode. */
2164 resume_ptid = inferior_ptid;
2165 }
2166 else if (!sched_multi && target_supports_multi_process ())
2167 {
2168 /* Resume all threads of the current process (and none of other
2169 processes). */
2170 resume_ptid = ptid_t (inferior_ptid.pid ());
2171 }
2172 else
2173 {
2174 /* Resume all threads of all processes. */
2175 resume_ptid = RESUME_ALL;
2176 }
2177
2178 return resume_ptid;
2179 }
2180
2181 /* See infrun.h. */
2182
2183 process_stratum_target *
2184 user_visible_resume_target (ptid_t resume_ptid)
2185 {
2186 return (resume_ptid == minus_one_ptid && sched_multi
2187 ? NULL
2188 : current_inferior ()->process_target ());
2189 }
2190
2191 /* Return a ptid representing the set of threads that we will resume,
2192 in the perspective of the target, assuming run control handling
2193 does not require leaving some threads stopped (e.g., stepping past
2194 breakpoint). USER_STEP indicates whether we're about to start the
2195 target for a stepping command. */
2196
2197 static ptid_t
2198 internal_resume_ptid (int user_step)
2199 {
2200 /* In non-stop, we always control threads individually. Note that
2201 the target may always work in non-stop mode even with "set
2202 non-stop off", in which case user_visible_resume_ptid could
2203 return a wildcard ptid. */
2204 if (target_is_non_stop_p ())
2205 return inferior_ptid;
2206 else
2207 return user_visible_resume_ptid (user_step);
2208 }
2209
2210 /* Wrapper for target_resume, that handles infrun-specific
2211 bookkeeping. */
2212
2213 static void
2214 do_target_resume (ptid_t resume_ptid, int step, enum gdb_signal sig)
2215 {
2216 struct thread_info *tp = inferior_thread ();
2217
2218 gdb_assert (!tp->stop_requested);
2219
2220 /* Install inferior's terminal modes. */
2221 target_terminal::inferior ();
2222
2223 /* Avoid confusing the next resume, if the next stop/resume
2224 happens to apply to another thread. */
2225 tp->suspend.stop_signal = GDB_SIGNAL_0;
2226
2227 /* Advise target which signals may be handled silently.
2228
2229 If we have removed breakpoints because we are stepping over one
2230 in-line (in any thread), we need to receive all signals to avoid
2231 accidentally skipping a breakpoint during execution of a signal
2232 handler.
2233
2234 Likewise if we're displaced stepping, otherwise a trap for a
2235 breakpoint in a signal handler might be confused with the
2236 displaced step finishing. We don't make the displaced_step_fixup
2237 step distinguish the cases instead, because:
2238
2239 - a backtrace while stopped in the signal handler would show the
2240 scratch pad as frame older than the signal handler, instead of
2241 the real mainline code.
2242
2243 - when the thread is later resumed, the signal handler would
2244 return to the scratch pad area, which would no longer be
2245 valid. */
2246 if (step_over_info_valid_p ()
2247 || displaced_step_in_progress (tp->inf))
2248 target_pass_signals ({});
2249 else
2250 target_pass_signals (signal_pass);
2251
2252 target_resume (resume_ptid, step, sig);
2253
2254 target_commit_resume ();
2255
2256 if (target_can_async_p ())
2257 target_async (1);
2258 }
2259
/* Resume the inferior.  SIG is the signal to give the inferior
   (GDB_SIGNAL_0 for none).  Note: don't call this directly; instead
   call 'resume', which handles exceptions.  */

static void
resume_1 (enum gdb_signal sig)
{
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = regcache->arch ();
  struct thread_info *tp = inferior_thread ();
  const address_space *aspace = regcache->aspace ();
  ptid_t resume_ptid;
  /* This represents the user's step vs continue request.  When
     deciding whether "set scheduler-locking step" applies, it's the
     user's intention that counts.  */
  const int user_step = tp->control.stepping_command;
  /* This represents what we'll actually request the target to do.
     This can decay from a step to a continue, if e.g., we need to
     implement single-stepping with breakpoints (software
     single-step).  */
  int step;

  gdb_assert (!tp->stop_requested);
  gdb_assert (!thread_is_in_step_over_chain (tp));

  /* If the thread already has an event pending from a previous stop,
     don't actually resume the target; just mark the thread resumed
     and let the event loop re-process the pending event.  */
  if (tp->suspend.waitstatus_pending_p)
    {
      if (debug_infrun)
	{
	  std::string statstr
	    = target_waitstatus_to_string (&tp->suspend.waitstatus);

	  fprintf_unfiltered (gdb_stdlog,
			      "infrun: resume: thread %s has pending wait "
			      "status %s (currently_stepping=%d).\n",
			      target_pid_to_str (tp->ptid).c_str (),
			      statstr.c_str (),
			      currently_stepping (tp));
	}

      tp->inf->process_target ()->threads_executing = true;
      tp->resumed = true;

      /* FIXME: What should we do if we are supposed to resume this
	 thread with a signal?  Maybe we should maintain a queue of
	 pending signals to deliver.  */
      if (sig != GDB_SIGNAL_0)
	{
	  warning (_("Couldn't deliver signal %s to %s."),
		   gdb_signal_to_name (sig),
		   target_pid_to_str (tp->ptid).c_str ());
	}

      tp->suspend.stop_signal = GDB_SIGNAL_0;

      if (target_can_async_p ())
	{
	  target_async (1);
	  /* Tell the event loop we have an event to process. */
	  mark_async_event_handler (infrun_async_inferior_event_token);
	}
      return;
    }

  tp->stepped_breakpoint = 0;

  /* Depends on stepped_breakpoint.  */
  step = currently_stepping (tp);

  if (current_inferior ()->waiting_for_vfork_done)
    {
      /* Don't try to single-step a vfork parent that is waiting for
	 the child to get out of the shared memory region (by exec'ing
	 or exiting).  This is particularly important on software
	 single-step archs, as the child process would trip on the
	 software single step breakpoint inserted for the parent
	 process.  Since the parent will not actually execute any
	 instruction until the child is out of the shared region (such
	 are vfork's semantics), it is safe to simply continue it.
	 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
	 the parent, and tell it to `keep_going', which automatically
	 re-sets it stepping.  */
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: resume : clear step\n");
      step = 0;
    }

  CORE_ADDR pc = regcache_read_pc (regcache);

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: resume (step=%d, signal=%s), "
			"trap_expected=%d, current thread [%s] at %s\n",
			step, gdb_signal_to_symbol_string (sig),
			tp->control.trap_expected,
			target_pid_to_str (inferior_ptid).c_str (),
			paddress (gdbarch, pc));

  /* Normally, by the time we reach `resume', the breakpoints are either
     removed or inserted, as appropriate.  The exception is if we're sitting
     at a permanent breakpoint; we need to step over it, but permanent
     breakpoints can't be removed.  So we have to test for it here.  */
  if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
    {
      if (sig != GDB_SIGNAL_0)
	{
	  /* We have a signal to pass to the inferior.  The resume
	     may, or may not take us to the signal handler.  If this
	     is a step, we'll need to stop in the signal handler, if
	     there's one, (if the target supports stepping into
	     handlers), or in the next mainline instruction, if
	     there's no handler.  If this is a continue, we need to be
	     sure to run the handler with all breakpoints inserted.
	     In all cases, set a breakpoint at the current address
	     (where the handler returns to), and once that breakpoint
	     is hit, resume skipping the permanent breakpoint.  If
	     that breakpoint isn't hit, then we've stepped into the
	     signal handler (or hit some other event).  We'll delete
	     the step-resume breakpoint then.  */

	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: resume: skipping permanent breakpoint, "
				"deliver signal first\n");

	  clear_step_over_info ();
	  tp->control.trap_expected = 0;

	  if (tp->control.step_resume_breakpoint == NULL)
	    {
	      /* Set a "high-priority" step-resume, as we don't want
		 user breakpoints at PC to trigger (again) when this
		 hits.  */
	      insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
	      gdb_assert (tp->control.step_resume_breakpoint->loc->permanent);

	      tp->step_after_step_resume_breakpoint = step;
	    }

	  insert_breakpoints ();
	}
      else
	{
	  /* There's no signal to pass, we can go ahead and skip the
	     permanent breakpoint manually.  */
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: resume: skipping permanent breakpoint\n");
	  gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
	  /* Update pc to reflect the new address from which we will
	     execute instructions.  */
	  pc = regcache_read_pc (regcache);

	  if (step)
	    {
	      /* We've already advanced the PC, so the stepping part
		 is done.  Now we need to arrange for a trap to be
		 reported to handle_inferior_event.  Set a breakpoint
		 at the current PC, and run to it.  Don't update
		 prev_pc, because if we end in
		 switch_back_to_stepped_thread, we want the "expected
		 thread advanced also" branch to be taken.  IOW, we
		 don't want this thread to step further from PC
		 (overstep).  */
	      gdb_assert (!step_over_info_valid_p ());
	      insert_single_step_breakpoint (gdbarch, aspace, pc);
	      insert_breakpoints ();

	      resume_ptid = internal_resume_ptid (user_step);
	      do_target_resume (resume_ptid, 0, GDB_SIGNAL_0);
	      tp->resumed = true;
	      return;
	    }
	}
    }

  /* If we have a breakpoint to step over, make sure to do a single
     step only.  Same if we have software watchpoints.  */
  if (tp->control.trap_expected || bpstat_should_step ())
    tp->control.may_range_step = 0;

  /* If displaced stepping is enabled, step over breakpoints by executing a
     copy of the instruction at a different address.

     We can't use displaced stepping when we have a signal to deliver;
     the comments for displaced_step_prepare explain why.  The
     comments in the handle_inferior event for dealing with 'random
     signals' explain what we do instead.

     We can't use displaced stepping when we are waiting for vfork_done
     event, displaced stepping breaks the vfork child similarly as single
     step software breakpoint.  */
  if (tp->control.trap_expected
      && use_displaced_stepping (tp)
      && !step_over_info_valid_p ()
      && sig == GDB_SIGNAL_0
      && !current_inferior ()->waiting_for_vfork_done)
    {
      int prepared = displaced_step_prepare (tp);

      if (prepared == 0)
	{
	  /* Another thread holds the scratch pad; this thread was
	     queued and will be resumed later.  */
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"Got placed in step-over queue\n");

	  tp->control.trap_expected = 0;
	  return;
	}
      else if (prepared < 0)
	{
	  /* Fallback to stepping over the breakpoint in-line.  */

	  if (target_is_non_stop_p ())
	    stop_all_threads ();

	  set_step_over_info (regcache->aspace (),
			      regcache_read_pc (regcache), 0, tp->global_num);

	  step = maybe_software_singlestep (gdbarch, pc);

	  insert_breakpoints ();
	}
      else if (prepared > 0)
	{
	  struct displaced_step_inferior_state *displaced;

	  /* Update pc to reflect the new address from which we will
	     execute instructions due to displaced stepping.  */
	  pc = regcache_read_pc (get_thread_regcache (tp));

	  displaced = get_displaced_stepping_state (tp->inf);
	  step = gdbarch_displaced_step_hw_singlestep
	    (gdbarch, displaced->step_closure.get ());
	}
    }

  /* Do we need to do it the hard way, w/temp breakpoints?  */
  else if (step)
    step = maybe_software_singlestep (gdbarch, pc);

  /* Currently, our software single-step implementation leads to different
     results than hardware single-stepping in one situation: when stepping
     into delivering a signal which has an associated signal handler,
     hardware single-step will stop at the first instruction of the handler,
     while software single-step will simply skip execution of the handler.

     For now, this difference in behavior is accepted since there is no
     easy way to actually implement single-stepping into a signal handler
     without kernel support.

     However, there is one scenario where this difference leads to follow-on
     problems: if we're stepping off a breakpoint by removing all breakpoints
     and then single-stepping.  In this case, the software single-step
     behavior means that even if there is a *breakpoint* in the signal
     handler, GDB still would not stop.

     Fortunately, we can at least fix this particular issue.  We detect
     here the case where we are about to deliver a signal while software
     single-stepping with breakpoints removed.  In this situation, we
     revert the decisions to remove all breakpoints and insert single-
     step breakpoints, and instead we install a step-resume breakpoint
     at the current address, deliver the signal without stepping, and
     once we arrive back at the step-resume breakpoint, actually step
     over the breakpoint we originally wanted to step over.  */
  if (thread_has_single_step_breakpoints_set (tp)
      && sig != GDB_SIGNAL_0
      && step_over_info_valid_p ())
    {
      /* If we have nested signals or a pending signal is delivered
	 immediately after a handler returns, might already have
	 a step-resume breakpoint set on the earlier handler.  We cannot
	 set another step-resume breakpoint; just continue on until the
	 original breakpoint is hit.  */
      if (tp->control.step_resume_breakpoint == NULL)
	{
	  insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
	  tp->step_after_step_resume_breakpoint = 1;
	}

      delete_single_step_breakpoints (tp);

      clear_step_over_info ();
      tp->control.trap_expected = 0;

      insert_breakpoints ();
    }

  /* If STEP is set, it's a request to use hardware stepping
     facilities.  But in that case, we should never
     use singlestep breakpoint.  */
  gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));

  /* Decide the set of threads to ask the target to resume.  */
  if (tp->control.trap_expected)
    {
      /* We're allowing a thread to run past a breakpoint it has
	 hit, either by single-stepping the thread with the breakpoint
	 removed, or by displaced stepping, with the breakpoint inserted.
	 In the former case, we need to single-step only this thread,
	 and keep others stopped, as they can miss this breakpoint if
	 allowed to run.  That's not really a problem for displaced
	 stepping, but, we still keep other threads stopped, in case
	 another thread is also stopped for a breakpoint waiting for
	 its turn in the displaced stepping queue.  */
      resume_ptid = inferior_ptid;
    }
  else
    resume_ptid = internal_resume_ptid (user_step);

  if (execution_direction != EXEC_REVERSE
      && step && breakpoint_inserted_here_p (aspace, pc))
    {
      /* There are two cases where we currently need to step a
	 breakpoint instruction when we have a signal to deliver:

	 - See handle_signal_stop where we handle random signals that
	 could take out us out of the stepping range.  Normally, in
	 that case we end up continuing (instead of stepping) over the
	 signal handler with a breakpoint at PC, but there are cases
	 where we should _always_ single-step, even if we have a
	 step-resume breakpoint, like when a software watchpoint is
	 set.  Assuming single-stepping and delivering a signal at the
	 same time would takes us to the signal handler, then we could
	 have removed the breakpoint at PC to step over it.  However,
	 some hardware step targets (like e.g., Mac OS) can't step
	 into signal handlers, and for those, we need to leave the
	 breakpoint at PC inserted, as otherwise if the handler
	 recurses and executes PC again, it'll miss the breakpoint.
	 So we leave the breakpoint inserted anyway, but we need to
	 record that we tried to step a breakpoint instruction, so
	 that adjust_pc_after_break doesn't end up confused.

	 - In non-stop if we insert a breakpoint (e.g., a step-resume)
	 in one thread after another thread that was stepping had been
	 momentarily paused for a step-over.  When we re-resume the
	 stepping thread, it may be resumed from that address with a
	 breakpoint that hasn't trapped yet.  Seen with
	 gdb.threads/non-stop-fair-events.exp, on targets that don't
	 do displaced stepping.  */

      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: resume: [%s] stepped breakpoint\n",
			    target_pid_to_str (tp->ptid).c_str ());

      tp->stepped_breakpoint = 1;

      /* Most targets can step a breakpoint instruction, thus
	 executing it normally.  But if this one cannot, just
	 continue and we will hit it anyway.  */
      if (gdbarch_cannot_step_breakpoint (gdbarch))
	step = 0;
    }

  if (debug_displaced
      && tp->control.trap_expected
      && use_displaced_stepping (tp)
      && !step_over_info_valid_p ())
    {
      struct regcache *resume_regcache = get_thread_regcache (tp);
      struct gdbarch *resume_gdbarch = resume_regcache->arch ();
      CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
      gdb_byte buf[4];

      /* Dump the first few bytes of the scratch-pad instruction we
	 are about to run.  */
      fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
			  paddress (resume_gdbarch, actual_pc));
      read_memory (actual_pc, buf, sizeof (buf));
      displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
    }

  if (tp->control.may_range_step)
    {
      /* If we're resuming a thread with the PC out of the step
	 range, then we're doing some nested/finer run control
	 operation, like stepping the thread out of the dynamic
	 linker or the displaced stepping scratch pad.  We
	 shouldn't have allowed a range step then.  */
      gdb_assert (pc_in_thread_step_range (pc, tp));
    }

  do_target_resume (resume_ptid, step, sig);
  tp->resumed = true;
}
2645
/* Resume the inferior.  SIG is the signal to give the inferior
   (GDB_SIGNAL_0 for none).  This is a wrapper around 'resume_1' that
   rolls back state on error.  */

static void
resume (gdb_signal sig)
{
  try
    {
      resume_1 (sig);
    }
  catch (const gdb_exception &ex)
    {
      /* If resuming is being aborted for any reason, delete any
	 single-step breakpoint resume_1 may have created, to avoid
	 confusing the following resumption, and to avoid leaving
	 single-step breakpoints perturbing other threads, in case
	 we're running in non-stop mode.  */
      if (inferior_ptid != null_ptid)
	delete_single_step_breakpoints (inferior_thread ());
      /* Re-raise the original exception for the caller.  */
      throw;
    }
}
2669
2670 \f
2671 /* Proceeding. */
2672
/* See infrun.h.  */

/* Counter that tracks number of user visible stops.  This can be used
   to tell whether a command has proceeded the inferior past the
   current location.  This allows e.g., inferior function calls in
   breakpoint commands to not interrupt the command list.  When the
   call finishes successfully, the inferior is standing at the same
   breakpoint as if nothing happened (and so we don't call
   normal_stop).  Incremented by new_stop_id and read via
   get_stop_id.  */
static ULONGEST current_stop_id;
2683
/* See infrun.h.  */

ULONGEST
get_stop_id (void)
{
  /* Return the current user-visible stop counter.  */
  return current_stop_id;
}
2691
2692 /* Called when we report a user visible stop. */
2693
2694 static void
2695 new_stop_id (void)
2696 {
2697 current_stop_id++;
2698 }
2699
/* Clear out all variables saying what to do when inferior is continued.
   First do this, then set the ones you want, then call `proceed'.

   Resets TP's run-control state: pending wait status (if it was a
   finished single-step), stop signal, command state machine, and all
   stepping ranges/frames.  */

static void
clear_proceed_status_thread (struct thread_info *tp)
{
  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: clear_proceed_status_thread (%s)\n",
			target_pid_to_str (tp->ptid).c_str ());

  /* If we're starting a new sequence, then the previous finished
     single-step is no longer relevant.  */
  if (tp->suspend.waitstatus_pending_p)
    {
      if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: clear_proceed_status: pending "
				"event of %s was a finished step. "
				"Discarding.\n",
				target_pid_to_str (tp->ptid).c_str ());

	  tp->suspend.waitstatus_pending_p = 0;
	  tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
      else if (debug_infrun)
	{
	  /* Any other pending status is kept; just log it.  */
	  std::string statstr
	    = target_waitstatus_to_string (&tp->suspend.waitstatus);

	  fprintf_unfiltered (gdb_stdlog,
			      "infrun: clear_proceed_status_thread: thread %s "
			      "has pending wait status %s "
			      "(currently_stepping=%d).\n",
			      target_pid_to_str (tp->ptid).c_str (),
			      statstr.c_str (),
			      currently_stepping (tp));
	}
    }

  /* If this signal should not be seen by program, give it zero.
     Used for debugging signals.  */
  if (!signal_pass_state (tp->suspend.stop_signal))
    tp->suspend.stop_signal = GDB_SIGNAL_0;

  /* Discard the thread's command state machine (thread_fsm), if
     any.  */
  delete tp->thread_fsm;
  tp->thread_fsm = NULL;

  /* Reset all per-thread stepping state to its defaults.  */
  tp->control.trap_expected = 0;
  tp->control.step_range_start = 0;
  tp->control.step_range_end = 0;
  tp->control.may_range_step = 0;
  tp->control.step_frame_id = null_frame_id;
  tp->control.step_stack_frame_id = null_frame_id;
  tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
  tp->control.step_start_function = NULL;
  tp->stop_requested = 0;

  tp->control.stop_step = 0;

  tp->control.proceed_to_finish = 0;

  tp->control.stepping_command = 0;

  /* Discard any remaining commands or status from previous stop.  */
  bpstat_clear (&tp->control.stop_bpstat);
}
2769
/* See infrun.h.  Clear the proceed status of the current thread (in
   non-stop) or of all threads we're about to resume (in all-stop),
   and of the current inferior.  STEP indicates whether the caller is
   about to do a step/next-like command.  */

void
clear_proceed_status (int step)
{
  /* With scheduler-locking replay, stop replaying other threads if we're
     not replaying the user-visible resume ptid.

     This is a convenience feature to not require the user to explicitly
     stop replaying the other threads.  We're assuming that the user's
     intent is to resume tracing the recorded process.  */
  if (!non_stop && scheduler_mode == schedlock_replay
      && target_record_is_replaying (minus_one_ptid)
      && !target_record_will_replay (user_visible_resume_ptid (step),
				     execution_direction))
    target_record_stop_replaying ();

  if (!non_stop && inferior_ptid != null_ptid)
    {
      ptid_t resume_ptid = user_visible_resume_ptid (step);
      process_stratum_target *resume_target
	= user_visible_resume_target (resume_ptid);

      /* In all-stop mode, delete the per-thread status of all threads
	 we're about to resume, implicitly and explicitly.  */
      for (thread_info *tp : all_non_exited_threads (resume_target, resume_ptid))
	clear_proceed_status_thread (tp);
    }

  if (inferior_ptid != null_ptid)
    {
      struct inferior *inferior;

      if (non_stop)
	{
	  /* If in non-stop mode, only delete the per-thread status of
	     the current thread.  */
	  clear_proceed_status_thread (inferior_thread ());
	}

      inferior = current_inferior ();
      inferior->control.stop_soon = NO_STOP_QUIETLY;
    }

  /* Let interested parties (e.g., breakpoint code) know a resume is
     coming.  */
  gdb::observers::about_to_proceed.notify ();
}
2814
2815 /* Returns true if TP is still stopped at a breakpoint that needs
2816 stepping-over in order to make progress. If the breakpoint is gone
2817 meanwhile, we can skip the whole step-over dance. */
2818
2819 static int
2820 thread_still_needs_step_over_bp (struct thread_info *tp)
2821 {
2822 if (tp->stepping_over_breakpoint)
2823 {
2824 struct regcache *regcache = get_thread_regcache (tp);
2825
2826 if (breakpoint_here_p (regcache->aspace (),
2827 regcache_read_pc (regcache))
2828 == ordinary_breakpoint_here)
2829 return 1;
2830
2831 tp->stepping_over_breakpoint = 0;
2832 }
2833
2834 return 0;
2835 }
2836
/* Check whether thread TP still needs to start a step-over in order
   to make progress when resumed.  Returns a bitwise or of enum
   step_over_what bits, indicating what needs to be stepped over.  */

static step_over_what
thread_still_needs_step_over (struct thread_info *tp)
{
  step_over_what what = 0;

  if (thread_still_needs_step_over_bp (tp))
    what |= STEP_OVER_BREAKPOINT;

  /* A watchpoint only needs a software step-over when the target
     can't step over it on its own.  */
  if (tp->stepping_over_watchpoint
      && !target_have_steppable_watchpoint)
    what |= STEP_OVER_WATCHPOINT;

  return what;
}
2855
2856 /* Returns true if scheduler locking applies. STEP indicates whether
2857 we're about to do a step/next-like command to a thread. */
2858
2859 static int
2860 schedlock_applies (struct thread_info *tp)
2861 {
2862 return (scheduler_mode == schedlock_on
2863 || (scheduler_mode == schedlock_step
2864 && tp->control.stepping_command)
2865 || (scheduler_mode == schedlock_replay
2866 && target_record_will_replay (minus_one_ptid,
2867 execution_direction)));
2868 }
2869
/* Calls target_commit_resume on all targets.  */

static void
commit_resume_all_targets ()
{
  scoped_restore_current_thread restore_thread;

  /* Map between process_target and a representative inferior.  This
     is to avoid committing a resume in the same target more than
     once.  Resumptions must be idempotent, so this is an
     optimization.  */
  std::unordered_map<process_stratum_target *, inferior *> conn_inf;

  for (inferior *inf : all_non_exited_inferiors ())
    if (inf->has_execution ())
      conn_inf[inf->process_target ()] = inf;

  /* Commit once per connection, switched to the context of that
     connection's representative inferior.  */
  for (const auto &ci : conn_inf)
    {
      inferior *inf = ci.second;
      switch_to_inferior_no_thread (inf);
      target_commit_resume ();
    }
}
2894
/* Check that all the targets we're about to resume are in non-stop
   mode.  Ideally, we'd only care whether all targets support
   target-async, but we're not there yet.  E.g., stop_all_threads
   doesn't know how to handle all-stop targets.  Also, the remote
   protocol in all-stop mode is synchronous, irrespective of
   target-async, which means that things like a breakpoint re-set
   triggered by one target would try to read memory from all targets
   and fail.

   RESUME_TARGET is the target being resumed, or nullptr when resuming
   threads of all targets; the check only matters in the latter,
   multi-target case.  Errors out if more than one connection is
   involved and at least one of them is not non-stop.  */

static void
check_multi_target_resumption (process_stratum_target *resume_target)
{
  if (!non_stop && resume_target == nullptr)
    {
      scoped_restore_current_thread restore_thread;

      /* This is used to track whether we're resuming more than one
	 target.  */
      process_stratum_target *first_connection = nullptr;

      /* The first inferior we see with a target that does not work in
	 always-non-stop mode.  */
      inferior *first_not_non_stop = nullptr;

      for (inferior *inf : all_non_exited_inferiors (resume_target))
	{
	  switch_to_inferior_no_thread (inf);

	  if (!target_has_execution)
	    continue;

	  process_stratum_target *proc_target
	    = current_inferior ()->process_target();

	  if (!target_is_non_stop_p ())
	    first_not_non_stop = inf;

	  if (first_connection == nullptr)
	    first_connection = proc_target;
	  else if (first_connection != proc_target
		   && first_not_non_stop != nullptr)
	    {
	      /* Two different connections and at least one of them is
		 all-stop: report against the offending connection.  */
	      switch_to_inferior_no_thread (first_not_non_stop);

	      proc_target = current_inferior ()->process_target();

	      error (_("Connection %d (%s) does not support "
		       "multi-target resumption."),
		     proc_target->connection_number,
		     make_target_connection_string (proc_target).c_str ());
	    }
	}
    }
}
2949
/* Basic routine for continuing the program in various fashions.

   ADDR is the address to resume at, or -1 for resume where stopped.
   SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
   or GDB_SIGNAL_DEFAULT for act according to how it stopped.

   You should call clear_proceed_status before calling proceed.  */

void
proceed (CORE_ADDR addr, enum gdb_signal siggnal)
{
  struct regcache *regcache;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  struct execution_control_state ecss;
  struct execution_control_state *ecs = &ecss;
  int started;

  /* If we're stopped at a fork/vfork, follow the branch set by the
     "set follow-fork-mode" command; otherwise, we'll just proceed
     resuming the current thread.  */
  if (!follow_fork ())
    {
      /* The target for some reason decided not to resume.  */
      normal_stop ();
      if (target_can_async_p ())
	inferior_event_handler (INF_EXEC_COMPLETE);
      return;
    }

  /* We'll update this if & when we switch to a new thread.  */
  previous_inferior_ptid = inferior_ptid;

  regcache = get_current_regcache ();
  gdbarch = regcache->arch ();
  const address_space *aspace = regcache->aspace ();

  pc = regcache_read_pc_protected (regcache);

  thread_info *cur_thr = inferior_thread ();

  /* Fill in with reasonable starting values.  */
  init_thread_stepping_state (cur_thr);

  gdb_assert (!thread_is_in_step_over_chain (cur_thr));

  ptid_t resume_ptid
    = user_visible_resume_ptid (cur_thr->control.stepping_command);
  process_stratum_target *resume_target
    = user_visible_resume_target (resume_ptid);

  check_multi_target_resumption (resume_target);

  if (addr == (CORE_ADDR) -1)
    {
      if (pc == cur_thr->suspend.stop_pc
	  && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
	  && execution_direction != EXEC_REVERSE)
	/* There is a breakpoint at the address we will resume at,
	   step one instruction before inserting breakpoints so that
	   we do not stop right away (and report a second hit at this
	   breakpoint).

	   Note, we don't do this in reverse, because we won't
	   actually be executing the breakpoint insn anyway.
	   We'll be (un-)executing the previous instruction.  */
	cur_thr->stepping_over_breakpoint = 1;
      else if (gdbarch_single_step_through_delay_p (gdbarch)
	       && gdbarch_single_step_through_delay (gdbarch,
						     get_current_frame ()))
	/* We stepped onto an instruction that needs to be stepped
	   again before re-inserting the breakpoint, do so.  */
	cur_thr->stepping_over_breakpoint = 1;
    }
  else
    {
      /* Resuming at an explicit address: just set the PC.  */
      regcache_write_pc (regcache, addr);
    }

  if (siggnal != GDB_SIGNAL_DEFAULT)
    cur_thr->suspend.stop_signal = siggnal;

  /* If an exception is thrown from this point on, make sure to
     propagate GDB's knowledge of the executing state to the
     frontend/user running state.  */
  scoped_finish_thread_state finish_state (resume_target, resume_ptid);

  /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
     threads (e.g., we might need to set threads stepping over
     breakpoints first), from the user/frontend's point of view, all
     threads in RESUME_PTID are now running.  Unless we're calling an
     inferior function, as in that case we pretend the inferior
     doesn't run at all.  */
  if (!cur_thr->control.in_infcall)
    set_running (resume_target, resume_ptid, true);

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: proceed (addr=%s, signal=%s)\n",
			paddress (gdbarch, addr),
			gdb_signal_to_symbol_string (siggnal));

  annotate_starting ();

  /* Make sure that output from GDB appears before output from the
     inferior.  */
  gdb_flush (gdb_stdout);

  /* Since we've marked the inferior running, give it the terminal.  A
     QUIT/Ctrl-C from here on is forwarded to the target (which can
     still detect attempts to unblock a stuck connection with repeated
     Ctrl-C from within target_pass_ctrlc).  */
  target_terminal::inferior ();

  /* In a multi-threaded task we may select another thread and
     then continue or step.

     But if a thread that we're resuming had stopped at a breakpoint,
     it will immediately cause another breakpoint stop without any
     execution (i.e. it will report a breakpoint hit incorrectly).  So
     we must step over it first.

     Look for threads other than the current (TP) that reported a
     breakpoint hit and haven't been resumed yet since.  */

  /* If scheduler locking applies, we can avoid iterating over all
     threads.  */
  if (!non_stop && !schedlock_applies (cur_thr))
    {
      for (thread_info *tp : all_non_exited_threads (resume_target,
						     resume_ptid))
	{
	  switch_to_thread_no_regs (tp);

	  /* Ignore the current thread here.  It's handled
	     afterwards.  */
	  if (tp == cur_thr)
	    continue;

	  if (!thread_still_needs_step_over (tp))
	    continue;

	  gdb_assert (!thread_is_in_step_over_chain (tp));

	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: need to step-over [%s] first\n",
				target_pid_to_str (tp->ptid).c_str ());

	  thread_step_over_chain_enqueue (tp);
	}

      switch_to_thread (cur_thr);
    }

  /* Enqueue the current thread last, so that we move all other
     threads over their breakpoints first.  */
  if (cur_thr->stepping_over_breakpoint)
    thread_step_over_chain_enqueue (cur_thr);

  /* If the thread isn't started, we'll still need to set its prev_pc,
     so that switch_back_to_stepped_thread knows the thread hasn't
     advanced.  Must do this before resuming any thread, as in
     all-stop/remote, once we resume we can't send any other packet
     until the target stops again.  */
  cur_thr->prev_pc = regcache_read_pc_protected (regcache);

  {
    /* Defer target_commit_resume until after the loop below, so all
       resumptions in this scope are committed at once.  */
    scoped_restore save_defer_tc = make_scoped_defer_target_commit_resume ();

    started = start_step_over ();

    if (step_over_info_valid_p ())
      {
	/* Either this thread started a new in-line step over, or some
	   other thread was already doing one.  In either case, don't
	   resume anything else until the step-over is finished.  */
      }
    else if (started && !target_is_non_stop_p ())
      {
	/* A new displaced stepping sequence was started.  In all-stop,
	   we can't talk to the target anymore until it next stops.  */
      }
    else if (!non_stop && target_is_non_stop_p ())
      {
	/* In all-stop, but the target is always in non-stop mode.
	   Start all other threads that are implicitly resumed too.  */
	for (thread_info *tp : all_non_exited_threads (resume_target,
						       resume_ptid))
	  {
	    switch_to_thread_no_regs (tp);

	    if (!tp->inf->has_execution ())
	      {
		if (debug_infrun)
		  fprintf_unfiltered (gdb_stdlog,
				      "infrun: proceed: [%s] target has "
				      "no execution\n",
				      target_pid_to_str (tp->ptid).c_str ());
		continue;
	      }

	    if (tp->resumed)
	      {
		if (debug_infrun)
		  fprintf_unfiltered (gdb_stdlog,
				      "infrun: proceed: [%s] resumed\n",
				      target_pid_to_str (tp->ptid).c_str ());
		gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
		continue;
	      }

	    if (thread_is_in_step_over_chain (tp))
	      {
		if (debug_infrun)
		  fprintf_unfiltered (gdb_stdlog,
				      "infrun: proceed: [%s] needs step-over\n",
				      target_pid_to_str (tp->ptid).c_str ());
		continue;
	      }

	    if (debug_infrun)
	      fprintf_unfiltered (gdb_stdlog,
				  "infrun: proceed: resuming %s\n",
				  target_pid_to_str (tp->ptid).c_str ());

	    reset_ecs (ecs, tp);
	    switch_to_thread (tp);
	    keep_going_pass_signal (ecs);
	    if (!ecs->wait_some_more)
	      error (_("Command aborted."));
	  }
      }
    else if (!cur_thr->resumed && !thread_is_in_step_over_chain (cur_thr))
      {
	/* The thread wasn't started, and isn't queued, run it now.  */
	reset_ecs (ecs, cur_thr);
	switch_to_thread (cur_thr);
	keep_going_pass_signal (ecs);
	if (!ecs->wait_some_more)
	  error (_("Command aborted."));
      }
  }

  commit_resume_all_targets ();

  finish_state.release ();

  /* If we've switched threads above, switch back to the previously
     current thread.  We don't want the user to see a different
     selected thread.  */
  switch_to_thread (cur_thr);

  /* Tell the event loop to wait for it to stop.  If the target
     supports asynchronous execution, it'll do this from within
     target_resume.  */
  if (!target_can_async_p ())
    mark_async_event_handler (infrun_async_inferior_event_token);
}
3209 \f
3210
/* Start remote-debugging of a machine over a serial link.  FROM_TTY
   is passed through to post_create_inferior.  Waits for the initial
   stop, runs post-creation bookkeeping, and reports the stop.  */

void
start_remote (int from_tty)
{
  inferior *inf = current_inferior ();
  inf->control.stop_soon = STOP_QUIETLY_REMOTE;

  /* Always go on waiting for the target, regardless of the mode.  */
  /* FIXME: cagney/1999-09-23: At present it isn't possible to
     indicate to wait_for_inferior that a target should timeout if
     nothing is returned (instead of just blocking).  Because of this,
     targets expecting an immediate response need to, internally, set
     things up so that the target_wait() is forced to eventually
     timeout.  */
  /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
     differentiate to its caller what the state of the target is after
     the initial open has been performed.  Here we're assuming that
     the target has stopped.  It should be possible to eventually have
     target_open() return to the caller an indication that the target
     is currently running and GDB state should be set to the same as
     for an async run.  */
  wait_for_inferior (inf);

  /* Now that the inferior has stopped, do any bookkeeping like
     loading shared libraries.  We want to do this before normal_stop,
     so that the displayed frame is up to date.  */
  post_create_inferior (current_top_target (), from_tty);

  normal_stop ();
}
3242
/* Initialize static vars when a new inferior begins.  */

void
init_wait_for_inferior (void)
{
  /* These are meaningless until the first time through wait_for_inferior.  */

  breakpoint_init_inferior (inf_starting);

  clear_proceed_status (0);

  nullify_last_target_wait_ptid ();

  previous_inferior_ptid = inferior_ptid;
}
3258
3259 \f
3260
3261 static void handle_inferior_event (struct execution_control_state *ecs);
3262
3263 static void handle_step_into_function (struct gdbarch *gdbarch,
3264 struct execution_control_state *ecs);
3265 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
3266 struct execution_control_state *ecs);
3267 static void handle_signal_stop (struct execution_control_state *ecs);
3268 static void check_exception_resume (struct execution_control_state *,
3269 struct frame_info *);
3270
3271 static void end_stepping_range (struct execution_control_state *ecs);
3272 static void stop_waiting (struct execution_control_state *ecs);
3273 static void keep_going (struct execution_control_state *ecs);
3274 static void process_event_stop_test (struct execution_control_state *ecs);
3275 static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
3276
/* This function is attached as a "thread_stop_requested" observer.
   Cleanup local state that assumed the PTID was to be resumed, and
   report the stop to the frontend.  PTID may be a wildcard matching
   several threads of the current inferior's target.  */

static void
infrun_thread_stop_requested (ptid_t ptid)
{
  process_stratum_target *curr_target = current_inferior ()->process_target ();

  /* PTID was requested to stop.  If the thread was already stopped,
     but the user/frontend doesn't know about that yet (e.g., the
     thread had been temporarily paused for some step-over), set up
     for reporting the stop now.  */
  for (thread_info *tp : all_threads (curr_target, ptid))
    {
      /* Only threads the frontend believes are running, and that are
	 not actually executing on the target, need fixing up.  */
      if (tp->state != THREAD_RUNNING)
	continue;
      if (tp->executing)
	continue;

      /* Remove matching threads from the step-over queue, so
	 start_step_over doesn't try to resume them
	 automatically.  */
      if (thread_is_in_step_over_chain (tp))
	thread_step_over_chain_remove (tp);

      /* If the thread is stopped, but the user/frontend doesn't
	 know about that yet, queue a pending event, as if the
	 thread had just stopped now.  Unless the thread already had
	 a pending event.  */
      if (!tp->suspend.waitstatus_pending_p)
	{
	  tp->suspend.waitstatus_pending_p = 1;
	  tp->suspend.waitstatus.kind = TARGET_WAITKIND_STOPPED;
	  tp->suspend.waitstatus.value.sig = GDB_SIGNAL_0;
	}

      /* Clear the inline-frame state, since we're re-processing the
	 stop.  */
      clear_inline_frame_state (tp);

      /* If this thread was paused because some other thread was
	 doing an inline-step over, let that finish first.  Once
	 that happens, we'll restart all threads and consume pending
	 stop events then.  */
      if (step_over_info_valid_p ())
	continue;

      /* Otherwise we can process the (new) pending event now.  Set
	 it so this pending event is considered by
	 do_target_wait.  */
      tp->resumed = true;
    }
}
3331
3332 static void
3333 infrun_thread_thread_exit (struct thread_info *tp, int silent)
3334 {
3335 if (target_last_proc_target == tp->inf->process_target ()
3336 && target_last_wait_ptid == tp->ptid)
3337 nullify_last_target_wait_ptid ();
3338 }
3339
/* Delete the step resume, single-step and longjmp/exception resume
   breakpoints of TP.  Called for each just-stopped thread (see
   for_each_just_stopped_thread below).  */

static void
delete_thread_infrun_breakpoints (struct thread_info *tp)
{
  delete_step_resume_breakpoint (tp);
  delete_exception_resume_breakpoint (tp);
  delete_single_step_breakpoints (tp);
}
3350
3351 /* If the target still has execution, call FUNC for each thread that
3352 just stopped. In all-stop, that's all the non-exited threads; in
3353 non-stop, that's the current thread, only. */
3354
3355 typedef void (*for_each_just_stopped_thread_callback_func)
3356 (struct thread_info *tp);
3357
3358 static void
3359 for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
3360 {
3361 if (!target_has_execution || inferior_ptid == null_ptid)
3362 return;
3363
3364 if (target_is_non_stop_p ())
3365 {
3366 /* If in non-stop mode, only the current thread stopped. */
3367 func (inferior_thread ());
3368 }
3369 else
3370 {
3371 /* In all-stop mode, all threads have stopped. */
3372 for (thread_info *tp : all_non_exited_threads ())
3373 func (tp);
3374 }
3375 }
3376
/* Delete the step resume and longjmp/exception resume breakpoints of
   the threads that just stopped.  (This also deletes their
   single-step breakpoints; see delete_thread_infrun_breakpoints.)  */

static void
delete_just_stopped_threads_infrun_breakpoints (void)
{
  for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
}
3385
/* Delete only the single-step breakpoints of the threads that just
   stopped.  */

static void
delete_just_stopped_threads_single_step_breakpoints (void)
{
  for_each_just_stopped_thread (delete_single_step_breakpoints);
}
3394
/* See infrun.h.  Log a target_wait call and its result (WAITON_PTID
   in, RESULT_PTID/WS out) to gdb_stdlog.  */

void
print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
			   const struct target_waitstatus *ws)
{
  std::string status_string = target_waitstatus_to_string (ws);
  string_file stb;

  /* The text is split over several lines because it was getting too long.
     Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
     output as a unit; we want only one timestamp printed if debug_timestamp
     is set.  */

  stb.printf ("infrun: target_wait (%d.%ld.%ld",
	      waiton_ptid.pid (),
	      waiton_ptid.lwp (),
	      waiton_ptid.tid ());
  if (waiton_ptid.pid () != -1)
    stb.printf (" [%s]", target_pid_to_str (waiton_ptid).c_str ());
  stb.printf (", status) =\n");
  stb.printf ("infrun: %d.%ld.%ld [%s],\n",
	      result_ptid.pid (),
	      result_ptid.lwp (),
	      result_ptid.tid (),
	      target_pid_to_str (result_ptid).c_str ());
  stb.printf ("infrun: %s\n", status_string.c_str ());

  /* This uses %s in part to handle %'s in the text, but also to avoid
     a gcc error: the format attribute requires a string literal.  */
  fprintf_unfiltered (gdb_stdlog, "%s", stb.c_str ());
}
3427
/* Select a thread at random, out of those of inferior INF which are
   resumed, match WAITON_PTID, and have a pending wait status.
   Returns NULL when no such thread exists.  The randomization gives
   each eligible thread a fair chance of being reported first.  */

static struct thread_info *
random_pending_event_thread (inferior *inf, ptid_t waiton_ptid)
{
  int num_events = 0;

  /* Predicate shared by both passes below.  */
  auto has_event = [&] (thread_info *tp)
    {
      return (tp->ptid.matches (waiton_ptid)
	      && tp->resumed
	      && tp->suspend.waitstatus_pending_p);
    };

  /* First see how many events we have.  Count only resumed threads
     that have an event pending.  */
  for (thread_info *tp : inf->non_exited_threads ())
    if (has_event (tp))
      num_events++;

  if (num_events == 0)
    return NULL;

  /* Now randomly pick a thread out of those that have had events.  */
  int random_selector = (int) ((num_events * (double) rand ())
			       / (RAND_MAX + 1.0));

  if (debug_infrun && num_events > 1)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: Found %d events, selecting #%d\n",
			num_events, random_selector);

  /* Select the Nth thread that has had an event.  */
  for (thread_info *tp : inf->non_exited_threads ())
    if (has_event (tp))
      if (random_selector-- == 0)
	return tp;

  gdb_assert_not_reached ("event thread not found");
}
3469
/* Wrapper for target_wait that first checks whether threads have
   pending statuses to report before actually asking the target for
   more events.  INF is the inferior we're using to call target_wait
   on.  PTID, STATUS and OPTIONS are as for target_wait.  Returns the
   ptid of the thread the reported event belongs to.  */

static ptid_t
do_target_wait_1 (inferior *inf, ptid_t ptid,
		  target_waitstatus *status, int options)
{
  ptid_t event_ptid;
  struct thread_info *tp;

  /* We know that we are looking for an event in the target of inferior
     INF, but we don't know which thread the event might come from.  As
     such we want to make sure that INFERIOR_PTID is reset so that none of
     the wait code relies on it - doing so is always a mistake.  */
  switch_to_inferior_no_thread (inf);

  /* First check if there is a resumed thread with a wait status
     pending.  */
  if (ptid == minus_one_ptid || ptid.is_pid ())
    {
      tp = random_pending_event_thread (inf, ptid);
    }
  else
    {
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: Waiting for specific thread %s.\n",
			    target_pid_to_str (ptid).c_str ());

      /* We have a specific thread to check.  */
      tp = find_thread_ptid (inf, ptid);
      gdb_assert (tp != NULL);
      if (!tp->suspend.waitstatus_pending_p)
	tp = NULL;
    }

  /* A pending breakpoint event may be stale: discard it if the PC
     moved or the breakpoint is no longer inserted there.  */
  if (tp != NULL
      && (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || tp->suspend.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct regcache *regcache = get_thread_regcache (tp);
      struct gdbarch *gdbarch = regcache->arch ();
      CORE_ADDR pc;
      int discard = 0;

      pc = regcache_read_pc (regcache);

      if (pc != tp->suspend.stop_pc)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: PC of %s changed. was=%s, now=%s\n",
				target_pid_to_str (tp->ptid).c_str (),
				paddress (gdbarch, tp->suspend.stop_pc),
				paddress (gdbarch, pc));
	  discard = 1;
	}
      else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: previous breakpoint of %s, at %s gone\n",
				target_pid_to_str (tp->ptid).c_str (),
				paddress (gdbarch, pc));

	  discard = 1;
	}

      if (discard)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: pending event of %s cancelled.\n",
				target_pid_to_str (tp->ptid).c_str ());

	  tp->suspend.waitstatus.kind = TARGET_WAITKIND_SPURIOUS;
	  tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
    }

  if (tp != NULL)
    {
      if (debug_infrun)
	{
	  std::string statstr
	    = target_waitstatus_to_string (&tp->suspend.waitstatus);

	  fprintf_unfiltered (gdb_stdlog,
			      "infrun: Using pending wait status %s for %s.\n",
			      statstr.c_str (),
			      target_pid_to_str (tp->ptid).c_str ());
	}

      /* Now that we've selected our final event LWP, un-adjust its PC
	 if it was a software breakpoint (and the target doesn't
	 always adjust the PC itself).  */
      if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  && !target_supports_stopped_by_sw_breakpoint ())
	{
	  struct regcache *regcache;
	  struct gdbarch *gdbarch;
	  int decr_pc;

	  regcache = get_thread_regcache (tp);
	  gdbarch = regcache->arch ();

	  decr_pc = gdbarch_decr_pc_after_break (gdbarch);
	  if (decr_pc != 0)
	    {
	      CORE_ADDR pc;

	      pc = regcache_read_pc (regcache);
	      regcache_write_pc (regcache, pc + decr_pc);
	    }
	}

      /* Consume the pending status and hand it to the caller.  */
      tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
      *status = tp->suspend.waitstatus;
      tp->suspend.waitstatus_pending_p = 0;

      /* Wake up the event loop again, until all pending events are
	 processed.  */
      if (target_is_async_p ())
	mark_async_event_handler (infrun_async_inferior_event_token);
      return tp->ptid;
    }

  /* But if we don't find one, we'll have to wait.  */

  if (deprecated_target_wait_hook)
    event_ptid = deprecated_target_wait_hook (ptid, status, options);
  else
    event_ptid = target_wait (ptid, status, options);

  return event_ptid;
}
3608
/* Wrapper for target_wait that first checks whether threads have
   pending statuses to report before actually asking the target for
   more events.  Polls for events from all inferiors/targets.
   WAIT_PTID restricts which inferiors may report; the resulting event
   (or TARGET_WAITKIND_IGNORE) is stored into ECS.  Returns true iff
   an event other than TARGET_WAITKIND_IGNORE was found.  */

static bool
do_target_wait (ptid_t wait_ptid, execution_control_state *ecs, int options)
{
  int num_inferiors = 0;
  int random_selector;

  /* For fairness, we pick the first inferior/target to poll at random
     out of all inferiors that may report events, and then continue
     polling the rest of the inferior list starting from that one in a
     circular fashion until the whole list is polled once.  */

  auto inferior_matches = [&wait_ptid] (inferior *inf)
    {
      return (inf->process_target () != NULL
	      && ptid_t (inf->pid).matches (wait_ptid));
    };

  /* First see how many matching inferiors we have.  */
  for (inferior *inf : all_inferiors ())
    if (inferior_matches (inf))
      num_inferiors++;

  if (num_inferiors == 0)
    {
      ecs->ws.kind = TARGET_WAITKIND_IGNORE;
      return false;
    }

  /* Now randomly pick an inferior out of those that matched.  */
  random_selector = (int)
    ((num_inferiors * (double) rand ()) / (RAND_MAX + 1.0));

  if (debug_infrun && num_inferiors > 1)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: Found %d inferiors, starting at #%d\n",
			num_inferiors, random_selector);

  /* Select the Nth inferior that matched.  */

  inferior *selected = nullptr;

  for (inferior *inf : all_inferiors ())
    if (inferior_matches (inf))
      if (random_selector-- == 0)
	{
	  selected = inf;
	  break;
	}

  /* Now poll for events out of each of the matching inferior's
     targets, starting from the selected one.  */

  auto do_wait = [&] (inferior *inf)
    {
      ecs->ptid = do_target_wait_1 (inf, wait_ptid, &ecs->ws, options);
      ecs->target = inf->process_target ();
      return (ecs->ws.kind != TARGET_WAITKIND_IGNORE);
    };

  /* Needed in 'all-stop + target-non-stop' mode, because we end up
     here spuriously after the target is all stopped and we've already
     reported the stop to the user, polling for events.  */
  scoped_restore_current_thread restore_thread;

  /* Poll from the selected inferior to the end of the list...  */
  int inf_num = selected->num;
  for (inferior *inf = selected; inf != NULL; inf = inf->next)
    if (inferior_matches (inf))
      if (do_wait (inf))
	return true;

  /* ... then wrap around from the start of the list up to (but not
     including) the selected inferior.  */
  for (inferior *inf = inferior_list;
       inf != NULL && inf->num < inf_num;
       inf = inf->next)
    if (inferior_matches (inf))
      if (do_wait (inf))
	return true;

  ecs->ws.kind = TARGET_WAITKIND_IGNORE;
  return false;
}
3693
/* Prepare and stabilize the inferior for detaching it.  E.g.,
   detaching while a thread is displaced stepping is a recipe for
   crashing it, as nothing would readjust the PC out of the scratch
   pad.  So keep processing events until no thread of the current
   inferior is displaced stepping anymore.  */

void
prepare_for_detach (void)
{
  struct inferior *inf = current_inferior ();
  ptid_t pid_ptid = ptid_t (inf->pid);

  displaced_step_inferior_state *displaced = get_displaced_stepping_state (inf);

  /* Is any thread of this process displaced stepping?  If not,
     there's nothing else to do.  */
  if (displaced->step_thread == nullptr)
    return;

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"displaced-stepping in-process while detaching");

  /* Mark the inferior as detaching for the duration of the loop
     below; restored automatically on scope exit or error.  */
  scoped_restore restore_detaching = make_scoped_restore (&inf->detaching, true);

  while (displaced->step_thread != nullptr)
    {
      struct execution_control_state ecss;
      struct execution_control_state *ecs;

      ecs = &ecss;
      memset (ecs, 0, sizeof (*ecs));

      overlay_cache_invalid = 1;
      /* Flush target cache before starting to handle each event.
	 Target was running and cache could be stale.  This is just a
	 heuristic.  Running threads may modify target memory, but we
	 don't get any event.  */
      target_dcache_invalidate ();

      do_target_wait (pid_ptid, ecs, 0);

      if (debug_infrun)
	print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);

      /* If an error happens while handling the event, propagate GDB's
	 knowledge of the executing state to the frontend/user running
	 state.  */
      scoped_finish_thread_state finish_state (inf->process_target (),
					       minus_one_ptid);

      /* Now figure out what to do with the result of the wait.  */
      handle_inferior_event (ecs);

      /* No error, don't finish the state yet.  */
      finish_state.release ();

      /* Breakpoints and watchpoints are not installed on the target
	 at this point, and signals are passed directly to the
	 inferior, so this must mean the process is gone.  */
      if (!ecs->wait_some_more)
	{
	  restore_detaching.release ();
	  error (_("Program exited while detaching"));
	}
    }

  restore_detaching.release ();
}
3762
3763 /* Wait for control to return from inferior to debugger.
3764
3765 If inferior gets a signal, we may decide to start it up again
3766 instead of returning. That is why there is a loop in this function.
3767 When this function actually returns it means the inferior
3768 should be left stopped and GDB should read more commands. */
3769
3770 static void
3771 wait_for_inferior (inferior *inf)
3772 {
3773 if (debug_infrun)
3774 fprintf_unfiltered
3775 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
3776
3777 SCOPE_EXIT { delete_just_stopped_threads_infrun_breakpoints (); };
3778
3779 /* If an error happens while handling the event, propagate GDB's
3780 knowledge of the executing state to the frontend/user running
3781 state. */
3782 scoped_finish_thread_state finish_state
3783 (inf->process_target (), minus_one_ptid);
3784
3785 while (1)
3786 {
3787 struct execution_control_state ecss;
3788 struct execution_control_state *ecs = &ecss;
3789
3790 memset (ecs, 0, sizeof (*ecs));
3791
3792 overlay_cache_invalid = 1;
3793
3794 /* Flush target cache before starting to handle each event.
3795 Target was running and cache could be stale. This is just a
3796 heuristic. Running threads may modify target memory, but we
3797 don't get any event. */
3798 target_dcache_invalidate ();
3799
3800 ecs->ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs->ws, 0);
3801 ecs->target = inf->process_target ();
3802
3803 if (debug_infrun)
3804 print_target_wait_results (minus_one_ptid, ecs->ptid, &ecs->ws);
3805
3806 /* Now figure out what to do with the result of the result. */
3807 handle_inferior_event (ecs);
3808
3809 if (!ecs->wait_some_more)
3810 break;
3811 }
3812
3813 /* No error, don't finish the state yet. */
3814 finish_state.release ();
3815 }
3816
3817 /* Cleanup that reinstalls the readline callback handler, if the
3818 target is running in the background. If while handling the target
3819 event something triggered a secondary prompt, like e.g., a
3820 pagination prompt, we'll have removed the callback handler (see
3821 gdb_readline_wrapper_line). Need to do this as we go back to the
3822 event loop, ready to process further input. Note this has no
3823 effect if the handler hasn't actually been removed, because calling
3824 rl_callback_handler_install resets the line buffer, thus losing
3825 input. */
3826
3827 static void
3828 reinstall_readline_callback_handler_cleanup ()
3829 {
3830 struct ui *ui = current_ui;
3831
3832 if (!ui->async)
3833 {
3834 /* We're not going back to the top level event loop yet. Don't
3835 install the readline callback, as it'd prep the terminal,
3836 readline-style (raw, noecho) (e.g., --batch). We'll install
3837 it the next time the prompt is displayed, when we're ready
3838 for input. */
3839 return;
3840 }
3841
3842 if (ui->command_editing && ui->prompt_state != PROMPT_BLOCKED)
3843 gdb_rl_callback_handler_reinstall ();
3844 }
3845
3846 /* Clean up the FSMs of threads that are now stopped. In non-stop,
3847 that's just the event thread. In all-stop, that's all threads. */
3848
3849 static void
3850 clean_up_just_stopped_threads_fsms (struct execution_control_state *ecs)
3851 {
3852 if (ecs->event_thread != NULL
3853 && ecs->event_thread->thread_fsm != NULL)
3854 ecs->event_thread->thread_fsm->clean_up (ecs->event_thread);
3855
3856 if (!non_stop)
3857 {
3858 for (thread_info *thr : all_non_exited_threads ())
3859 {
3860 if (thr->thread_fsm == NULL)
3861 continue;
3862 if (thr == ecs->event_thread)
3863 continue;
3864
3865 switch_to_thread (thr);
3866 thr->thread_fsm->clean_up (thr);
3867 }
3868
3869 if (ecs->event_thread != NULL)
3870 switch_to_thread (ecs->event_thread);
3871 }
3872 }
3873
3874 /* Helper for all_uis_check_sync_execution_done that works on the
3875 current UI. */
3876
3877 static void
3878 check_curr_ui_sync_execution_done (void)
3879 {
3880 struct ui *ui = current_ui;
3881
3882 if (ui->prompt_state == PROMPT_NEEDED
3883 && ui->async
3884 && !gdb_in_secondary_prompt_p (ui))
3885 {
3886 target_terminal::ours ();
3887 gdb::observers::sync_execution_done.notify ();
3888 ui_register_input_event_handler (ui);
3889 }
3890 }
3891
/* See infrun.h.  */

void
all_uis_check_sync_execution_done (void)
{
  /* Run the per-UI check with each UI in turn made current.  */
  SWITCH_THRU_ALL_UIS ()
    {
      check_curr_ui_sync_execution_done ();
    }
}
3902
3903 /* See infrun.h. */
3904
3905 void
3906 all_uis_on_sync_execution_starting (void)
3907 {
3908 SWITCH_THRU_ALL_UIS ()
3909 {
3910 if (current_ui->prompt_state == PROMPT_NEEDED)
3911 async_disable_stdin ();
3912 }
3913 }
3914
/* Asynchronous version of wait_for_inferior.  It is called by the
   event loop whenever a change of state is detected on the file
   descriptor corresponding to the target.  It can be called more than
   once to complete a single execution command.  In such cases we need
   to keep the state in a global variable ECSS.  If it is the last time
   that this function is called for a single execution command, then
   report to the user that the inferior has stopped, and do the
   necessary cleanups.  */

void
fetch_inferior_event ()
{
  struct execution_control_state ecss;
  struct execution_control_state *ecs = &ecss;
  int cmd_done = 0;

  memset (ecs, 0, sizeof (*ecs));

  /* Events are always processed with the main UI as current UI.  This
     way, warnings, debug output, etc. are always consistently sent to
     the main console.  */
  scoped_restore save_ui = make_scoped_restore (&current_ui, main_ui);

  /* End up with readline processing input, if necessary.  */
  {
    SCOPE_EXIT { reinstall_readline_callback_handler_cleanup (); };

    /* We're handling a live event, so make sure we're doing live
       debugging.  If we're looking at traceframes while the target is
       running, we're going to need to get back to that mode after
       handling the event.  */
    gdb::optional<scoped_restore_current_traceframe> maybe_restore_traceframe;
    if (non_stop)
      {
	maybe_restore_traceframe.emplace ();
	set_current_traceframe (-1);
      }

    /* The user/frontend should not notice a thread switch due to
       internal events.  Make sure we revert to the user selected
       thread and frame after handling the event and running any
       breakpoint commands.  */
    scoped_restore_current_thread restore_thread;

    overlay_cache_invalid = 1;
    /* Flush target cache before starting to handle each event.  Target
       was running and cache could be stale.  This is just a heuristic.
       Running threads may modify target memory, but we don't get any
       event.  */
    target_dcache_invalidate ();

    scoped_restore save_exec_dir
      = make_scoped_restore (&execution_direction,
			     target_execution_direction ());

    /* Nothing to do if no target reported an event (TARGET_WNOHANG
       means this poll does not block).  */
    if (!do_target_wait (minus_one_ptid, ecs, TARGET_WNOHANG))
      return;

    gdb_assert (ecs->ws.kind != TARGET_WAITKIND_IGNORE);

    /* Switch to the target that generated the event, so we can do
       target calls.  Any inferior bound to the target will do, so we
       just switch to the first we find.  */
    for (inferior *inf : all_inferiors (ecs->target))
      {
	switch_to_inferior_no_thread (inf);
	break;
      }

    if (debug_infrun)
      print_target_wait_results (minus_one_ptid, ecs->ptid, &ecs->ws);

    /* If an error happens while handling the event, propagate GDB's
       knowledge of the executing state to the frontend/user running
       state.  */
    ptid_t finish_ptid = !target_is_non_stop_p () ? minus_one_ptid : ecs->ptid;
    scoped_finish_thread_state finish_state (ecs->target, finish_ptid);

    /* Constructed after restore_thread above, so these scope exits
       run (in reverse construction order) before the thread is
       restored -- they must still apply to the thread that threw an
       exception, if one is thrown.  */
    auto defer_bpstat_clear
      = make_scope_exit (bpstat_clear_actions);
    auto defer_delete_threads
      = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints);

    /* Now figure out what to do with the result.  */
    handle_inferior_event (ecs);

    if (!ecs->wait_some_more)
      {
	struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
	int should_stop = 1;
	struct thread_info *thr = ecs->event_thread;

	delete_just_stopped_threads_infrun_breakpoints ();

	/* Let the thread's finite state machine (e.g. for "finish" or
	   "until") veto the stop.  */
	if (thr != NULL)
	  {
	    struct thread_fsm *thread_fsm = thr->thread_fsm;

	    if (thread_fsm != NULL)
	      should_stop = thread_fsm->should_stop (thr);
	  }

	if (!should_stop)
	  {
	    keep_going (ecs);
	  }
	else
	  {
	    bool should_notify_stop = true;
	    int proceeded = 0;

	    clean_up_just_stopped_threads_fsms (ecs);

	    if (thr != NULL && thr->thread_fsm != NULL)
	      should_notify_stop = thr->thread_fsm->should_notify_stop ();

	    if (should_notify_stop)
	      {
		/* We may not find an inferior if this was a process exit.  */
		if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
		  proceeded = normal_stop ();
	      }

	    /* normal_stop may have resumed the target; only report
	       command completion if it did not.  */
	    if (!proceeded)
	      {
		inferior_event_handler (INF_EXEC_COMPLETE);
		cmd_done = 1;
	      }

	    /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
	       previously selected thread is gone.  We have two
	       choices - switch to no thread selected, or restore the
	       previously selected thread (now exited).  We chose the
	       later, just because that's what GDB used to do.  After
	       this, "info threads" says "The current thread <Thread
	       ID 2> has terminated." instead of "No thread
	       selected.".  */
	    if (!non_stop
		&& cmd_done
		&& ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED)
	      restore_thread.dont_restore ();
	  }
      }

    defer_delete_threads.release ();
    defer_bpstat_clear.release ();

    /* No error, don't finish the thread states yet.  */
    finish_state.release ();

    /* This scope is used to ensure that readline callbacks are
       reinstalled here.  */
  }

  /* If a UI was in sync execution mode, and now isn't, restore its
     prompt (a synchronous execution command has finished, and we're
     ready for input).  */
  all_uis_check_sync_execution_done ();

  if (cmd_done
      && exec_done_display_p
      && (inferior_ptid == null_ptid
	  || inferior_thread ()->state != THREAD_RUNNING))
    printf_unfiltered (_("completed.\n"));
}
4082
4083 /* See infrun.h. */
4084
4085 void
4086 set_step_info (thread_info *tp, struct frame_info *frame,
4087 struct symtab_and_line sal)
4088 {
4089 /* This can be removed once this function no longer implicitly relies on the
4090 inferior_ptid value. */
4091 gdb_assert (inferior_ptid == tp->ptid);
4092
4093 tp->control.step_frame_id = get_frame_id (frame);
4094 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
4095
4096 tp->current_symtab = sal.symtab;
4097 tp->current_line = sal.line;
4098 }
4099
4100 /* Clear context switchable stepping state. */
4101
4102 void
4103 init_thread_stepping_state (struct thread_info *tss)
4104 {
4105 tss->stepped_breakpoint = 0;
4106 tss->stepping_over_breakpoint = 0;
4107 tss->stepping_over_watchpoint = 0;
4108 tss->step_after_step_resume_breakpoint = 0;
4109 }
4110
4111 /* See infrun.h. */
4112
4113 void
4114 set_last_target_status (process_stratum_target *target, ptid_t ptid,
4115 target_waitstatus status)
4116 {
4117 target_last_proc_target = target;
4118 target_last_wait_ptid = ptid;
4119 target_last_waitstatus = status;
4120 }
4121
4122 /* See infrun.h. */
4123
4124 void
4125 get_last_target_status (process_stratum_target **target, ptid_t *ptid,
4126 target_waitstatus *status)
4127 {
4128 if (target != nullptr)
4129 *target = target_last_proc_target;
4130 if (ptid != nullptr)
4131 *ptid = target_last_wait_ptid;
4132 if (status != nullptr)
4133 *status = target_last_waitstatus;
4134 }
4135
4136 /* See infrun.h. */
4137
4138 void
4139 nullify_last_target_wait_ptid (void)
4140 {
4141 target_last_proc_target = nullptr;
4142 target_last_wait_ptid = minus_one_ptid;
4143 target_last_waitstatus = {};
4144 }
4145
4146 /* Switch thread contexts. */
4147
4148 static void
4149 context_switch (execution_control_state *ecs)
4150 {
4151 if (debug_infrun
4152 && ecs->ptid != inferior_ptid
4153 && (inferior_ptid == null_ptid
4154 || ecs->event_thread != inferior_thread ()))
4155 {
4156 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
4157 target_pid_to_str (inferior_ptid).c_str ());
4158 fprintf_unfiltered (gdb_stdlog, "to %s\n",
4159 target_pid_to_str (ecs->ptid).c_str ());
4160 }
4161
4162 switch_to_thread (ecs->event_thread);
4163 }
4164
/* If the target can't tell whether we've hit breakpoints
   (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
   check whether that could have been caused by a breakpoint.  If so,
   adjust the PC, per gdbarch_decr_pc_after_break.  */

static void
adjust_pc_after_break (struct thread_info *thread,
		       struct target_waitstatus *ws)
{
  struct regcache *regcache;
  struct gdbarch *gdbarch;
  CORE_ADDR breakpoint_pc, decr_pc;

  /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP.  If
     we aren't, just return.

     We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
     affected by gdbarch_decr_pc_after_break.  Other waitkinds which are
     implemented by software breakpoints should be handled through the normal
     breakpoint layer.

     NOTE drow/2004-01-31: On some targets, breakpoints may generate
     different signals (SIGILL or SIGEMT for instance), but it is less
     clear where the PC is pointing afterwards.  It may not match
     gdbarch_decr_pc_after_break.  I don't know any specific target that
     generates these signals at breakpoints (the code has been in GDB since at
     least 1992) so I can not guess how to handle them here.

     In earlier versions of GDB, a target with
     gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
     watchpoint affected by gdbarch_decr_pc_after_break.  I haven't found any
     target with both of these set in GDB history, and it seems unlikely to be
     correct, so gdbarch_have_nonsteppable_watchpoint is not checked here.  */

  if (ws->kind != TARGET_WAITKIND_STOPPED)
    return;

  if (ws->value.sig != GDB_SIGNAL_TRAP)
    return;

  /* In reverse execution, when a breakpoint is hit, the instruction
     under it has already been de-executed.  The reported PC always
     points at the breakpoint address, so adjusting it further would
     be wrong.  E.g., consider this case on a decr_pc_after_break == 1
     architecture:

       B1         0x08000000 :   INSN1
       B2         0x08000001 :   INSN2
		  0x08000002 :   INSN3
	    PC -> 0x08000003 :   INSN4

     Say you're stopped at 0x08000003 as above.  Reverse continuing
     from that point should hit B2 as below.  Reading the PC when the
     SIGTRAP is reported should read 0x08000001 and INSN2 should have
     been de-executed already.

       B1         0x08000000 :   INSN1
       B2   PC -> 0x08000001 :   INSN2
		  0x08000002 :   INSN3
		  0x08000003 :   INSN4

     We can't apply the same logic as for forward execution, because
     we would wrongly adjust the PC to 0x08000000, since there's a
     breakpoint at PC - 1.  We'd then report a hit on B1, although
     INSN1 hadn't been de-executed yet.  Doing nothing is the correct
     behaviour.  */
  if (execution_direction == EXEC_REVERSE)
    return;

  /* If the target can tell whether the thread hit a SW breakpoint,
     trust it.  Targets that can tell also adjust the PC
     themselves.  */
  if (target_supports_stopped_by_sw_breakpoint ())
    return;

  /* Note that relying on whether a breakpoint is planted in memory to
     determine this can fail.  E.g,. the breakpoint could have been
     removed since.  Or the thread could have been told to step an
     instruction the size of a breakpoint instruction, and only
     _after_ was a breakpoint inserted at its address.  */

  /* If this target does not decrement the PC after breakpoints, then
     we have nothing to do.  */
  regcache = get_thread_regcache (thread);
  gdbarch = regcache->arch ();

  decr_pc = gdbarch_decr_pc_after_break (gdbarch);
  if (decr_pc == 0)
    return;

  const address_space *aspace = regcache->aspace ();

  /* Find the location where (if we've hit a breakpoint) the
     breakpoint would be.  */
  breakpoint_pc = regcache_read_pc (regcache) - decr_pc;

  /* If the target can't tell whether a software breakpoint triggered,
     fallback to figuring it out based on breakpoints we think were
     inserted in the target, and on whether the thread was stepped or
     continued.  */

  /* Check whether there actually is a software breakpoint inserted at
     that location.

     If in non-stop mode, a race condition is possible where we've
     removed a breakpoint, but stop events for that breakpoint were
     already queued and arrive later.  To suppress those spurious
     SIGTRAPs, we keep a list of such breakpoint locations for a bit,
     and retire them after a number of stop events are reported.  Note
     this is an heuristic and can thus get confused.  The real fix is
     to get the "stopped by SW BP and needs adjustment" info out of
     the target/kernel (and thus never reach here; see above).  */
  if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
      || (target_is_non_stop_p ()
	  && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
    {
      gdb::optional<scoped_restore_tmpl<int>> restore_operation_disable;

      /* Don't let the PC write below be recorded by the record
	 target, if it is in use.  */
      if (record_full_is_used ())
	restore_operation_disable.emplace
	  (record_full_gdb_operation_disable_set ());

      /* When using hardware single-step, a SIGTRAP is reported for both
	 a completed single-step and a software breakpoint.  Need to
	 differentiate between the two, as the latter needs adjusting
	 but the former does not.

	 The SIGTRAP can be due to a completed hardware single-step only if
	  - we didn't insert software single-step breakpoints
	  - this thread is currently being stepped

	 If any of these events did not occur, we must have stopped due
	 to hitting a software breakpoint, and have to back up to the
	 breakpoint address.

	 As a special case, we could have hardware single-stepped a
	 software breakpoint.  In this case (prev_pc == breakpoint_pc),
	 we also need to back up to the breakpoint address.  */

      if (thread_has_single_step_breakpoints_set (thread)
	  || !currently_stepping (thread)
	  || (thread->stepped_breakpoint
	      && thread->prev_pc == breakpoint_pc))
	regcache_write_pc (regcache, breakpoint_pc);
    }
}
4311
4312 static int
4313 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
4314 {
4315 for (frame = get_prev_frame (frame);
4316 frame != NULL;
4317 frame = get_prev_frame (frame))
4318 {
4319 if (frame_id_eq (get_frame_id (frame), step_frame_id))
4320 return 1;
4321 if (get_frame_type (frame) != INLINE_FRAME)
4322 break;
4323 }
4324
4325 return 0;
4326 }
4327
4328 /* Look for an inline frame that is marked for skip.
4329 If PREV_FRAME is TRUE start at the previous frame,
4330 otherwise start at the current frame. Stop at the
4331 first non-inline frame, or at the frame where the
4332 step started. */
4333
4334 static bool
4335 inline_frame_is_marked_for_skip (bool prev_frame, struct thread_info *tp)
4336 {
4337 struct frame_info *frame = get_current_frame ();
4338
4339 if (prev_frame)
4340 frame = get_prev_frame (frame);
4341
4342 for (; frame != NULL; frame = get_prev_frame (frame))
4343 {
4344 const char *fn = NULL;
4345 symtab_and_line sal;
4346 struct symbol *sym;
4347
4348 if (frame_id_eq (get_frame_id (frame), tp->control.step_frame_id))
4349 break;
4350 if (get_frame_type (frame) != INLINE_FRAME)
4351 break;
4352
4353 sal = find_frame_sal (frame);
4354 sym = get_frame_function (frame);
4355
4356 if (sym != NULL)
4357 fn = sym->print_name ();
4358
4359 if (sal.line != 0
4360 && function_name_is_marked_for_skip (fn, sal))
4361 return true;
4362 }
4363
4364 return false;
4365 }
4366
4367 /* If the event thread has the stop requested flag set, pretend it
4368 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
4369 target_stop). */
4370
4371 static bool
4372 handle_stop_requested (struct execution_control_state *ecs)
4373 {
4374 if (ecs->event_thread->stop_requested)
4375 {
4376 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
4377 ecs->ws.value.sig = GDB_SIGNAL_0;
4378 handle_signal_stop (ecs);
4379 return true;
4380 }
4381 return false;
4382 }
4383
/* Auxiliary function that handles syscall entry/return events.
   It returns 1 if the inferior should keep going (and GDB
   should ignore the event), or 0 if the event deserves to be
   processed.  */

static int
handle_syscall_event (struct execution_control_state *ecs)
{
  struct regcache *regcache;
  int syscall_number;

  context_switch (ecs);

  regcache = get_thread_regcache (ecs->event_thread);
  syscall_number = ecs->ws.value.syscall_number;
  ecs->event_thread->suspend.stop_pc = regcache_read_pc (regcache);

  /* Only consult the breakpoint layer if there is a syscall
     catchpoint set on this particular syscall number.  */
  if (catch_syscall_enabled () > 0
      && catching_syscall_number (syscall_number) > 0)
    {
      if (debug_infrun)
        fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
                            syscall_number);

      ecs->event_thread->control.stop_bpstat
        = bpstat_stop_status (regcache->aspace (),
			      ecs->event_thread->suspend.stop_pc,
			      ecs->event_thread, &ecs->ws);

      /* An explicit stop request takes precedence over the
	 catchpoint decision.  */
      if (handle_stop_requested (ecs))
	return 0;

      if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
	{
	  /* Catchpoint hit.  */
	  return 0;
	}
    }

  /* Checked again here because the branch above is only entered when
     a matching catchpoint exists; a stop request must be honored
     either way.  */
  if (handle_stop_requested (ecs))
    return 0;

  /* If no catchpoint triggered for this, then keep going.  */
  keep_going (ecs);
  return 1;
}
4430
4431 /* Lazily fill in the execution_control_state's stop_func_* fields. */
4432
4433 static void
4434 fill_in_stop_func (struct gdbarch *gdbarch,
4435 struct execution_control_state *ecs)
4436 {
4437 if (!ecs->stop_func_filled_in)
4438 {
4439 const block *block;
4440
4441 /* Don't care about return value; stop_func_start and stop_func_name
4442 will both be 0 if it doesn't work. */
4443 find_pc_partial_function (ecs->event_thread->suspend.stop_pc,
4444 &ecs->stop_func_name,
4445 &ecs->stop_func_start,
4446 &ecs->stop_func_end,
4447 &block);
4448
4449 /* The call to find_pc_partial_function, above, will set
4450 stop_func_start and stop_func_end to the start and end
4451 of the range containing the stop pc. If this range
4452 contains the entry pc for the block (which is always the
4453 case for contiguous blocks), advance stop_func_start past
4454 the function's start offset and entrypoint. Note that
4455 stop_func_start is NOT advanced when in a range of a
4456 non-contiguous block that does not contain the entry pc. */
4457 if (block != nullptr
4458 && ecs->stop_func_start <= BLOCK_ENTRY_PC (block)
4459 && BLOCK_ENTRY_PC (block) < ecs->stop_func_end)
4460 {
4461 ecs->stop_func_start
4462 += gdbarch_deprecated_function_start_offset (gdbarch);
4463
4464 if (gdbarch_skip_entrypoint_p (gdbarch))
4465 ecs->stop_func_start
4466 = gdbarch_skip_entrypoint (gdbarch, ecs->stop_func_start);
4467 }
4468
4469 ecs->stop_func_filled_in = 1;
4470 }
4471 }
4472
4473
4474 /* Return the STOP_SOON field of the inferior pointed at by ECS. */
4475
4476 static enum stop_kind
4477 get_inferior_stop_soon (execution_control_state *ecs)
4478 {
4479 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
4480
4481 gdb_assert (inf != NULL);
4482 return inf->control.stop_soon;
4483 }
4484
4485 /* Poll for one event out of the current target. Store the resulting
4486 waitstatus in WS, and return the event ptid. Does not block. */
4487
4488 static ptid_t
4489 poll_one_curr_target (struct target_waitstatus *ws)
4490 {
4491 ptid_t event_ptid;
4492
4493 overlay_cache_invalid = 1;
4494
4495 /* Flush target cache before starting to handle each event.
4496 Target was running and cache could be stale. This is just a
4497 heuristic. Running threads may modify target memory, but we
4498 don't get any event. */
4499 target_dcache_invalidate ();
4500
4501 if (deprecated_target_wait_hook)
4502 event_ptid = deprecated_target_wait_hook (minus_one_ptid, ws, TARGET_WNOHANG);
4503 else
4504 event_ptid = target_wait (minus_one_ptid, ws, TARGET_WNOHANG);
4505
4506 if (debug_infrun)
4507 print_target_wait_results (minus_one_ptid, event_ptid, ws);
4508
4509 return event_ptid;
4510 }
4511
/* An event reported by wait_one.  Bundles the event's waitstatus
   together with the target and ptid it came from.  */

struct wait_one_event
{
  /* The target the event came out of.  */
  process_stratum_target *target;

  /* The PTID the event was for.  */
  ptid_t ptid;

  /* The waitstatus.  */
  target_waitstatus ws;
};
4525
/* Wait for one event out of any target.  First polls every target
   with executing threads; if none has an event ready, blocks in
   select on the targets' event file descriptors and polls again.  */

static wait_one_event
wait_one ()
{
  while (1)
    {
      /* Polling pass: ask each async target with executing threads
	 for an event, without blocking.  */
      for (inferior *inf : all_inferiors ())
	{
	  process_stratum_target *target = inf->process_target ();
	  if (target == NULL
	      || !target->is_async_p ()
	      || !target->threads_executing)
	    continue;

	  switch_to_inferior_no_thread (inf);

	  wait_one_event event;
	  event.target = target;
	  event.ptid = poll_one_curr_target (&event.ws);

	  if (event.ws.kind == TARGET_WAITKIND_NO_RESUMED)
	    {
	      /* If nothing is resumed, remove the target from the
		 event loop.  */
	      target_async (0);
	    }
	  else if (event.ws.kind != TARGET_WAITKIND_IGNORE)
	    return event;
	}

      /* Block waiting for some event.  */

      fd_set readfds;
      int nfds = 0;

      FD_ZERO (&readfds);

      /* Collect the event file descriptors of all pollable
	 targets.  */
      for (inferior *inf : all_inferiors ())
	{
	  process_stratum_target *target = inf->process_target ();
	  if (target == NULL
	      || !target->is_async_p ()
	      || !target->threads_executing)
	    continue;

	  int fd = target->async_wait_fd ();
	  FD_SET (fd, &readfds);
	  if (nfds <= fd)
	    nfds = fd + 1;
	}

      if (nfds == 0)
	{
	  /* No waitable targets left.  All must be stopped.  */
	  return {NULL, minus_one_ptid, {TARGET_WAITKIND_NO_RESUMED}};
	}

      QUIT;

      /* interruptible_select lets a Ctrl-C wake us up.  */
      int numfds = interruptible_select (nfds, &readfds, 0, NULL, 0);
      if (numfds < 0)
	{
	  if (errno == EINTR)
	    continue;
	  else
	    perror_with_name ("interruptible_select");
	}
    }
}
4596
/* Save the thread's event and stop reason to process it later.  */

static void
save_waitstatus (struct thread_info *tp, const target_waitstatus *ws)
{
  if (debug_infrun)
    {
      std::string statstr = target_waitstatus_to_string (ws);

      fprintf_unfiltered (gdb_stdlog,
			  "infrun: saving status %s for %d.%ld.%ld\n",
			  statstr.c_str (),
			  tp->ptid.pid (),
			  tp->ptid.lwp (),
			  tp->ptid.tid ());
    }

  /* Record for later.  */
  tp->suspend.waitstatus = *ws;
  tp->suspend.waitstatus_pending_p = 1;

  struct regcache *regcache = get_thread_regcache (tp);
  const address_space *aspace = regcache->aspace ();

  if (ws->kind == TARGET_WAITKIND_STOPPED
      && ws->value.sig == GDB_SIGNAL_TRAP)
    {
      CORE_ADDR pc = regcache_read_pc (regcache);

      adjust_pc_after_break (tp, &tp->suspend.waitstatus);

      /* The target_stopped_by_* queries below operate on the current
	 thread; temporarily make TP current.  */
      scoped_restore_current_thread restore_thread;
      switch_to_thread (tp);

      /* Classify the stop reason.  Direct target reports are trusted
	 first; when the target can't report a given kind of stop,
	 fall back to checking whether GDB thinks a breakpoint of that
	 kind is inserted at PC.  The order of these checks is
	 significant.  */
      if (target_stopped_by_watchpoint ())
	{
	  tp->suspend.stop_reason
	    = TARGET_STOPPED_BY_WATCHPOINT;
	}
      else if (target_supports_stopped_by_sw_breakpoint ()
	       && target_stopped_by_sw_breakpoint ())
	{
	  tp->suspend.stop_reason
	    = TARGET_STOPPED_BY_SW_BREAKPOINT;
	}
      else if (target_supports_stopped_by_hw_breakpoint ()
	       && target_stopped_by_hw_breakpoint ())
	{
	  tp->suspend.stop_reason
	    = TARGET_STOPPED_BY_HW_BREAKPOINT;
	}
      else if (!target_supports_stopped_by_hw_breakpoint ()
	       && hardware_breakpoint_inserted_here_p (aspace,
						       pc))
	{
	  tp->suspend.stop_reason
	    = TARGET_STOPPED_BY_HW_BREAKPOINT;
	}
      else if (!target_supports_stopped_by_sw_breakpoint ()
	       && software_breakpoint_inserted_here_p (aspace,
						       pc))
	{
	  tp->suspend.stop_reason
	    = TARGET_STOPPED_BY_SW_BREAKPOINT;
	}
      else if (!thread_has_single_step_breakpoints_set (tp)
	       && currently_stepping (tp))
	{
	  tp->suspend.stop_reason
	    = TARGET_STOPPED_BY_SINGLE_STEP;
	}
    }
}
4670
4671 /* Mark the non-executing threads accordingly. In all-stop, all
4672 threads of all processes are stopped when we get any event
4673 reported. In non-stop mode, only the event thread stops. */
4674
4675 static void
4676 mark_non_executing_threads (process_stratum_target *target,
4677 ptid_t event_ptid,
4678 struct target_waitstatus ws)
4679 {
4680 ptid_t mark_ptid;
4681
4682 if (!target_is_non_stop_p ())
4683 mark_ptid = minus_one_ptid;
4684 else if (ws.kind == TARGET_WAITKIND_SIGNALLED
4685 || ws.kind == TARGET_WAITKIND_EXITED)
4686 {
4687 /* If we're handling a process exit in non-stop mode, even
4688 though threads haven't been deleted yet, one would think
4689 that there is nothing to do, as threads of the dead process
4690 will be soon deleted, and threads of any other process were
4691 left running. However, on some targets, threads survive a
4692 process exit event. E.g., for the "checkpoint" command,
4693 when the current checkpoint/fork exits, linux-fork.c
4694 automatically switches to another fork from within
4695 target_mourn_inferior, by associating the same
4696 inferior/thread to another fork. We haven't mourned yet at
4697 this point, but we must mark any threads left in the
4698 process as not-executing so that finish_thread_state marks
4699 them stopped (in the user's perspective) if/when we present
4700 the stop to the user. */
4701 mark_ptid = ptid_t (event_ptid.pid ());
4702 }
4703 else
4704 mark_ptid = event_ptid;
4705
4706 set_executing (target, mark_ptid, false);
4707
4708 /* Likewise the resumed flag. */
4709 set_resumed (target, mark_ptid, false);
4710 }
4711
/* See infrun.h.

   Stop all threads of all non-stop targets.  Repeatedly requests
   stops and waits for the corresponding stop events, updating the
   thread list between rounds, until two consecutive passes find no
   thread that still needs stopping.  */

void
stop_all_threads (void)
{
  /* We may need multiple passes to discover all threads.  */
  int pass;
  int iterations = 0;

  gdb_assert (exists_non_stop_target ());

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog, "infrun: stop_all_threads\n");

  /* Restore the originally selected thread on exit.  */
  scoped_restore_current_thread restore_thread;

  /* Enable thread events of all targets.  */
  for (auto *target : all_non_exited_process_targets ())
    {
      switch_to_target_no_thread (target);
      target_thread_events (true);
    }

  SCOPE_EXIT
    {
      /* Disable thread events of all targets.  */
      for (auto *target : all_non_exited_process_targets ())
	{
	  switch_to_target_no_thread (target);
	  target_thread_events (false);
	}

      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: stop_all_threads done\n");
    };

  /* Request threads to stop, and then wait for the stops.  Because
     threads we already know about can spawn more threads while we're
     trying to stop them, and we only learn about new threads when we
     update the thread list, do this in a loop, and keep iterating
     until two passes find no threads that need to be stopped.  */
  for (pass = 0; pass < 2; pass++, iterations++)
    {
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: stop_all_threads, pass=%d, "
			    "iterations=%d\n", pass, iterations);
      while (1)
	{
	  /* Number of stop events we still expect to collect in this
	     round.  */
	  int waits_needed = 0;

	  /* Refresh the thread list of every live target, so newly
	     spawned threads become visible below.  */
	  for (auto *target : all_non_exited_process_targets ())
	    {
	      switch_to_target_no_thread (target);
	      update_thread_list ();
	    }

	  /* Go through all threads looking for threads that we need
	     to tell the target to stop.  */
	  for (thread_info *t : all_non_exited_threads ())
	    {
	      /* For a single-target setting with an all-stop target,
		 we would not even arrive here.  For a multi-target
		 setting, until GDB is able to handle a mixture of
		 all-stop and non-stop targets, simply skip all-stop
		 targets' threads.  This should be fine due to the
		 protection of 'check_multi_target_resumption'.  */

	      switch_to_thread_no_regs (t);
	      if (!target_is_non_stop_p ())
		continue;

	      if (t->executing)
		{
		  /* If already stopping, don't request a stop again.
		     We just haven't seen the notification yet.  */
		  if (!t->stop_requested)
		    {
		      if (debug_infrun)
			fprintf_unfiltered (gdb_stdlog,
					    "infrun: %s executing, "
					    "need stop\n",
					    target_pid_to_str (t->ptid).c_str ());
		      target_stop (t->ptid);
		      t->stop_requested = 1;
		    }
		  else
		    {
		      if (debug_infrun)
			fprintf_unfiltered (gdb_stdlog,
					    "infrun: %s executing, "
					    "already stopping\n",
					    target_pid_to_str (t->ptid).c_str ());
		    }

		  /* Either way, a stop event is still pending for this
		     thread.  */
		  if (t->stop_requested)
		    waits_needed++;
		}
	      else
		{
		  if (debug_infrun)
		    fprintf_unfiltered (gdb_stdlog,
					"infrun: %s not executing\n",
					target_pid_to_str (t->ptid).c_str ());

		  /* The thread may be not executing, but still be
		     resumed with a pending status to process.  */
		  t->resumed = false;
		}
	    }

	  /* All threads of all non-stop targets are stopped for this
	     round; the outer for-loop decides whether we are done.  */
	  if (waits_needed == 0)
	    break;

	  /* If we find new threads on the second iteration, restart
	     over.  We want to see two iterations in a row with all
	     threads stopped.  */
	  if (pass > 0)
	    pass = -1;

	  /* Collect the expected stop events.  */
	  for (int i = 0; i < waits_needed; i++)
	    {
	      wait_one_event event = wait_one ();

	      if (debug_infrun)
		{
		  fprintf_unfiltered (gdb_stdlog,
				      "infrun: stop_all_threads %s %s\n",
				      target_waitstatus_to_string (&event.ws).c_str (),
				      target_pid_to_str (event.ptid).c_str ());
		}

	      if (event.ws.kind == TARGET_WAITKIND_NO_RESUMED)
		{
		  /* All resumed threads exited.  */
		  break;
		}
	      else if (event.ws.kind == TARGET_WAITKIND_THREAD_EXITED
		       || event.ws.kind == TARGET_WAITKIND_EXITED
		       || event.ws.kind == TARGET_WAITKIND_SIGNALLED)
		{
		  /* One thread/process exited/signalled.  */

		  thread_info *t = nullptr;

		  /* The target may have reported just a pid.  If so, try
		     the first non-exited thread.  */
		  if (event.ptid.is_pid ())
		    {
		      int pid = event.ptid.pid ();
		      inferior *inf = find_inferior_pid (event.target, pid);
		      for (thread_info *tp : inf->non_exited_threads ())
			{
			  t = tp;
			  break;
			}

		      /* If there is no available thread, the event would
			 have to be appended to a per-inferior event list,
			 which does not exist (and if it did, we'd have
			 to adjust run control command to be able to
			 resume such an inferior).  We assert here instead
			 of going into an infinite loop.  */
		      gdb_assert (t != nullptr);

		      if (debug_infrun)
			fprintf_unfiltered (gdb_stdlog,
					    "infrun: stop_all_threads, using %s\n",
					    target_pid_to_str (t->ptid).c_str ());
		    }
		  else
		    {
		      t = find_thread_ptid (event.target, event.ptid);
		      /* Check if this is the first time we see this thread.
			 Don't bother adding if it individually exited.  */
		      if (t == nullptr
			  && event.ws.kind != TARGET_WAITKIND_THREAD_EXITED)
			t = add_thread (event.target, event.ptid);
		    }

		  if (t != nullptr)
		    {
		      /* Set the threads as non-executing to avoid
			 another stop attempt on them.  */
		      switch_to_thread_no_regs (t);
		      mark_non_executing_threads (event.target, event.ptid,
						  event.ws);
		      save_waitstatus (t, &event.ws);
		      t->stop_requested = false;
		    }
		}
	      else
		{
		  /* A regular stop event for one thread.  */
		  thread_info *t = find_thread_ptid (event.target, event.ptid);
		  if (t == NULL)
		    t = add_thread (event.target, event.ptid);

		  t->stop_requested = 0;
		  t->executing = 0;
		  t->resumed = false;
		  t->control.may_range_step = 0;

		  /* This may be the first time we see the inferior report
		     a stop.  */
		  inferior *inf = find_inferior_ptid (event.target, event.ptid);
		  if (inf->needs_setup)
		    {
		      switch_to_thread_no_regs (t);
		      setup_inferior (0);
		    }

		  if (event.ws.kind == TARGET_WAITKIND_STOPPED
		      && event.ws.value.sig == GDB_SIGNAL_0)
		    {
		      /* We caught the event that we intended to catch, so
			 there's no event pending.  */
		      t->suspend.waitstatus.kind = TARGET_WAITKIND_IGNORE;
		      t->suspend.waitstatus_pending_p = 0;

		      if (displaced_step_fixup (t, GDB_SIGNAL_0) < 0)
			{
			  /* Add it back to the step-over queue.  */
			  if (debug_infrun)
			    {
			      fprintf_unfiltered (gdb_stdlog,
						  "infrun: displaced-step of %s "
						  "canceled: adding back to the "
						  "step-over queue\n",
						  target_pid_to_str (t->ptid).c_str ());
			    }
			  t->control.trap_expected = 0;
			  thread_step_over_chain_enqueue (t);
			}
		    }
		  else
		    {
		      /* The thread stopped for some other reason (e.g.,
			 a signal or a breakpoint hit); save the status so
			 run control can process it later.  */
		      enum gdb_signal sig;
		      struct regcache *regcache;

		      if (debug_infrun)
			{
			  std::string statstr = target_waitstatus_to_string (&event.ws);

			  fprintf_unfiltered (gdb_stdlog,
					      "infrun: target_wait %s, saving "
					      "status for %d.%ld.%ld\n",
					      statstr.c_str (),
					      t->ptid.pid (),
					      t->ptid.lwp (),
					      t->ptid.tid ());
			}

		      /* Record for later.  */
		      save_waitstatus (t, &event.ws);

		      sig = (event.ws.kind == TARGET_WAITKIND_STOPPED
			     ? event.ws.value.sig : GDB_SIGNAL_0);

		      if (displaced_step_fixup (t, sig) < 0)
			{
			  /* Add it back to the step-over queue.  */
			  t->control.trap_expected = 0;
			  thread_step_over_chain_enqueue (t);
			}

		      regcache = get_thread_regcache (t);
		      t->suspend.stop_pc = regcache_read_pc (regcache);

		      if (debug_infrun)
			{
			  fprintf_unfiltered (gdb_stdlog,
					      "infrun: saved stop_pc=%s for %s "
					      "(currently_stepping=%d)\n",
					      paddress (target_gdbarch (),
							t->suspend.stop_pc),
					      target_pid_to_str (t->ptid).c_str (),
					      currently_stepping (t));
			}
		    }
		}
	    }
	}
    }
}
4996
/* Handle a TARGET_WAITKIND_NO_RESUMED event.

   Returns non-zero when the event was consumed (the caller should
   just keep waiting for more events), and zero when the event should
   be reported to the user.  */

static int
handle_no_resumed (struct execution_control_state *ecs)
{
  if (target_can_async_p ())
    {
      /* Check whether any UI is synchronously waiting for the
	 inferior (prompt blocked by a foreground execution
	 command).  */
      int any_sync = 0;

      for (ui *ui : all_uis ())
	{
	  if (ui->prompt_state == PROMPT_BLOCKED)
	    {
	      any_sync = 1;
	      break;
	    }
	}
      if (!any_sync)
	{
	  /* There were no unwaited-for children left in the target, but,
	     we're not synchronously waiting for events either.  Just
	     ignore.  */

	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: TARGET_WAITKIND_NO_RESUMED "
				"(ignoring: bg)\n");
	  prepare_to_wait (ecs);
	  return 1;
	}
    }

  /* Otherwise, if we were running a synchronous execution command, we
     may need to cancel it and give the user back the terminal.

     In non-stop mode, the target can't tell whether we've already
     consumed previous stop events, so it can end up sending us a
     no-resumed event like so:

       #0 - thread 1 is left stopped

       #1 - thread 2 is resumed and hits breakpoint
	       -> TARGET_WAITKIND_STOPPED

       #2 - thread 3 is resumed and exits
	    this is the last resumed thread, so
	       -> TARGET_WAITKIND_NO_RESUMED

       #3 - gdb processes stop for thread 2 and decides to re-resume
	    it.

       #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
	    thread 2 is now resumed, so the event should be ignored.

     IOW, if the stop for thread 2 doesn't end a foreground command,
     then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
     event.  But it could be that the event meant that thread 2 itself
     (or whatever other thread was the last resumed thread) exited.

     To address this we refresh the thread list and check whether we
     have resumed threads _now_.  In the example above, this removes
     thread 3 from the thread list.  If thread 2 was re-resumed, we
     ignore this event.  If we find no thread resumed, then we cancel
     the synchronous command and show "no unwaited-for children" to
     the user.  */

  inferior *curr_inf = current_inferior ();

  /* Restore the originally selected thread before returning.  */
  scoped_restore_current_thread restore_thread;

  /* Refresh each live target's thread list so the checks below see
     current state (exited threads pruned).  */
  for (auto *target : all_non_exited_process_targets ())
    {
      switch_to_target_no_thread (target);
      update_thread_list ();
    }

  /* If:

       - the current target has no thread executing, and
       - the current inferior is native, and
       - the current inferior is the one which has the terminal, and
       - we did nothing,

     then a Ctrl-C from this point on would remain stuck in the
     kernel, until a thread resumes and dequeues it.  That would
     result in the GDB CLI not reacting to Ctrl-C, not able to
     interrupt the program.  To address this, if the current inferior
     no longer has any thread executing, we give the terminal to some
     other inferior that has at least one thread executing.  */
  bool swap_terminal = true;

  /* Whether to ignore this TARGET_WAITKIND_NO_RESUMED event, or
     whether to report it to the user.  */
  bool ignore_event = false;

  for (thread_info *thread : all_non_exited_threads ())
    {
      /* Hand the terminal to the first inferior found with an
	 executing thread, if the current inferior has none.  */
      if (swap_terminal && thread->executing)
	{
	  if (thread->inf != curr_inf)
	    {
	      target_terminal::ours ();

	      switch_to_thread (thread);
	      target_terminal::inferior ();
	    }
	  swap_terminal = false;
	}

      if (!ignore_event
	  && (thread->executing
	      || thread->suspend.waitstatus_pending_p))
	{
	  /* Either there were no unwaited-for children left in the
	     target at some point, but there are now, or some target
	     other than the eventing one has unwaited-for children
	     left.  Just ignore.  */
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: TARGET_WAITKIND_NO_RESUMED "
				"(ignoring: found resumed)\n");

	  ignore_event = true;
	}

      /* Both decisions made; no need to scan further.  */
      if (ignore_event && !swap_terminal)
	break;
    }

  if (ignore_event)
    {
      switch_to_inferior_no_thread (curr_inf);
      prepare_to_wait (ecs);
      return 1;
    }

  /* Go ahead and report the event.  */
  return 0;
}
5136
/* Given an execution control state that has been freshly filled in by
   an event from the inferior, figure out what it means and take
   appropriate action.

   The alternatives are:

   1) stop_waiting and return; to really stop and return to the
   debugger.

   2) keep_going and return; to wait for the next event (set
   ecs->event_thread->stepping_over_breakpoint to 1 to single step
   once).  */

static void
handle_inferior_event (struct execution_control_state *ecs)
{
  /* Make sure that all temporary struct value objects that were
     created during the handling of the event get deleted at the
     end.  */
  scoped_value_mark free_values;

  enum stop_kind stop_soon;

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog, "infrun: handle_inferior_event %s\n",
			target_waitstatus_to_string (&ecs->ws).c_str ());

  if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
    {
      /* We had an event in the inferior, but we are not interested in
	 handling it at this level.  The lower layers have already
	 done what needs to be done, if anything.

	 One of the possible circumstances for this is when the
	 inferior produces output for the console.  The inferior has
	 not stopped, and we are ignoring the event.  Another possible
	 circumstance is any event which the lower level knows will be
	 reported multiple times without an intervening resume.  */
      prepare_to_wait (ecs);
      return;
    }

  /* A single thread exited; nothing to present to the user.  */
  if (ecs->ws.kind == TARGET_WAITKIND_THREAD_EXITED)
    {
      prepare_to_wait (ecs);
      return;
    }

  /* handle_no_resumed returns non-zero when the event was consumed.  */
  if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
      && handle_no_resumed (ecs))
    return;

  /* Cache the last target/ptid/waitstatus.  */
  set_last_target_status (ecs->target, ecs->ptid, ecs->ws);

  /* Always clear state belonging to the previous time we stopped.  */
  stop_stack_dummy = STOP_NONE;

  if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
    {
      /* No unwaited-for children left.  IOW, all resumed children
	 have exited.  */
      stop_print_frame = 0;
      stop_waiting (ecs);
      return;
    }

  if (ecs->ws.kind != TARGET_WAITKIND_EXITED
      && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
    {
      ecs->event_thread = find_thread_ptid (ecs->target, ecs->ptid);
      /* If it's a new thread, add it to the thread database.  */
      if (ecs->event_thread == NULL)
	ecs->event_thread = add_thread (ecs->target, ecs->ptid);

      /* Disable range stepping.  If the next step request could use a
	 range, this will be end up re-enabled then.  */
      ecs->event_thread->control.may_range_step = 0;
    }

  /* Dependent on valid ECS->EVENT_THREAD.  */
  adjust_pc_after_break (ecs->event_thread, &ecs->ws);

  /* Dependent on the current PC value modified by adjust_pc_after_break.  */
  reinit_frame_cache ();

  breakpoint_retire_moribund ();

  /* First, distinguish signals caused by the debugger from signals
     that have to do with the program's own actions.  Note that
     breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
     on the operating system version.  Here we detect when a SIGILL or
     SIGEMT is really a breakpoint and change it to SIGTRAP.  We do
     something similar for SIGSEGV, since a SIGSEGV will be generated
     when we're trying to execute a breakpoint instruction on a
     non-executable stack.  This happens for call dummy breakpoints
     for architectures like SPARC that place call dummies on the
     stack.  */
  if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
      && (ecs->ws.value.sig == GDB_SIGNAL_ILL
	  || ecs->ws.value.sig == GDB_SIGNAL_SEGV
	  || ecs->ws.value.sig == GDB_SIGNAL_EMT))
    {
      struct regcache *regcache = get_thread_regcache (ecs->event_thread);

      if (breakpoint_inserted_here_p (regcache->aspace (),
				      regcache_read_pc (regcache)))
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: Treating signal as SIGTRAP\n");
	  ecs->ws.value.sig = GDB_SIGNAL_TRAP;
	}
    }

  mark_non_executing_threads (ecs->target, ecs->ptid, ecs->ws);

  switch (ecs->ws.kind)
    {
    case TARGET_WAITKIND_LOADED:
      context_switch (ecs);
      /* Ignore gracefully during startup of the inferior, as it might
	 be the shell which has just loaded some objects, otherwise
	 add the symbols for the newly loaded objects.  Also ignore at
	 the beginning of an attach or remote session; we will query
	 the full list of libraries once the connection is
	 established.  */

      stop_soon = get_inferior_stop_soon (ecs);
      if (stop_soon == NO_STOP_QUIETLY)
	{
	  struct regcache *regcache;

	  regcache = get_thread_regcache (ecs->event_thread);

	  handle_solib_event ();

	  ecs->event_thread->control.stop_bpstat
	    = bpstat_stop_status (regcache->aspace (),
				  ecs->event_thread->suspend.stop_pc,
				  ecs->event_thread, &ecs->ws);

	  if (handle_stop_requested (ecs))
	    return;

	  if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
	    {
	      /* A catchpoint triggered.  */
	      process_event_stop_test (ecs);
	      return;
	    }

	  /* If requested, stop when the dynamic linker notifies
	     gdb of events.  This allows the user to get control
	     and place breakpoints in initializer routines for
	     dynamically loaded objects (among other things).  */
	  ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
	  if (stop_on_solib_events)
	    {
	      /* Make sure we print "Stopped due to solib-event" in
		 normal_stop.  */
	      stop_print_frame = 1;

	      stop_waiting (ecs);
	      return;
	    }
	}

      /* If we are skipping through a shell, or through shared library
	 loading that we aren't interested in, resume the program.  If
	 we're running the program normally, also resume.  */
      if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
	{
	  /* Loading of shared libraries might have changed breakpoint
	     addresses.  Make sure new breakpoints are inserted.  */
	  if (stop_soon == NO_STOP_QUIETLY)
	    insert_breakpoints ();
	  resume (GDB_SIGNAL_0);
	  prepare_to_wait (ecs);
	  return;
	}

      /* But stop if we're attaching or setting up a remote
	 connection.  */
      if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
	  || stop_soon == STOP_QUIETLY_REMOTE)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
	  stop_waiting (ecs);
	  return;
	}

      internal_error (__FILE__, __LINE__,
		      _("unhandled stop_soon: %d"), (int) stop_soon);

    case TARGET_WAITKIND_SPURIOUS:
      /* An event we don't further interpret; just resume and keep
	 waiting, unless a stop was explicitly requested.  */
      if (handle_stop_requested (ecs))
	return;
      context_switch (ecs);
      resume (GDB_SIGNAL_0);
      prepare_to_wait (ecs);
      return;

    case TARGET_WAITKIND_THREAD_CREATED:
      /* A new thread appeared; keep going unless control should
	 return to a previously-stepping thread.  */
      if (handle_stop_requested (ecs))
	return;
      context_switch (ecs);
      if (!switch_back_to_stepped_thread (ecs))
	keep_going (ecs);
      return;

    case TARGET_WAITKIND_EXITED:
    case TARGET_WAITKIND_SIGNALLED:
      {
	/* Depending on the system, ecs->ptid may point to a thread or
	   to a process.  On some targets, target_mourn_inferior may
	   need to have access to the just-exited thread.  That is the
	   case of GNU/Linux's "checkpoint" support, for example.
	   Call the switch_to_xxx routine as appropriate.  */
	thread_info *thr = find_thread_ptid (ecs->target, ecs->ptid);
	if (thr != nullptr)
	  switch_to_thread (thr);
	else
	  {
	    inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
	    switch_to_inferior_no_thread (inf);
	  }
      }
      handle_vfork_child_exec_or_exit (0);
      target_terminal::ours ();	/* Must do this before mourn anyway.  */

      /* Clearing any previous state of convenience variables.  */
      clear_exit_convenience_vars ();

      if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
	{
	  /* Record the exit code in the convenience variable $_exitcode, so
	     that the user can inspect this again later.  */
	  set_internalvar_integer (lookup_internalvar ("_exitcode"),
				   (LONGEST) ecs->ws.value.integer);

	  /* Also record this in the inferior itself.  */
	  current_inferior ()->has_exit_code = 1;
	  current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;

	  /* Support the --return-child-result option.  */
	  return_child_result_value = ecs->ws.value.integer;

	  gdb::observers::exited.notify (ecs->ws.value.integer);
	}
      else
	{
	  /* The inferior was killed by a signal.  */
	  struct gdbarch *gdbarch = current_inferior ()->gdbarch;

	  if (gdbarch_gdb_signal_to_target_p (gdbarch))
	    {
	      /* Set the value of the internal variable $_exitsignal,
		 which holds the signal uncaught by the inferior.  */
	      set_internalvar_integer (lookup_internalvar ("_exitsignal"),
				       gdbarch_gdb_signal_to_target (gdbarch,
								     ecs->ws.value.sig));
	    }
	  else
	    {
	      /* We don't have access to the target's method used for
		 converting between signal numbers (GDB's internal
		 representation <-> target's representation).
		 Therefore, we cannot do a good job at displaying this
		 information to the user.  It's better to just warn
		 her about it (if infrun debugging is enabled), and
		 give up.  */
	      if (debug_infrun)
		fprintf_filtered (gdb_stdlog, _("\
Cannot fill $_exitsignal with the correct signal number.\n"));
	    }

	  gdb::observers::signal_exited.notify (ecs->ws.value.sig);
	}

      gdb_flush (gdb_stdout);
      target_mourn_inferior (inferior_ptid);
      stop_print_frame = 0;
      stop_waiting (ecs);
      return;

    case TARGET_WAITKIND_FORKED:
    case TARGET_WAITKIND_VFORKED:
      /* Check whether the inferior is displaced stepping.  */
      {
	struct regcache *regcache = get_thread_regcache (ecs->event_thread);
	struct gdbarch *gdbarch = regcache->arch ();

	/* If checking displaced stepping is supported, and thread
	   ecs->ptid is displaced stepping.  */
	if (displaced_step_in_progress_thread (ecs->event_thread))
	  {
	    struct inferior *parent_inf
	      = find_inferior_ptid (ecs->target, ecs->ptid);
	    struct regcache *child_regcache;
	    CORE_ADDR parent_pc;

	    if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
	      {
		struct displaced_step_inferior_state *displaced
		  = get_displaced_stepping_state (parent_inf);

		/* Restore scratch pad for child process.  */
		displaced_step_restore (displaced, ecs->ws.value.related_pid);
	      }

	    /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
	       indicating that the displaced stepping of syscall instruction
	       has been done.  Perform cleanup for parent process here.  Note
	       that this operation also cleans up the child process for vfork,
	       because their pages are shared.  */
	    displaced_step_fixup (ecs->event_thread, GDB_SIGNAL_TRAP);
	    /* Start a new step-over in another thread if there's one
	       that needs it.  */
	    start_step_over ();

	    /* Since the vfork/fork syscall instruction was executed in the scratchpad,
	       the child's PC is also within the scratchpad.  Set the child's PC
	       to the parent's PC value, which has already been fixed up.
	       FIXME: we use the parent's aspace here, although we're touching
	       the child, because the child hasn't been added to the inferior
	       list yet at this point.  */

	    child_regcache
	      = get_thread_arch_aspace_regcache (parent_inf->process_target (),
						 ecs->ws.value.related_pid,
						 gdbarch,
						 parent_inf->aspace);
	    /* Read PC value of parent process.  */
	    parent_pc = regcache_read_pc (regcache);

	    if (debug_displaced)
	      fprintf_unfiltered (gdb_stdlog,
				  "displaced: write child pc from %s to %s\n",
				  paddress (gdbarch,
					    regcache_read_pc (child_regcache)),
				  paddress (gdbarch, parent_pc));

	    regcache_write_pc (child_regcache, parent_pc);
	  }
      }

      context_switch (ecs);

      /* Immediately detach breakpoints from the child before there's
	 any chance of letting the user delete breakpoints from the
	 breakpoint lists.  If we don't do this early, it's easy to
	 leave left over traps in the child, vis: "break foo; catch
	 fork; c; <fork>; del; c; <child calls foo>".  We only follow
	 the fork on the last `continue', and by that time the
	 breakpoint at "foo" is long gone from the breakpoint table.
	 If we vforked, then we don't need to unpatch here, since both
	 parent and child are sharing the same memory pages; we'll
	 need to unpatch at follow/detach time instead to be certain
	 that new breakpoints added between catchpoint hit time and
	 vfork follow are detached.  */
      if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
	{
	  /* This won't actually modify the breakpoint list, but will
	     physically remove the breakpoints from the child.  */
	  detach_breakpoints (ecs->ws.value.related_pid);
	}

      delete_just_stopped_threads_single_step_breakpoints ();

      /* In case the event is caught by a catchpoint, remember that
	 the event is to be followed at the next resume of the thread,
	 and not immediately.  */
      ecs->event_thread->pending_follow = ecs->ws;

      ecs->event_thread->suspend.stop_pc
	= regcache_read_pc (get_thread_regcache (ecs->event_thread));

      ecs->event_thread->control.stop_bpstat
	= bpstat_stop_status (get_current_regcache ()->aspace (),
			      ecs->event_thread->suspend.stop_pc,
			      ecs->event_thread, &ecs->ws);

      if (handle_stop_requested (ecs))
	return;

      /* If no catchpoint triggered for this, then keep going.  Note
	 that we're interested in knowing the bpstat actually causes a
	 stop, not just if it may explain the signal.  Software
	 watchpoints, for example, always appear in the bpstat.  */
      if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
	{
	  bool follow_child
	    = (follow_fork_mode_string == follow_fork_mode_child);

	  ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;

	  process_stratum_target *targ
	    = ecs->event_thread->inf->process_target ();

	  bool should_resume = follow_fork ();

	  /* Note that one of these may be an invalid pointer,
	     depending on detach_fork.  */
	  thread_info *parent = ecs->event_thread;
	  thread_info *child
	    = find_thread_ptid (targ, ecs->ws.value.related_pid);

	  /* At this point, the parent is marked running, and the
	     child is marked stopped.  */

	  /* If not resuming the parent, mark it stopped.  */
	  if (follow_child && !detach_fork && !non_stop && !sched_multi)
	    parent->set_running (false);

	  /* If resuming the child, mark it running.  */
	  if (follow_child || (!detach_fork && (non_stop || sched_multi)))
	    child->set_running (true);

	  /* In non-stop mode, also resume the other branch.  */
	  if (!detach_fork && (non_stop
			       || (sched_multi && target_is_non_stop_p ())))
	    {
	      if (follow_child)
		switch_to_thread (parent);
	      else
		switch_to_thread (child);

	      ecs->event_thread = inferior_thread ();
	      ecs->ptid = inferior_ptid;
	      keep_going (ecs);
	    }

	  if (follow_child)
	    switch_to_thread (child);
	  else
	    switch_to_thread (parent);

	  ecs->event_thread = inferior_thread ();
	  ecs->ptid = inferior_ptid;

	  if (should_resume)
	    keep_going (ecs);
	  else
	    stop_waiting (ecs);
	  return;
	}
      process_event_stop_test (ecs);
      return;

    case TARGET_WAITKIND_VFORK_DONE:
      /* Done with the shared memory region.  Re-insert breakpoints in
	 the parent, and keep going.  */

      context_switch (ecs);

      current_inferior ()->waiting_for_vfork_done = 0;
      current_inferior ()->pspace->breakpoints_not_allowed = 0;

      if (handle_stop_requested (ecs))
	return;

      /* This also takes care of reinserting breakpoints in the
	 previously locked inferior.  */
      keep_going (ecs);
      return;

    case TARGET_WAITKIND_EXECD:

      /* Note we can't read registers yet (the stop_pc), because we
	 don't yet know the inferior's post-exec architecture.
	 'stop_pc' is explicitly read below instead.  */
      switch_to_thread_no_regs (ecs->event_thread);

      /* Do whatever is necessary to the parent branch of the vfork.  */
      handle_vfork_child_exec_or_exit (1);

      /* This causes the eventpoints and symbol table to be reset.
	 Must do this now, before trying to determine whether to
	 stop.  */
      follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);

      /* In follow_exec we may have deleted the original thread and
	 created a new one.  Make sure that the event thread is the
	 execd thread for that case (this is a nop otherwise).  */
      ecs->event_thread = inferior_thread ();

      ecs->event_thread->suspend.stop_pc
	= regcache_read_pc (get_thread_regcache (ecs->event_thread));

      ecs->event_thread->control.stop_bpstat
	= bpstat_stop_status (get_current_regcache ()->aspace (),
			      ecs->event_thread->suspend.stop_pc,
			      ecs->event_thread, &ecs->ws);

      /* Note that this may be referenced from inside
	 bpstat_stop_status above, through inferior_has_execd.  */
      xfree (ecs->ws.value.execd_pathname);
      ecs->ws.value.execd_pathname = NULL;

      if (handle_stop_requested (ecs))
	return;

      /* If no catchpoint triggered for this, then keep going.  */
      if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
	{
	  ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
	  keep_going (ecs);
	  return;
	}
      process_event_stop_test (ecs);
      return;

      /* Be careful not to try to gather much state about a thread
	 that's in a syscall.  It's frequently a losing proposition.  */
    case TARGET_WAITKIND_SYSCALL_ENTRY:
      /* Getting the current syscall number.  */
      if (handle_syscall_event (ecs) == 0)
	process_event_stop_test (ecs);
      return;

      /* Before examining the threads further, step this thread to
	 get it entirely out of the syscall.  (We get notice of the
	 event when the thread is just on the verge of exiting a
	 syscall.  Stepping one instruction seems to get it back
	 into user code.)  */
    case TARGET_WAITKIND_SYSCALL_RETURN:
      if (handle_syscall_event (ecs) == 0)
	process_event_stop_test (ecs);
      return;

    case TARGET_WAITKIND_STOPPED:
      /* Thread stopped with a signal/breakpoint; the heavy lifting is
	 done in handle_signal_stop.  */
      handle_signal_stop (ecs);
      return;

    case TARGET_WAITKIND_NO_HISTORY:
      /* Reverse execution: target ran out of history info.  */

      /* Switch to the stopped thread.  */
      context_switch (ecs);
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");

      delete_just_stopped_threads_single_step_breakpoints ();
      ecs->event_thread->suspend.stop_pc
	= regcache_read_pc (get_thread_regcache (inferior_thread ()));

      if (handle_stop_requested (ecs))
	return;

      gdb::observers::no_history.notify ();
      stop_waiting (ecs);
      return;
    }
}
5692
5693 /* Restart threads back to what they were trying to do back when we
5694 paused them for an in-line step-over. The EVENT_THREAD thread is
5695 ignored. */
5696
static void
restart_threads (struct thread_info *event_thread)
{
  /* In case the instruction just stepped spawned a new thread.  */
  update_thread_list ();

  for (thread_info *tp : all_non_exited_threads ())
    {
      /* Make TP current for the per-thread queries below; no register
	 access is needed, so avoid fetching regcaches.  */
      switch_to_thread_no_regs (tp);

      /* The event thread is the caller's responsibility; leave it
	 alone (see the function's intro comment).  */
      if (tp == event_thread)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: restart threads: "
				"[%s] is event thread\n",
				target_pid_to_str (tp->ptid).c_str ());
	  continue;
	}

      /* Only restart threads that were supposed to be running (or
	 that are in the middle of an inferior function call).  */
      if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: restart threads: "
				"[%s] not meant to be running\n",
				target_pid_to_str (tp->ptid).c_str ());
	  continue;
	}

      /* Already resumed: nothing to do.  Such a thread must either
	 still be executing or have an event waiting to be
	 collected.  */
      if (tp->resumed)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: restart threads: [%s] resumed\n",
				target_pid_to_str (tp->ptid).c_str ());
	  gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
	  continue;
	}

      /* A thread still queued for its own step-over is resumed by the
	 step-over machinery, not here.  */
      if (thread_is_in_step_over_chain (tp))
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: restart threads: "
				"[%s] needs step-over\n",
				target_pid_to_str (tp->ptid).c_str ());
	  gdb_assert (!tp->resumed);
	  continue;
	}

      /* A thread whose event was already collected only needs to be
	 marked resumed, so its pending event is considered again
	 later instead of the thread being re-run.  */
      if (tp->suspend.waitstatus_pending_p)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: restart threads: "
				"[%s] has pending status\n",
				target_pid_to_str (tp->ptid).c_str ());
	  tp->resumed = true;
	  continue;
	}

      gdb_assert (!tp->stop_requested);

      /* If some thread needs to start a step-over at this point, it
	 should still be in the step-over queue, and thus skipped
	 above.  */
      if (thread_still_needs_step_over (tp))
	{
	  internal_error (__FILE__, __LINE__,
			  "thread [%s] needs a step-over, but not in "
			  "step-over queue\n",
			  target_pid_to_str (tp->ptid).c_str ());
	}

      if (currently_stepping (tp))
	{
	  /* The thread was mid-step when we paused it; resume the
	     step.  */
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: restart threads: [%s] was stepping\n",
				target_pid_to_str (tp->ptid).c_str ());
	  keep_going_stepped_thread (tp);
	}
      else
	{
	  /* Plain continue.  */
	  struct execution_control_state ecss;
	  struct execution_control_state *ecs = &ecss;

	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: restart threads: [%s] continuing\n",
				target_pid_to_str (tp->ptid).c_str ());
	  reset_ecs (ecs, tp);
	  switch_to_thread (tp);
	  keep_going_pass_signal (ecs);
	}
    }
}
5796
5797 /* Callback for iterate_over_threads. Find a resumed thread that has
5798 a pending waitstatus. */
5799
5800 static int
5801 resumed_thread_with_pending_status (struct thread_info *tp,
5802 void *arg)
5803 {
5804 return (tp->resumed
5805 && tp->suspend.waitstatus_pending_p);
5806 }
5807
5808 /* Called when we get an event that may finish an in-line or
5809 out-of-line (displaced stepping) step-over started previously.
5810 Return true if the event is processed and we should go back to the
5811 event loop; false if the caller should continue processing the
5812 event. */
5813
static int
finish_step_over (struct execution_control_state *ecs)
{
  int had_step_over_info;

  /* If the event thread just completed a displaced (out-of-line)
     single-step, fix up its state now; this typically adjusts the PC
     back from the scratch area.  */
  displaced_step_fixup (ecs->event_thread,
			ecs->event_thread->suspend.stop_signal);

  /* Remember whether an in-line step-over was in progress before we
     clear that state below.  */
  had_step_over_info = step_over_info_valid_p ();

  if (had_step_over_info)
    {
      /* If we're stepping over a breakpoint with all threads locked,
	 then only the thread that was stepped should be reporting
	 back an event.  */
      gdb_assert (ecs->event_thread->control.trap_expected);

      clear_step_over_info ();
    }

  /* The restarting logic below only applies to non-stop targets;
     otherwise, let the caller keep processing the event.  */
  if (!target_is_non_stop_p ())
    return 0;

  /* Start a new step-over in another thread if there's one that
     needs it.  */
  start_step_over ();

  /* If we were stepping over a breakpoint before, and haven't started
     a new in-line step-over sequence, then restart all other threads
     (except the event thread).  We can't do this in all-stop, as then
     e.g., we wouldn't be able to issue any other remote packet until
     these other threads stop.  */
  if (had_step_over_info && !step_over_info_valid_p ())
    {
      struct thread_info *pending;

      /* If we only have threads with pending statuses, the restart
	 below won't restart any thread and so nothing re-inserts the
	 breakpoint we just stepped over.  But we need it inserted
	 when we later process the pending events, otherwise if
	 another thread has a pending event for this breakpoint too,
	 we'd discard its event (because the breakpoint that
	 originally caused the event was no longer inserted).  */
      context_switch (ecs);
      insert_breakpoints ();

      restart_threads (ecs->event_thread);

      /* If we have events pending, go through handle_inferior_event
	 again, picking up a pending event at random.  This avoids
	 thread starvation.  */

      /* But not if we just stepped over a watchpoint in order to let
	 the instruction execute so we can evaluate its expression.
	 The set of watchpoints that triggered is recorded in the
	 breakpoint objects themselves (see bp->watchpoint_triggered).
	 If we processed another event first, that other event could
	 clobber this info.  */
      if (ecs->event_thread->stepping_over_watchpoint)
	return 0;

      pending = iterate_over_threads (resumed_thread_with_pending_status,
				      NULL);
      if (pending != NULL)
	{
	  struct thread_info *tp = ecs->event_thread;
	  struct regcache *regcache;

	  if (debug_infrun)
	    {
	      fprintf_unfiltered (gdb_stdlog,
				  "infrun: found resumed threads with "
				  "pending events, saving status\n");
	    }

	  gdb_assert (pending != tp);

	  /* Record the event thread's event for later.  */
	  save_waitstatus (tp, &ecs->ws);
	  /* This was cleared early, by handle_inferior_event.  Set it
	     so this pending event is considered by
	     do_target_wait.  */
	  tp->resumed = true;

	  gdb_assert (!tp->executing);

	  regcache = get_thread_regcache (tp);
	  tp->suspend.stop_pc = regcache_read_pc (regcache);

	  if (debug_infrun)
	    {
	      fprintf_unfiltered (gdb_stdlog,
				  "infrun: saved stop_pc=%s for %s "
				  "(currently_stepping=%d)\n",
				  paddress (target_gdbarch (),
					    tp->suspend.stop_pc),
				  target_pid_to_str (tp->ptid).c_str (),
				  currently_stepping (tp));
	    }

	  /* This in-line step-over finished; clear this so we won't
	     start a new one.  This is what handle_signal_stop would
	     do, if we returned false.  */
	  tp->stepping_over_breakpoint = 0;

	  /* Wake up the event loop again.  */
	  mark_async_event_handler (infrun_async_inferior_event_token);

	  /* Tell the caller the event was consumed; go back to the
	     event loop and wait for the saved event to be picked
	     up.  */
	  prepare_to_wait (ecs);
	  return 1;
	}
    }

  return 0;
}
5929
5930 /* Come here when the program has stopped with a signal. */
5931
static void
handle_signal_stop (struct execution_control_state *ecs)
{
  struct frame_info *frame;
  struct gdbarch *gdbarch;
  int stopped_by_watchpoint;
  enum stop_kind stop_soon;
  int random_signal;

  gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);

  /* Record the signal that caused the stop.  */
  ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;

  /* Do we need to clean up the state of a thread that has
     completed a displaced single-step?  (Doing so usually affects
     the PC, so do it here, before we set stop_pc.)  */
  if (finish_step_over (ecs))
    return;

  /* If we either finished a single-step or hit a breakpoint, but
     the user wanted this thread to be stopped, pretend we got a
     SIG0 (generic unsignaled stop).  */
  if (ecs->event_thread->stop_requested
      && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
    ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;

  /* Record where the thread stopped; read after the displaced-step
     fixup above so the PC is correct.  */
  ecs->event_thread->suspend.stop_pc
    = regcache_read_pc (get_thread_regcache (ecs->event_thread));

  if (debug_infrun)
    {
      struct regcache *regcache = get_thread_regcache (ecs->event_thread);
      struct gdbarch *reg_gdbarch = regcache->arch ();

      switch_to_thread (ecs->event_thread);

      fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
			  paddress (reg_gdbarch,
				    ecs->event_thread->suspend.stop_pc));
      if (target_stopped_by_watchpoint ())
	{
	  CORE_ADDR addr;

	  fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");

	  if (target_stopped_data_address (current_top_target (), &addr))
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: stopped data address = %s\n",
				paddress (reg_gdbarch, addr));
	  else
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: (no data address available)\n");
	}
    }

  /* This is originated from start_remote(), start_inferior() and
     shared libraries hook functions.  */
  stop_soon = get_inferior_stop_soon (ecs);
  if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
    {
      context_switch (ecs);
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
      stop_print_frame = 1;
      stop_waiting (ecs);
      return;
    }

  /* This originates from attach_command().  We need to overwrite
     the stop_signal here, because some kernels don't ignore a
     SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
     See more comments in inferior.h.  On the other hand, if we
     get a non-SIGSTOP, report it to the user - assume the backend
     will handle the SIGSTOP if it should show up later.

     Also consider that the attach is complete when we see a
     SIGTRAP.  Some systems (e.g. Windows), and stubs supporting
     target extended-remote report it instead of a SIGSTOP
     (e.g. gdbserver).  We already rely on SIGTRAP being our
     signal, so this is no exception.

     Also consider that the attach is complete when we see a
     GDB_SIGNAL_0.  In non-stop mode, GDB will explicitly tell
     the target to stop all threads of the inferior, in case the
     low level attach operation doesn't stop them implicitly.  If
     they weren't stopped implicitly, then the stub will report a
     GDB_SIGNAL_0, meaning: stopped for no particular reason
     other than GDB's request.  */
  if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
      && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
	  || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
	  || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
    {
      stop_print_frame = 1;
      stop_waiting (ecs);
      ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
      return;
    }

  /* See if something interesting happened to the non-current thread.  If
     so, then switch to that thread.  */
  if (ecs->ptid != inferior_ptid)
    {
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");

      context_switch (ecs);

      if (deprecated_context_hook)
	deprecated_context_hook (ecs->event_thread->global_num);
    }

  /* At this point, get hold of the now-current thread's frame.  */
  frame = get_current_frame ();
  gdbarch = get_frame_arch (frame);

  /* Pull the single step breakpoints out of the target.  */
  if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
    {
      struct regcache *regcache;
      CORE_ADDR pc;

      regcache = get_thread_regcache (ecs->event_thread);
      const address_space *aspace = regcache->aspace ();

      pc = regcache_read_pc (regcache);

      /* However, before doing so, if this single-step breakpoint was
	 actually for another thread, set this thread up for moving
	 past it.  */
      if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
						   aspace, pc))
	{
	  if (single_step_breakpoint_inserted_here_p (aspace, pc))
	    {
	      if (debug_infrun)
		{
		  fprintf_unfiltered (gdb_stdlog,
				      "infrun: [%s] hit another thread's "
				      "single-step breakpoint\n",
				      target_pid_to_str (ecs->ptid).c_str ());
		}
	      ecs->hit_singlestep_breakpoint = 1;
	    }
	}
      else
	{
	  if (debug_infrun)
	    {
	      fprintf_unfiltered (gdb_stdlog,
				  "infrun: [%s] hit its "
				  "single-step breakpoint\n",
				  target_pid_to_str (ecs->ptid).c_str ());
	    }
	}
    }
  delete_just_stopped_threads_single_step_breakpoints ();

  /* A trap we expected while deliberately stepping past a watchpoint
     is not a watchpoint hit; otherwise ask the target/breakpoints
     module whether a watchpoint triggered.  */
  if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
      && ecs->event_thread->control.trap_expected
      && ecs->event_thread->stepping_over_watchpoint)
    stopped_by_watchpoint = 0;
  else
    stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);

  /* If necessary, step over this watchpoint.  We'll be back to display
     it in a moment.  */
  if (stopped_by_watchpoint
      && (target_have_steppable_watchpoint
	  || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
    {
      /* At this point, we are stopped at an instruction which has
	 attempted to write to a piece of memory under control of
	 a watchpoint.  The instruction hasn't actually executed
	 yet.  If we were to evaluate the watchpoint expression
	 now, we would get the old value, and therefore no change
	 would seem to have occurred.

	 In order to make watchpoints work `right', we really need
	 to complete the memory write, and then evaluate the
	 watchpoint expression.  We do this by single-stepping the
	 target.

	 It may not be necessary to disable the watchpoint to step over
	 it.  For example, the PA can (with some kernel cooperation)
	 single step over a watchpoint without disabling the watchpoint.

	 It is far more common to need to disable a watchpoint to step
	 the inferior over it.  If we have non-steppable watchpoints,
	 we must disable the current watchpoint; it's simplest to
	 disable all watchpoints.

	 Any breakpoint at PC must also be stepped over -- if there's
	 one, it will have already triggered before the watchpoint
	 triggered, and we either already reported it to the user, or
	 it didn't cause a stop and we called keep_going.  In either
	 case, if there was a breakpoint at PC, we must be trying to
	 step past it.  */
      ecs->event_thread->stepping_over_watchpoint = 1;
      keep_going (ecs);
      return;
    }

  /* Reset per-stop state before deciding whether this event should
     cause a user-visible stop.  */
  ecs->event_thread->stepping_over_breakpoint = 0;
  ecs->event_thread->stepping_over_watchpoint = 0;
  bpstat_clear (&ecs->event_thread->control.stop_bpstat);
  ecs->event_thread->control.stop_step = 0;
  stop_print_frame = 1;
  stopped_by_random_signal = 0;
  bpstat stop_chain = NULL;

  /* Hide inlined functions starting here, unless we just performed stepi or
     nexti.  After stepi and nexti, always show the innermost frame (not any
     inline function call sites).  */
  if (ecs->event_thread->control.step_range_end != 1)
    {
      const address_space *aspace
	= get_thread_regcache (ecs->event_thread)->aspace ();

      /* skip_inline_frames is expensive, so we avoid it if we can
	 determine that the address is one where functions cannot have
	 been inlined.  This improves performance with inferiors that
	 load a lot of shared libraries, because the solib event
	 breakpoint is defined as the address of a function (i.e. not
	 inline).  Note that we have to check the previous PC as well
	 as the current one to catch cases when we have just
	 single-stepped off a breakpoint prior to reinstating it.
	 Note that we're assuming that the code we single-step to is
	 not inline, but that's not definitive: there's nothing
	 preventing the event breakpoint function from containing
	 inlined code, and the single-step ending up there.  If the
	 user had set a breakpoint on that inlined code, the missing
	 skip_inline_frames call would break things.  Fortunately
	 that's an extremely unlikely scenario.  */
      if (!pc_at_non_inline_function (aspace,
				      ecs->event_thread->suspend.stop_pc,
				      &ecs->ws)
	  && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
	       && ecs->event_thread->control.trap_expected
	       && pc_at_non_inline_function (aspace,
					     ecs->event_thread->prev_pc,
					     &ecs->ws)))
	{
	  stop_chain = build_bpstat_chain (aspace,
					   ecs->event_thread->suspend.stop_pc,
					   &ecs->ws);
	  skip_inline_frames (ecs->event_thread, stop_chain);

	  /* Re-fetch current thread's frame in case that invalidated
	     the frame cache.  */
	  frame = get_current_frame ();
	  gdbarch = get_frame_arch (frame);
	}
    }

  if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
      && ecs->event_thread->control.trap_expected
      && gdbarch_single_step_through_delay_p (gdbarch)
      && currently_stepping (ecs->event_thread))
    {
      /* We're trying to step off a breakpoint.  Turns out that we're
	 also on an instruction that needs to be stepped multiple
	 times before it's been fully executing.  E.g., architectures
	 with a delay slot.  It needs to be stepped twice, once for
	 the instruction and once for the delay slot.  */
      int step_through_delay
	= gdbarch_single_step_through_delay (gdbarch, frame);

      if (debug_infrun && step_through_delay)
	fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
      if (ecs->event_thread->control.step_range_end == 0
	  && step_through_delay)
	{
	  /* The user issued a continue when stopped at a breakpoint.
	     Set up for another trap and get out of here.  */
	  ecs->event_thread->stepping_over_breakpoint = 1;
	  keep_going (ecs);
	  return;
	}
      else if (step_through_delay)
	{
	  /* The user issued a step when stopped at a breakpoint.
	     Maybe we should stop, maybe we should not - the delay
	     slot *might* correspond to a line of source.  In any
	     case, don't decide that here, just set
	     ecs->stepping_over_breakpoint, making sure we
	     single-step again before breakpoints are re-inserted.  */
	  ecs->event_thread->stepping_over_breakpoint = 1;
	}
    }

  /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
     handles this event.  */
  ecs->event_thread->control.stop_bpstat
    = bpstat_stop_status (get_current_regcache ()->aspace (),
			  ecs->event_thread->suspend.stop_pc,
			  ecs->event_thread, &ecs->ws, stop_chain);

  /* Following in case break condition called a
     function.  */
  stop_print_frame = 1;

  /* This is where we handle "moribund" watchpoints.  Unlike
     software breakpoints traps, hardware watchpoint traps are
     always distinguishable from random traps.  If no high-level
     watchpoint is associated with the reported stop data address
     anymore, then the bpstat does not explain the signal ---
     simply make sure to ignore it if `stopped_by_watchpoint' is
     set.  */

  if (debug_infrun
      && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
      && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
				  GDB_SIGNAL_TRAP)
      && stopped_by_watchpoint)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: no user watchpoint explains "
			"watchpoint SIGTRAP, ignoring\n");

  /* NOTE: cagney/2003-03-29: These checks for a random signal
     at one stage in the past included checks for an inferior
     function call's call dummy's return breakpoint.  The original
     comment, that went with the test, read:

     ``End of a stack dummy.  Some systems (e.g. Sony news) give
     another signal besides SIGTRAP, so check here as well as
     above.''

     If someone ever tries to get call dummys on a
     non-executable stack to work (where the target would stop
     with something like a SIGSEGV), then those tests might need
     to be re-instated.  Given, however, that the tests were only
     enabled when momentary breakpoints were not being used, I
     suspect that it won't be the case.

     NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
     be necessary for call dummies on a non-executable stack on
     SPARC.  */

  /* See if the breakpoints module can explain the signal.  */
  random_signal
    = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
			       ecs->event_thread->suspend.stop_signal);

  /* Maybe this was a trap for a software breakpoint that has since
     been removed.  */
  if (random_signal && target_stopped_by_sw_breakpoint ())
    {
      if (gdbarch_program_breakpoint_here_p (gdbarch,
					     ecs->event_thread->suspend.stop_pc))
	{
	  struct regcache *regcache;
	  int decr_pc;

	  /* Re-adjust PC to what the program would see if GDB was not
	     debugging it.  */
	  regcache = get_thread_regcache (ecs->event_thread);
	  decr_pc = gdbarch_decr_pc_after_break (gdbarch);
	  if (decr_pc != 0)
	    {
	      gdb::optional<scoped_restore_tmpl<int>>
		restore_operation_disable;

	      /* When recording, write the PC adjustment without
		 logging it as an inferior change.  */
	      if (record_full_is_used ())
		restore_operation_disable.emplace
		  (record_full_gdb_operation_disable_set ());

	      regcache_write_pc (regcache,
				 ecs->event_thread->suspend.stop_pc + decr_pc);
	    }
	}
      else
	{
	  /* A delayed software breakpoint event.  Ignore the trap.  */
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: delayed software breakpoint "
				"trap, ignoring\n");
	  random_signal = 0;
	}
    }

  /* Maybe this was a trap for a hardware breakpoint/watchpoint that
     has since been removed.  */
  if (random_signal && target_stopped_by_hw_breakpoint ())
    {
      /* A delayed hardware breakpoint event.  Ignore the trap.  */
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: delayed hardware breakpoint/watchpoint "
			    "trap, ignoring\n");
      random_signal = 0;
    }

  /* If not, perhaps stepping/nexting can.  */
  if (random_signal)
    random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
		      && currently_stepping (ecs->event_thread));

  /* Perhaps the thread hit a single-step breakpoint of _another_
     thread.  Single-step breakpoints are transparent to the
     breakpoints module.  */
  if (random_signal)
    random_signal = !ecs->hit_singlestep_breakpoint;

  /* No?  Perhaps we got a moribund watchpoint.  */
  if (random_signal)
    random_signal = !stopped_by_watchpoint;

  /* Always stop if the user explicitly requested this thread to
     remain stopped.  */
  if (ecs->event_thread->stop_requested)
    {
      random_signal = 1;
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: user-requested stop\n");
    }

  /* For the program's own signals, act according to
     the signal handling tables.  */

  if (random_signal)
    {
      /* Signal not for debugging purposes.  */
      struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
      enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;

      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: random signal (%s)\n",
			    gdb_signal_to_symbol_string (stop_signal));

      stopped_by_random_signal = 1;

      /* Always stop on signals if we're either just gaining control
	 of the program, or the user explicitly requested this thread
	 to remain stopped.  */
      if (stop_soon != NO_STOP_QUIETLY
	  || ecs->event_thread->stop_requested
	  || (!inf->detaching
	      && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
	{
	  stop_waiting (ecs);
	  return;
	}

      /* Notify observers the signal has "handle print" set.  Note we
	 returned early above if stopping; normal_stop handles the
	 printing in that case.  */
      if (signal_print[ecs->event_thread->suspend.stop_signal])
	{
	  /* The signal table tells us to print about this signal.  */
	  target_terminal::ours_for_output ();
	  gdb::observers::signal_received.notify (ecs->event_thread->suspend.stop_signal);
	  target_terminal::inferior ();
	}

      /* Clear the signal if it should not be passed.  */
      if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
	ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;

      if (ecs->event_thread->prev_pc == ecs->event_thread->suspend.stop_pc
	  && ecs->event_thread->control.trap_expected
	  && ecs->event_thread->control.step_resume_breakpoint == NULL)
	{
	  /* We were just starting a new sequence, attempting to
	     single-step off of a breakpoint and expecting a SIGTRAP.
	     Instead this signal arrives.  This signal will take us out
	     of the stepping range so GDB needs to remember to, when
	     the signal handler returns, resume stepping off that
	     breakpoint.  */
	  /* To simplify things, "continue" is forced to use the same
	     code paths as single-step - set a breakpoint at the
	     signal return address and then, once hit, step off that
	     breakpoint.  */
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: signal arrived while stepping over "
				"breakpoint\n");

	  insert_hp_step_resume_breakpoint_at_frame (frame);
	  ecs->event_thread->step_after_step_resume_breakpoint = 1;
	  /* Reset trap_expected to ensure breakpoints are re-inserted.  */
	  ecs->event_thread->control.trap_expected = 0;

	  /* If we were nexting/stepping some other thread, switch to
	     it, so that we don't continue it, losing control.  */
	  if (!switch_back_to_stepped_thread (ecs))
	    keep_going (ecs);
	  return;
	}

      if (ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
	  && (pc_in_thread_step_range (ecs->event_thread->suspend.stop_pc,
				       ecs->event_thread)
	      || ecs->event_thread->control.step_range_end == 1)
	  && frame_id_eq (get_stack_frame_id (frame),
			  ecs->event_thread->control.step_stack_frame_id)
	  && ecs->event_thread->control.step_resume_breakpoint == NULL)
	{
	  /* The inferior is about to take a signal that will take it
	     out of the single step range.  Set a breakpoint at the
	     current PC (which is presumably where the signal handler
	     will eventually return) and then allow the inferior to
	     run free.

	     Note that this is only needed for a signal delivered
	     while in the single-step range.  Nested signals aren't a
	     problem as they eventually all return.  */
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: signal may take us out of "
				"single-step range\n");

	  clear_step_over_info ();
	  insert_hp_step_resume_breakpoint_at_frame (frame);
	  ecs->event_thread->step_after_step_resume_breakpoint = 1;
	  /* Reset trap_expected to ensure breakpoints are re-inserted.  */
	  ecs->event_thread->control.trap_expected = 0;
	  keep_going (ecs);
	  return;
	}

      /* Note: step_resume_breakpoint may be non-NULL.  This occurs
	 when either there's a nested signal, or when there's a
	 pending signal enabled just as the signal handler returns
	 (leaving the inferior at the step-resume-breakpoint without
	 actually executing it).  Either way continue until the
	 breakpoint is really hit.  */

      if (!switch_back_to_stepped_thread (ecs))
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: random signal, keep going\n");

	  keep_going (ecs);
	}
      return;
    }

  /* The signal is explained by a breakpoint/watchpoint or by
     stepping; decide whether it should cause a user-visible stop.  */
  process_event_stop_test (ecs);
}
6474
6475 /* Come here when we've got some debug event / signal we can explain
6476 (IOW, not a random signal), and test whether it should cause a
6477 stop, or whether we should resume the inferior (transparently).
6478 E.g., could be a breakpoint whose condition evaluates false; we
6479 could be still stepping within the line; etc. */
6480
6481 static void
6482 process_event_stop_test (struct execution_control_state *ecs)
6483 {
6484 struct symtab_and_line stop_pc_sal;
6485 struct frame_info *frame;
6486 struct gdbarch *gdbarch;
6487 CORE_ADDR jmp_buf_pc;
6488 struct bpstat_what what;
6489
6490 /* Handle cases caused by hitting a breakpoint. */
6491
6492 frame = get_current_frame ();
6493 gdbarch = get_frame_arch (frame);
6494
6495 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
6496
6497 if (what.call_dummy)
6498 {
6499 stop_stack_dummy = what.call_dummy;
6500 }
6501
6502 /* A few breakpoint types have callbacks associated (e.g.,
6503 bp_jit_event). Run them now. */
6504 bpstat_run_callbacks (ecs->event_thread->control.stop_bpstat);
6505
6506 /* If we hit an internal event that triggers symbol changes, the
6507 current frame will be invalidated within bpstat_what (e.g., if we
6508 hit an internal solib event). Re-fetch it. */
6509 frame = get_current_frame ();
6510 gdbarch = get_frame_arch (frame);
6511
6512 switch (what.main_action)
6513 {
6514 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
6515 /* If we hit the breakpoint at longjmp while stepping, we
6516 install a momentary breakpoint at the target of the
6517 jmp_buf. */
6518
6519 if (debug_infrun)
6520 fprintf_unfiltered (gdb_stdlog,
6521 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
6522
6523 ecs->event_thread->stepping_over_breakpoint = 1;
6524
6525 if (what.is_longjmp)
6526 {
6527 struct value *arg_value;
6528
6529 /* If we set the longjmp breakpoint via a SystemTap probe,
6530 then use it to extract the arguments. The destination PC
6531 is the third argument to the probe. */
6532 arg_value = probe_safe_evaluate_at_pc (frame, 2);
6533 if (arg_value)
6534 {
6535 jmp_buf_pc = value_as_address (arg_value);
6536 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
6537 }
6538 else if (!gdbarch_get_longjmp_target_p (gdbarch)
6539 || !gdbarch_get_longjmp_target (gdbarch,
6540 frame, &jmp_buf_pc))
6541 {
6542 if (debug_infrun)
6543 fprintf_unfiltered (gdb_stdlog,
6544 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
6545 "(!gdbarch_get_longjmp_target)\n");
6546 keep_going (ecs);
6547 return;
6548 }
6549
6550 /* Insert a breakpoint at resume address. */
6551 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
6552 }
6553 else
6554 check_exception_resume (ecs, frame);
6555 keep_going (ecs);
6556 return;
6557
6558 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
6559 {
6560 struct frame_info *init_frame;
6561
6562 /* There are several cases to consider.
6563
6564 1. The initiating frame no longer exists. In this case we
6565 must stop, because the exception or longjmp has gone too
6566 far.
6567
6568 2. The initiating frame exists, and is the same as the
6569 current frame. We stop, because the exception or longjmp
6570 has been caught.
6571
6572 3. The initiating frame exists and is different from the
6573 current frame. This means the exception or longjmp has
6574 been caught beneath the initiating frame, so keep going.
6575
6576 4. longjmp breakpoint has been placed just to protect
6577 against stale dummy frames and user is not interested in
6578 stopping around longjmps. */
6579
6580 if (debug_infrun)
6581 fprintf_unfiltered (gdb_stdlog,
6582 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
6583
6584 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
6585 != NULL);
6586 delete_exception_resume_breakpoint (ecs->event_thread);
6587
6588 if (what.is_longjmp)
6589 {
6590 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
6591
6592 if (!frame_id_p (ecs->event_thread->initiating_frame))
6593 {
6594 /* Case 4. */
6595 keep_going (ecs);
6596 return;
6597 }
6598 }
6599
6600 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
6601
6602 if (init_frame)
6603 {
6604 struct frame_id current_id
6605 = get_frame_id (get_current_frame ());
6606 if (frame_id_eq (current_id,
6607 ecs->event_thread->initiating_frame))
6608 {
6609 /* Case 2. Fall through. */
6610 }
6611 else
6612 {
6613 /* Case 3. */
6614 keep_going (ecs);
6615 return;
6616 }
6617 }
6618
6619 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
6620 exists. */
6621 delete_step_resume_breakpoint (ecs->event_thread);
6622
6623 end_stepping_range (ecs);
6624 }
6625 return;
6626
6627 case BPSTAT_WHAT_SINGLE:
6628 if (debug_infrun)
6629 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
6630 ecs->event_thread->stepping_over_breakpoint = 1;
6631 /* Still need to check other stuff, at least the case where we
6632 are stepping and step out of the right range. */
6633 break;
6634
6635 case BPSTAT_WHAT_STEP_RESUME:
6636 if (debug_infrun)
6637 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
6638
6639 delete_step_resume_breakpoint (ecs->event_thread);
6640 if (ecs->event_thread->control.proceed_to_finish
6641 && execution_direction == EXEC_REVERSE)
6642 {
6643 struct thread_info *tp = ecs->event_thread;
6644
6645 /* We are finishing a function in reverse, and just hit the
6646 step-resume breakpoint at the start address of the
6647 function, and we're almost there -- just need to back up
6648 by one more single-step, which should take us back to the
6649 function call. */
6650 tp->control.step_range_start = tp->control.step_range_end = 1;
6651 keep_going (ecs);
6652 return;
6653 }
6654 fill_in_stop_func (gdbarch, ecs);
6655 if (ecs->event_thread->suspend.stop_pc == ecs->stop_func_start
6656 && execution_direction == EXEC_REVERSE)
6657 {
6658 /* We are stepping over a function call in reverse, and just
6659 hit the step-resume breakpoint at the start address of
6660 the function. Go back to single-stepping, which should
6661 take us back to the function call. */
6662 ecs->event_thread->stepping_over_breakpoint = 1;
6663 keep_going (ecs);
6664 return;
6665 }
6666 break;
6667
6668 case BPSTAT_WHAT_STOP_NOISY:
6669 if (debug_infrun)
6670 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
6671 stop_print_frame = 1;
6672
6673 /* Assume the thread stopped for a breakpoint. We'll still check
6674 whether a/the breakpoint is there when the thread is next
6675 resumed. */
6676 ecs->event_thread->stepping_over_breakpoint = 1;
6677
6678 stop_waiting (ecs);
6679 return;
6680
6681 case BPSTAT_WHAT_STOP_SILENT:
6682 if (debug_infrun)
6683 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
6684 stop_print_frame = 0;
6685
6686 /* Assume the thread stopped for a breakpoint. We'll still check
6687 whether a/the breakpoint is there when the thread is next
6688 resumed. */
6689 ecs->event_thread->stepping_over_breakpoint = 1;
6690 stop_waiting (ecs);
6691 return;
6692
6693 case BPSTAT_WHAT_HP_STEP_RESUME:
6694 if (debug_infrun)
6695 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
6696
6697 delete_step_resume_breakpoint (ecs->event_thread);
6698 if (ecs->event_thread->step_after_step_resume_breakpoint)
6699 {
6700 /* Back when the step-resume breakpoint was inserted, we
6701 were trying to single-step off a breakpoint. Go back to
6702 doing that. */
6703 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6704 ecs->event_thread->stepping_over_breakpoint = 1;
6705 keep_going (ecs);
6706 return;
6707 }
6708 break;
6709
6710 case BPSTAT_WHAT_KEEP_CHECKING:
6711 break;
6712 }
6713
6714 /* If we stepped a permanent breakpoint and we had a high priority
6715 step-resume breakpoint for the address we stepped, but we didn't
6716 hit it, then we must have stepped into the signal handler. The
6717 step-resume was only necessary to catch the case of _not_
6718 stepping into the handler, so delete it, and fall through to
6719 checking whether the step finished. */
6720 if (ecs->event_thread->stepped_breakpoint)
6721 {
6722 struct breakpoint *sr_bp
6723 = ecs->event_thread->control.step_resume_breakpoint;
6724
6725 if (sr_bp != NULL
6726 && sr_bp->loc->permanent
6727 && sr_bp->type == bp_hp_step_resume
6728 && sr_bp->loc->address == ecs->event_thread->prev_pc)
6729 {
6730 if (debug_infrun)
6731 fprintf_unfiltered (gdb_stdlog,
6732 "infrun: stepped permanent breakpoint, stopped in "
6733 "handler\n");
6734 delete_step_resume_breakpoint (ecs->event_thread);
6735 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6736 }
6737 }
6738
6739 /* We come here if we hit a breakpoint but should not stop for it.
6740 Possibly we also were stepping and should stop for that. So fall
6741 through and test for stepping. But, if not stepping, do not
6742 stop. */
6743
6744 /* In all-stop mode, if we're currently stepping but have stopped in
6745 some other thread, we need to switch back to the stepped thread. */
6746 if (switch_back_to_stepped_thread (ecs))
6747 return;
6748
6749 if (ecs->event_thread->control.step_resume_breakpoint)
6750 {
6751 if (debug_infrun)
6752 fprintf_unfiltered (gdb_stdlog,
6753 "infrun: step-resume breakpoint is inserted\n");
6754
6755 /* Having a step-resume breakpoint overrides anything
6756 else having to do with stepping commands until
6757 that breakpoint is reached. */
6758 keep_going (ecs);
6759 return;
6760 }
6761
6762 if (ecs->event_thread->control.step_range_end == 0)
6763 {
6764 if (debug_infrun)
6765 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
6766 /* Likewise if we aren't even stepping. */
6767 keep_going (ecs);
6768 return;
6769 }
6770
6771 /* Re-fetch current thread's frame in case the code above caused
6772 the frame cache to be re-initialized, making our FRAME variable
6773 a dangling pointer. */
6774 frame = get_current_frame ();
6775 gdbarch = get_frame_arch (frame);
6776 fill_in_stop_func (gdbarch, ecs);
6777
6778 /* If stepping through a line, keep going if still within it.
6779
6780 Note that step_range_end is the address of the first instruction
6781 beyond the step range, and NOT the address of the last instruction
6782 within it!
6783
6784 Note also that during reverse execution, we may be stepping
6785 through a function epilogue and therefore must detect when
6786 the current-frame changes in the middle of a line. */
6787
6788 if (pc_in_thread_step_range (ecs->event_thread->suspend.stop_pc,
6789 ecs->event_thread)
6790 && (execution_direction != EXEC_REVERSE
6791 || frame_id_eq (get_frame_id (frame),
6792 ecs->event_thread->control.step_frame_id)))
6793 {
6794 if (debug_infrun)
6795 fprintf_unfiltered
6796 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
6797 paddress (gdbarch, ecs->event_thread->control.step_range_start),
6798 paddress (gdbarch, ecs->event_thread->control.step_range_end));
6799
6800 /* Tentatively re-enable range stepping; `resume' disables it if
6801 necessary (e.g., if we're stepping over a breakpoint or we
6802 have software watchpoints). */
6803 ecs->event_thread->control.may_range_step = 1;
6804
6805 /* When stepping backward, stop at beginning of line range
6806 (unless it's the function entry point, in which case
6807 keep going back to the call point). */
6808 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
6809 if (stop_pc == ecs->event_thread->control.step_range_start
6810 && stop_pc != ecs->stop_func_start
6811 && execution_direction == EXEC_REVERSE)
6812 end_stepping_range (ecs);
6813 else
6814 keep_going (ecs);
6815
6816 return;
6817 }
6818
6819 /* We stepped out of the stepping range. */
6820
6821 /* If we are stepping at the source level and entered the runtime
6822 loader dynamic symbol resolution code...
6823
6824 EXEC_FORWARD: we keep on single stepping until we exit the run
6825 time loader code and reach the callee's address.
6826
6827 EXEC_REVERSE: we've already executed the callee (backward), and
6828 the runtime loader code is handled just like any other
6829 undebuggable function call. Now we need only keep stepping
6830 backward through the trampoline code, and that's handled further
6831 down, so there is nothing for us to do here. */
6832
6833 if (execution_direction != EXEC_REVERSE
6834 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
6835 && in_solib_dynsym_resolve_code (ecs->event_thread->suspend.stop_pc))
6836 {
6837 CORE_ADDR pc_after_resolver =
6838 gdbarch_skip_solib_resolver (gdbarch,
6839 ecs->event_thread->suspend.stop_pc);
6840
6841 if (debug_infrun)
6842 fprintf_unfiltered (gdb_stdlog,
6843 "infrun: stepped into dynsym resolve code\n");
6844
6845 if (pc_after_resolver)
6846 {
6847 /* Set up a step-resume breakpoint at the address
6848 indicated by SKIP_SOLIB_RESOLVER. */
6849 symtab_and_line sr_sal;
6850 sr_sal.pc = pc_after_resolver;
6851 sr_sal.pspace = get_frame_program_space (frame);
6852
6853 insert_step_resume_breakpoint_at_sal (gdbarch,
6854 sr_sal, null_frame_id);
6855 }
6856
6857 keep_going (ecs);
6858 return;
6859 }
6860
6861 /* Step through an indirect branch thunk. */
6862 if (ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
6863 && gdbarch_in_indirect_branch_thunk (gdbarch,
6864 ecs->event_thread->suspend.stop_pc))
6865 {
6866 if (debug_infrun)
6867 fprintf_unfiltered (gdb_stdlog,
6868 "infrun: stepped into indirect branch thunk\n");
6869 keep_going (ecs);
6870 return;
6871 }
6872
6873 if (ecs->event_thread->control.step_range_end != 1
6874 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
6875 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
6876 && get_frame_type (frame) == SIGTRAMP_FRAME)
6877 {
6878 if (debug_infrun)
6879 fprintf_unfiltered (gdb_stdlog,
6880 "infrun: stepped into signal trampoline\n");
6881 /* The inferior, while doing a "step" or "next", has ended up in
6882 a signal trampoline (either by a signal being delivered or by
6883 the signal handler returning). Just single-step until the
6884 inferior leaves the trampoline (either by calling the handler
6885 or returning). */
6886 keep_going (ecs);
6887 return;
6888 }
6889
6890 /* If we're in the return path from a shared library trampoline,
6891 we want to proceed through the trampoline when stepping. */
6892 /* macro/2012-04-25: This needs to come before the subroutine
6893 call check below as on some targets return trampolines look
6894 like subroutine calls (MIPS16 return thunks). */
6895 if (gdbarch_in_solib_return_trampoline (gdbarch,
6896 ecs->event_thread->suspend.stop_pc,
6897 ecs->stop_func_name)
6898 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
6899 {
6900 /* Determine where this trampoline returns. */
6901 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
6902 CORE_ADDR real_stop_pc
6903 = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
6904
6905 if (debug_infrun)
6906 fprintf_unfiltered (gdb_stdlog,
6907 "infrun: stepped into solib return tramp\n");
6908
6909 /* Only proceed through if we know where it's going. */
6910 if (real_stop_pc)
6911 {
6912 /* And put the step-breakpoint there and go until there. */
6913 symtab_and_line sr_sal;
6914 sr_sal.pc = real_stop_pc;
6915 sr_sal.section = find_pc_overlay (sr_sal.pc);
6916 sr_sal.pspace = get_frame_program_space (frame);
6917
6918 /* Do not specify what the fp should be when we stop since
6919 on some machines the prologue is where the new fp value
6920 is established. */
6921 insert_step_resume_breakpoint_at_sal (gdbarch,
6922 sr_sal, null_frame_id);
6923
6924 /* Restart without fiddling with the step ranges or
6925 other state. */
6926 keep_going (ecs);
6927 return;
6928 }
6929 }
6930
6931 /* Check for subroutine calls. The check for the current frame
6932 equalling the step ID is not necessary - the check of the
6933 previous frame's ID is sufficient - but it is a common case and
6934 cheaper than checking the previous frame's ID.
6935
6936 NOTE: frame_id_eq will never report two invalid frame IDs as
6937 being equal, so to get into this block, both the current and
6938 previous frame must have valid frame IDs. */
6939 /* The outer_frame_id check is a heuristic to detect stepping
6940 through startup code. If we step over an instruction which
6941 sets the stack pointer from an invalid value to a valid value,
6942 we may detect that as a subroutine call from the mythical
6943 "outermost" function. This could be fixed by marking
6944 outermost frames as !stack_p,code_p,special_p. Then the
6945 initial outermost frame, before sp was valid, would
6946 have code_addr == &_start. See the comment in frame_id_eq
6947 for more. */
6948 if (!frame_id_eq (get_stack_frame_id (frame),
6949 ecs->event_thread->control.step_stack_frame_id)
6950 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
6951 ecs->event_thread->control.step_stack_frame_id)
6952 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
6953 outer_frame_id)
6954 || (ecs->event_thread->control.step_start_function
6955 != find_pc_function (ecs->event_thread->suspend.stop_pc)))))
6956 {
6957 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
6958 CORE_ADDR real_stop_pc;
6959
6960 if (debug_infrun)
6961 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
6962
6963 if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
6964 {
6965 /* I presume that step_over_calls is only 0 when we're
6966 supposed to be stepping at the assembly language level
6967 ("stepi"). Just stop. */
6968 /* And this works the same backward as frontward. MVS */
6969 end_stepping_range (ecs);
6970 return;
6971 }
6972
6973 /* Reverse stepping through solib trampolines. */
6974
6975 if (execution_direction == EXEC_REVERSE
6976 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
6977 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
6978 || (ecs->stop_func_start == 0
6979 && in_solib_dynsym_resolve_code (stop_pc))))
6980 {
6981 /* Any solib trampoline code can be handled in reverse
6982 by simply continuing to single-step. We have already
6983 executed the solib function (backwards), and a few
6984 steps will take us back through the trampoline to the
6985 caller. */
6986 keep_going (ecs);
6987 return;
6988 }
6989
6990 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
6991 {
6992 /* We're doing a "next".
6993
6994 Normal (forward) execution: set a breakpoint at the
6995 callee's return address (the address at which the caller
6996 will resume).
6997
6998 Reverse (backward) execution. set the step-resume
6999 breakpoint at the start of the function that we just
7000 stepped into (backwards), and continue to there. When we
7001 get there, we'll need to single-step back to the caller. */
7002
7003 if (execution_direction == EXEC_REVERSE)
7004 {
7005 /* If we're already at the start of the function, we've either
7006 just stepped backward into a single instruction function,
7007 or stepped back out of a signal handler to the first instruction
7008 of the function. Just keep going, which will single-step back
7009 to the caller. */
7010 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
7011 {
7012 /* Normal function call return (static or dynamic). */
7013 symtab_and_line sr_sal;
7014 sr_sal.pc = ecs->stop_func_start;
7015 sr_sal.pspace = get_frame_program_space (frame);
7016 insert_step_resume_breakpoint_at_sal (gdbarch,
7017 sr_sal, null_frame_id);
7018 }
7019 }
7020 else
7021 insert_step_resume_breakpoint_at_caller (frame);
7022
7023 keep_going (ecs);
7024 return;
7025 }
7026
7027 /* If we are in a function call trampoline (a stub between the
7028 calling routine and the real function), locate the real
7029 function. That's what tells us (a) whether we want to step
7030 into it at all, and (b) what prologue we want to run to the
7031 end of, if we do step into it. */
7032 real_stop_pc = skip_language_trampoline (frame, stop_pc);
7033 if (real_stop_pc == 0)
7034 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
7035 if (real_stop_pc != 0)
7036 ecs->stop_func_start = real_stop_pc;
7037
7038 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
7039 {
7040 symtab_and_line sr_sal;
7041 sr_sal.pc = ecs->stop_func_start;
7042 sr_sal.pspace = get_frame_program_space (frame);
7043
7044 insert_step_resume_breakpoint_at_sal (gdbarch,
7045 sr_sal, null_frame_id);
7046 keep_going (ecs);
7047 return;
7048 }
7049
7050 /* If we have line number information for the function we are
7051 thinking of stepping into and the function isn't on the skip
7052 list, step into it.
7053
7054 If there are several symtabs at that PC (e.g. with include
7055 files), just want to know whether *any* of them have line
7056 numbers. find_pc_line handles this. */
7057 {
7058 struct symtab_and_line tmp_sal;
7059
7060 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
7061 if (tmp_sal.line != 0
7062 && !function_name_is_marked_for_skip (ecs->stop_func_name,
7063 tmp_sal)
7064 && !inline_frame_is_marked_for_skip (true, ecs->event_thread))
7065 {
7066 if (execution_direction == EXEC_REVERSE)
7067 handle_step_into_function_backward (gdbarch, ecs);
7068 else
7069 handle_step_into_function (gdbarch, ecs);
7070 return;
7071 }
7072 }
7073
7074 /* If we have no line number and the step-stop-if-no-debug is
7075 set, we stop the step so that the user has a chance to switch
7076 in assembly mode. */
7077 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7078 && step_stop_if_no_debug)
7079 {
7080 end_stepping_range (ecs);
7081 return;
7082 }
7083
7084 if (execution_direction == EXEC_REVERSE)
7085 {
7086 /* If we're already at the start of the function, we've either just
7087 stepped backward into a single instruction function without line
7088 number info, or stepped back out of a signal handler to the first
7089 instruction of the function without line number info. Just keep
7090 going, which will single-step back to the caller. */
7091 if (ecs->stop_func_start != stop_pc)
7092 {
7093 /* Set a breakpoint at callee's start address.
7094 From there we can step once and be back in the caller. */
7095 symtab_and_line sr_sal;
7096 sr_sal.pc = ecs->stop_func_start;
7097 sr_sal.pspace = get_frame_program_space (frame);
7098 insert_step_resume_breakpoint_at_sal (gdbarch,
7099 sr_sal, null_frame_id);
7100 }
7101 }
7102 else
7103 /* Set a breakpoint at callee's return address (the address
7104 at which the caller will resume). */
7105 insert_step_resume_breakpoint_at_caller (frame);
7106
7107 keep_going (ecs);
7108 return;
7109 }
7110
7111 /* Reverse stepping through solib trampolines. */
7112
7113 if (execution_direction == EXEC_REVERSE
7114 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
7115 {
7116 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
7117
7118 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
7119 || (ecs->stop_func_start == 0
7120 && in_solib_dynsym_resolve_code (stop_pc)))
7121 {
7122 /* Any solib trampoline code can be handled in reverse
7123 by simply continuing to single-step. We have already
7124 executed the solib function (backwards), and a few
7125 steps will take us back through the trampoline to the
7126 caller. */
7127 keep_going (ecs);
7128 return;
7129 }
7130 else if (in_solib_dynsym_resolve_code (stop_pc))
7131 {
7132 /* Stepped backward into the solib dynsym resolver.
7133 Set a breakpoint at its start and continue, then
7134 one more step will take us out. */
7135 symtab_and_line sr_sal;
7136 sr_sal.pc = ecs->stop_func_start;
7137 sr_sal.pspace = get_frame_program_space (frame);
7138 insert_step_resume_breakpoint_at_sal (gdbarch,
7139 sr_sal, null_frame_id);
7140 keep_going (ecs);
7141 return;
7142 }
7143 }
7144
7145 /* This always returns the sal for the inner-most frame when we are in a
7146 stack of inlined frames, even if GDB actually believes that it is in a
7147 more outer frame. This is checked for below by calls to
7148 inline_skipped_frames. */
7149 stop_pc_sal = find_pc_line (ecs->event_thread->suspend.stop_pc, 0);
7150
7151 /* NOTE: tausq/2004-05-24: This if block used to be done before all
7152 the trampoline processing logic, however, there are some trampolines
7153 that have no names, so we should do trampoline handling first. */
7154 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7155 && ecs->stop_func_name == NULL
7156 && stop_pc_sal.line == 0)
7157 {
7158 if (debug_infrun)
7159 fprintf_unfiltered (gdb_stdlog,
7160 "infrun: stepped into undebuggable function\n");
7161
7162 /* The inferior just stepped into, or returned to, an
7163 undebuggable function (where there is no debugging information
7164 and no line number corresponding to the address where the
7165 inferior stopped). Since we want to skip this kind of code,
7166 we keep going until the inferior returns from this
7167 function - unless the user has asked us not to (via
7168 set step-mode) or we no longer know how to get back
7169 to the call site. */
7170 if (step_stop_if_no_debug
7171 || !frame_id_p (frame_unwind_caller_id (frame)))
7172 {
7173 /* If we have no line number and the step-stop-if-no-debug
7174 is set, we stop the step so that the user has a chance to
7175 switch in assembly mode. */
7176 end_stepping_range (ecs);
7177 return;
7178 }
7179 else
7180 {
7181 /* Set a breakpoint at callee's return address (the address
7182 at which the caller will resume). */
7183 insert_step_resume_breakpoint_at_caller (frame);
7184 keep_going (ecs);
7185 return;
7186 }
7187 }
7188
7189 if (ecs->event_thread->control.step_range_end == 1)
7190 {
7191 /* It is stepi or nexti. We always want to stop stepping after
7192 one instruction. */
7193 if (debug_infrun)
7194 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
7195 end_stepping_range (ecs);
7196 return;
7197 }
7198
7199 if (stop_pc_sal.line == 0)
7200 {
7201 /* We have no line number information. That means to stop
7202 stepping (does this always happen right after one instruction,
7203 when we do "s" in a function with no line numbers,
7204 or can this happen as a result of a return or longjmp?). */
7205 if (debug_infrun)
7206 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
7207 end_stepping_range (ecs);
7208 return;
7209 }
7210
7211 /* Look for "calls" to inlined functions, part one. If the inline
7212 frame machinery detected some skipped call sites, we have entered
7213 a new inline function. */
7214
7215 if (frame_id_eq (get_frame_id (get_current_frame ()),
7216 ecs->event_thread->control.step_frame_id)
7217 && inline_skipped_frames (ecs->event_thread))
7218 {
7219 if (debug_infrun)
7220 fprintf_unfiltered (gdb_stdlog,
7221 "infrun: stepped into inlined function\n");
7222
7223 symtab_and_line call_sal = find_frame_sal (get_current_frame ());
7224
7225 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
7226 {
7227 /* For "step", we're going to stop. But if the call site
7228 for this inlined function is on the same source line as
7229 we were previously stepping, go down into the function
7230 first. Otherwise stop at the call site. */
7231
7232 if (call_sal.line == ecs->event_thread->current_line
7233 && call_sal.symtab == ecs->event_thread->current_symtab)
7234 {
7235 step_into_inline_frame (ecs->event_thread);
7236 if (inline_frame_is_marked_for_skip (false, ecs->event_thread))
7237 {
7238 keep_going (ecs);
7239 return;
7240 }
7241 }
7242
7243 end_stepping_range (ecs);
7244 return;
7245 }
7246 else
7247 {
7248 /* For "next", we should stop at the call site if it is on a
7249 different source line. Otherwise continue through the
7250 inlined function. */
7251 if (call_sal.line == ecs->event_thread->current_line
7252 && call_sal.symtab == ecs->event_thread->current_symtab)
7253 keep_going (ecs);
7254 else
7255 end_stepping_range (ecs);
7256 return;
7257 }
7258 }
7259
7260 /* Look for "calls" to inlined functions, part two. If we are still
7261 in the same real function we were stepping through, but we have
7262 to go further up to find the exact frame ID, we are stepping
7263 through a more inlined call beyond its call site. */
7264
7265 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
7266 && !frame_id_eq (get_frame_id (get_current_frame ()),
7267 ecs->event_thread->control.step_frame_id)
7268 && stepped_in_from (get_current_frame (),
7269 ecs->event_thread->control.step_frame_id))
7270 {
7271 if (debug_infrun)
7272 fprintf_unfiltered (gdb_stdlog,
7273 "infrun: stepping through inlined function\n");
7274
7275 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL
7276 || inline_frame_is_marked_for_skip (false, ecs->event_thread))
7277 keep_going (ecs);
7278 else
7279 end_stepping_range (ecs);
7280 return;
7281 }
7282
7283 bool refresh_step_info = true;
7284 if ((ecs->event_thread->suspend.stop_pc == stop_pc_sal.pc)
7285 && (ecs->event_thread->current_line != stop_pc_sal.line
7286 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
7287 {
7288 if (stop_pc_sal.is_stmt)
7289 {
7290 /* We are at the start of a different line. So stop. Note that
7291 we don't stop if we step into the middle of a different line.
7292 That is said to make things like for (;;) statements work
7293 better. */
7294 if (debug_infrun)
7295 fprintf_unfiltered (gdb_stdlog,
7296 "infrun: stepped to a different line\n");
7297 end_stepping_range (ecs);
7298 return;
7299 }
7300 else if (frame_id_eq (get_frame_id (get_current_frame ()),
7301 ecs->event_thread->control.step_frame_id))
7302 {
7303 /* We are at the start of a different line, however, this line is
7304 not marked as a statement, and we have not changed frame. We
7305 ignore this line table entry, and continue stepping forward,
7306 looking for a better place to stop. */
7307 refresh_step_info = false;
7308 if (debug_infrun)
7309 fprintf_unfiltered (gdb_stdlog,
7310 "infrun: stepped to a different line, but "
7311 "it's not the start of a statement\n");
7312 }
7313 }
7314
7315 /* We aren't done stepping.
7316
7317 Optimize by setting the stepping range to the line.
7318 (We might not be in the original line, but if we entered a
7319 new line in mid-statement, we continue stepping. This makes
7320 things like for(;;) statements work better.)
7321
7322 If we entered a SAL that indicates a non-statement line table entry,
7323 then we update the stepping range, but we don't update the step info,
7324 which includes things like the line number we are stepping away from.
7325 This means we will stop when we find a line table entry that is marked
7326 as is-statement, even if it matches the non-statement one we just
7327 stepped into. */
7328
7329 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
7330 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
7331 ecs->event_thread->control.may_range_step = 1;
7332 if (refresh_step_info)
7333 set_step_info (ecs->event_thread, frame, stop_pc_sal);
7334
7335 if (debug_infrun)
7336 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
7337 keep_going (ecs);
7338 }
7339
7340 /* In all-stop mode, if we're currently stepping but have stopped in
7341 some other thread, we may need to switch back to the stepped
7342 thread. Returns true if we set the inferior running, false if we
7343 left it stopped (and the event needs further processing). */
7344
7345 static int
7346 switch_back_to_stepped_thread (struct execution_control_state *ecs)
7347 {
7348 if (!target_is_non_stop_p ())
7349 {
7350 struct thread_info *stepping_thread;
7351
7352 /* If any thread is blocked on some internal breakpoint, and we
7353 simply need to step over that breakpoint to get it going
7354 again, do that first. */
7355
7356 /* However, if we see an event for the stepping thread, then we
7357 know all other threads have been moved past their breakpoints
7358 already. Let the caller check whether the step is finished,
7359 etc., before deciding to move it past a breakpoint. */
7360 if (ecs->event_thread->control.step_range_end != 0)
7361 return 0;
7362
7363 /* Check if the current thread is blocked on an incomplete
7364 step-over, interrupted by a random signal. */
7365 if (ecs->event_thread->control.trap_expected
7366 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
7367 {
7368 if (debug_infrun)
7369 {
7370 fprintf_unfiltered (gdb_stdlog,
7371 "infrun: need to finish step-over of [%s]\n",
7372 target_pid_to_str (ecs->event_thread->ptid).c_str ());
7373 }
7374 keep_going (ecs);
7375 return 1;
7376 }
7377
7378 /* Check if the current thread is blocked by a single-step
7379 breakpoint of another thread. */
7380 if (ecs->hit_singlestep_breakpoint)
7381 {
7382 if (debug_infrun)
7383 {
7384 fprintf_unfiltered (gdb_stdlog,
7385 "infrun: need to step [%s] over single-step "
7386 "breakpoint\n",
7387 target_pid_to_str (ecs->ptid).c_str ());
7388 }
7389 keep_going (ecs);
7390 return 1;
7391 }
7392
7393 /* If this thread needs yet another step-over (e.g., stepping
7394 through a delay slot), do it first before moving on to
7395 another thread. */
7396 if (thread_still_needs_step_over (ecs->event_thread))
7397 {
7398 if (debug_infrun)
7399 {
7400 fprintf_unfiltered (gdb_stdlog,
7401 "infrun: thread [%s] still needs step-over\n",
7402 target_pid_to_str (ecs->event_thread->ptid).c_str ());
7403 }
7404 keep_going (ecs);
7405 return 1;
7406 }
7407
7408 /* If scheduler locking applies even if not stepping, there's no
7409 need to walk over threads. Above we've checked whether the
7410 current thread is stepping. If some other thread not the
7411 event thread is stepping, then it must be that scheduler
7412 locking is not in effect. */
7413 if (schedlock_applies (ecs->event_thread))
7414 return 0;
7415
7416 /* Otherwise, we no longer expect a trap in the current thread.
7417 Clear the trap_expected flag before switching back -- this is
7418 what keep_going does as well, if we call it. */
7419 ecs->event_thread->control.trap_expected = 0;
7420
7421 /* Likewise, clear the signal if it should not be passed. */
7422 if (!signal_program[ecs->event_thread->suspend.stop_signal])
7423 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
7424
7425 /* Do all pending step-overs before actually proceeding with
7426 step/next/etc. */
7427 if (start_step_over ())
7428 {
7429 prepare_to_wait (ecs);
7430 return 1;
7431 }
7432
7433 /* Look for the stepping/nexting thread. */
7434 stepping_thread = NULL;
7435
7436 for (thread_info *tp : all_non_exited_threads ())
7437 {
7438 switch_to_thread_no_regs (tp);
7439
7440 /* Ignore threads of processes the caller is not
7441 resuming. */
7442 if (!sched_multi
7443 && (tp->inf->process_target () != ecs->target
7444 || tp->inf->pid != ecs->ptid.pid ()))
7445 continue;
7446
7447 /* When stepping over a breakpoint, we lock all threads
7448 except the one that needs to move past the breakpoint.
7449 If a non-event thread has this set, the "incomplete
7450 step-over" check above should have caught it earlier. */
7451 if (tp->control.trap_expected)
7452 {
7453 internal_error (__FILE__, __LINE__,
7454 "[%s] has inconsistent state: "
7455 "trap_expected=%d\n",
7456 target_pid_to_str (tp->ptid).c_str (),
7457 tp->control.trap_expected);
7458 }
7459
7460 /* Did we find the stepping thread? */
7461 if (tp->control.step_range_end)
7462 {
7463 /* Yep. There should only one though. */
7464 gdb_assert (stepping_thread == NULL);
7465
7466 /* The event thread is handled at the top, before we
7467 enter this loop. */
7468 gdb_assert (tp != ecs->event_thread);
7469
7470 /* If some thread other than the event thread is
7471 stepping, then scheduler locking can't be in effect,
7472 otherwise we wouldn't have resumed the current event
7473 thread in the first place. */
7474 gdb_assert (!schedlock_applies (tp));
7475
7476 stepping_thread = tp;
7477 }
7478 }
7479
7480 if (stepping_thread != NULL)
7481 {
7482 if (debug_infrun)
7483 fprintf_unfiltered (gdb_stdlog,
7484 "infrun: switching back to stepped thread\n");
7485
7486 if (keep_going_stepped_thread (stepping_thread))
7487 {
7488 prepare_to_wait (ecs);
7489 return 1;
7490 }
7491 }
7492
7493 switch_to_thread (ecs->event_thread);
7494 }
7495
7496 return 0;
7497 }
7498
7499 /* Set a previously stepped thread back to stepping. Returns true on
7500 success, false if the resume is not possible (e.g., the thread
7501 vanished). */
7502
static int
keep_going_stepped_thread (struct thread_info *tp)
{
  struct frame_info *frame;
  struct execution_control_state ecss;
  struct execution_control_state *ecs = &ecss;

  /* If the stepping thread exited, then don't try to switch back and
     resume it, which could fail in several different ways depending
     on the target.  Instead, just keep going.

     We can find a stepping dead thread in the thread list in two
     cases:

     - The target supports thread exit events, and when the target
       tries to delete the thread from the thread list, inferior_ptid
       pointed at the exiting thread.  In such case, calling
       delete_thread does not really remove the thread from the list;
       instead, the thread is left listed, with 'exited' state.

     - The target's debug interface does not support thread exit
       events, and so we have no idea whatsoever if the previously
       stepping thread is still alive.  For that reason, we need to
       synchronously query the target now.  */

  if (tp->state == THREAD_EXITED || !target_thread_alive (tp->ptid))
    {
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: not resuming previously "
			    "stepped thread, it has vanished\n");

      delete_thread (tp);
      return 0;
    }

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: resuming previously stepped thread\n");

  /* Rebuild the execution-control state around TP and make it the
     current thread before reading its registers below.  */
  reset_ecs (ecs, tp);
  switch_to_thread (tp);

  tp->suspend.stop_pc = regcache_read_pc (get_thread_regcache (tp));
  frame = get_current_frame ();

  /* If the PC of the thread we were trying to single-step has
     changed, then that thread has trapped or been signaled, but the
     event has not been reported to GDB yet.  Re-poll the target
     looking for this particular thread's event (i.e. temporarily
     enable schedlock) by:

     - setting a break at the current PC
     - resuming that particular thread, only (by setting trap
       expected)

     This prevents us continuously moving the single-step breakpoint
     forward, one instruction at a time, overstepping.  */

  if (tp->suspend.stop_pc != tp->prev_pc)
    {
      ptid_t resume_ptid;

      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: expected thread advanced also (%s -> %s)\n",
			    paddress (target_gdbarch (), tp->prev_pc),
			    paddress (target_gdbarch (), tp->suspend.stop_pc));

      /* Clear the info of the previous step-over, as it's no longer
	 valid (if the thread was trying to step over a breakpoint, it
	 has already succeeded).  It's what keep_going would do too,
	 if we called it.  Do this before trying to insert the sss
	 breakpoint, otherwise if we were previously trying to step
	 over this exact address in another thread, the breakpoint is
	 skipped.  */
      clear_step_over_info ();
      tp->control.trap_expected = 0;

      insert_single_step_breakpoint (get_frame_arch (frame),
				     get_frame_address_space (frame),
				     tp->suspend.stop_pc);

      /* Resume only this thread, at its current PC, so its pending
	 event is (re-)collected before any other thread moves.  */
      tp->resumed = true;
      resume_ptid = internal_resume_ptid (tp->control.stepping_command);
      do_target_resume (resume_ptid, 0, GDB_SIGNAL_0);
    }
  else
    {
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: expected thread still hasn't advanced\n");

      /* The thread hasn't moved; resume it through the normal
	 keep-going machinery, which re-applies any pending step-over
	 state.  */
      keep_going_pass_signal (ecs);
    }
  return 1;
}
7600
7601 /* Is thread TP in the middle of (software or hardware)
7602 single-stepping? (Note the result of this function must never be
7603 passed directly as target_resume's STEP parameter.) */
7604
7605 static int
7606 currently_stepping (struct thread_info *tp)
7607 {
7608 return ((tp->control.step_range_end
7609 && tp->control.step_resume_breakpoint == NULL)
7610 || tp->control.trap_expected
7611 || tp->stepped_breakpoint
7612 || bpstat_should_step ());
7613 }
7614
7615 /* Inferior has stepped into a subroutine call with source code that
7616 we should not step over. Do step to the first line of code in
7617 it. */
7618
static void
handle_step_into_function (struct gdbarch *gdbarch,
			   struct execution_control_state *ecs)
{
  fill_in_stop_func (gdbarch, ecs);

  /* Skip the prologue, unless the stop is in assembly source, where
     prologue analysis does not apply.  */
  compunit_symtab *cust
    = find_pc_compunit_symtab (ecs->event_thread->suspend.stop_pc);
  if (cust != NULL && compunit_language (cust) != language_asm)
    ecs->stop_func_start
      = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);

  symtab_and_line stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
  /* Use the step_resume_break to step until the end of the prologue,
     even if that involves jumps (as it seems to on the vax under
     4.2).  */
  /* If the prologue ends in the middle of a source line, continue to
     the end of that source line (if it is still within the function).
     Otherwise, just go to end of prologue.  */
  if (stop_func_sal.end
      && stop_func_sal.pc != ecs->stop_func_start
      && stop_func_sal.end < ecs->stop_func_end)
    ecs->stop_func_start = stop_func_sal.end;

  /* Architectures which require breakpoint adjustment might not be able
     to place a breakpoint at the computed address.  If so, the test
     ``ecs->stop_func_start == stop_pc'' will never succeed.  Adjust
     ecs->stop_func_start to an address at which a breakpoint may be
     legitimately placed.

     Note:  kevinb/2004-01-19:  On FR-V, if this adjustment is not
     made, GDB will enter an infinite loop when stepping through
     optimized code consisting of VLIW instructions which contain
     subinstructions corresponding to different source lines.  On
     FR-V, it's not permitted to place a breakpoint on any but the
     first subinstruction of a VLIW instruction.  When a breakpoint is
     set, GDB will adjust the breakpoint address to the beginning of
     the VLIW instruction.  Thus, we need to make the corresponding
     adjustment here when computing the stop address.  */

  if (gdbarch_adjust_breakpoint_address_p (gdbarch))
    {
      ecs->stop_func_start
	= gdbarch_adjust_breakpoint_address (gdbarch,
					     ecs->stop_func_start);
    }

  if (ecs->stop_func_start == ecs->event_thread->suspend.stop_pc)
    {
      /* We are already there: stop now.  */
      end_stepping_range (ecs);
      return;
    }
  else
    {
      /* Put the step-breakpoint there and go until there.  */
      symtab_and_line sr_sal;
      sr_sal.pc = ecs->stop_func_start;
      sr_sal.section = find_pc_overlay (ecs->stop_func_start);
      sr_sal.pspace = get_frame_program_space (get_current_frame ());

      /* Do not specify what the fp should be when we stop since on
	 some machines the prologue is where the new fp value is
	 established.  */
      insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);

      /* And make sure stepping stops right away then.  Collapsing the
	 step range to a single point makes the very next stop end the
	 step.  */
      ecs->event_thread->control.step_range_end
	= ecs->event_thread->control.step_range_start;
    }
  keep_going (ecs);
}
7691
7692 /* Inferior has stepped backward into a subroutine call with source
7693 code that we should not step over. Do step to the beginning of the
7694 last line of code in it. */
7695
7696 static void
7697 handle_step_into_function_backward (struct gdbarch *gdbarch,
7698 struct execution_control_state *ecs)
7699 {
7700 struct compunit_symtab *cust;
7701 struct symtab_and_line stop_func_sal;
7702
7703 fill_in_stop_func (gdbarch, ecs);
7704
7705 cust = find_pc_compunit_symtab (ecs->event_thread->suspend.stop_pc);
7706 if (cust != NULL && compunit_language (cust) != language_asm)
7707 ecs->stop_func_start
7708 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
7709
7710 stop_func_sal = find_pc_line (ecs->event_thread->suspend.stop_pc, 0);
7711
7712 /* OK, we're just going to keep stepping here. */
7713 if (stop_func_sal.pc == ecs->event_thread->suspend.stop_pc)
7714 {
7715 /* We're there already. Just stop stepping now. */
7716 end_stepping_range (ecs);
7717 }
7718 else
7719 {
7720 /* Else just reset the step range and keep going.
7721 No step-resume breakpoint, they don't work for
7722 epilogues, which can have multiple entry paths. */
7723 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
7724 ecs->event_thread->control.step_range_end = stop_func_sal.end;
7725 keep_going (ecs);
7726 }
7727 return;
7728 }
7729
/* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
   This is used both to skip over functions and to skip over code.  */
7732
7733 static void
7734 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
7735 struct symtab_and_line sr_sal,
7736 struct frame_id sr_id,
7737 enum bptype sr_type)
7738 {
7739 /* There should never be more than one step-resume or longjmp-resume
7740 breakpoint per thread, so we should never be setting a new
7741 step_resume_breakpoint when one is already active. */
7742 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
7743 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
7744
7745 if (debug_infrun)
7746 fprintf_unfiltered (gdb_stdlog,
7747 "infrun: inserting step-resume breakpoint at %s\n",
7748 paddress (gdbarch, sr_sal.pc));
7749
7750 inferior_thread ()->control.step_resume_breakpoint
7751 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type).release ();
7752 }
7753
7754 void
7755 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
7756 struct symtab_and_line sr_sal,
7757 struct frame_id sr_id)
7758 {
7759 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
7760 sr_sal, sr_id,
7761 bp_step_resume);
7762 }
7763
7764 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
7765 This is used to skip a potential signal handler.
7766
7767 This is called with the interrupted function's frame. The signal
7768 handler, when it returns, will resume the interrupted function at
7769 RETURN_FRAME.pc. */
7770
7771 static void
7772 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
7773 {
7774 gdb_assert (return_frame != NULL);
7775
7776 struct gdbarch *gdbarch = get_frame_arch (return_frame);
7777
7778 symtab_and_line sr_sal;
7779 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
7780 sr_sal.section = find_pc_overlay (sr_sal.pc);
7781 sr_sal.pspace = get_frame_program_space (return_frame);
7782
7783 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
7784 get_stack_frame_id (return_frame),
7785 bp_hp_step_resume);
7786 }
7787
7788 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
7789 is used to skip a function after stepping into it (for "next" or if
7790 the called function has no debugging information).
7791
7792 The current function has almost always been reached by single
7793 stepping a call or return instruction. NEXT_FRAME belongs to the
7794 current function, and the breakpoint will be set at the caller's
7795 resume address.
7796
7797 This is a separate function rather than reusing
7798 insert_hp_step_resume_breakpoint_at_frame in order to avoid
7799 get_prev_frame, which may stop prematurely (see the implementation
7800 of frame_unwind_caller_id for an example). */
7801
7802 static void
7803 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
7804 {
7805 /* We shouldn't have gotten here if we don't know where the call site
7806 is. */
7807 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
7808
7809 struct gdbarch *gdbarch = frame_unwind_caller_arch (next_frame);
7810
7811 symtab_and_line sr_sal;
7812 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
7813 frame_unwind_caller_pc (next_frame));
7814 sr_sal.section = find_pc_overlay (sr_sal.pc);
7815 sr_sal.pspace = frame_unwind_program_space (next_frame);
7816
7817 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
7818 frame_unwind_caller_id (next_frame));
7819 }
7820
7821 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
7822 new breakpoint at the target of a jmp_buf. The handling of
7823 longjmp-resume uses the same mechanisms used for handling
7824 "step-resume" breakpoints. */
7825
7826 static void
7827 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
7828 {
7829 /* There should never be more than one longjmp-resume breakpoint per
7830 thread, so we should never be setting a new
7831 longjmp_resume_breakpoint when one is already active. */
7832 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
7833
7834 if (debug_infrun)
7835 fprintf_unfiltered (gdb_stdlog,
7836 "infrun: inserting longjmp-resume breakpoint at %s\n",
7837 paddress (gdbarch, pc));
7838
7839 inferior_thread ()->control.exception_resume_breakpoint =
7840 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume).release ();
7841 }
7842
7843 /* Insert an exception resume breakpoint. TP is the thread throwing
7844 the exception. The block B is the block of the unwinder debug hook
7845 function. FRAME is the frame corresponding to the call to this
7846 function. SYM is the symbol of the function argument holding the
7847 target PC of the exception. */
7848
7849 static void
7850 insert_exception_resume_breakpoint (struct thread_info *tp,
7851 const struct block *b,
7852 struct frame_info *frame,
7853 struct symbol *sym)
7854 {
7855 try
7856 {
7857 struct block_symbol vsym;
7858 struct value *value;
7859 CORE_ADDR handler;
7860 struct breakpoint *bp;
7861
7862 vsym = lookup_symbol_search_name (sym->search_name (),
7863 b, VAR_DOMAIN);
7864 value = read_var_value (vsym.symbol, vsym.block, frame);
7865 /* If the value was optimized out, revert to the old behavior. */
7866 if (! value_optimized_out (value))
7867 {
7868 handler = value_as_address (value);
7869
7870 if (debug_infrun)
7871 fprintf_unfiltered (gdb_stdlog,
7872 "infrun: exception resume at %lx\n",
7873 (unsigned long) handler);
7874
7875 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
7876 handler,
7877 bp_exception_resume).release ();
7878
7879 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
7880 frame = NULL;
7881
7882 bp->thread = tp->global_num;
7883 inferior_thread ()->control.exception_resume_breakpoint = bp;
7884 }
7885 }
7886 catch (const gdb_exception_error &e)
7887 {
7888 /* We want to ignore errors here. */
7889 }
7890 }
7891
7892 /* A helper for check_exception_resume that sets an
7893 exception-breakpoint based on a SystemTap probe. */
7894
7895 static void
7896 insert_exception_resume_from_probe (struct thread_info *tp,
7897 const struct bound_probe *probe,
7898 struct frame_info *frame)
7899 {
7900 struct value *arg_value;
7901 CORE_ADDR handler;
7902 struct breakpoint *bp;
7903
7904 arg_value = probe_safe_evaluate_at_pc (frame, 1);
7905 if (!arg_value)
7906 return;
7907
7908 handler = value_as_address (arg_value);
7909
7910 if (debug_infrun)
7911 fprintf_unfiltered (gdb_stdlog,
7912 "infrun: exception resume at %s\n",
7913 paddress (probe->objfile->arch (),
7914 handler));
7915
7916 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
7917 handler, bp_exception_resume).release ();
7918 bp->thread = tp->global_num;
7919 inferior_thread ()->control.exception_resume_breakpoint = bp;
7920 }
7921
7922 /* This is called when an exception has been intercepted. Check to
7923 see whether the exception's destination is of interest, and if so,
7924 set an exception resume breakpoint there. */
7925
static void
check_exception_resume (struct execution_control_state *ecs,
			struct frame_info *frame)
{
  struct bound_probe probe;
  struct symbol *func;

  /* First see if this exception unwinding breakpoint was set via a
     SystemTap probe point.  If so, the probe has two arguments: the
     CFA and the HANDLER.  We ignore the CFA, extract the handler, and
     set a breakpoint there.  */
  probe = find_probe_by_pc (get_frame_pc (frame));
  if (probe.prob)
    {
      insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
      return;
    }

  /* No probe; fall back to inspecting the hook function's arguments.
     Without symbols for the frame's function there is nothing to
     do.  */
  func = get_frame_function (frame);
  if (!func)
    return;

  try
    {
      const struct block *b;
      struct block_iterator iter;
      struct symbol *sym;
      int argno = 0;

      /* The exception breakpoint is a thread-specific breakpoint on
	 the unwinder's debug hook, declared as:

	 void _Unwind_DebugHook (void *cfa, void *handler);

	 The CFA argument indicates the frame to which control is
	 about to be transferred.  HANDLER is the destination PC.

	 We ignore the CFA and set a temporary breakpoint at HANDLER.
	 This is not extremely efficient but it avoids issues in gdb
	 with computing the DWARF CFA, and it also works even in weird
	 cases such as throwing an exception from inside a signal
	 handler.  */

      b = SYMBOL_BLOCK_VALUE (func);
      ALL_BLOCK_SYMBOLS (b, iter, sym)
	{
	  if (!SYMBOL_IS_ARGUMENT (sym))
	    continue;

	  /* Skip the first argument (the CFA); the second argument is
	     the handler we want.  */
	  if (argno == 0)
	    ++argno;
	  else
	    {
	      insert_exception_resume_breakpoint (ecs->event_thread,
						  b, frame, sym);
	      break;
	    }
	}
    }
  catch (const gdb_exception_error &e)
    {
      /* Symbol lookup or value reads may throw; deliberately ignore
	 errors and simply skip setting the resume breakpoint.  */
    }
}
7989
7990 static void
7991 stop_waiting (struct execution_control_state *ecs)
7992 {
7993 if (debug_infrun)
7994 fprintf_unfiltered (gdb_stdlog, "infrun: stop_waiting\n");
7995
7996 /* Let callers know we don't want to wait for the inferior anymore. */
7997 ecs->wait_some_more = 0;
7998
7999 /* If all-stop, but there exists a non-stop target, stop all
8000 threads now that we're presenting the stop to the user. */
8001 if (!non_stop && exists_non_stop_target ())
8002 stop_all_threads ();
8003 }
8004
8005 /* Like keep_going, but passes the signal to the inferior, even if the
8006 signal is set to nopass. */
8007
static void
keep_going_pass_signal (struct execution_control_state *ecs)
{
  /* The event thread must be the current thread and not yet
     resumed.  */
  gdb_assert (ecs->event_thread->ptid == inferior_ptid);
  gdb_assert (!ecs->event_thread->resumed);

  /* Save the pc before execution, to compare with pc after stop.  */
  ecs->event_thread->prev_pc
    = regcache_read_pc_protected (get_thread_regcache (ecs->event_thread));

  if (ecs->event_thread->control.trap_expected)
    {
      struct thread_info *tp = ecs->event_thread;

      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: %s has trap_expected set, "
			    "resuming to collect trap\n",
			    target_pid_to_str (tp->ptid).c_str ());

      /* We haven't yet gotten our trap, and either: intercepted a
	 non-signal event (e.g., a fork); or took a signal which we
	 are supposed to pass through to the inferior.  Simply
	 continue.  */
      resume (ecs->event_thread->suspend.stop_signal);
    }
  else if (step_over_info_valid_p ())
    {
      /* Another thread is stepping over a breakpoint in-line.  If
	 this thread needs a step-over too, queue the request.  In
	 either case, this resume must be deferred for later.  */
      struct thread_info *tp = ecs->event_thread;

      if (ecs->hit_singlestep_breakpoint
	  || thread_still_needs_step_over (tp))
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: step-over already in progress: "
				"step-over for %s deferred\n",
				target_pid_to_str (tp->ptid).c_str ());
	  thread_step_over_chain_enqueue (tp);
	}
      else
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: step-over in progress: "
				"resume of %s deferred\n",
				target_pid_to_str (tp->ptid).c_str ());
	}
    }
  else
    {
      struct regcache *regcache = get_current_regcache ();
      int remove_bp;
      int remove_wps;
      step_over_what step_what;

      /* Either the trap was not expected, but we are continuing
	 anyway (if we got a signal, the user asked it be passed to
	 the child)
	 -- or --
	 We got our expected trap, but decided we should resume from
	 it.

	 We're going to run this baby now!

	 Note that insert_breakpoints won't try to re-insert
	 already inserted breakpoints.  Therefore, we don't
	 care if breakpoints were already inserted, or not.  */

      /* If we need to step over a breakpoint, and we're not using
	 displaced stepping to do so, insert all breakpoints
	 (watchpoints, etc.) but the one we're stepping over, step one
	 instruction, and then re-insert the breakpoint when that step
	 is finished.  */

      step_what = thread_still_needs_step_over (ecs->event_thread);

      remove_bp = (ecs->hit_singlestep_breakpoint
		   || (step_what & STEP_OVER_BREAKPOINT));
      remove_wps = (step_what & STEP_OVER_WATCHPOINT);

      /* We can't use displaced stepping if we need to step past a
	 watchpoint.  The instruction copied to the scratch pad would
	 still trigger the watchpoint.  */
      if (remove_bp
	  && (remove_wps || !use_displaced_stepping (ecs->event_thread)))
	{
	  set_step_over_info (regcache->aspace (),
			      regcache_read_pc (regcache), remove_wps,
			      ecs->event_thread->global_num);
	}
      else if (remove_wps)
	set_step_over_info (NULL, 0, remove_wps, -1);

      /* If we now need to do an in-line step-over, we need to stop
	 all other threads.  Note this must be done before
	 insert_breakpoints below, because that removes the breakpoint
	 we're about to step over, otherwise other threads could miss
	 it.  */
      if (step_over_info_valid_p () && target_is_non_stop_p ())
	stop_all_threads ();

      /* Stop stepping if inserting breakpoints fails.  */
      try
	{
	  insert_breakpoints ();
	}
      catch (const gdb_exception_error &e)
	{
	  exception_print (gdb_stderr, e);
	  stop_waiting (ecs);
	  clear_step_over_info ();
	  return;
	}

      /* Expect a trap back only if we actually removed something to
	 step over.  */
      ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);

      resume (ecs->event_thread->suspend.stop_signal);
    }

  prepare_to_wait (ecs);
}
8133
8134 /* Called when we should continue running the inferior, because the
8135 current event doesn't cause a user visible stop. This does the
8136 resuming part; waiting for the next event is done elsewhere. */
8137
8138 static void
8139 keep_going (struct execution_control_state *ecs)
8140 {
8141 if (ecs->event_thread->control.trap_expected
8142 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
8143 ecs->event_thread->control.trap_expected = 0;
8144
8145 if (!signal_program[ecs->event_thread->suspend.stop_signal])
8146 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
8147 keep_going_pass_signal (ecs);
8148 }
8149
8150 /* This function normally comes after a resume, before
8151 handle_inferior_event exits. It takes care of any last bits of
8152 housekeeping, and sets the all-important wait_some_more flag. */
8153
8154 static void
8155 prepare_to_wait (struct execution_control_state *ecs)
8156 {
8157 if (debug_infrun)
8158 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
8159
8160 ecs->wait_some_more = 1;
8161
8162 /* If the target can't async, emulate it by marking the infrun event
8163 handler such that as soon as we get back to the event-loop, we
8164 immediately end up in fetch_inferior_event again calling
8165 target_wait. */
8166 if (!target_can_async_p ())
8167 mark_infrun_async_event_handler ();
8168 }
8169
8170 /* We are done with the step range of a step/next/si/ni command.
8171 Called once for each n of a "step n" operation. */
8172
static void
end_stepping_range (struct execution_control_state *ecs)
{
  /* Record that this stop is the completion of a step command, so
     that normal_stop and the stop observers present it as "end of
     stepping range".  Set the flag before announcing the stop.  */
  ecs->event_thread->control.stop_step = 1;
  stop_waiting (ecs);
}
8179
8180 /* Several print_*_reason functions to print why the inferior has stopped.
8181 We always print something when the inferior exits, or receives a signal.
8182 The rest of the cases are dealt with later on in normal_stop and
8183 print_it_typical. Ideally there should be a call to one of these
   print_*_reason functions from handle_inferior_event each time
8185 stop_waiting is called.
8186
8187 Note that we don't call these directly, instead we delegate that to
8188 the interpreters, through observers. Interpreters then call these
8189 with whatever uiout is right. */
8190
8191 void
8192 print_end_stepping_range_reason (struct ui_out *uiout)
8193 {
8194 /* For CLI-like interpreters, print nothing. */
8195
8196 if (uiout->is_mi_like_p ())
8197 {
8198 uiout->field_string ("reason",
8199 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
8200 }
8201 }
8202
/* Report that the program was terminated by signal SIGGNAL, in both
   MI ("reason" field) and CLI (annotated text) forms.  */

void
print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
{
  annotate_signalled ();
  if (uiout->is_mi_like_p ())
    uiout->field_string
      ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
  uiout->text ("\nProgram terminated with signal ");
  annotate_signal_name ();
  uiout->field_string ("signal-name",
		       gdb_signal_to_name (siggnal));
  annotate_signal_name_end ();
  uiout->text (", ");
  annotate_signal_string ();
  uiout->field_string ("signal-meaning",
		       gdb_signal_to_string (siggnal));
  annotate_signal_string_end ();
  uiout->text (".\n");
  uiout->text ("The program no longer exists.\n");
}
8223
/* Report that the current inferior exited with EXITSTATUS, in both MI
   and CLI forms.  */

void
print_exited_reason (struct ui_out *uiout, int exitstatus)
{
  struct inferior *inf = current_inferior ();
  std::string pidstr = target_pid_to_str (ptid_t (inf->pid));

  annotate_exited (exitstatus);
  if (exitstatus)
    {
      if (uiout->is_mi_like_p ())
	uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED));
      /* The exit code is traditionally formatted in octal.  */
      std::string exit_code_str
	= string_printf ("0%o", (unsigned int) exitstatus);
      uiout->message ("[Inferior %s (%s) exited with code %pF]\n",
		      plongest (inf->num), pidstr.c_str (),
		      string_field ("exit-code", exit_code_str.c_str ()));
    }
  else
    {
      if (uiout->is_mi_like_p ())
	uiout->field_string
	  ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
      uiout->message ("[Inferior %s (%s) exited normally]\n",
		      plongest (inf->num), pidstr.c_str ());
    }
}
8250
/* Report that the current thread stopped with signal SIGGNAL
   (GDB_SIGNAL_0 means a plain stop), in both MI and CLI forms.  */

void
print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
{
  struct thread_info *thr = inferior_thread ();

  annotate_signal ();

  if (uiout->is_mi_like_p ())
    ;
  else if (show_thread_that_caused_stop ())
    {
      const char *name;

      uiout->text ("\nThread ");
      uiout->field_string ("thread-id", print_thread_id (thr));

      /* Prefer the user-visible thread name; fall back to asking the
	 target.  */
      name = thr->name != NULL ? thr->name : target_thread_name (thr);
      if (name != NULL)
	{
	  uiout->text (" \"");
	  uiout->field_string ("name", name);
	  uiout->text ("\"");
	}
    }
  else
    uiout->text ("\nProgram");

  if (siggnal == GDB_SIGNAL_0 && !uiout->is_mi_like_p ())
    uiout->text (" stopped");
  else
    {
      uiout->text (" received signal ");
      annotate_signal_name ();
      if (uiout->is_mi_like_p ())
	uiout->field_string
	  ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
      uiout->field_string ("signal-name", gdb_signal_to_name (siggnal));
      annotate_signal_name_end ();
      uiout->text (", ");
      annotate_signal_string ();
      uiout->field_string ("signal-meaning", gdb_signal_to_string (siggnal));

      /* Give the architecture a chance to report extra details about
	 the signal, if it knows how.  */
      struct regcache *regcache = get_current_regcache ();
      struct gdbarch *gdbarch = regcache->arch ();
      if (gdbarch_report_signal_info_p (gdbarch))
	gdbarch_report_signal_info (gdbarch, uiout, siggnal);

      annotate_signal_string_end ();
    }
  uiout->text (".\n");
}
8302
/* Report that reverse execution ran out of recorded history.  */

void
print_no_history_reason (struct ui_out *uiout)
{
  uiout->text ("\nNo more reverse-execution history.\n");
}
8308
8309 /* Print current location without a level number, if we have changed
8310 functions or hit a breakpoint. Print source line if we have one.
8311 bpstat_print contains the logic deciding in detail what to print,
8312 based on the event(s) that just occurred. */
8313
static void
print_stop_location (struct target_waitstatus *ws)
{
  int bpstat_ret;
  enum print_what source_flag;
  int do_frame_printing = 1;
  struct thread_info *tp = inferior_thread ();

  /* Let the breakpoint machinery print whatever it wants for the
     stop's bpstat; its return value tells us what remains for us to
     print.  */
  bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
  switch (bpstat_ret)
    {
    case PRINT_UNKNOWN:
      /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
	 should) carry around the function and does (or should) use
	 that when doing a frame comparison.  */
      if (tp->control.stop_step
	  && frame_id_eq (tp->control.step_frame_id,
			  get_frame_id (get_current_frame ()))
	  && (tp->control.step_start_function
	      == find_pc_function (tp->suspend.stop_pc)))
	{
	  /* Finished step, just print source line.  */
	  source_flag = SRC_LINE;
	}
      else
	{
	  /* Print location and source line.  */
	  source_flag = SRC_AND_LOC;
	}
      break;
    case PRINT_SRC_AND_LOC:
      /* Print location and source line.  */
      source_flag = SRC_AND_LOC;
      break;
    case PRINT_SRC_ONLY:
      source_flag = SRC_LINE;
      break;
    case PRINT_NOTHING:
      /* Something bogus.  */
      source_flag = SRC_LINE;
      do_frame_printing = 0;
      break;
    default:
      internal_error (__FILE__, __LINE__, _("Unknown value."));
    }

  /* The behavior of this routine with respect to the source
     flag is:
     SRC_LINE: Print only source line
     LOCATION: Print only location
     SRC_AND_LOC: Print location and source line.  */
  if (do_frame_printing)
    print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
}
8368
8369 /* See infrun.h. */
8370
8371 void
8372 print_stop_event (struct ui_out *uiout, bool displays)
8373 {
8374 struct target_waitstatus last;
8375 struct thread_info *tp;
8376
8377 get_last_target_status (nullptr, nullptr, &last);
8378
8379 {
8380 scoped_restore save_uiout = make_scoped_restore (&current_uiout, uiout);
8381
8382 print_stop_location (&last);
8383
8384 /* Display the auto-display expressions. */
8385 if (displays)
8386 do_displays ();
8387 }
8388
8389 tp = inferior_thread ();
8390 if (tp->thread_fsm != NULL
8391 && tp->thread_fsm->finished_p ())
8392 {
8393 struct return_value_info *rv;
8394
8395 rv = tp->thread_fsm->return_value ();
8396 if (rv != NULL)
8397 print_return_value (uiout, rv);
8398 }
8399 }
8400
8401 /* See infrun.h. */
8402
8403 void
8404 maybe_remove_breakpoints (void)
8405 {
8406 if (!breakpoints_should_be_inserted_now () && target_has_execution)
8407 {
8408 if (remove_breakpoints ())
8409 {
8410 target_terminal::ours_for_output ();
8411 printf_filtered (_("Cannot remove breakpoints because "
8412 "program is no longer writable.\nFurther "
8413 "execution is probably impossible.\n"));
8414 }
8415 }
8416 }
8417
8418 /* The execution context that just caused a normal stop. */
8419
struct stop_context
{
  stop_context ();
  ~stop_context ();

  DISABLE_COPY_AND_ASSIGN (stop_context);

  /* Return true if the current context no longer matches this saved
     stop context.  */
  bool changed () const;

  /* The stop ID.  */
  ULONGEST stop_id;

  /* The event PTID.  */

  ptid_t ptid;

  /* If stopped for a thread event, this is the thread that caused the
     stop.  */
  struct thread_info *thread;

  /* The inferior that caused the stop.  */
  int inf_num;
};
8443
8444 /* Initializes a new stop context. If stopped for a thread event, this
8445 takes a strong reference to the thread. */
8446
8447 stop_context::stop_context ()
8448 {
8449 stop_id = get_stop_id ();
8450 ptid = inferior_ptid;
8451 inf_num = current_inferior ()->num;
8452
8453 if (inferior_ptid != null_ptid)
8454 {
8455 /* Take a strong reference so that the thread can't be deleted
8456 yet. */
8457 thread = inferior_thread ();
8458 thread->incref ();
8459 }
8460 else
8461 thread = NULL;
8462 }
8463
8464 /* Release a stop context previously created with save_stop_context.
8465 Releases the strong reference to the thread as well. */
8466
8467 stop_context::~stop_context ()
8468 {
8469 if (thread != NULL)
8470 thread->decref ();
8471 }
8472
8473 /* Return true if the current context no longer matches the saved stop
8474 context. */
8475
8476 bool
8477 stop_context::changed () const
8478 {
8479 if (ptid != inferior_ptid)
8480 return true;
8481 if (inf_num != current_inferior ()->num)
8482 return true;
8483 if (thread != NULL && thread->state != THREAD_STOPPED)
8484 return true;
8485 if (get_stop_id () != stop_id)
8486 return true;
8487 return false;
8488 }
8489
/* See infrun.h.

   Present a normal stop to the user: synchronize the frontend's run
   state, update the thread list, maybe remove breakpoints, run the
   "stop" command hook, and notify the normal_stop observers.  Returns
   1 if the stop hook changed the stop context (resumed the target or
   switched thread/inferior), in which case no stop notification is
   emitted; returns 0 otherwise.  */

int
normal_stop (void)
{
  struct target_waitstatus last;

  get_last_target_status (nullptr, nullptr, &last);

  /* Start a new stop epoch; stop_context objects record this ID so
     that stop_context::changed can detect a re-stop.  */
  new_stop_id ();

  /* If an exception is thrown from this point on, make sure to
     propagate GDB's knowledge of the executing state to the
     frontend/user running state.  A QUIT is an easy exception to see
     here, so do this before any filtered output.  */

  ptid_t finish_ptid = null_ptid;

  if (!non_stop)
    finish_ptid = minus_one_ptid;
  else if (last.kind == TARGET_WAITKIND_SIGNALLED
	   || last.kind == TARGET_WAITKIND_EXITED)
    {
      /* On some targets, we may still have live threads in the
	 inferior when we get a process exit event.  E.g., for
	 "checkpoint", when the current checkpoint/fork exits,
	 linux-fork.c automatically switches to another fork from
	 within target_mourn_inferior.  */
      if (inferior_ptid != null_ptid)
	finish_ptid = ptid_t (inferior_ptid.pid ());
    }
  else if (last.kind != TARGET_WAITKIND_NO_RESUMED)
    finish_ptid = inferior_ptid;

  gdb::optional<scoped_finish_thread_state> maybe_finish_thread_state;
  if (finish_ptid != null_ptid)
    {
      maybe_finish_thread_state.emplace
	(user_visible_resume_target (finish_ptid), finish_ptid);
    }

  /* As we're presenting a stop, and potentially removing breakpoints,
     update the thread list so we can tell whether there are threads
     running on the target.  With target remote, for example, we can
     only learn about new threads when we explicitly update the thread
     list.  Do this before notifying the interpreters about signal
     stops, end of stepping ranges, etc., so that the "new thread"
     output is emitted before e.g., "Program received signal FOO",
     instead of after.  */
  update_thread_list ();

  if (last.kind == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
    gdb::observers::signal_received.notify (inferior_thread ()->suspend.stop_signal);

  /* As with the notification of thread events, we want to delay
     notifying the user that we've switched thread context until
     the inferior actually stops.

     There's no point in saying anything if the inferior has exited.
     Note that SIGNALLED here means "exited with a signal", not
     "received a signal".

     Also skip saying anything in non-stop mode.  In that mode, as we
     don't want GDB to switch threads behind the user's back, to avoid
     races where the user is typing a command to apply to thread x,
     but GDB switches to thread y before the user finishes entering
     the command, fetch_inferior_event installs a cleanup to restore
     the current thread back to the thread the user had selected right
     after this event is handled, so we're not really switching, only
     informing of a stop.  */
  if (!non_stop
      && previous_inferior_ptid != inferior_ptid
      && target_has_execution
      && last.kind != TARGET_WAITKIND_SIGNALLED
      && last.kind != TARGET_WAITKIND_EXITED
      && last.kind != TARGET_WAITKIND_NO_RESUMED)
    {
      SWITCH_THRU_ALL_UIS ()
	{
	  target_terminal::ours_for_output ();
	  printf_filtered (_("[Switching to %s]\n"),
			   target_pid_to_str (inferior_ptid).c_str ());
	  annotate_thread_changed ();
	}
      previous_inferior_ptid = inferior_ptid;
    }

  if (last.kind == TARGET_WAITKIND_NO_RESUMED)
    {
      SWITCH_THRU_ALL_UIS ()
	if (current_ui->prompt_state == PROMPT_BLOCKED)
	  {
	    target_terminal::ours_for_output ();
	    printf_filtered (_("No unwaited-for children left.\n"));
	  }
    }

  /* Note: this depends on the update_thread_list call above.  */
  maybe_remove_breakpoints ();

  /* If an auto-display called a function and that got a signal,
     delete that auto-display to avoid an infinite recursion.  */

  if (stopped_by_random_signal)
    disable_current_display ();

  /* Re-enable stdin on every UI now that the target has stopped.  */
  SWITCH_THRU_ALL_UIS ()
    {
      async_enable_stdin ();
    }

  /* Let the user/frontend see the threads as stopped.  */
  maybe_finish_thread_state.reset ();

  /* Select innermost stack frame - i.e., current frame is frame 0,
     and current location is based on that.  Handle the case where the
     dummy call is returning after being stopped.  E.g. the dummy call
     previously hit a breakpoint.  (If the dummy call returns
     normally, we won't reach here.)  Do this before the stop hook is
     run, so that it doesn't get to see the temporary dummy frame,
     which is not where we'll present the stop.  */
  if (has_stack_frames ())
    {
      if (stop_stack_dummy == STOP_STACK_DUMMY)
	{
	  /* Pop the empty frame that contains the stack dummy.  This
	     also restores inferior state prior to the call (struct
	     infcall_suspend_state).  */
	  struct frame_info *frame = get_current_frame ();

	  gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
	  frame_pop (frame);
	  /* frame_pop calls reinit_frame_cache as the last thing it
	     does which means there's now no selected frame.  */
	}

      select_frame (get_current_frame ());

      /* Set the current source location.  */
      set_current_sal_from_frame (get_current_frame ());
    }

  /* Look up the hook_stop and run it (CLI internally handles problem
     of stop_command's pre-hook not existing).  */
  if (stop_command != NULL)
    {
      /* Snapshot the stop context so we can tell afterwards whether
	 the hook invalidated it.  */
      stop_context saved_context;

      try
	{
	  execute_cmd_pre_hook (stop_command);
	}
      catch (const gdb_exception &ex)
	{
	  exception_fprintf (gdb_stderr, ex,
			     "Error while running hook_stop:\n");
	}

      /* If the stop hook resumes the target, then there's no point in
	 trying to notify about the previous stop; its context is
	 gone.  Likewise if the command switches thread or inferior --
	 the observers would print a stop for the wrong
	 thread/inferior.  */
      if (saved_context.changed ())
	return 1;
    }

  /* Notify observers about the stop.  This is where the interpreters
     print the stop event.  */
  if (inferior_ptid != null_ptid)
    gdb::observers::normal_stop.notify (inferior_thread ()->control.stop_bpstat,
					stop_print_frame);
  else
    gdb::observers::normal_stop.notify (NULL, stop_print_frame);

  annotate_stopped ();

  if (target_has_execution)
    {
      if (last.kind != TARGET_WAITKIND_SIGNALLED
	  && last.kind != TARGET_WAITKIND_EXITED
	  && last.kind != TARGET_WAITKIND_NO_RESUMED)
	/* Delete the breakpoint we stopped at, if it wants to be deleted.
	   Delete any breakpoint that is to be deleted at the next stop.  */
	breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
    }

  /* Try to get rid of automatically added inferiors that are no
     longer needed.  Keeping those around slows down things linearly.
     Note that this never removes the current inferior.  */
  prune_inferiors ();

  return 0;
}
8684 \f
/* Return the "stop" setting for signal SIGNO (nonzero if GDB stops
   when the inferior receives it).  */

int
signal_stop_state (int signo)
{
  return signal_stop[signo];
}
8690
/* Return the "print" setting for signal SIGNO (nonzero if GDB
   mentions the signal when the inferior receives it).  */

int
signal_print_state (int signo)
{
  return signal_print[signo];
}
8696
/* Return the "pass" setting for signal SIGNO (nonzero if the signal
   is forwarded to the program).  */

int
signal_pass_state (int signo)
{
  return signal_program[signo];
}
8702
8703 static void
8704 signal_cache_update (int signo)
8705 {
8706 if (signo == -1)
8707 {
8708 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
8709 signal_cache_update (signo);
8710
8711 return;
8712 }
8713
8714 signal_pass[signo] = (signal_stop[signo] == 0
8715 && signal_print[signo] == 0
8716 && signal_program[signo] == 1
8717 && signal_catch[signo] == 0);
8718 }
8719
8720 int
8721 signal_stop_update (int signo, int state)
8722 {
8723 int ret = signal_stop[signo];
8724
8725 signal_stop[signo] = state;
8726 signal_cache_update (signo);
8727 return ret;
8728 }
8729
8730 int
8731 signal_print_update (int signo, int state)
8732 {
8733 int ret = signal_print[signo];
8734
8735 signal_print[signo] = state;
8736 signal_cache_update (signo);
8737 return ret;
8738 }
8739
8740 int
8741 signal_pass_update (int signo, int state)
8742 {
8743 int ret = signal_program[signo];
8744
8745 signal_program[signo] = state;
8746 signal_cache_update (signo);
8747 return ret;
8748 }
8749
8750 /* Update the global 'signal_catch' from INFO and notify the
8751 target. */
8752
8753 void
8754 signal_catch_update (const unsigned int *info)
8755 {
8756 int i;
8757
8758 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
8759 signal_catch[i] = info[i] > 0;
8760 signal_cache_update (-1);
8761 target_pass_signals (signal_pass);
8762 }
8763
/* Print the column headers for the signal-settings table shown by
   "info signals" and "handle".  */

static void
sig_print_header (void)
{
  printf_filtered (_("Signal Stop\tPrint\tPass "
		     "to program\tDescription\n"));
}
8770
8771 static void
8772 sig_print_info (enum gdb_signal oursig)
8773 {
8774 const char *name = gdb_signal_to_name (oursig);
8775 int name_padding = 13 - strlen (name);
8776
8777 if (name_padding <= 0)
8778 name_padding = 0;
8779
8780 printf_filtered ("%s", name);
8781 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
8782 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
8783 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
8784 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
8785 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
8786 }
8787
8788 /* Specify how various signals in the inferior should be handled. */
8789
8790 static void
8791 handle_command (const char *args, int from_tty)
8792 {
8793 int digits, wordlen;
8794 int sigfirst, siglast;
8795 enum gdb_signal oursig;
8796 int allsigs;
8797
8798 if (args == NULL)
8799 {
8800 error_no_arg (_("signal to handle"));
8801 }
8802
8803 /* Allocate and zero an array of flags for which signals to handle. */
8804
8805 const size_t nsigs = GDB_SIGNAL_LAST;
8806 unsigned char sigs[nsigs] {};
8807
8808 /* Break the command line up into args. */
8809
8810 gdb_argv built_argv (args);
8811
8812 /* Walk through the args, looking for signal oursigs, signal names, and
8813 actions. Signal numbers and signal names may be interspersed with
8814 actions, with the actions being performed for all signals cumulatively
8815 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
8816
8817 for (char *arg : built_argv)
8818 {
8819 wordlen = strlen (arg);
8820 for (digits = 0; isdigit (arg[digits]); digits++)
8821 {;
8822 }
8823 allsigs = 0;
8824 sigfirst = siglast = -1;
8825
8826 if (wordlen >= 1 && !strncmp (arg, "all", wordlen))
8827 {
8828 /* Apply action to all signals except those used by the
8829 debugger. Silently skip those. */
8830 allsigs = 1;
8831 sigfirst = 0;
8832 siglast = nsigs - 1;
8833 }
8834 else if (wordlen >= 1 && !strncmp (arg, "stop", wordlen))
8835 {
8836 SET_SIGS (nsigs, sigs, signal_stop);
8837 SET_SIGS (nsigs, sigs, signal_print);
8838 }
8839 else if (wordlen >= 1 && !strncmp (arg, "ignore", wordlen))
8840 {
8841 UNSET_SIGS (nsigs, sigs, signal_program);
8842 }
8843 else if (wordlen >= 2 && !strncmp (arg, "print", wordlen))
8844 {
8845 SET_SIGS (nsigs, sigs, signal_print);
8846 }
8847 else if (wordlen >= 2 && !strncmp (arg, "pass", wordlen))
8848 {
8849 SET_SIGS (nsigs, sigs, signal_program);
8850 }
8851 else if (wordlen >= 3 && !strncmp (arg, "nostop", wordlen))
8852 {
8853 UNSET_SIGS (nsigs, sigs, signal_stop);
8854 }
8855 else if (wordlen >= 3 && !strncmp (arg, "noignore", wordlen))
8856 {
8857 SET_SIGS (nsigs, sigs, signal_program);
8858 }
8859 else if (wordlen >= 4 && !strncmp (arg, "noprint", wordlen))
8860 {
8861 UNSET_SIGS (nsigs, sigs, signal_print);
8862 UNSET_SIGS (nsigs, sigs, signal_stop);
8863 }
8864 else if (wordlen >= 4 && !strncmp (arg, "nopass", wordlen))
8865 {
8866 UNSET_SIGS (nsigs, sigs, signal_program);
8867 }
8868 else if (digits > 0)
8869 {
8870 /* It is numeric. The numeric signal refers to our own
8871 internal signal numbering from target.h, not to host/target
8872 signal number. This is a feature; users really should be
8873 using symbolic names anyway, and the common ones like
8874 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
8875
8876 sigfirst = siglast = (int)
8877 gdb_signal_from_command (atoi (arg));
8878 if (arg[digits] == '-')
8879 {
8880 siglast = (int)
8881 gdb_signal_from_command (atoi (arg + digits + 1));
8882 }
8883 if (sigfirst > siglast)
8884 {
8885 /* Bet he didn't figure we'd think of this case... */
8886 std::swap (sigfirst, siglast);
8887 }
8888 }
8889 else
8890 {
8891 oursig = gdb_signal_from_name (arg);
8892 if (oursig != GDB_SIGNAL_UNKNOWN)
8893 {
8894 sigfirst = siglast = (int) oursig;
8895 }
8896 else
8897 {
8898 /* Not a number and not a recognized flag word => complain. */
8899 error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg);
8900 }
8901 }
8902
8903 /* If any signal numbers or symbol names were found, set flags for
8904 which signals to apply actions to. */
8905
8906 for (int signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
8907 {
8908 switch ((enum gdb_signal) signum)
8909 {
8910 case GDB_SIGNAL_TRAP:
8911 case GDB_SIGNAL_INT:
8912 if (!allsigs && !sigs[signum])
8913 {
8914 if (query (_("%s is used by the debugger.\n\
8915 Are you sure you want to change it? "),
8916 gdb_signal_to_name ((enum gdb_signal) signum)))
8917 {
8918 sigs[signum] = 1;
8919 }
8920 else
8921 printf_unfiltered (_("Not confirmed, unchanged.\n"));
8922 }
8923 break;
8924 case GDB_SIGNAL_0:
8925 case GDB_SIGNAL_DEFAULT:
8926 case GDB_SIGNAL_UNKNOWN:
8927 /* Make sure that "all" doesn't print these. */
8928 break;
8929 default:
8930 sigs[signum] = 1;
8931 break;
8932 }
8933 }
8934 }
8935
8936 for (int signum = 0; signum < nsigs; signum++)
8937 if (sigs[signum])
8938 {
8939 signal_cache_update (-1);
8940 target_pass_signals (signal_pass);
8941 target_program_signals (signal_program);
8942
8943 if (from_tty)
8944 {
8945 /* Show the results. */
8946 sig_print_header ();
8947 for (; signum < nsigs; signum++)
8948 if (sigs[signum])
8949 sig_print_info ((enum gdb_signal) signum);
8950 }
8951
8952 break;
8953 }
8954 }
8955
/* Complete the "handle" command.  Offers both signal names and the
   action keywords recognized by handle_command.  */

static void
handle_completer (struct cmd_list_element *ignore,
		  completion_tracker &tracker,
		  const char *text, const char *word)
{
  /* The action keywords accepted by handle_command.  */
  static const char * const keywords[] =
    {
      "all",
      "stop",
      "ignore",
      "print",
      "pass",
      "nostop",
      "noignore",
      "noprint",
      "nopass",
      NULL,
    };

  signal_completer (ignore, tracker, text, word);
  complete_on_enum (tracker, keywords, word, word);
}
8980
8981 enum gdb_signal
8982 gdb_signal_from_command (int num)
8983 {
8984 if (num >= 1 && num <= 15)
8985 return (enum gdb_signal) num;
8986 error (_("Only signals 1-15 are valid as numeric signals.\n\
8987 Use \"info signals\" for a list of symbolic signals."));
8988 }
8989
/* Print current contents of the tables set by the handle command.
   It is possible we should just be printing signals actually used
   by the current target (but for things to work right when switching
   targets, all signals should be in the signal tables).
   SIGNUM_EXP, if non-NULL, names or numbers a single signal to show;
   otherwise all signals are listed.  */

static void
info_signals_command (const char *signum_exp, int from_tty)
{
  enum gdb_signal oursig;

  sig_print_header ();

  if (signum_exp)
    {
      /* First see if this is a symbol name.  */
      oursig = gdb_signal_from_name (signum_exp);
      if (oursig == GDB_SIGNAL_UNKNOWN)
	{
	  /* No, try numeric.  */
	  oursig =
	    gdb_signal_from_command (parse_and_eval_long (signum_exp));
	}
      sig_print_info (oursig);
      return;
    }

  printf_filtered ("\n");
  /* These ugly casts brought to you by the native VAX compiler.  */
  for (oursig = GDB_SIGNAL_FIRST;
       (int) oursig < (int) GDB_SIGNAL_LAST;
       oursig = (enum gdb_signal) ((int) oursig + 1))
    {
      QUIT;

      /* Skip the internal pseudo-signals, same as "handle all"
	 does.  */
      if (oursig != GDB_SIGNAL_UNKNOWN
	  && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
	sig_print_info (oursig);
    }

  printf_filtered (_("\nUse the \"handle\" command "
		     "to change these tables.\n"));
}
9032
/* The $_siginfo convenience variable is a bit special.  We don't know
   for sure the type of the value until we actually have a chance to
   fetch the data.  The type can change depending on gdbarch, so it is
   also dependent on which thread you have selected.  This is handled
   by:

   1. making $_siginfo be an internalvar that creates a new value on
   access.

   2. making the value of $_siginfo be an lval_computed value.  */
9042
/* This function implements the lval_computed support for reading a
   $_siginfo value.  Fills V's buffer from the target's siginfo
   object, honoring V's offset into it; errors out on a short or
   failed transfer.  */

static void
siginfo_value_read (struct value *v)
{
  LONGEST transferred;

  /* If we can access registers, so can we access $_siginfo.  Likewise
     vice versa.  */
  validate_registers_access ();

  transferred =
    target_read (current_top_target (), TARGET_OBJECT_SIGNAL_INFO,
		 NULL,
		 value_contents_all_raw (v),
		 value_offset (v),
		 TYPE_LENGTH (value_type (v)));

  /* Anything less than a full transfer counts as failure.  */
  if (transferred != TYPE_LENGTH (value_type (v)))
    error (_("Unable to read siginfo"));
}
9065
/* This function implements the lval_computed support for writing a
   $_siginfo value.  Writes FROMVAL's contents into the target's
   siginfo object at V's offset; errors out on a short or failed
   transfer.  */

static void
siginfo_value_write (struct value *v, struct value *fromval)
{
  LONGEST transferred;

  /* If we can access registers, so can we access $_siginfo.  Likewise
     vice versa.  */
  validate_registers_access ();

  transferred = target_write (current_top_target (),
			      TARGET_OBJECT_SIGNAL_INFO,
			      NULL,
			      value_contents_all_raw (fromval),
			      value_offset (v),
			      TYPE_LENGTH (value_type (fromval)));

  /* Anything less than a full transfer counts as failure.  */
  if (transferred != TYPE_LENGTH (value_type (fromval)))
    error (_("Unable to write siginfo"));
}
9088
/* The lval_computed hooks backing $_siginfo values.  */

static const struct lval_funcs siginfo_value_funcs =
  {
    siginfo_value_read,
    siginfo_value_write
  };
9094
9095 /* Return a new value with the correct type for the siginfo object of
9096 the current thread using architecture GDBARCH. Return a void value
9097 if there's no object available. */
9098
9099 static struct value *
9100 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
9101 void *ignore)
9102 {
9103 if (target_has_stack
9104 && inferior_ptid != null_ptid
9105 && gdbarch_get_siginfo_type_p (gdbarch))
9106 {
9107 struct type *type = gdbarch_get_siginfo_type (gdbarch);
9108
9109 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
9110 }
9111
9112 return allocate_value (builtin_type (gdbarch)->builtin_void);
9113 }
9114
9115 \f
/* infcall_suspend_state contains state about the program itself like its
   registers and any signal it received when it last stopped.
   This state must be restored regardless of how the inferior function call
   ends (either successfully, or after it hits a breakpoint or signal)
   if the program is to properly continue where it left off.  */

class infcall_suspend_state
{
public:
  /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
     once the inferior function call has finished.  */
  infcall_suspend_state (struct gdbarch *gdbarch,
                         const struct thread_info *tp,
                         struct regcache *regcache)
    : m_thread_suspend (tp->suspend),
      m_registers (new readonly_detached_regcache (*regcache))
  {
    gdb::unique_xmalloc_ptr<gdb_byte> siginfo_data;

    /* Snapshot the siginfo object too, if this architecture has
       one.  */
    if (gdbarch_get_siginfo_type_p (gdbarch))
      {
        struct type *type = gdbarch_get_siginfo_type (gdbarch);
        size_t len = TYPE_LENGTH (type);

        siginfo_data.reset ((gdb_byte *) xmalloc (len));

        if (target_read (current_top_target (), TARGET_OBJECT_SIGNAL_INFO, NULL,
                         siginfo_data.get (), 0, len) != len)
          {
            /* Errors ignored.  */
            siginfo_data.reset (nullptr);
          }
      }

    if (siginfo_data)
      {
        m_siginfo_gdbarch = gdbarch;
        m_siginfo_data = std::move (siginfo_data);
      }
  }

  /* Return a pointer to the stored register state.  */

  readonly_detached_regcache *registers () const
  {
    return m_registers.get ();
  }

  /* Restores the stored state into GDBARCH, TP, and REGCACHE.  */

  void restore (struct gdbarch *gdbarch,
                struct thread_info *tp,
                struct regcache *regcache) const
  {
    tp->suspend = m_thread_suspend;

    /* Only write back the siginfo if it was captured for this same
       architecture -- see the M_SIGINFO_DATA comment below.  */
    if (m_siginfo_gdbarch == gdbarch)
      {
        struct type *type = gdbarch_get_siginfo_type (gdbarch);

        /* Errors ignored.  */
        target_write (current_top_target (), TARGET_OBJECT_SIGNAL_INFO, NULL,
                      m_siginfo_data.get (), 0, TYPE_LENGTH (type));
      }

    /* The inferior can be gone if the user types "print exit(0)"
       (and perhaps other times).  */
    if (target_has_execution)
      /* NB: The register write goes through to the target.  */
      regcache->restore (registers ());
  }

private:
  /* How the current thread stopped before the inferior function call was
     executed.  */
  struct thread_suspend_state m_thread_suspend;

  /* The registers before the inferior function call was executed.  */
  std::unique_ptr<readonly_detached_regcache> m_registers;

  /* Format of SIGINFO_DATA or NULL if it is not present.  */
  struct gdbarch *m_siginfo_gdbarch = nullptr;

  /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
     TYPE_LENGTH (gdbarch_get_siginfo_type ()).  For different gdbarch the
     content would be invalid.  */
  gdb::unique_xmalloc_ptr<gdb_byte> m_siginfo_data;
};
9204
/* Capture the current thread's suspend state and registers so they
   can be restored after an inferior function call.  The returned
   object owns the saved state.  */

infcall_suspend_state_up
save_infcall_suspend_state ()
{
  struct thread_info *tp = inferior_thread ();
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = regcache->arch ();

  infcall_suspend_state_up inf_state
    (new struct infcall_suspend_state (gdbarch, tp, regcache));

  /* Having saved the current state, adjust the thread state, discarding
     any stop signal information.  The stop signal is not useful when
     starting an inferior function call, and run_inferior_call will not use
     the signal due to its `proceed' call with GDB_SIGNAL_0.  */
  tp->suspend.stop_signal = GDB_SIGNAL_0;

  return inf_state;
}
9223
9224 /* Restore inferior session state to INF_STATE. */
9225
9226 void
9227 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
9228 {
9229 struct thread_info *tp = inferior_thread ();
9230 struct regcache *regcache = get_current_regcache ();
9231 struct gdbarch *gdbarch = regcache->arch ();
9232
9233 inf_state->restore (gdbarch, tp, regcache);
9234 discard_infcall_suspend_state (inf_state);
9235 }
9236
/* Free INF_STATE without restoring it.  */

void
discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  delete inf_state;
}
9242
/* Return the register state saved in INF_STATE; ownership stays with
   INF_STATE.  */

readonly_detached_regcache *
get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
{
  return inf_state->registers ();
}
9248
/* infcall_control_state contains state regarding gdb's control of the
   inferior itself like stepping control.  It also contains session state like
   the user's currently selected frame.  */

struct infcall_control_state
{
  /* The thread's execution-control state (step/exception resume
     breakpoints, stop bpstat, etc.).  */
  struct thread_control_state thread_control;
  /* The inferior-wide control state.  */
  struct inferior_control_state inferior_control;

  /* Other fields:  */
  enum stop_stack_kind stop_stack_dummy = STOP_NONE;
  int stopped_by_random_signal = 0;

  /* ID of the selected frame when the inferior function call was made.  */
  struct frame_id selected_frame_id {};
};
9265
/* Save all of the information associated with the inferior<==>gdb
   connection.  The returned object owns the saved state and must be
   passed to restore_infcall_control_state or
   discard_infcall_control_state.  */

infcall_control_state_up
save_infcall_control_state ()
{
  infcall_control_state_up inf_status (new struct infcall_control_state);
  struct thread_info *tp = inferior_thread ();
  struct inferior *inf = current_inferior ();

  inf_status->thread_control = tp->control;
  inf_status->inferior_control = inf->control;

  /* Ownership of the resume breakpoints moved into INF_STATUS with
     the control-state copy above; clear the thread's pointers so the
     infcall starts with none.  */
  tp->control.step_resume_breakpoint = NULL;
  tp->control.exception_resume_breakpoint = NULL;

  /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
     chain.  If caller's caller is walking the chain, they'll be happier if we
     hand them back the original chain when restore_infcall_control_state is
     called.  */
  tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);

  /* Other fields:  */
  inf_status->stop_stack_dummy = stop_stack_dummy;
  inf_status->stopped_by_random_signal = stopped_by_random_signal;

  inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));

  return inf_status;
}
9296
9297 static void
9298 restore_selected_frame (const frame_id &fid)
9299 {
9300 frame_info *frame = frame_find_by_id (fid);
9301
9302 /* If inf_status->selected_frame_id is NULL, there was no previously
9303 selected frame. */
9304 if (frame == NULL)
9305 {
9306 warning (_("Unable to restore previously selected frame."));
9307 return;
9308 }
9309
9310 select_frame (frame);
9311 }
9312
/* Restore inferior session state to INF_STATUS.  INF_STATUS is freed
   once restored.  */

void
restore_infcall_control_state (struct infcall_control_state *inf_status)
{
  struct thread_info *tp = inferior_thread ();
  struct inferior *inf = current_inferior ();

  /* Any resume breakpoints left over from the infcall are stale; mark
     them for deletion at the next stop before overwriting the
     pointers below.  */
  if (tp->control.step_resume_breakpoint)
    tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;

  if (tp->control.exception_resume_breakpoint)
    tp->control.exception_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  /* Handle the bpstat_copy of the chain.  */
  bpstat_clear (&tp->control.stop_bpstat);

  tp->control = inf_status->thread_control;
  inf->control = inf_status->inferior_control;

  /* Other fields:  */
  stop_stack_dummy = inf_status->stop_stack_dummy;
  stopped_by_random_signal = inf_status->stopped_by_random_signal;

  if (target_has_stack)
    {
      /* The point of the try/catch is that if the stack is clobbered,
         walking the stack might encounter a garbage pointer and
         error() trying to dereference it.  */
      try
        {
          restore_selected_frame (inf_status->selected_frame_id);
        }
      catch (const gdb_exception_error &ex)
        {
          exception_fprintf (gdb_stderr, ex,
                             "Unable to restore previously selected frame:\n");
          /* Error in restoring the selected frame.  Select the
             innermost frame.  */
          select_frame (get_current_frame ());
        }
    }

  delete inf_status;
}
9359
/* Free INF_STATUS without restoring it, marking any saved resume
   breakpoints for deletion at the next stop.  */

void
discard_infcall_control_state (struct infcall_control_state *inf_status)
{
  if (inf_status->thread_control.step_resume_breakpoint)
    inf_status->thread_control.step_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  if (inf_status->thread_control.exception_resume_breakpoint)
    inf_status->thread_control.exception_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  /* See save_infcall_control_state for info on stop_bpstat.  */
  bpstat_clear (&inf_status->thread_control.stop_bpstat);

  delete inf_status;
}
9376 \f
/* See infrun.h.  */

void
clear_exit_convenience_vars (void)
{
  /* Reset the $_exitsignal and $_exitcode convenience variables.  */
  clear_internalvar (lookup_internalvar ("_exitsignal"));
  clear_internalvar (lookup_internalvar ("_exitcode"));
}
9385 \f
9386
/* User interface for reverse debugging:
   Set exec-direction / show exec-direction commands
   (returns error unless target implements to_set_exec_direction method).  */

enum exec_direction_kind execution_direction = EXEC_FORWARD;
static const char exec_forward[] = "forward";
static const char exec_reverse[] = "reverse";
/* String value behind the "set exec-direction" command; kept in sync
   with EXECUTION_DIRECTION by set_exec_direction_func.  */
static const char *exec_direction = exec_forward;
/* Valid choices for "set exec-direction".  */
static const char *const exec_direction_names[] = {
  exec_forward,
  exec_reverse,
  NULL
};
9400
9401 static void
9402 set_exec_direction_func (const char *args, int from_tty,
9403 struct cmd_list_element *cmd)
9404 {
9405 if (target_can_execute_reverse)
9406 {
9407 if (!strcmp (exec_direction, exec_forward))
9408 execution_direction = EXEC_FORWARD;
9409 else if (!strcmp (exec_direction, exec_reverse))
9410 execution_direction = EXEC_REVERSE;
9411 }
9412 else
9413 {
9414 exec_direction = exec_forward;
9415 error (_("Target does not support this operation."));
9416 }
9417 }
9418
/* Show hook for "show exec-direction": print the current execution
   direction.  */

static void
show_exec_direction_func (struct ui_file *out, int from_tty,
			  struct cmd_list_element *cmd, const char *value)
{
  switch (execution_direction) {
  case EXEC_FORWARD:
    fprintf_filtered (out, _("Forward.\n"));
    break;
  case EXEC_REVERSE:
    fprintf_filtered (out, _("Reverse.\n"));
    break;
  default:
    /* EXECUTION_DIRECTION only ever holds the two values above; hit
       an internal error on anything else.  */
    internal_error (__FILE__, __LINE__,
		    _("bogus execution_direction value: %d"),
		    (int) execution_direction);
  }
}
9436
/* Show hook printing the current value of the "schedule-multiple"
   setting.  */

static void
show_schedule_multiple (struct ui_file *file, int from_tty,
			struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Resuming the execution of threads "
			    "of all processes is %s.\n"), value);
}
9444
/* Implementation of `siginfo' variable.  */

static const struct internalvar_funcs siginfo_funcs =
{
  siginfo_make_value,
  NULL,		/* Remaining hooks unused for $_siginfo.  */
  NULL
};
9453
/* Callback for infrun's target events source.  This is marked when a
   thread has a pending status to process.  DATA is the client data
   registered with the handler; it is unused here.  */

static void
infrun_async_inferior_event_handler (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT);
}
9462
namespace selftests
{

/* Verify that when two threads with the same ptid exist (from two different
   targets) and one of them changes ptid, we only update inferior_ptid if
   it is appropriate.  */

static void
infrun_thread_ptid_changed ()
{
  gdbarch *arch = current_inferior ()->gdbarch;

  /* The thread which inferior_ptid represents changes ptid.  */
  {
    scoped_restore_current_pspace_and_thread restore;

    scoped_mock_context<test_target_ops> target1 (arch);
    scoped_mock_context<test_target_ops> target2 (arch);
    /* Chain the two mock inferiors into one list.  */
    target2.mock_inferior.next = &target1.mock_inferior;

    ptid_t old_ptid (111, 222);
    ptid_t new_ptid (111, 333);

    /* Both targets start out with a thread of the same ptid.  */
    target1.mock_inferior.pid = old_ptid.pid ();
    target1.mock_thread.ptid = old_ptid;
    target2.mock_inferior.pid = old_ptid.pid ();
    target2.mock_thread.ptid = old_ptid;

    auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
    set_current_inferior (&target1.mock_inferior);

    thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);

    /* The change happened on the current inferior's target, so
       inferior_ptid must follow.  */
    gdb_assert (inferior_ptid == new_ptid);
  }

  /* A thread with the same ptid as inferior_ptid, but from another target,
     changes ptid.  */
  {
    scoped_restore_current_pspace_and_thread restore;

    scoped_mock_context<test_target_ops> target1 (arch);
    scoped_mock_context<test_target_ops> target2 (arch);
    /* Chain the two mock inferiors into one list.  */
    target2.mock_inferior.next = &target1.mock_inferior;

    ptid_t old_ptid (111, 222);
    ptid_t new_ptid (111, 333);

    target1.mock_inferior.pid = old_ptid.pid ();
    target1.mock_thread.ptid = old_ptid;
    target2.mock_inferior.pid = old_ptid.pid ();
    target2.mock_thread.ptid = old_ptid;

    auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
    set_current_inferior (&target2.mock_inferior);

    thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);

    /* The change happened on a different target, so inferior_ptid
       must be left alone.  */
    gdb_assert (inferior_ptid == old_ptid);
  }
}

} /* namespace selftests */
9526
/* Module initialization for infrun: registers the async target-event
   source, the signal-handling and execution-mode set/show commands,
   the default per-signal dispositions, thread/inferior observers, the
   `$_siginfo' convenience variable, and (in self-test builds) the
   unit test above.  Called once at GDB startup.  */

void _initialize_infrun ();
void
_initialize_infrun ()
{
  struct cmd_list_element *c;

  /* Register extra event sources in the event loop.  */
  infrun_async_inferior_event_token
    = create_async_event_handler (infrun_async_inferior_event_handler, NULL);

  add_info ("signals", info_signals_command, _("\
What debugger does when program gets various signals.\n\
Specify a signal as argument to print info on that signal only."));
  /* "info handle" is an alias for "info signals".  */
  add_info_alias ("handle", "signals", 0);

  c = add_com ("handle", class_run, handle_command, _("\
Specify how to handle signals.\n\
Usage: handle SIGNAL [ACTIONS]\n\
Args are signals and actions to apply to those signals.\n\
If no actions are specified, the current settings for the specified signals\n\
will be displayed instead.\n\
\n\
Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
from 1-15 are allowed for compatibility with old versions of GDB.\n\
Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
The special arg \"all\" is recognized to mean all signals except those\n\
used by the debugger, typically SIGTRAP and SIGINT.\n\
\n\
Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
Stop means reenter debugger if this signal happens (implies print).\n\
Print means print a message if this signal happens.\n\
Pass means let program see this signal; otherwise program doesn't know.\n\
Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
Pass and Stop may be combined.\n\
\n\
Multiple signals may be specified. Signal numbers and signal names\n\
may be interspersed with actions, with the actions being performed for\n\
all signals cumulatively specified."));
  set_cmd_completer (c, handle_completer);

  /* In dbx mode "stop" is a real command; otherwise provide a stub so
     users can hook it.  */
  if (!dbx_commands)
    stop_command = add_cmd ("stop", class_obscure,
			    not_just_help_class_command, _("\
There is no `stop' command, but you can set a hook on `stop'.\n\
This allows you to set a list of commands to be run each time execution\n\
of the program stops."), &cmdlist);

  add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
Set inferior debugging."), _("\
Show inferior debugging."), _("\
When non-zero, inferior specific debugging is enabled."),
			     NULL,
			     show_debug_infrun,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("displaced", class_maintenance,
			   &debug_displaced, _("\
Set displaced stepping debugging."), _("\
Show displaced stepping debugging."), _("\
When non-zero, displaced stepping specific debugging is enabled."),
			   NULL,
			   show_debug_displaced,
			   &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("non-stop", no_class,
			   &non_stop_1, _("\
Set whether gdb controls the inferior in non-stop mode."), _("\
Show whether gdb controls the inferior in non-stop mode."), _("\
When debugging a multi-threaded program and this setting is\n\
off (the default, also called all-stop mode), when one thread stops\n\
(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
all other threads in the program while you interact with the thread of\n\
interest. When you continue or step a thread, you can allow the other\n\
threads to run, or have them remain stopped, but while you inspect any\n\
thread's state, all threads stop.\n\
\n\
In non-stop mode, when one thread stops, other threads can continue\n\
to run freely. You'll be able to step each thread independently,\n\
leave it stopped or free to run as needed."),
			   set_non_stop,
			   show_non_stop,
			   &setlist,
			   &showlist);

  /* Default every signal to "stop, print, pass to program, don't
     catch"; specific exceptions follow below.  */
  for (size_t i = 0; i < GDB_SIGNAL_LAST; i++)
    {
      signal_stop[i] = 1;
      signal_print[i] = 1;
      signal_program[i] = 1;
      signal_catch[i] = 0;
    }

  /* Signals caused by debugger's own actions should not be given to
     the program afterwards.

     Do not deliver GDB_SIGNAL_TRAP by default, except when the user
     explicitly specifies that it should be delivered to the target
     program. Typically, that would occur when a user is debugging a
     target monitor on a simulator: the target monitor sets a
     breakpoint; the simulator encounters this breakpoint and halts
     the simulation handing control to GDB; GDB, noting that the stop
     address doesn't map to any known breakpoint, returns control back
     to the simulator; the simulator then delivers the hardware
     equivalent of a GDB_SIGNAL_TRAP to the program being
     debugged. */
  signal_program[GDB_SIGNAL_TRAP] = 0;
  signal_program[GDB_SIGNAL_INT] = 0;

  /* Signals that are not errors should not normally enter the debugger. */
  signal_stop[GDB_SIGNAL_ALRM] = 0;
  signal_print[GDB_SIGNAL_ALRM] = 0;
  signal_stop[GDB_SIGNAL_VTALRM] = 0;
  signal_print[GDB_SIGNAL_VTALRM] = 0;
  signal_stop[GDB_SIGNAL_PROF] = 0;
  signal_print[GDB_SIGNAL_PROF] = 0;
  signal_stop[GDB_SIGNAL_CHLD] = 0;
  signal_print[GDB_SIGNAL_CHLD] = 0;
  signal_stop[GDB_SIGNAL_IO] = 0;
  signal_print[GDB_SIGNAL_IO] = 0;
  signal_stop[GDB_SIGNAL_POLL] = 0;
  signal_print[GDB_SIGNAL_POLL] = 0;
  signal_stop[GDB_SIGNAL_URG] = 0;
  signal_print[GDB_SIGNAL_URG] = 0;
  signal_stop[GDB_SIGNAL_WINCH] = 0;
  signal_print[GDB_SIGNAL_WINCH] = 0;
  signal_stop[GDB_SIGNAL_PRIO] = 0;
  signal_print[GDB_SIGNAL_PRIO] = 0;

  /* These signals are used internally by user-level thread
     implementations. (See signal(5) on Solaris.) Like the above
     signals, a healthy program receives and handles them as part of
     its normal operation. */
  signal_stop[GDB_SIGNAL_LWP] = 0;
  signal_print[GDB_SIGNAL_LWP] = 0;
  signal_stop[GDB_SIGNAL_WAITING] = 0;
  signal_print[GDB_SIGNAL_WAITING] = 0;
  signal_stop[GDB_SIGNAL_CANCEL] = 0;
  signal_print[GDB_SIGNAL_CANCEL] = 0;
  signal_stop[GDB_SIGNAL_LIBRT] = 0;
  signal_print[GDB_SIGNAL_LIBRT] = 0;

  /* Update cached state. */
  signal_cache_update (-1);

  add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
			    &stop_on_solib_events, _("\
Set stopping for shared library events."), _("\
Show stopping for shared library events."), _("\
If nonzero, gdb will give control to the user when the dynamic linker\n\
notifies gdb of shared library events. The most common event of interest\n\
to the user would be loading/unloading of a new library."),
			    set_stop_on_solib_events,
			    show_stop_on_solib_events,
			    &setlist, &showlist);

  add_setshow_enum_cmd ("follow-fork-mode", class_run,
			follow_fork_mode_kind_names,
			&follow_fork_mode_string, _("\
Set debugger response to a program call of fork or vfork."), _("\
Show debugger response to a program call of fork or vfork."), _("\
A fork or vfork creates a new process. follow-fork-mode can be:\n\
parent - the original process is debugged after a fork\n\
child - the new process is debugged after a fork\n\
The unfollowed process will continue to run.\n\
By default, the debugger will follow the parent process."),
			NULL,
			show_follow_fork_mode_string,
			&setlist, &showlist);

  add_setshow_enum_cmd ("follow-exec-mode", class_run,
			follow_exec_mode_names,
			&follow_exec_mode_string, _("\
Set debugger response to a program call of exec."), _("\
Show debugger response to a program call of exec."), _("\
An exec call replaces the program image of a process.\n\
\n\
follow-exec-mode can be:\n\
\n\
new - the debugger creates a new inferior and rebinds the process\n\
to this new inferior. The program the process was running before\n\
the exec call can be restarted afterwards by restarting the original\n\
inferior.\n\
\n\
same - the debugger keeps the process bound to the same inferior.\n\
The new executable image replaces the previous executable loaded in\n\
the inferior. Restarting the inferior after the exec call restarts\n\
the executable the process was running after the exec call.\n\
\n\
By default, the debugger will use the same inferior."),
			NULL,
			show_follow_exec_mode_string,
			&setlist, &showlist);

  add_setshow_enum_cmd ("scheduler-locking", class_run,
			scheduler_enums, &scheduler_mode, _("\
Set mode for locking scheduler during execution."), _("\
Show mode for locking scheduler during execution."), _("\
off == no locking (threads may preempt at any time)\n\
on == full locking (no thread except the current thread may run)\n\
This applies to both normal execution and replay mode.\n\
step == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
In this mode, other threads may run during other commands.\n\
This applies to both normal execution and replay mode.\n\
replay == scheduler locked in replay mode and unlocked during normal execution."),
			set_schedlock_func,	/* traps on target vector */
			show_scheduler_mode,
			&setlist, &showlist);

  add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
Set mode for resuming threads of all processes."), _("\
Show mode for resuming threads of all processes."), _("\
When on, execution commands (such as 'continue' or 'next') resume all\n\
threads of all processes. When off (which is the default), execution\n\
commands only resume the threads of the current process. The set of\n\
threads that are resumed is further refined by the scheduler-locking\n\
mode (see help set scheduler-locking)."),
			   NULL,
			   show_schedule_multiple,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
Set mode of the step operation."), _("\
Show mode of the step operation."), _("\
When set, doing a step over a function without debug line information\n\
will stop at the first instruction of that function. Otherwise, the\n\
function is skipped and the step command stops at a different source line."),
			   NULL,
			   show_step_stop_if_no_debug,
			   &setlist, &showlist);

  add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
				&can_use_displaced_stepping, _("\
Set debugger's willingness to use displaced stepping."), _("\
Show debugger's willingness to use displaced stepping."), _("\
If on, gdb will use displaced stepping to step over breakpoints if it is\n\
supported by the target architecture. If off, gdb will not use displaced\n\
stepping to step over breakpoints, even if such is supported by the target\n\
architecture. If auto (which is the default), gdb will use displaced stepping\n\
if the target architecture supports it and non-stop mode is active, but will not\n\
use it in all-stop mode (see help set non-stop)."),
				NULL,
				show_can_use_displaced_stepping,
				&setlist, &showlist);

  add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
			&exec_direction, _("Set direction of execution.\n\
Options are 'forward' or 'reverse'."),
			_("Show direction of execution (forward/reverse)."),
			_("Tells gdb whether to execute forward or backward."),
			set_exec_direction_func, show_exec_direction_func,
			&setlist, &showlist);

  /* Set/show detach-on-fork: user-settable mode. */

  add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
Set whether gdb will detach the child of a fork."), _("\
Show whether gdb will detach the child of a fork."), _("\
Tells gdb whether to detach the child of a fork."),
			   NULL, NULL, &setlist, &showlist);

  /* Set/show disable address space randomization mode. */

  add_setshow_boolean_cmd ("disable-randomization", class_support,
			   &disable_randomization, _("\
Set disabling of debuggee's virtual address space randomization."), _("\
Show disabling of debuggee's virtual address space randomization."), _("\
When this mode is on (which is the default), randomization of the virtual\n\
address space is disabled. Standalone programs run with the randomization\n\
enabled by default on some platforms."),
			   &set_disable_randomization,
			   &show_disable_randomization,
			   &setlist, &showlist);

  /* ptid initializations */
  inferior_ptid = null_ptid;
  target_last_wait_ptid = minus_one_ptid;

  /* Keep infrun's per-thread/per-inferior bookkeeping in sync with
     thread and inferior lifetime events.  */
  gdb::observers::thread_ptid_changed.attach (infrun_thread_ptid_changed);
  gdb::observers::thread_stop_requested.attach (infrun_thread_stop_requested);
  gdb::observers::thread_exit.attach (infrun_thread_thread_exit);
  gdb::observers::inferior_exit.attach (infrun_inferior_exit);

  /* Explicitly create without lookup, since that tries to create a
     value with a void typed value, and when we get here, gdbarch
     isn't initialized yet. At this point, we're quite sure there
     isn't another convenience variable of the same name. */
  create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);

  add_setshow_boolean_cmd ("observer", no_class,
			   &observer_mode_1, _("\
Set whether gdb controls the inferior in observer mode."), _("\
Show whether gdb controls the inferior in observer mode."), _("\
In observer mode, GDB can get data from the inferior, but not\n\
affect its execution. Registers and memory may not be changed,\n\
breakpoints may not be set, and the program cannot be interrupted\n\
or signalled."),
			   set_observer_mode,
			   show_observer_mode,
			   &setlist,
			   &showlist);

#if GDB_SELF_TEST
  selftests::register_test ("infrun_thread_ptid_changed",
			    selftests::infrun_thread_ptid_changed);
#endif
}