/* Target-struct-independent code to start (run) and stop an inferior
   process.

   Copyright (C) 1986-2025 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "cli/cli-cmds.h"
#include "cli/cli-style.h"
#include "displaced-stepping.h"
#include "infrun.h"
#include <ctype.h>
#include "exceptions.h"
#include "symtab.h"
#include "frame.h"
#include "inferior.h"
#include "breakpoint.h"
#include "gdbcore.h"
#include "target.h"
#include "target-connection.h"
#include "gdbthread.h"
#include "annotate.h"
#include "symfile.h"
#include "top.h"
#include "ui.h"
#include "inf-loop.h"
#include "regcache.h"
#include "value.h"
#include "observable.h"
#include "language.h"
#include "solib.h"
#include "main.h"
#include "block.h"
#include "mi/mi-common.h"
#include "event-top.h"
#include "record.h"
#include "record-full.h"
#include "inline-frame.h"
#include "jit.h"
#include "tracepoint.h"
#include "skip.h"
#include "probe.h"
#include "objfiles.h"
#include "completer.h"
#include "target-descriptions.h"
#include "target-dcache.h"
#include "terminal.h"
#include "gdbsupport/event-loop.h"
#include "thread-fsm.h"
#include "gdbsupport/enum-flags.h"
#include "progspace-and-thread.h"
#include <optional>
#include "arch-utils.h"
#include "gdbsupport/scope-exit.h"
#include "gdbsupport/forward-scope-exit.h"
#include "gdbsupport/gdb_select.h"
#include <unordered_map>
#include "async-event.h"
#include "gdbsupport/selftest.h"
#include "scoped-mock-context.h"
#include "test-target.h"
#include "gdbsupport/common-debug.h"
#include "gdbsupport/buildargv.h"
#include "extension.h"
#include "disasm.h"
#include "interps.h"
/* Prototypes for local functions.  */

static void sig_print_info (enum gdb_signal);

static void sig_print_header (void);

static void follow_inferior_reset_breakpoints (void);

static bool currently_stepping (struct thread_info *tp);

static void insert_hp_step_resume_breakpoint_at_frame (const frame_info_ptr &);

static void insert_step_resume_breakpoint_at_caller (const frame_info_ptr &);

static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);

static bool maybe_software_singlestep (struct gdbarch *gdbarch);

static void resume (gdb_signal sig);

static void wait_for_inferior (inferior *inf);

static void restart_threads (struct thread_info *event_thread,
			     inferior *inf = nullptr);

static bool start_step_over (void);

static bool step_over_info_valid_p (void);

static bool schedlock_applies (struct thread_info *tp);

/* Asynchronous signal handler registered as event loop source for
   when we have pending events ready to be passed to the core.  */
static struct async_event_handler *infrun_async_inferior_event_token;

/* Stores whether infrun_async was previously enabled or disabled.
   Starts off as -1, indicating "never enabled/disabled".  */
static int infrun_is_async = -1;
static CORE_ADDR update_line_range_start (CORE_ADDR pc,
					  struct execution_control_state *ecs);

/* See infrun.h.  */

void
infrun_async (int enable)
{
  if (infrun_is_async != enable)
    {
      infrun_is_async = enable;

      infrun_debug_printf ("enable=%d", enable);

      if (enable)
	mark_async_event_handler (infrun_async_inferior_event_token);
      else
	clear_async_event_handler (infrun_async_inferior_event_token);
    }
}

/* See infrun.h.  */

void
mark_infrun_async_event_handler (void)
{
  mark_async_event_handler (infrun_async_inferior_event_token);
}

/* When set, stop the 'step' command if we enter a function which has
   no line number information.  The normal behavior is that we step
   over such functions.  */
bool step_stop_if_no_debug = false;
static void
show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Mode of the step operation is %s.\n"), value);
}

/* proceed and normal_stop use this to notify the user when the
   inferior stopped in a different thread than it had been running in.
   It can also be used to find for which thread normal_stop last
   reported a stop.  */
static thread_info_ref previous_thread;

/* See infrun.h.  */

void
update_previous_thread ()
{
  if (inferior_ptid == null_ptid)
    previous_thread = nullptr;
  else
    previous_thread = thread_info_ref::new_reference (inferior_thread ());
}

/* See infrun.h.  */

thread_info *
get_previous_thread ()
{
  return previous_thread.get ();
}

/* If set (default for legacy reasons), when following a fork, GDB
   will detach from one of the fork branches, child or parent.
   Exactly which branch is detached depends on the 'set
   follow-fork-mode' setting.  */

static bool detach_fork = true;

bool debug_infrun = false;
static void
show_debug_infrun (struct ui_file *file, int from_tty,
		   struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Inferior debugging is %s.\n"), value);
}

/* Support for disabling address space randomization.  */

bool disable_randomization = true;

static void
show_disable_randomization (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  if (target_supports_disable_randomization ())
    gdb_printf (file,
		_("Disabling randomization of debuggee's "
		  "virtual address space is %s.\n"),
		value);
  else
    gdb_puts (_("Disabling randomization of debuggee's "
		"virtual address space is unsupported on\n"
		"this platform.\n"), file);
}

static void
set_disable_randomization (const char *args, int from_tty,
			   struct cmd_list_element *c)
{
  if (!target_supports_disable_randomization ())
    error (_("Disabling randomization of debuggee's "
	     "virtual address space is unsupported on\n"
	     "this platform."));
}
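/* A usage sketch (illustrative, not additional behavior): the flag
   above backs the "set disable-randomization" command and only takes
   effect for inferiors started afterwards, e.g.:

     (gdb) set disable-randomization off
     (gdb) run

   On targets without the capability, set_disable_randomization above
   rejects the change with an error instead.  */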
/* User interface for non-stop mode.  */

bool non_stop = false;
static bool non_stop_1 = false;

static void
set_non_stop (const char *args, int from_tty,
	      struct cmd_list_element *c)
{
  if (target_has_execution ())
    {
      non_stop_1 = non_stop;
      error (_("Cannot change this setting while the inferior is running."));
    }

  non_stop = non_stop_1;
}

static void
show_non_stop (struct ui_file *file, int from_tty,
	       struct cmd_list_element *c, const char *value)
{
  gdb_printf (file,
	      _("Controlling the inferior in non-stop mode is %s.\n"),
	      value);
}
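/* A usage sketch (illustrative): since set_non_stop above refuses
   changes while the inferior is running, the mode has to be chosen
   before starting the program, e.g.:

     (gdb) set non-stop on
     (gdb) run

   after which individual threads can be stopped and examined while
   the others keep running.  */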
/* "Observer mode" is somewhat like a more extreme version of
   non-stop, in which all GDB operations that might affect the
   target's execution have been disabled.  */

static bool observer_mode = false;
static bool observer_mode_1 = false;

static void
set_observer_mode (const char *args, int from_tty,
		   struct cmd_list_element *c)
{
  if (target_has_execution ())
    {
      observer_mode_1 = observer_mode;
      error (_("Cannot change this setting while the inferior is running."));
    }

  observer_mode = observer_mode_1;

  may_write_registers = !observer_mode;
  may_write_memory = !observer_mode;
  may_insert_breakpoints = !observer_mode;
  may_insert_tracepoints = !observer_mode;
  /* We can insert fast tracepoints in or out of observer mode,
     but enable them if we're going into this mode.  */
  if (observer_mode)
    may_insert_fast_tracepoints = true;
  may_stop = !observer_mode;
  update_target_permissions ();

  /* Going *into* observer mode we must force non-stop, then
     going out we leave it that way.  */
  if (observer_mode)
    {
      pagination_enabled = false;
      non_stop = non_stop_1 = true;
    }

  if (from_tty)
    gdb_printf (_("Observer mode is now %s.\n"),
		(observer_mode ? "on" : "off"));
}

static void
show_observer_mode (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Observer mode is %s.\n"), value);
}

/* This updates the value of observer mode based on changes in
   permissions.  Note that we are deliberately ignoring the values of
   may-write-registers and may-write-memory, since the user may have
   reason to enable these during a session, for instance to turn on a
   debugging-related global.  */

void
update_observer_mode (void)
{
  bool newval = (!may_insert_breakpoints
		 && !may_insert_tracepoints
		 && may_insert_fast_tracepoints
		 && !may_stop
		 && non_stop);

  /* Let the user know if things change.  */
  if (newval != observer_mode)
    gdb_printf (_("Observer mode is now %s.\n"),
		(newval ? "on" : "off"));

  observer_mode = observer_mode_1 = newval;
}
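/* Worked example of the derivation above (illustrative): after

     (gdb) set may-insert-breakpoints off
     (gdb) set may-insert-tracepoints off
     (gdb) set may-stop off

   with may-insert-fast-tracepoints left on and non-stop enabled,
   NEWVAL becomes true and "Observer mode is now on." is printed;
   flipping any one of those permissions back turns it off again.  */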
/* Tables of how to react to signals; the user sets them.  */

static unsigned char signal_stop[GDB_SIGNAL_LAST];
static unsigned char signal_print[GDB_SIGNAL_LAST];
static unsigned char signal_program[GDB_SIGNAL_LAST];

/* Table of signals that are registered with "catch signal".  A
   non-zero entry indicates that the signal is caught by some "catch
   signal" command.  */
static unsigned char signal_catch[GDB_SIGNAL_LAST];

/* Table of signals that the target may silently handle.
   This is automatically determined from the flags above,
   and simply cached here.  */
static unsigned char signal_pass[GDB_SIGNAL_LAST];

#define SET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
	(flags)[signum] = 1; \
  } while (0)

#define UNSET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
	(flags)[signum] = 0; \
  } while (0)
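/* An illustrative expansion of the macros above (a sketch): given an
   array SIGS of flags parsed from user arguments, a caller such as
   the "handle" command can mark every selected signal as one that
   stops the program with

     SET_SIGS (nsigs, sigs, signal_stop);

   and undo the same selection with UNSET_SIGS.  Both expand to a
   simple backwards walk over the first NSIGS entries of SIGS.  */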
/* Update the target's copy of SIGNAL_PROGRAM.  The sole purpose of
   this function is to avoid exporting `signal_program'.  */

void
update_signals_program_target (void)
{
  target_program_signals (signal_program);
}

/* Value to pass to target_resume() to cause all threads to resume.  */

#define RESUME_ALL minus_one_ptid

/* Command list pointer for the "stop" placeholder.  */

static struct cmd_list_element *stop_command;

/* Nonzero if we want to give control to the user when we're notified
   of shared library events by the dynamic linker.  */
int stop_on_solib_events;

/* Enable or disable optional shared library event breakpoints
   as appropriate when the above flag is changed.  */

static void
set_stop_on_solib_events (const char *args,
			  int from_tty, struct cmd_list_element *c)
{
  update_solib_breakpoints ();
}

static void
show_stop_on_solib_events (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Stopping for shared library events is %s.\n"),
	      value);
}

/* True after stop if current stack frame should be printed.  */

static bool stop_print_frame;

/* This is a cached copy of the target/ptid/waitstatus of the last
   event returned by target_wait().
   This information is returned by get_last_target_status().  */
static process_stratum_target *target_last_proc_target;
static ptid_t target_last_wait_ptid;
static struct target_waitstatus target_last_waitstatus;

void init_thread_stepping_state (struct thread_info *tss);

static const char follow_fork_mode_child[] = "child";
static const char follow_fork_mode_parent[] = "parent";

static const char *const follow_fork_mode_kind_names[] = {
  follow_fork_mode_child,
  follow_fork_mode_parent,
  nullptr
};

static const char *follow_fork_mode_string = follow_fork_mode_parent;
static void
show_follow_fork_mode_string (struct ui_file *file, int from_tty,
			      struct cmd_list_element *c, const char *value)
{
  gdb_printf (file,
	      _("Debugger response to a program "
		"call of fork or vfork is \"%s\".\n"),
	      value);
}
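/* A usage sketch (illustrative): with the default "parent" above, GDB
   stays with the original process across a fork.  To debug the child
   instead, or to keep both processes as separate inferiors, the knobs
   combine as e.g.:

     (gdb) set follow-fork-mode child
     (gdb) set detach-on-fork off

   which follows the child while leaving the parent attached.  */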
/* Handle changes to the inferior list based on the type of fork,
   which process is being followed, and whether the other process
   should be detached.  On entry inferior_ptid must be the ptid of
   the fork parent.  At return inferior_ptid is the ptid of the
   followed inferior.  */

static bool
follow_fork_inferior (bool follow_child, bool detach_fork)
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  infrun_debug_printf ("follow_child = %d, detach_fork = %d",
		       follow_child, detach_fork);

  target_waitkind fork_kind = inferior_thread ()->pending_follow.kind ();
  gdb_assert (fork_kind == TARGET_WAITKIND_FORKED
	      || fork_kind == TARGET_WAITKIND_VFORKED);
  bool has_vforked = fork_kind == TARGET_WAITKIND_VFORKED;
  ptid_t parent_ptid = inferior_ptid;
  ptid_t child_ptid = inferior_thread ()->pending_follow.child_ptid ();

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && current_ui->prompt_state == PROMPT_BLOCKED
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
	 child execs or exits.  If we don't let the child run, then
	 the parent stays blocked.  If we're telling the parent to run
	 in the foreground, the user will not be able to ctrl-c to get
	 back the terminal, effectively hanging the debug session.  */
      gdb_printf (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"%ps\" or \"%ps\".\n"),
		  styled_string (command_style.style (), "set detach-on-fork"),
		  styled_string (command_style.style (),
				 "set schedule-multiple"));
      return true;
    }

  inferior *parent_inf = current_inferior ();
  inferior *child_inf = nullptr;
  bool child_has_new_pspace = false;

  gdb_assert (parent_inf->thread_waiting_for_vfork_done == nullptr);

  if (!follow_child)
    {
      /* Detach new forked process?  */
      if (detach_fork)
	{
	  /* Before detaching from the child, remove all breakpoints
	     from it.  If we forked, then this has already been taken
	     care of by infrun.c.  If we vforked however, any
	     breakpoint inserted in the parent is visible in the
	     child, even those added while stopped in a vfork
	     catchpoint.  This will remove the breakpoints from the
	     parent also, but they'll be reinserted below.  */
	  if (has_vforked)
	    {
	      /* Keep breakpoints list in sync.  */
	      remove_breakpoints_inf (current_inferior ());
	    }

	  if (print_inferior_events)
	    {
	      /* Ensure that we have a process ptid.  */
	      ptid_t process_ptid = ptid_t (child_ptid.pid ());

	      target_terminal::ours_for_output ();
	      gdb_printf (_("[Detaching after %s from child %s]\n"),
			  has_vforked ? "vfork" : "fork",
			  target_pid_to_str (process_ptid).c_str ());
	    }
	}
      else
	{
	  /* Add process to GDB's tables.  */
	  child_inf = add_inferior (child_ptid.pid ());

	  child_inf->attach_flag = parent_inf->attach_flag;
	  copy_terminal_info (child_inf, parent_inf);
	  child_inf->set_arch (parent_inf->arch ());
	  child_inf->tdesc_info = parent_inf->tdesc_info;

	  child_inf->symfile_flags = SYMFILE_NO_READ;

	  /* If this is a vfork child, then the address-space is
	     shared with the parent.  */
	  if (has_vforked)
	    {
	      child_inf->pspace = parent_inf->pspace;
	      child_inf->aspace = parent_inf->aspace;

	      exec_on_vfork (child_inf);

	      /* The parent will be frozen until the child is done
		 with the shared region.  Keep track of the
		 parent.  */
	      child_inf->vfork_parent = parent_inf;
	      child_inf->pending_detach = false;
	      parent_inf->vfork_child = child_inf;
	      parent_inf->pending_detach = false;
	    }
	  else
	    {
	      child_inf->pspace = new program_space (new_address_space ());
	      child_has_new_pspace = true;
	      child_inf->aspace = child_inf->pspace->aspace;
	      child_inf->removable = true;
	      clone_program_space (child_inf->pspace, parent_inf->pspace);
	    }
	}

      if (has_vforked)
	{
	  /* If we detached from the child, then we have to be careful
	     to not insert breakpoints in the parent until the child
	     is done with the shared memory region.  However, if we're
	     staying attached to the child, then we can and should
	     insert breakpoints, so that we can debug it.  A
	     subsequent child exec or exit is enough to know when the
	     child stops using the parent's address space.  */
	  parent_inf->thread_waiting_for_vfork_done
	    = detach_fork ? inferior_thread () : nullptr;
	  parent_inf->pspace->breakpoints_not_allowed = detach_fork;

	  infrun_debug_printf
	    ("parent_inf->thread_waiting_for_vfork_done == %s",
	     (parent_inf->thread_waiting_for_vfork_done == nullptr
	      ? "nullptr"
	      : (parent_inf->thread_waiting_for_vfork_done
		 ->ptid.to_string ().c_str ())));
	}
    }
  else
    {
      /* Follow the child.  */

      if (print_inferior_events)
	{
	  std::string parent_pid = target_pid_to_str (parent_ptid);
	  std::string child_pid = target_pid_to_str (child_ptid);

	  target_terminal::ours_for_output ();
	  gdb_printf (_("[Attaching after %s %s to child %s]\n"),
		      parent_pid.c_str (),
		      has_vforked ? "vfork" : "fork",
		      child_pid.c_str ());
	}

      /* Add the new inferior first, so that the target_detach below
	 doesn't unpush the target.  */

      child_inf = add_inferior (child_ptid.pid ());

      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);
      child_inf->set_arch (parent_inf->arch ());
      child_inf->tdesc_info = parent_inf->tdesc_info;

      if (has_vforked)
	{
	  /* If this is a vfork child, then the address-space is shared
	     with the parent.  */
	  child_inf->aspace = parent_inf->aspace;
	  child_inf->pspace = parent_inf->pspace;

	  exec_on_vfork (child_inf);
	}
      else if (detach_fork)
	{
	  /* We follow the child and detach from the parent: move the parent's
	     program space to the child.  This simplifies some things, like
	     doing "next" over fork() and landing on the expected line in the
	     child (note, that is broken with "set detach-on-fork off").

	     Before assigning brand new spaces for the parent, remove
	     breakpoints from it: because the new pspace won't match
	     currently inserted locations, the normal detach procedure
	     wouldn't remove them, and we would leave them inserted when
	     detaching.  */
	  remove_breakpoints_inf (parent_inf);

	  child_inf->aspace = parent_inf->aspace;
	  child_inf->pspace = parent_inf->pspace;
	  parent_inf->pspace = new program_space (new_address_space ());
	  parent_inf->aspace = parent_inf->pspace->aspace;
	  clone_program_space (parent_inf->pspace, child_inf->pspace);

	  /* The parent inferior is still the current one, so keep things
	     in sync.  */
	  set_current_program_space (parent_inf->pspace);
	}
      else
	{
	  child_inf->pspace = new program_space (new_address_space ());
	  child_has_new_pspace = true;
	  child_inf->aspace = child_inf->pspace->aspace;
	  child_inf->removable = true;
	  child_inf->symfile_flags = SYMFILE_NO_READ;
	  clone_program_space (child_inf->pspace, parent_inf->pspace);
	}
    }

  gdb_assert (current_inferior () == parent_inf);

  /* If we are setting up an inferior for the child, target_follow_fork is
     responsible for pushing the appropriate targets on the new inferior's
     target stack and adding the initial thread (with ptid CHILD_PTID).

     If we are not setting up an inferior for the child (because following
     the parent and detach_fork is true), it is responsible for detaching
     from CHILD_PTID.  */
  target_follow_fork (child_inf, child_ptid, fork_kind, follow_child,
		      detach_fork);

  gdb::observers::inferior_forked.notify (parent_inf, child_inf, fork_kind);

  /* target_follow_fork must leave the parent as the current inferior.  If we
     want to follow the child, we make it the current one below.  */
  gdb_assert (current_inferior () == parent_inf);

  /* If there is a child inferior, target_follow_fork must have created a
     thread for it.  */
  if (child_inf != nullptr)
    gdb_assert (!child_inf->thread_list.empty ());

  /* Clear the parent thread's pending follow field.  Do this before calling
     target_detach, so that the target can differentiate the two following
     cases:

      - We continue past a fork with "follow-fork-mode == child" &&
	"detach-on-fork on", and therefore detach the parent.  In that
	case the target should not detach the fork child.
      - We run to a fork catchpoint and the user types "detach".  In that
	case, the target should detach the fork child in addition to the
	parent.

     The former case will have pending_follow cleared, the latter will have
     pending_follow set.  */
  thread_info *parent_thread = parent_inf->find_thread (parent_ptid);
  gdb_assert (parent_thread != nullptr);
  parent_thread->pending_follow.set_spurious ();

  /* Detach the parent if needed.  */
  if (follow_child)
    {
      /* If we're vforking, we want to hold on to the parent until
	 the child exits or execs.  At child exec or exit time we
	 can remove the old breakpoints from the parent and detach
	 or resume debugging it.  Otherwise, detach the parent now;
	 we'll want to reuse its program/address spaces, but we
	 can't set them to the child before removing breakpoints
	 from the parent, otherwise, the breakpoints module could
	 decide to remove breakpoints from the wrong process (since
	 they'd be assigned to the same address space).  */

      if (has_vforked)
	{
	  gdb_assert (child_inf->vfork_parent == nullptr);
	  gdb_assert (parent_inf->vfork_child == nullptr);
	  child_inf->vfork_parent = parent_inf;
	  child_inf->pending_detach = false;
	  parent_inf->vfork_child = child_inf;
	  parent_inf->pending_detach = detach_fork;
	}
      else if (detach_fork)
	{
	  if (print_inferior_events)
	    {
	      /* Ensure that we have a process ptid.  */
	      ptid_t process_ptid = ptid_t (parent_ptid.pid ());

	      target_terminal::ours_for_output ();
	      gdb_printf (_("[Detaching after fork from "
			    "parent %s]\n"),
			  target_pid_to_str (process_ptid).c_str ());
	    }

	  target_detach (parent_inf, 0);
	}
    }

  /* If we ended up creating a new inferior, call post_create_inferior to
     inform the various subcomponents.  */
  if (child_inf != nullptr)
    {
      /* If FOLLOW_CHILD, we leave CHILD_INF as the current inferior
	 (do not restore the parent as the current inferior).  */
      std::optional<scoped_restore_current_thread> maybe_restore;

      if (!follow_child && !sched_multi)
	maybe_restore.emplace ();

      switch_to_thread (*child_inf->threads ().begin ());

      post_create_inferior (0, child_has_new_pspace);
    }

  return false;
}

/* Set the last target status as TP having stopped.  */

static void
set_last_target_status_stopped (thread_info *tp)
{
  set_last_target_status (tp->inf->process_target (), tp->ptid,
			  target_waitstatus {}.set_stopped (GDB_SIGNAL_0));
}
/* Tell the target to follow the fork we're stopped at.  Returns true
   if the inferior should be resumed; false, if the target for some
   reason decided it's best not to resume.  */

static bool
follow_fork ()
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  bool follow_child = (follow_fork_mode_string == follow_fork_mode_child);
  bool should_resume = true;

  /* Copy user stepping state to the new inferior thread.  FIXME: the
     followed fork child thread should have a copy of most of the
     parent thread structure's run control related fields, not just these.
     Initialized to avoid "may be used uninitialized" warnings from gcc.  */
  struct breakpoint *step_resume_breakpoint = nullptr;
  struct breakpoint *exception_resume_breakpoint = nullptr;
  CORE_ADDR step_range_start = 0;
  CORE_ADDR step_range_end = 0;
  int current_line = 0;
  symtab *current_symtab = nullptr;
  struct frame_id step_frame_id = { 0 };

  if (!non_stop)
    {
      thread_info *cur_thr = inferior_thread ();

      ptid_t resume_ptid
	= user_visible_resume_ptid (cur_thr->control.stepping_command);
      process_stratum_target *resume_target
	= user_visible_resume_target (resume_ptid);

      /* Check if there's a thread that we're about to resume, other
	 than the current, with an unfollowed fork/vfork.  If so,
	 switch back to it, to tell the target to follow it (in either
	 direction).  We'll afterwards refuse to resume, and inform
	 the user what happened.  */
      for (thread_info *tp : all_non_exited_threads (resume_target,
						     resume_ptid))
	{
	  if (tp == cur_thr)
	    continue;

	  /* follow_fork_inferior clears tp->pending_follow, and below
	     we'll need the value after the follow_fork_inferior
	     call.  */
	  target_waitkind kind = tp->pending_follow.kind ();

	  if (kind != TARGET_WAITKIND_SPURIOUS)
	    {
	      infrun_debug_printf ("need to follow-fork [%s] first",
				   tp->ptid.to_string ().c_str ());

	      switch_to_thread (tp);

	      /* Set up inferior(s) as specified by the caller, and
		 tell the target to do whatever is necessary to follow
		 either parent or child.  */
	      if (follow_child)
		{
		  /* The thread that started the execution command
		     won't exist in the child.  Abort the command and
		     immediately stop in this thread, in the child,
		     inside fork.  */
		  should_resume = false;
		}
	      else
		{
		  /* Following the parent, so let the thread fork its
		     child freely, it won't influence the current
		     execution command.  */
		  if (follow_fork_inferior (follow_child, detach_fork))
		    {
		      /* Target refused to follow, or there's some
			 other reason we shouldn't resume.  */
		      switch_to_thread (cur_thr);
		      set_last_target_status_stopped (cur_thr);
		      return false;
		    }

		  /* If we're following a vfork, we need to leave the
		     just-forked thread selected, as we must
		     solo-resume it to collect the VFORK_DONE event.
		     If we're following a fork, however, switch back
		     to the original thread so that we continue
		     stepping it, etc.  */
		  if (kind != TARGET_WAITKIND_VFORKED)
		    {
		      gdb_assert (kind == TARGET_WAITKIND_FORKED);
		      switch_to_thread (cur_thr);
		    }
		}

	      break;
	    }
	}
    }

  thread_info *tp = inferior_thread ();

  /* If there were any forks/vforks that were caught and are now to be
     followed, then do so now.  */
  switch (tp->pending_follow.kind ())
    {
    case TARGET_WAITKIND_FORKED:
    case TARGET_WAITKIND_VFORKED:
      {
	ptid_t parent, child;
	std::unique_ptr<struct thread_fsm> thread_fsm;

	/* If the user did a next/step, etc., over a fork call,
	   preserve the stepping state in the fork child.  */
	if (follow_child && should_resume)
	  {
	    step_resume_breakpoint = clone_momentary_breakpoint
	      (tp->control.step_resume_breakpoint);
	    step_range_start = tp->control.step_range_start;
	    step_range_end = tp->control.step_range_end;
	    current_line = tp->current_line;
	    current_symtab = tp->current_symtab;
	    step_frame_id = tp->control.step_frame_id;
	    exception_resume_breakpoint
	      = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
	    thread_fsm = tp->release_thread_fsm ();

	    /* For now, delete the parent's sr breakpoint, otherwise,
	       parent/child sr breakpoints are considered duplicates,
	       and the child version will not be installed.  Remove
	       this when the breakpoints module becomes aware of
	       inferiors and address spaces.  */
	    delete_step_resume_breakpoint (tp);
	    tp->control.step_range_start = 0;
	    tp->control.step_range_end = 0;
	    tp->control.step_frame_id = null_frame_id;
	    delete_exception_resume_breakpoint (tp);
	  }

	parent = inferior_ptid;
	child = tp->pending_follow.child_ptid ();

	/* If handling a vfork, stop all the inferior's threads, they will be
	   restarted when the vfork shared region is complete.  */
	if (tp->pending_follow.kind () == TARGET_WAITKIND_VFORKED
	    && target_is_non_stop_p ())
	  stop_all_threads ("handling vfork", tp->inf);

	process_stratum_target *parent_targ = tp->inf->process_target ();
	/* Set up inferior(s) as specified by the caller, and tell the
	   target to do whatever is necessary to follow either parent
	   or child.  */
	if (follow_fork_inferior (follow_child, detach_fork))
	  {
	    /* Target refused to follow, or there's some other reason
	       we shouldn't resume.  */
	    should_resume = false;
	  }
	else
	  {
	    /* If we followed the child, switch to it...  */
	    if (follow_child)
	      {
		tp = parent_targ->find_thread (child);
		switch_to_thread (tp);

		/* ... and preserve the stepping state, in case the
		   user was stepping over the fork call.  */
		if (should_resume)
		  {
		    tp->control.step_resume_breakpoint
		      = step_resume_breakpoint;
		    tp->control.step_range_start = step_range_start;
		    tp->control.step_range_end = step_range_end;
		    tp->current_line = current_line;
		    tp->current_symtab = current_symtab;
		    tp->control.step_frame_id = step_frame_id;
		    tp->control.exception_resume_breakpoint
		      = exception_resume_breakpoint;
		    tp->set_thread_fsm (std::move (thread_fsm));
		  }
		else
		  {
		    /* If we get here, it was because we're trying to
		       resume from a fork catchpoint, but the user
		       has switched threads away from the thread that
		       forked.  In that case, the resume command
		       issued is most likely not applicable to the
		       child, so just warn, and refuse to resume.  */
		    warning (_("Not resuming: switched threads "
			       "before following fork child."));
		  }

		/* Reset breakpoints in the child as appropriate.  */
		follow_inferior_reset_breakpoints ();
	      }
	  }
      }
      break;
    case TARGET_WAITKIND_SPURIOUS:
      /* Nothing to follow.  */
      break;
    default:
      internal_error ("Unexpected pending_follow.kind %d\n",
		      tp->pending_follow.kind ());
      break;
    }

  if (!should_resume)
    set_last_target_status_stopped (tp);
  return should_resume;
}
static void
follow_inferior_reset_breakpoints (void)
{
  struct thread_info *tp = inferior_thread ();

  /* Was there a step_resume breakpoint?  (There was if the user
     did a "next" at the fork() call.)  If so, explicitly reset its
     thread number.  Cloned step_resume breakpoints are disabled on
     creation, so enable it here now that it is associated with the
     correct thread.

     step_resumes are a form of bp that are made to be per-thread.
     Since we created the step_resume bp when the parent process
     was being debugged, and now are switching to the child process,
     from the breakpoint package's viewpoint, that's a switch of
     "threads".  We must update the bp's notion of which thread
     it is for, or it'll be ignored when it triggers.  */

  if (tp->control.step_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
      tp->control.step_resume_breakpoint->first_loc ().enabled = 1;
    }

  /* Treat exception_resume breakpoints like step_resume breakpoints.  */
  if (tp->control.exception_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
      tp->control.exception_resume_breakpoint->first_loc ().enabled = 1;
    }

  /* Reinsert all breakpoints in the child.  The user may have set
     breakpoints after catching the fork, in which case those
     were never set in the child, but only in the parent.  This makes
     sure the inserted breakpoints match the breakpoint list.  */

  breakpoint_re_set ();
  insert_breakpoints ();
}
/* The child has exited or execed: resume THREAD, a thread of the parent,
   if it was meant to be executing.  */

static void
proceed_after_vfork_done (thread_info *thread)
{
  if (thread->state == THREAD_RUNNING
      && !thread->executing ()
      && !thread->stop_requested
      && thread->stop_signal () == GDB_SIGNAL_0)
    {
      infrun_debug_printf ("resuming vfork parent thread %s",
			   thread->ptid.to_string ().c_str ());

      switch_to_thread (thread);
      clear_proceed_status (0);
      proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
    }
}
/* Called whenever we notice an exec or exit event, to handle
   detaching or resuming a vfork parent.  */

static void
handle_vfork_child_exec_or_exit (int exec)
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  struct inferior *inf = current_inferior ();

  if (inf->vfork_parent)
    {
      inferior *resume_parent = nullptr;

      /* This exec or exit marks the end of the shared memory region
	 between the parent and the child.  Break the bonds.  */
      inferior *vfork_parent = inf->vfork_parent;
      inf->vfork_parent->vfork_child = nullptr;
      inf->vfork_parent = nullptr;

      /* If the user wanted to detach from the parent, now is the
	 time.  */
      if (vfork_parent->pending_detach)
	{
	  struct program_space *pspace;

	  /* follow-fork child, detach-on-fork on.  */

	  vfork_parent->pending_detach = false;

	  scoped_restore_current_pspace_and_thread restore_thread;

	  /* We're letting go of the parent.  */
	  thread_info *tp = any_live_thread_of_inferior (vfork_parent);
	  switch_to_thread (tp);

	  /* We're about to detach from the parent, which implicitly
	     removes breakpoints from its address space.  There's a
	     catch here: we want to reuse the spaces for the child,
	     but, parent/child are still sharing the pspace at this
	     point, although the exec in reality makes the kernel give
	     the child a fresh set of new pages.  The problem here is
	     that the breakpoints module, being unaware of this, would
	     likely choose the child process to write to the parent
	     address space.  Swapping the child temporarily away from
	     the spaces has the desired effect.  Yes, this is "sort
	     of" a hack.  */

	  pspace = inf->pspace;
	  inf->pspace = nullptr;
	  address_space_ref_ptr aspace = std::move (inf->aspace);

	  if (print_inferior_events)
	    {
	      std::string pidstr
		= target_pid_to_str (ptid_t (vfork_parent->pid));

	      target_terminal::ours_for_output ();

	      if (exec)
		{
		  gdb_printf (_("[Detaching vfork parent %s "
				"after child exec]\n"), pidstr.c_str ());
		}
	      else
		{
		  gdb_printf (_("[Detaching vfork parent %s "
				"after child exit]\n"), pidstr.c_str ());
		}
	    }

	  target_detach (vfork_parent, 0);

	  /* Put it back.  */
	  inf->pspace = pspace;
	  inf->aspace = aspace;
	}
      else if (exec)
	{
	  /* We're staying attached to the parent, so, really give the
	     child a new address space.  */
	  inf->pspace = new program_space (maybe_new_address_space ());
	  inf->aspace = inf->pspace->aspace;
	  inf->removable = true;
	  set_current_program_space (inf->pspace);

	  resume_parent = vfork_parent;
	}
      else
	{
	  /* If this is a vfork child exiting, then the pspace and
	     aspaces were shared with the parent.  Since we're
	     reporting the process exit, we'll be mourning all that is
	     found in the address space, and switching to null_ptid,
	     preparing to start a new inferior.  But, since we don't
	     want to clobber the parent's address/program spaces, we
	     go ahead and create a new one for this exiting
	     inferior.  */

	  scoped_restore_current_thread restore_thread;

	  /* Temporarily switch to the vfork parent, to facilitate ptrace
	     calls done during maybe_new_address_space.  */
	  switch_to_thread (any_live_thread_of_inferior (vfork_parent));
	  address_space_ref_ptr aspace = maybe_new_address_space ();

	  /* Switch back to the vfork child inferior.  Switch to no-thread
	     while running clone_program_space, so that clone_program_space
	     doesn't want to read the selected frame of a dead process.  */
	  switch_to_inferior_no_thread (inf);

	  inf->pspace = new program_space (std::move (aspace));
	  inf->aspace = inf->pspace->aspace;
	  set_current_program_space (inf->pspace);
	  inf->removable = true;
	  inf->symfile_flags = SYMFILE_NO_READ;
	  clone_program_space (inf->pspace, vfork_parent->pspace);

	  resume_parent = vfork_parent;
	}

      gdb_assert (current_program_space == inf->pspace);

      if (non_stop && resume_parent != nullptr)
	{
	  /* If the user wanted the parent to be running, let it go
	     free now.  */
	  scoped_restore_current_thread restore_thread;

	  infrun_debug_printf ("resuming vfork parent process %d",
			       resume_parent->pid);

	  for (thread_info *thread : resume_parent->threads ())
	    proceed_after_vfork_done (thread);
	}
    }
}
/* Handle TARGET_WAITKIND_VFORK_DONE.  */

static void
handle_vfork_done (thread_info *event_thread)
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  /* We only care about this event if inferior::thread_waiting_for_vfork_done
     is set, that is if we are waiting for a vfork child not under our control
     (because we detached it) to exec or exit.

     If an inferior has vforked and we are debugging the child, we don't use
     the vfork-done event to get notified about the end of the shared address
     space window.  We rely instead on the child's exec or exit event, and the
     inferior::vfork_{parent,child} fields are used instead.  See
     handle_vfork_child_exec_or_exit for that.  */
  if (event_thread->inf->thread_waiting_for_vfork_done == nullptr)
    {
      infrun_debug_printf ("not waiting for a vfork-done event");
      return;
    }

  /* We stopped all threads (other than the vforking thread) of the inferior
     in follow_fork and kept them stopped until now.  It should therefore not
     be possible for another thread to have reported a vfork during that
     window.  If THREAD_WAITING_FOR_VFORK_DONE is set, it has to be the same
     thread whose vfork-done we are handling right now.  */
  gdb_assert (event_thread->inf->thread_waiting_for_vfork_done == event_thread);

  event_thread->inf->thread_waiting_for_vfork_done = nullptr;
  event_thread->inf->pspace->breakpoints_not_allowed = 0;

  /* On non-stop targets, we stopped all the inferior's threads in
     follow_fork, resume them now.  On all-stop targets, everything that needs
     to be resumed will be when we resume the event thread.  */
  if (target_is_non_stop_p ())
    {
      /* restart_threads and start_step_over may change the current thread,
	 make sure we leave the event thread as the current thread.  */
      scoped_restore_current_thread restore_thread;

      insert_breakpoints ();
      start_step_over ();

      if (!step_over_info_valid_p ())
	restart_threads (event_thread, event_thread->inf);
    }
}
/* Enum strings for "set|show follow-exec-mode".  */

static const char follow_exec_mode_new[] = "new";
static const char follow_exec_mode_same[] = "same";
static const char *const follow_exec_mode_names[] =
{
  follow_exec_mode_new,
  follow_exec_mode_same,
  nullptr,
};

static const char *follow_exec_mode_string = follow_exec_mode_same;
static void
show_follow_exec_mode_string (struct ui_file *file, int from_tty,
			      struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Follow exec mode is \"%s\".\n"), value);
}
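/* A usage sketch (illustrative): with the default "same", the current
   inferior keeps its number across an exec.  After

     (gdb) set follow-exec-mode new

   the pre-exec inferior and its program space are left around, and
   execution continues in a fresh inferior, as implemented by
   follow_exec below.  */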
/* EXEC_FILE_TARGET is assumed to be non-NULL.  */

static void
follow_exec (ptid_t ptid, const char *exec_file_target)
{
  int pid = ptid.pid ();
  ptid_t process_ptid;

  /* Switch terminal for any messages produced e.g. by
     breakpoint_re_set.  */
  target_terminal::ours_for_output ();

  /* This is an exec event that we actually wish to pay attention to.
     Refresh our symbol table to the newly exec'd program, remove any
     momentary bp's, etc.

     If there are breakpoints, they aren't really inserted now,
     since the exec() transformed our inferior into a fresh set
     of instructions.

     We want to preserve symbolic breakpoints on the list, since
     we have hopes that they can be reset after the new a.out's
     symbol table is read.

     However, any "raw" breakpoints must be removed from the list
     (e.g., the solib bp's), since their address is probably invalid
     now.

     And, we DON'T want to call delete_breakpoints() here, since
     that may write the bp's "shadow contents" (the instruction
     value that was overwritten with a TRAP instruction).  Since
     we now have a new a.out, those shadow contents aren't valid.  */

  mark_breakpoints_out (current_program_space);

  /* The target reports the exec event to the main thread, even if
     some other thread does the exec, and even if the main thread was
     stopped or already gone.  We may still have non-leader threads of
     the process on our list.  E.g., on targets that don't have thread
     exit events (like remote), nothing forces an update of the thread
     list up to here.  When debugging remotely, it's best to avoid
     extra traffic, when possible, so avoid syncing the thread list
     with the target, and instead go ahead and delete all threads of
     the process but the one that reported the event.  Note this must
     be done before calling update_breakpoints_after_exec, as
     otherwise clearing the threads' resources would reference stale
     thread breakpoints -- it may have been one of these threads that
     stepped across the exec.  We could just clear their stepping
     states, but as long as we're iterating, might as well delete
     them.  Deleting them now rather than at the next user-visible
     stop provides a nicer sequence of events for user and MI
     notifications.  */
  for (thread_info *th : all_threads_safe ())
    if (th->ptid.pid () == pid && th->ptid != ptid)
      delete_thread (th);

  /* We also need to clear any left over stale state for the
     leader/event thread.  E.g., if there was any step-resume
     breakpoint or similar, it's gone now.  We cannot truly
     step-to-next statement through an exec().  */
  thread_info *th = inferior_thread ();
  th->control.step_resume_breakpoint = nullptr;
  th->control.exception_resume_breakpoint = nullptr;
  th->control.single_step_breakpoints = nullptr;
  th->control.step_range_start = 0;
  th->control.step_range_end = 0;

  /* The user may have had the main thread held stopped in the
     previous image (e.g., schedlock on, or non-stop).  Release
     it now.  */
  th->stop_requested = false;

  update_breakpoints_after_exec ();

  /* What is this a.out's name?  */
  process_ptid = ptid_t (pid);
  gdb_printf (_("%s is executing new program: %s\n"),
	      target_pid_to_str (process_ptid).c_str (),
	      exec_file_target);

  /* We've followed the inferior through an exec.  Therefore, the
     inferior has essentially been killed & reborn.  */

  breakpoint_init_inferior (current_inferior (), inf_execd);

  gdb::unique_xmalloc_ptr<char> exec_file_host
    = exec_file_find (exec_file_target, nullptr);

  /* If we were unable to map the executable target pathname onto a host
     pathname, tell the user that.  Otherwise GDB's subsequent behavior
     is confusing.  Maybe it would even be better to stop at this point
     so that the user can specify a file manually before continuing.  */
  if (exec_file_host == nullptr)
    warning (_("Could not load symbols for executable %s.\n"
	       "Do you need \"%ps\"?"),
	     exec_file_target,
	     styled_string (command_style.style (), "set sysroot"));

  /* Reset the shared library package.  This ensures that we get a
     shlib event when the child reaches "_start", at which point the
     dld will have had a chance to initialize the child.  */
  /* Also, loading a symbol file below may trigger symbol lookups, and
     we don't want those to be satisfied by the libraries of the
     previous incarnation of this process.  */
  no_shared_libraries (current_program_space);
  current_program_space->unset_solib_ops ();

  inferior *execing_inferior = current_inferior ();
  inferior *following_inferior;

  if (follow_exec_mode_string == follow_exec_mode_new)
    {
      /* The user wants to keep the old inferior and program spaces
	 around.  Create a new fresh one, and switch to it.  */

      /* Do exit processing for the original inferior before setting the new
	 inferior's pid.  Having two inferiors with the same pid would confuse
	 find_inferior_p(t)id.  Transfer the terminal state and info from the
	 old to the new inferior.  */
      following_inferior = add_inferior_with_spaces ();

      swap_terminal_info (following_inferior, execing_inferior);
      exit_inferior (execing_inferior);

      following_inferior->pid = pid;
    }
  else
    {
      /* follow-exec-mode is "same", we continue execution in the execing
	 inferior.  */
      following_inferior = execing_inferior;
1354 | ||
9107fc8d PA |
1355 | /* The old description may no longer be fit for the new image. |
1356 | E.g., a 64-bit process exec'd a 32-bit process. Clear the |
1357 | old description; we'll read a new one below. No need to do | |
1358 | this on "follow-exec-mode new", as the old inferior stays | |
1359 | around (its description is later cleared/refetched on | |
1360 | restart). */ | |
1361 | target_clear_description (); | |
1362 | } | |
6c95b8df | 1363 | |
4a1283c8 SM |
1364 | target_follow_exec (following_inferior, ptid, exec_file_target); |
1365 | ||
1366 | gdb_assert (current_inferior () == following_inferior); | |
1367 | gdb_assert (current_program_space == following_inferior->pspace); | |
6c95b8df | 1368 | |
ecf45d2c SL |
1369 | /* Attempt to open the exec file. SYMFILE_DEFER_BP_RESET is used |
1370 | because the proper displacement for a PIE (Position Independent | |
1371 | Executable) main symbol file will only be computed by | |
1372 | solib_create_inferior_hook below. breakpoint_re_set would fail | |
1373 | to insert the breakpoints with the zero displacement. */ | |
4a1283c8 SM |
1374 | try_open_exec_file (exec_file_host.get (), following_inferior, |
1375 | SYMFILE_DEFER_BP_RESET); | |
c906108c | 1376 | |
9107fc8d PA |
1377 | /* If the target can specify a description, read it. Must do this |
1378 | after flipping to the new executable (because the target supplied | |
1379 | description must be compatible with the executable's | |
1380 | architecture, and the old executable may e.g., be 32-bit, while | |
1381 | the new one 64-bit), and before anything involving memory or | |
1382 | registers. */ | |
1383 | target_find_description (); | |
1384 | ||
3cb6bc13 | 1385 | current_program_space->set_solib_ops |
a2e3cce3 | 1386 | (gdbarch_make_solib_ops (following_inferior->arch ())); |
4a1283c8 | 1387 | gdb::observers::inferior_execd.notify (execing_inferior, following_inferior); |
4efc6507 | 1388 | |
c1e56572 JK |
1389 | breakpoint_re_set (); |
1390 | ||
c906108c SS |
1391 | /* Reinsert all breakpoints. (Those which were symbolic have |
1392 | been reset to the proper address in the new a.out, thanks | |
1777feb0 | 1393 | to symbol_file_command...). */ |
c906108c SS |
1394 | insert_breakpoints (); |
1395 | ||
1396 | /* The next resume of this inferior should bring it to the shlib | |
1397 | startup breakpoints. (If the user had also set bp's on | |
1398 | "main" from the old (parent) process, then they'll auto- | |
1777feb0 | 1399 | matically get reset there in the new process.). */ |
c906108c SS |
1400 | } |
1401 | ||
28d5518b | 1402 | /* The chain of threads that need to do a step-over operation to get |
c2829269 PA |
1403 | past e.g., a breakpoint. What technique is used to step over the |
1404 | breakpoint/watchpoint does not matter -- all threads end up in the | |
1405 | same queue, to maintain rough temporal order of execution, in order | |
1406 | to avoid starvation; otherwise, we could, e.g., find ourselves |
1407 | constantly stepping the same couple of threads past their breakpoints |
1408 | over and over, if the single-step finishes fast enough. */ |
8b6a69b2 | 1409 | thread_step_over_list global_thread_step_over_list; |
c2829269 | 1410 | |
6c4cfb24 PA |
1411 | /* Bit flags indicating what the thread needs to step over. */ |
1412 | ||
8d297bbf | 1413 | enum step_over_what_flag |
6c4cfb24 PA |
1414 | { |
1415 | /* Step over a breakpoint. */ | |
1416 | STEP_OVER_BREAKPOINT = 1, | |
1417 | ||
1418 | /* Step past a non-continuable watchpoint, in order to let the | |
1419 | instruction execute so we can evaluate the watchpoint | |
1420 | expression. */ | |
1421 | STEP_OVER_WATCHPOINT = 2 | |
1422 | }; | |
8d297bbf | 1423 | DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag, step_over_what); |
6c4cfb24 | 1424 | |
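/* A minimal illustrative sketch, not part of the original file: how a
   caller might combine and test these flags.  The helper name
   NEEDS_INLINE_STEP_SKETCH is hypothetical; the flag names and the
   '|' / '&' semantics provided by DEF_ENUM_FLAGS_TYPE come from the
   declarations above, and the '&' test mirrors the real uses in
   start_step_over further below.  */

static bool
needs_inline_step_sketch (step_over_what what)
{
  /* A single thread may need to step over both kinds of location at
     once; the flags combine with '|'.  */
  step_over_what both = STEP_OVER_BREAKPOINT | STEP_OVER_WATCHPOINT;

  /* Membership tests use '&', as with plain C bit flags.  */
  return ((what & both) == both
	  || (what & STEP_OVER_BREAKPOINT)
	  || (what & STEP_OVER_WATCHPOINT));
}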
963f9c80 | 1425 | /* Info about an instruction that is being stepped over. */ |
31e77af2 PA |
1426 | |
1427 | struct step_over_info | |
1428 | { | |
963f9c80 PA |
1429 | /* If we're stepping past a breakpoint, this is the address space |
1430 | and address of the instruction the breakpoint is set at. We'll | |
1431 | skip inserting all breakpoints here. Valid iff ASPACE is | |
1432 | non-NULL. */ | |
ac7d717c PA |
1433 | const address_space *aspace = nullptr; |
1434 | CORE_ADDR address = 0; | |
963f9c80 PA |
1435 | |
1436 | /* The instruction being stepped over triggers a nonsteppable | |
1437 | watchpoint. If true, we'll skip inserting watchpoints. */ | |
ac7d717c | 1438 | int nonsteppable_watchpoint_p = 0; |
21edc42f YQ |
1439 | |
1440 | /* The thread's global number. */ | |
ac7d717c | 1441 | int thread = -1; |
31e77af2 PA |
1442 | }; |
1443 | ||
1444 | /* The step-over info of the location that is being stepped over. | |
1445 | ||
1446 | Note that with async/breakpoint always-inserted mode, a user might | |
1447 | set a new breakpoint/watchpoint/etc. exactly while a breakpoint is | |
1448 | being stepped over. As setting a new breakpoint inserts all | |
1449 | breakpoints, we need to make sure the breakpoint being stepped over | |
1450 | isn't inserted then. We do that by only clearing the step-over | |
1451 | info when the step-over is actually finished (or aborted). | |
1452 | ||
1453 | Presently GDB can only step over one breakpoint at any given time. | |
1454 | Given that threads which can't run code in the same address space |
1455 | as the breakpoint can't really miss the breakpoint, GDB could be taught |
1456 | to step-over at most one breakpoint per address space (so this info | |
1457 | could move to the address space object if/when GDB is extended). | |
1458 | The set of breakpoints being stepped over will normally be much | |
1459 | smaller than the set of all breakpoints, so a flag in the | |
1460 | breakpoint location structure would be wasteful. A separate list | |
1461 | also saves complexity and run-time, as otherwise we'd have to go | |
1462 | through all breakpoint locations clearing their flag whenever we | |
1463 | start a new sequence. Similar considerations weigh against storing | |
1464 | this info in the thread object. Plus, not all step overs actually | |
1465 | have breakpoint locations -- e.g., stepping past a single-step | |
1466 | breakpoint, or stepping to complete a non-continuable | |
1467 | watchpoint. */ | |
1468 | static struct step_over_info step_over_info; | |
1469 | ||
1470 | /* Record the address of the breakpoint/instruction we're currently | |
ce0db137 DE |
1471 | stepping over. |
1472 | N.B. We record the aspace and address now, instead of, say, just the thread, |
1473 | because when we need the info later the thread may be running. */ | |
31e77af2 PA |
1474 | |
1475 | static void | |
8b86c959 | 1476 | set_step_over_info (const address_space *aspace, CORE_ADDR address, |
21edc42f YQ |
1477 | int nonsteppable_watchpoint_p, |
1478 | int thread) | |
31e77af2 PA |
1479 | { |
1480 | step_over_info.aspace = aspace; | |
1481 | step_over_info.address = address; | |
963f9c80 | 1482 | step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p; |
21edc42f | 1483 | step_over_info.thread = thread; |
31e77af2 PA |
1484 | } |
1485 | ||
1486 | /* Called when we're no longer stepping over a breakpoint / an |
1487 | instruction, so all breakpoints are free to be (re)inserted. */ | |
1488 | ||
1489 | static void | |
1490 | clear_step_over_info (void) | |
1491 | { | |
1eb8556f | 1492 | infrun_debug_printf ("clearing step over info"); |
03acd4d8 | 1493 | step_over_info.aspace = nullptr; |
31e77af2 | 1494 | step_over_info.address = 0; |
963f9c80 | 1495 | step_over_info.nonsteppable_watchpoint_p = 0; |
21edc42f | 1496 | step_over_info.thread = -1; |
31e77af2 PA |
1497 | } |
1498 | ||
7f89fd65 | 1499 | /* See infrun.h. */ |
31e77af2 PA |
1500 | |
1501 | int | |
1502 | stepping_past_instruction_at (struct address_space *aspace, | |
1503 | CORE_ADDR address) | |
1504 | { | |
03acd4d8 | 1505 | return (step_over_info.aspace != nullptr |
31e77af2 PA |
1506 | && breakpoint_address_match (aspace, address, |
1507 | step_over_info.aspace, | |
1508 | step_over_info.address)); | |
1509 | } | |
1510 | ||
963f9c80 PA |
1511 | /* See infrun.h. */ |
1512 | ||
21edc42f YQ |
1513 | int |
1514 | thread_is_stepping_over_breakpoint (int thread) | |
1515 | { | |
1516 | return (step_over_info.thread != -1 | |
1517 | && thread == step_over_info.thread); | |
1518 | } | |
1519 | ||
1520 | /* See infrun.h. */ | |
1521 | ||
963f9c80 PA |
1522 | int |
1523 | stepping_past_nonsteppable_watchpoint (void) | |
1524 | { | |
1525 | return step_over_info.nonsteppable_watchpoint_p; | |
1526 | } | |
1527 | ||
6cc83d2a PA |
1528 | /* Returns true if step-over info is valid. */ |
1529 | ||
c4464ade | 1530 | static bool |
6cc83d2a PA |
1531 | step_over_info_valid_p (void) |
1532 | { | |
03acd4d8 | 1533 | return (step_over_info.aspace != nullptr |
963f9c80 | 1534 | || stepping_past_nonsteppable_watchpoint ()); |
6cc83d2a PA |
1535 | } |
1536 | ||
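/* A minimal illustrative sketch, not part of the original file: the
   shape of a breakpoint-insertion check built on the accessors above.
   SHOULD_INSERT_HERE_SKETCH is a hypothetical name; the real
   insertion logic lives in breakpoint.c.  */

static bool
should_insert_here_sketch (struct address_space *aspace, CORE_ADDR addr)
{
  /* While a step-over is in flight, the location being stepped over
     must stay uninserted, or the stepping thread would immediately
     re-trap on its own breakpoint.  */
  return !stepping_past_instruction_at (aspace, addr);
}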
c906108c | 1537 | \f |
237fc4c9 PA |
1538 | /* Displaced stepping. */ |
1539 | ||
1540 | /* In non-stop debugging mode, we must take special care to manage | |
1541 | breakpoints properly; in particular, the traditional strategy for | |
1542 | stepping a thread past a breakpoint it has hit is unsuitable. | |
1543 | 'Displaced stepping' is a tactic for stepping one thread past a | |
1544 | breakpoint it has hit while ensuring that other threads running | |
1545 | concurrently will hit the breakpoint as they should. | |
1546 | ||
1547 | The traditional way to step a thread T off a breakpoint in a | |
1548 | multi-threaded program in all-stop mode is as follows: | |
1549 | ||
1550 | a0) Initially, all threads are stopped, and breakpoints are not | |
1551 | inserted. | |
1552 | a1) We single-step T, leaving breakpoints uninserted. | |
1553 | a2) We insert breakpoints, and resume all threads. | |
1554 | ||
1555 | In non-stop debugging, however, this strategy is unsuitable: we | |
1556 | don't want to have to stop all threads in the system in order to | |
1557 | continue or step T past a breakpoint. Instead, we use displaced | |
1558 | stepping: | |
1559 | ||
1560 | n0) Initially, T is stopped, other threads are running, and | |
1561 | breakpoints are inserted. | |
1562 | n1) We copy the instruction "under" the breakpoint to a separate | |
1563 | location, outside the main code stream, making any adjustments | |
1564 | to the instruction, register, and memory state as directed by | |
1565 | T's architecture. | |
1566 | n2) We single-step T over the instruction at its new location. | |
1567 | n3) We adjust the resulting register and memory state as directed | |
1568 | by T's architecture. This includes resetting T's PC to point | |
1569 | back into the main instruction stream. | |
1570 | n4) We resume T. | |
1571 | ||
1572 | This approach depends on the following gdbarch methods: | |
1573 | ||
1574 | - gdbarch_max_insn_length and gdbarch_displaced_step_location | |
1575 | indicate where to copy the instruction, and how much space must | |
1576 | be reserved there. We use these in step n1. | |
1577 | ||
1578 | - gdbarch_displaced_step_copy_insn copies an instruction to a new |
1579 | address, and makes any necessary adjustments to the instruction, | |
1580 | register contents, and memory. We use this in step n1. | |
1581 | ||
1582 | - gdbarch_displaced_step_fixup adjusts registers and memory after | |
85102364 | 1583 | we have successfully single-stepped the instruction, to yield the |
237fc4c9 PA |
1584 | same effect the instruction would have had if we had executed it |
1585 | at its original address. We use this in step n3. | |
1586 | ||
237fc4c9 PA |
1587 | The gdbarch_displaced_step_copy_insn and |
1588 | gdbarch_displaced_step_fixup functions must be written so that | |
1589 | copying an instruction with gdbarch_displaced_step_copy_insn, | |
1590 | single-stepping across the copied instruction, and then applying | |
1591 | gdbarch_displaced_step_fixup should have the same effects on the |
1592 | thread's memory and registers as stepping the instruction in place | |
1593 | would have. Exactly which responsibilities fall to the copy and | |
1594 | which fall to the fixup is up to the author of those functions. | |
1595 | ||
1596 | See the comments in gdbarch.sh for details. | |
1597 | ||
1598 | Note that displaced stepping and software single-step cannot | |
1599 | currently be used in combination, although with some care I think | |
1600 | they could be made to. Software single-step works by placing | |
1601 | breakpoints on all possible subsequent instructions; if the | |
1602 | displaced instruction is a PC-relative jump, those breakpoints | |
1603 | could fall in very strange places --- on pages that aren't | |
1604 | executable, or at addresses that are not proper instruction | |
1605 | boundaries. (We do generally let other threads run while we wait | |
1606 | to hit the software single-step breakpoint, and they might | |
1607 | encounter such a corrupted instruction.) One way to work around | |
1608 | this would be to have gdbarch_displaced_step_copy_insn fully | |
1609 | simulate the effect of PC-relative instructions (and return NULL) | |
1610 | on architectures that use software single-stepping. | |
1611 | ||
1612 | In non-stop mode, we can have independent and simultaneous step | |
1613 | requests, so more than one thread may need to simultaneously step | |
1614 | over a breakpoint. The current implementation assumes there is | |
1615 | only one scratch space per process. In this case, we have to | |
1616 | serialize access to the scratch space. If thread A wants to step | |
1617 | over a breakpoint, but we are currently waiting for some other | |
1618 | thread to complete a displaced step, we leave thread A stopped and | |
1619 | place it in the displaced_step_request_queue. Whenever a displaced | |
1620 | step finishes, we pick the next thread in the queue and start a new | |
1621 | displaced step operation on it. See displaced_step_prepare and | |
7def77a1 | 1622 | displaced_step_finish for details. */ |
237fc4c9 | 1623 | |
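/* A condensed illustrative sketch, not part of the original file, of
   the n0-n4 sequence described above.  Every sketch_* helper is
   hypothetical and declared here only to make the sketch complete;
   the gdbarch methods named in the comments are the real extension
   points.  */

extern CORE_ADDR sketch_read_pc (struct thread_info *t);
extern CORE_ADDR sketch_copy_insn (struct thread_info *t, CORE_ADDR pc);
extern void sketch_single_step_at (struct thread_info *t, CORE_ADDR addr);
extern void sketch_fixup (struct thread_info *t, CORE_ADDR pc,
			  CORE_ADDR scratch);
extern void sketch_resume (struct thread_info *t);

static void
displaced_step_sketch (struct thread_info *t)
{
  CORE_ADDR pc = sketch_read_pc (t);

  /* n1: copy the instruction "under" the breakpoint to the scratch
     location (gdbarch_displaced_step_location), letting the
     architecture adjust it (gdbarch_displaced_step_copy_insn).  */
  CORE_ADDR scratch = sketch_copy_insn (t, pc);

  /* n2: single-step the copy at its new location.  */
  sketch_single_step_at (t, scratch);

  /* n3: fix up registers and memory (gdbarch_displaced_step_fixup),
     which also points the PC back into the main instruction
     stream.  */
  sketch_fixup (t, pc, scratch);

  /* n4: resume T normally.  */
  sketch_resume (t);
}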
a46d1843 | 1624 | /* Return true if THREAD is doing a displaced step. */ |
c0987663 | 1625 | |
c4464ade | 1626 | static bool |
00431a78 | 1627 | displaced_step_in_progress_thread (thread_info *thread) |
c0987663 | 1628 | { |
03acd4d8 | 1629 | gdb_assert (thread != nullptr); |
c0987663 | 1630 | |
187b041e | 1631 | return thread->displaced_step_state.in_progress (); |
c0987663 YQ |
1632 | } |
1633 | ||
a46d1843 | 1634 | /* Return true if INF has a thread doing a displaced step. */ |
8f572e5c | 1635 | |
c4464ade | 1636 | static bool |
00431a78 | 1637 | displaced_step_in_progress (inferior *inf) |
8f572e5c | 1638 | { |
187b041e | 1639 | return inf->displaced_step_state.in_progress_count > 0; |
fc1cf338 PA |
1640 | } |
1641 | ||
187b041e | 1642 | /* Return true if any thread is doing a displaced step. */ |
a42244db | 1643 | |
187b041e SM |
1644 | static bool |
1645 | displaced_step_in_progress_any_thread () | |
a42244db | 1646 | { |
187b041e SM |
1647 | for (inferior *inf : all_non_exited_inferiors ()) |
1648 | { | |
1649 | if (displaced_step_in_progress (inf)) | |
1650 | return true; | |
1651 | } | |
a42244db | 1652 | |
187b041e | 1653 | return false; |
a42244db YQ |
1654 | } |
1655 | ||
fc1cf338 PA |
1656 | static void |
1657 | infrun_inferior_exit (struct inferior *inf) | |
1658 | { | |
d20172fc | 1659 | inf->displaced_step_state.reset (); |
6f5d514f | 1660 | inf->thread_waiting_for_vfork_done = nullptr; |
fc1cf338 | 1661 | } |
237fc4c9 | 1662 | |
3b7a962d | 1663 | static void |
4a1283c8 | 1664 | infrun_inferior_execd (inferior *exec_inf, inferior *follow_inf) |
3b7a962d | 1665 | { |
187b041e SM |
1666 | /* If some threads were doing a displaced step in this inferior at the |
1667 | moment of the exec, they no longer exist. Even if the exec'ing thread |
3b7a962d SM |
1668 | was doing a displaced step, we don't want to do any fixup nor restore |
1669 | displaced stepping buffer bytes. */ |
4a1283c8 | 1670 | follow_inf->displaced_step_state.reset (); |
3b7a962d | 1671 | |
4a1283c8 | 1672 | for (thread_info *thread : follow_inf->threads ()) |
187b041e SM |
1673 | thread->displaced_step_state.reset (); |
1674 | ||
3b7a962d SM |
1675 | /* Since an in-line step is done with everything else stopped, if there was |
1676 | one in progress at the time of the exec, it must have been the exec'ing | |
1677 | thread. */ | |
1678 | clear_step_over_info (); | |
6f5d514f | 1679 | |
4a1283c8 | 1680 | follow_inf->thread_waiting_for_vfork_done = nullptr; |
3b7a962d SM |
1681 | } |
1682 | ||
fff08868 HZ |
1683 | /* If ON, and the architecture supports it, GDB will use displaced |
1684 | stepping to step over breakpoints. If OFF, or if the architecture | |
1685 | doesn't support it, GDB will instead use the traditional | |
1686 | hold-and-step approach. If AUTO (which is the default), GDB will | |
1687 | decide which technique to use to step over breakpoints depending on | |
9822cb57 | 1688 | whether the target works in a non-stop way (see use_displaced_stepping). */ |
fff08868 | 1689 | |
72d0e2c5 | 1690 | static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO; |
fff08868 | 1691 | |
237fc4c9 PA |
1692 | static void |
1693 | show_can_use_displaced_stepping (struct ui_file *file, int from_tty, | |
1694 | struct cmd_list_element *c, | |
1695 | const char *value) | |
1696 | { | |
72d0e2c5 | 1697 | if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO) |
6cb06a8c TT |
1698 | gdb_printf (file, |
1699 | _("Debugger's willingness to use displaced stepping " | |
1700 | "to step over breakpoints is %s (currently %s).\n"), | |
1701 | value, target_is_non_stop_p () ? "on" : "off"); | |
fff08868 | 1702 | else |
6cb06a8c TT |
1703 | gdb_printf (file, |
1704 | _("Debugger's willingness to use displaced stepping " | |
1705 | "to step over breakpoints is %s.\n"), value); | |
237fc4c9 PA |
1706 | } |
1707 | ||
7f7e6755 | 1708 | /* Return true if the target behind THREAD supports displaced stepping. */ |
9822cb57 SM |
1709 | |
1710 | static bool | |
7f7e6755 | 1711 | target_supports_displaced_stepping (thread_info *thread) |
9822cb57 | 1712 | { |
7f7e6755 | 1713 | return thread->inf->top_target ()->supports_displaced_step (thread); |
9822cb57 SM |
1714 | } |
1715 | ||
fff08868 | 1716 | /* Return true if displaced stepping can/should be used to step |
3fc8eb30 | 1717 | over breakpoints of thread TP. */ |
fff08868 | 1718 | |
9822cb57 SM |
1719 | static bool |
1720 | use_displaced_stepping (thread_info *tp) | |
237fc4c9 | 1721 | { |
9822cb57 SM |
1722 | /* If the user disabled it explicitly, don't use displaced stepping. */ |
1723 | if (can_use_displaced_stepping == AUTO_BOOLEAN_FALSE) | |
1724 | return false; | |
1725 | ||
1726 | /* If "auto", only use displaced stepping if the target operates in a non-stop | |
1727 | way. */ | |
1728 | if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO | |
1729 | && !target_is_non_stop_p ()) | |
1730 | return false; | |
1731 | ||
7f7e6755 SM |
1732 | /* If the target doesn't support displaced stepping, don't use it. */ |
1733 | if (!target_supports_displaced_stepping (tp)) | |
9822cb57 SM |
1734 | return false; |
1735 | ||
1736 | /* If recording, don't use displaced stepping. */ | |
1737 | if (find_record_target () != nullptr) | |
1738 | return false; | |
1739 | ||
9822cb57 SM |
1740 | /* If displaced stepping failed before for this inferior, don't bother trying |
1741 | again. */ | |
f5f01699 | 1742 | if (tp->inf->displaced_step_state.failed_before) |
9822cb57 SM |
1743 | return false; |
1744 | ||
1745 | return true; | |
237fc4c9 PA |
1746 | } |
1747 | ||
187b041e | 1748 | /* Simple function wrapper around displaced_step_thread_state::reset. */ |
d8d83535 | 1749 | |
237fc4c9 | 1750 | static void |
187b041e | 1751 | displaced_step_reset (displaced_step_thread_state *displaced) |
237fc4c9 | 1752 | { |
d8d83535 | 1753 | displaced->reset (); |
237fc4c9 PA |
1754 | } |
1755 | ||
d8d83535 SM |
1756 | /* A cleanup that wraps displaced_step_reset. We use this instead of, say, |
1757 | SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()". */ | |
1758 | ||
1759 | using displaced_step_reset_cleanup = FORWARD_SCOPE_EXIT (displaced_step_reset); | |
237fc4c9 | 1760 | |
237fc4c9 PA |
1761 | /* Prepare to single-step, using displaced stepping. |
1762 | ||
1763 | Note that we cannot use displaced stepping when we have a signal to | |
1764 | deliver. If we have a signal to deliver and an instruction to step | |
1765 | over, then after the step, there will be no indication from the | |
1766 | target whether the thread entered a signal handler or ignored the | |
1767 | signal and stepped over the instruction successfully --- both cases | |
1768 | result in a simple SIGTRAP. In the first case we mustn't do a | |
1769 | fixup, and in the second case we must --- but we can't tell which. | |
1770 | Comments in the code for 'random signals' in handle_inferior_event | |
1771 | explain how we handle this case instead. | |
1772 | ||
bab37966 SM |
1773 | Returns DISPLACED_STEP_PREPARE_STATUS_OK if preparing was successful -- this |
1774 | thread is going to be stepped now; DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE | |
1775 | if displaced stepping this thread got queued; or | |
1776 | DISPLACED_STEP_PREPARE_STATUS_CANT if this instruction can't be displaced | |
1777 | stepped. */ | |
7f03bd92 | 1778 | |
bab37966 | 1779 | static displaced_step_prepare_status |
00431a78 | 1780 | displaced_step_prepare_throw (thread_info *tp) |
237fc4c9 | 1781 | { |
00431a78 | 1782 | regcache *regcache = get_thread_regcache (tp); |
ac7936df | 1783 | struct gdbarch *gdbarch = regcache->arch (); |
187b041e SM |
1784 | displaced_step_thread_state &disp_step_thread_state |
1785 | = tp->displaced_step_state; | |
237fc4c9 | 1786 | |
7f7e6755 | 1787 | /* We should never reach this function if the target does not |
237fc4c9 | 1788 | support displaced stepping. */ |
7f7e6755 | 1789 | gdb_assert (target_supports_displaced_stepping (tp)); |
237fc4c9 | 1790 | |
c2829269 PA |
1791 | /* Nor if the thread isn't meant to step over a breakpoint. */ |
1792 | gdb_assert (tp->control.trap_expected); | |
1793 | ||
c1e36e3e PA |
1794 | /* Disable range stepping while executing in the scratch pad. We |
1795 | want a single-step even if executing the displaced instruction in | |
1796 | the scratch buffer lands within the stepping range (e.g., a | |
1797 | jump/branch). */ | |
1798 | tp->control.may_range_step = 0; | |
1799 | ||
187b041e SM |
1800 | /* We are about to start a displaced step for this thread. If one is already |
1801 | in progress, something's wrong. */ | |
1802 | gdb_assert (!disp_step_thread_state.in_progress ()); | |
237fc4c9 | 1803 | |
187b041e | 1804 | if (tp->inf->displaced_step_state.unavailable) |
237fc4c9 | 1805 | { |
187b041e SM |
1806 | /* The gdbarch tells us it's not worth asking to try a prepare because |
1807 | it is likely that it will return unavailable, so don't bother asking. */ | |
237fc4c9 | 1808 | |
136821d9 | 1809 | displaced_debug_printf ("deferring step of %s", |
0fab7955 | 1810 | tp->ptid.to_string ().c_str ()); |
237fc4c9 | 1811 | |
28d5518b | 1812 | global_thread_step_over_chain_enqueue (tp); |
bab37966 | 1813 | return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE; |
237fc4c9 | 1814 | } |
237fc4c9 | 1815 | |
187b041e | 1816 | displaced_debug_printf ("displaced-stepping %s now", |
0fab7955 | 1817 | tp->ptid.to_string ().c_str ()); |
237fc4c9 | 1818 | |
00431a78 PA |
1819 | scoped_restore_current_thread restore_thread; |
1820 | ||
1821 | switch_to_thread (tp); | |
ad53cd71 | 1822 | |
187b041e SM |
1823 | CORE_ADDR original_pc = regcache_read_pc (regcache); |
1824 | CORE_ADDR displaced_pc; | |
237fc4c9 | 1825 | |
6d84a385 AB |
1826 | /* Display the instruction we are going to displaced step. */ |
1827 | if (debug_displaced) | |
1828 | { | |
1829 | string_file tmp_stream; | |
1830 | int dislen = gdb_print_insn (gdbarch, original_pc, &tmp_stream, | |
1831 | nullptr); | |
1832 | ||
1833 | if (dislen > 0) | |
1834 | { | |
1835 | gdb::byte_vector insn_buf (dislen); | |
1836 | read_memory (original_pc, insn_buf.data (), insn_buf.size ()); | |
1837 | ||
a6e5abae | 1838 | std::string insn_bytes = bytes_to_string (insn_buf); |
6d84a385 AB |
1839 | |
1840 | displaced_debug_printf ("original insn %s: %s \t %s", | |
1841 | paddress (gdbarch, original_pc), | |
1842 | insn_bytes.c_str (), | |
1843 | tmp_stream.string ().c_str ()); | |
1844 | } | |
1845 | else | |
1846 | displaced_debug_printf ("original insn %s: invalid length: %d", | |
1847 | paddress (gdbarch, original_pc), dislen); | |
1848 | } | |
1849 | ||
7f7e6755 SM |
1850 | auto status |
1851 | = tp->inf->top_target ()->displaced_step_prepare (tp, displaced_pc); | |
237fc4c9 | 1852 | |
187b041e | 1853 | if (status == DISPLACED_STEP_PREPARE_STATUS_CANT) |
d35ae833 | 1854 | { |
187b041e | 1855 | displaced_debug_printf ("failed to prepare (%s)", |
0fab7955 | 1856 | tp->ptid.to_string ().c_str ()); |
d35ae833 | 1857 | |
bab37966 | 1858 | return DISPLACED_STEP_PREPARE_STATUS_CANT; |
d35ae833 | 1859 | } |
187b041e | 1860 | else if (status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE) |
7f03bd92 | 1861 | { |
187b041e SM |
1862 | /* Not enough displaced stepping resources available, defer this |
1863 | request by placing it in the queue. */ |
1864 | ||
1865 | displaced_debug_printf ("not enough resources available, " | |
1866 | "deferring step of %s", | |
0fab7955 | 1867 | tp->ptid.to_string ().c_str ()); |
187b041e SM |
1868 | |
1869 | global_thread_step_over_chain_enqueue (tp); | |
1870 | ||
1871 | return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE; | |
7f03bd92 | 1872 | } |
237fc4c9 | 1873 | |
187b041e SM |
1874 | gdb_assert (status == DISPLACED_STEP_PREPARE_STATUS_OK); |
1875 | ||
9f5a595d UW |
1876 | /* Save the information we need to fix things up if the step |
1877 | succeeds. */ | |
187b041e | 1878 | disp_step_thread_state.set (gdbarch); |
9f5a595d | 1879 | |
187b041e | 1880 | tp->inf->displaced_step_state.in_progress_count++; |
ad53cd71 | 1881 | |
187b041e SM |
1882 | displaced_debug_printf ("prepared successfully thread=%s, " |
1883 | "original_pc=%s, displaced_pc=%s", | |
0fab7955 | 1884 | tp->ptid.to_string ().c_str (), |
187b041e SM |
1885 | paddress (gdbarch, original_pc), |
1886 | paddress (gdbarch, displaced_pc)); | |
237fc4c9 | 1887 | |
6d84a385 AB |
1888 | /* Display the new displaced instruction(s). */ |
1889 | if (debug_displaced) | |
1890 | { | |
1891 | string_file tmp_stream; | |
1892 | CORE_ADDR addr = displaced_pc; | |
1893 | ||
1894 | /* If displaced stepping is going to use h/w single step then we know | |
1895 | that the replacement instruction can only be a single instruction; |
1896 | in that case, set the end address at the next byte. |
1897 | ||
1898 | Otherwise the displaced stepping copy instruction routine could | |
1899 | have generated multiple instructions, and all we know is that they | |
1900 | must fit within the LEN bytes of the buffer. */ | |
1901 | CORE_ADDR end | |
1902 | = addr + (gdbarch_displaced_step_hw_singlestep (gdbarch) | |
1903 | ? 1 : gdbarch_displaced_step_buffer_length (gdbarch)); | |
1904 | ||
1905 | while (addr < end) | |
1906 | { | |
1907 | int dislen = gdb_print_insn (gdbarch, addr, &tmp_stream, nullptr); | |
1908 | if (dislen <= 0) | |
1909 | { | |
1910 | displaced_debug_printf | |
1911 | ("replacement insn %s: invalid length: %d", | |
1912 | paddress (gdbarch, addr), dislen); | |
1913 | break; | |
1914 | } | |
1915 | ||
1916 | gdb::byte_vector insn_buf (dislen); | |
1917 | read_memory (addr, insn_buf.data (), insn_buf.size ()); | |
1918 | ||
a6e5abae | 1919 | std::string insn_bytes = bytes_to_string (insn_buf); |
6d84a385 AB |
1920 | std::string insn_str = tmp_stream.release (); |
1921 | displaced_debug_printf ("replacement insn %s: %s \t %s", | |
1922 | paddress (gdbarch, addr), | |
1923 | insn_bytes.c_str (), | |
1924 | insn_str.c_str ()); | |
1925 | addr += dislen; | |
1926 | } | |
1927 | } | |
1928 | ||
bab37966 | 1929 | return DISPLACED_STEP_PREPARE_STATUS_OK; |
237fc4c9 PA |
1930 | } |
1931 | ||
3fc8eb30 PA |
1932 | /* Wrapper for displaced_step_prepare_throw that disables further |
1933 | attempts at displaced stepping if we get a memory error. */ | |
1934 | ||
bab37966 | 1935 | static displaced_step_prepare_status |
00431a78 | 1936 | displaced_step_prepare (thread_info *thread) |
3fc8eb30 | 1937 | { |
bab37966 SM |
1938 | displaced_step_prepare_status status |
1939 | = DISPLACED_STEP_PREPARE_STATUS_CANT; | |
3fc8eb30 | 1940 | |
a70b8144 | 1941 | try |
3fc8eb30 | 1942 | { |
bab37966 | 1943 | status = displaced_step_prepare_throw (thread); |
3fc8eb30 | 1944 | } |
230d2906 | 1945 | catch (const gdb_exception_error &ex) |
3fc8eb30 | 1946 | { |
16b41842 PA |
1947 | if (ex.error != MEMORY_ERROR |
1948 | && ex.error != NOT_SUPPORTED_ERROR) | |
eedc3f4f | 1949 | throw; |
3fc8eb30 | 1950 | |
1eb8556f SM |
1951 | infrun_debug_printf ("caught exception, disabling displaced stepping: %s", |
1952 | ex.what ()); | |
3fc8eb30 PA |
1953 | |
1954 | /* Be verbose if "set displaced-stepping" is "on", silent if | |
1955 | "auto". */ | |
1956 | if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE) | |
1957 | { | |
fd7dcb94 | 1958 | warning (_("disabling displaced stepping: %s"), |
3d6e9d23 | 1959 | ex.what ()); |
3fc8eb30 PA |
1960 | } |
1961 | ||
1962 | /* Disable further displaced stepping attempts. */ | |
f5f01699 | 1963 | thread->inf->displaced_step_state.failed_before = 1; |
3fc8eb30 | 1964 | } |
3fc8eb30 | 1965 | |
bab37966 | 1966 | return status; |
3fc8eb30 PA |
1967 | } |
1968 | ||
9488c327 PA |
1969 | /* True if any thread of TARGET that matches RESUME_PTID requires |
1970 | target_thread_events enabled. This assumes TARGET does not support | |
1971 | target thread options. */ | |
1972 | ||
1973 | static bool | |
1974 | any_thread_needs_target_thread_events (process_stratum_target *target, | |
1975 | ptid_t resume_ptid) | |
1976 | { | |
1977 | for (thread_info *tp : all_non_exited_threads (target, resume_ptid)) | |
1978 | if (displaced_step_in_progress_thread (tp) | |
1979 | || schedlock_applies (tp) | |
1980 | || tp->thread_fsm () != nullptr) | |
1981 | return true; | |
1982 | return false; | |
1983 | } | |
1984 | ||
65c459ab PA |
1985 | /* Maybe disable thread-{cloned,created,exited} event reporting after |
1986 | a step-over (either in-line or displaced) finishes. */ | |
1987 | ||
1988 | static void | |
21d48304 PA |
1989 | update_thread_events_after_step_over (thread_info *event_thread, |
1990 | const target_waitstatus &event_status) | |
65c459ab | 1991 | { |
7ac958f2 PA |
1992 | if (schedlock_applies (event_thread)) |
1993 | { | |
1994 | /* If scheduler-locking applies, continue reporting | |
1995 | thread-created/thread-cloned events. */ | |
1996 | return; | |
1997 | } | |
1998 | else if (target_supports_set_thread_options (0)) | |
65c459ab PA |
1999 | { |
2000 | /* We can control per-thread options. Disable events for the | |
21d48304 PA |
2001 | event thread, unless the thread is gone. */ |
2002 | if (event_status.kind () != TARGET_WAITKIND_THREAD_EXITED) | |
2003 | event_thread->set_thread_options (0); | |
65c459ab PA |
2004 | } |
2005 | else | |
2006 | { | |
2007 | /* We can only control the target-wide target_thread_events | |
9488c327 PA |
2008 | setting. Disable it, but only if other threads in the target |
2009 | don't need it enabled. */ | |
2010 | process_stratum_target *target = event_thread->inf->process_target (); | |
2011 | if (!any_thread_needs_target_thread_events (target, minus_one_ptid)) | |
65c459ab PA |
2012 | target_thread_events (false); |
2013 | } | |
2014 | } | |
2015 | ||
bab37966 SM |
2016 | /* If we displaced stepped an instruction successfully, adjust registers and |
2017 | memory to yield the same effect the instruction would have had if we had | |
2018 | executed it at its original address, and return | |
2019 | DISPLACED_STEP_FINISH_STATUS_OK. If the instruction didn't complete, | |
2020 | relocate the PC and return DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED. | |
372316f1 | 2021 | |
bab37966 SM |
2022 | If the thread wasn't displaced stepping, return |
2023 | DISPLACED_STEP_FINISH_STATUS_OK as well. */ | |
2024 | ||
2025 | static displaced_step_finish_status | |
58c01087 PA |
2026 | displaced_step_finish (thread_info *event_thread, |
2027 | const target_waitstatus &event_status) | |
237fc4c9 | 2028 | { |
0d36baa9 | 2029 | /* Check whether the parent is displaced stepping. */ |
0d36baa9 | 2030 | inferior *parent_inf = event_thread->inf; |
7f7e6755 | 2031 | target_ops *top_target = parent_inf->top_target (); |
0d36baa9 PA |
2032 | |
2033 | /* If this was a fork/vfork/clone, this event indicates that the | |
2034 | displaced stepping of the syscall instruction has been done, so | |
2035 | we perform cleanup for parent here. Also note that this | |
2036 | operation also cleans up the child for vfork, because their pages | |
2037 | are shared. */ | |
2038 | ||
2039 | /* If this is a fork (child gets its own address space copy) and | |
2040 | some displaced step buffers were in use at the time of the fork, | |
2041 | restore the displaced step buffer bytes in the child process. | |
2042 | ||
2043 | Architectures which support displaced stepping and fork events | |
2044 | must supply an implementation of | |
2045 | gdbarch_displaced_step_restore_all_in_ptid. This is not enforced | |
2046 | during gdbarch validation to support architectures which support | |
2047 | displaced stepping but not forks. */ | |
7f7e6755 SM |
2048 | if (event_status.kind () == TARGET_WAITKIND_FORKED |
2049 | && target_supports_displaced_stepping (event_thread)) | |
2050 | top_target->displaced_step_restore_all_in_ptid | |
2051 | (parent_inf, event_status.child_ptid ()); | |
0d36baa9 | 2052 | |
187b041e | 2053 | displaced_step_thread_state *displaced = &event_thread->displaced_step_state; |
fc1cf338 | 2054 | |
187b041e SM |
2055 | /* Was this thread performing a displaced step? */ |
2056 | if (!displaced->in_progress ()) | |
bab37966 | 2057 | return DISPLACED_STEP_FINISH_STATUS_OK; |
237fc4c9 | 2058 | |
21d48304 | 2059 | update_thread_events_after_step_over (event_thread, event_status); |
65c459ab | 2060 | |
187b041e SM |
2061 | gdb_assert (event_thread->inf->displaced_step_state.in_progress_count > 0); |
2062 | event_thread->inf->displaced_step_state.in_progress_count--; | |
2063 | ||
cb71640d PA |
2064 | /* Fixup may need to read memory/registers. Switch to the thread |
2065 | that we're fixing up. Also, target_stopped_by_watchpoint checks | |
d43b7a2d | 2066 | the current thread, and displaced_step_restore performs ptid-dependent |
328d42d8 | 2067 | memory accesses using current_inferior(). */ |
00431a78 | 2068 | switch_to_thread (event_thread); |
cb71640d | 2069 | |
d43b7a2d TBA |
2070 | displaced_step_reset_cleanup cleanup (displaced); |
2071 | ||
187b041e SM |
2072 | /* Do the fixup, and release the resources acquired to do the displaced |
2073 | step. */ | |
7f7e6755 | 2074 | auto status = top_target->displaced_step_finish (event_thread, event_status); |
0d36baa9 PA |
2075 | |
2076 | if (event_status.kind () == TARGET_WAITKIND_FORKED | |
2077 | || event_status.kind () == TARGET_WAITKIND_VFORKED | |
2078 | || event_status.kind () == TARGET_WAITKIND_THREAD_CLONED) | |
2079 | { | |
2080 | /* Since the vfork/fork/clone syscall instruction was executed | |
2081 | in the scratchpad, the child's PC is also within the | |
2082 | scratchpad. Set the child's PC to the parent's PC value, | |
2083 | which has already been fixed up. Note: we use the parent's | |
2084 | aspace here, although we're touching the child, because the | |
2085 | child hasn't been added to the inferior list yet at this | |
2086 | point. */ | |
2087 | ||
249d0812 PA |
2088 | struct regcache *parent_regcache = get_thread_regcache (event_thread); |
2089 | struct gdbarch *gdbarch = parent_regcache->arch (); | |
0d36baa9 | 2090 | struct regcache *child_regcache |
74387712 SM |
2091 | = get_thread_arch_regcache (parent_inf, event_status.child_ptid (), |
2092 | gdbarch); | |
0d36baa9 | 2093 | /* Read PC value of parent. */ |
249d0812 | 2094 | CORE_ADDR parent_pc = regcache_read_pc (parent_regcache); |
0d36baa9 PA |
2095 | |
2096 | displaced_debug_printf ("write child pc from %s to %s", | |
2097 | paddress (gdbarch, | |
2098 | regcache_read_pc (child_regcache)), | |
2099 | paddress (gdbarch, parent_pc)); | |
2100 | ||
2101 | regcache_write_pc (child_regcache, parent_pc); | |
2102 | } | |
2103 | ||
2104 | return status; | |
c2829269 | 2105 | } |
1c5cfe86 | 2106 | |
4d9d9d04 PA |
2107 | /* Data to be passed around while handling an event. This data is |
2108 | discarded between events. */ | |
2109 | struct execution_control_state | |
2110 | { | |
aa563d16 TT |
2111 | explicit execution_control_state (thread_info *thr = nullptr) |
2112 | : ptid (thr == nullptr ? null_ptid : thr->ptid), | |
2113 | event_thread (thr) | |
183be222 | 2114 | { |
183be222 SM |
2115 | } |
2116 | ||
aa563d16 | 2117 | process_stratum_target *target = nullptr; |
4d9d9d04 PA |
2118 | ptid_t ptid; |
2119 | /* The thread that got the event, if this was a thread event; NULL | |
2120 | otherwise. */ | |
2121 | struct thread_info *event_thread; | |
2122 | ||
2123 | struct target_waitstatus ws; | |
aa563d16 | 2124 | int stop_func_filled_in = 0; |
2a8339b7 | 2125 | CORE_ADDR stop_func_alt_start = 0; |
aa563d16 TT |
2126 | CORE_ADDR stop_func_start = 0; |
2127 | CORE_ADDR stop_func_end = 0; | |
2128 | const char *stop_func_name = nullptr; | |
2129 | int wait_some_more = 0; | |
4d9d9d04 PA |
2130 | |
2131 | /* True if the event thread hit the single-step breakpoint of | |
2132 | another thread. Thus the event doesn't cause a stop, the thread | |
2133 | needs to be single-stepped past the single-step breakpoint before | |
2134 | we can switch back to the original stepping thread. */ | |
aa563d16 | 2135 | int hit_singlestep_breakpoint = 0; |
4d9d9d04 PA |
2136 | }; |
2137 | ||
4d9d9d04 PA |
2138 | static void keep_going_pass_signal (struct execution_control_state *ecs); |
2139 | static void prepare_to_wait (struct execution_control_state *ecs); | |
c4464ade | 2140 | static bool keep_going_stepped_thread (struct thread_info *tp); |
8d297bbf | 2141 | static step_over_what thread_still_needs_step_over (struct thread_info *tp); |
4d9d9d04 PA |
2142 | |
2143 | /* Are there any pending step-over requests? If so, run all we can | |
2144 | now and return true. Otherwise, return false. */ | |
2145 | ||
c4464ade | 2146 | static bool |
c2829269 PA |
2147 | start_step_over (void) |
2148 | { | |
3ec3145c SM |
2149 | INFRUN_SCOPED_DEBUG_ENTER_EXIT; |
2150 | ||
372316f1 PA |
2151 | /* Don't start a new step-over if we already have an in-line |
2152 | step-over operation ongoing. */ | |
2153 | if (step_over_info_valid_p ()) | |
c4464ade | 2154 | return false; |
372316f1 | 2155 | |
187b041e SM |
2156 | /* Steal the global thread step over chain. As we try to initiate displaced |
2157 | steps, threads will be enqueued in the global chain if no buffers are | |
2158 | available. If we iterated on the global chain directly, we might iterate | |
2159 | indefinitely. */ | |
8b6a69b2 SM |
2160 | thread_step_over_list threads_to_step |
2161 | = std::move (global_thread_step_over_list); | |
187b041e SM |
2162 | |
2163 | infrun_debug_printf ("stealing global queue of threads to step, length = %d", | |
2164 | thread_step_over_chain_length (threads_to_step)); | |
2165 | ||
2166 | bool started = false; | |
2167 | ||
2168 | /* On scope exit (whatever the reason, return or exception), if there are | |
2169 | threads left in the THREADS_TO_STEP chain, put back these threads in the | |
2170 | global list. */ | |
2171 | SCOPE_EXIT | |
2172 | { | |
8b6a69b2 | 2173 | if (threads_to_step.empty ()) |
187b041e SM |
2174 | infrun_debug_printf ("step-over queue now empty"); |
2175 | else | |
2176 | { | |
2177 | infrun_debug_printf ("putting back %d threads to step in global queue", | |
2178 | thread_step_over_chain_length (threads_to_step)); | |
2179 | ||
8b6a69b2 SM |
2180 | global_thread_step_over_chain_enqueue_chain |
2181 | (std::move (threads_to_step)); | |
187b041e SM |
2182 | } |
2183 | }; | |
2184 | ||
8b6a69b2 SM |
2185 | thread_step_over_list_safe_range range |
2186 | = make_thread_step_over_list_safe_range (threads_to_step); | |
2187 | ||
2188 | for (thread_info *tp : range) | |
237fc4c9 | 2189 | { |
8d297bbf | 2190 | step_over_what step_what; |
372316f1 | 2191 | int must_be_in_line; |
c2829269 | 2192 | |
c65d6b55 PA |
2193 | gdb_assert (!tp->stop_requested); |
2194 | ||
187b041e SM |
2195 | if (tp->inf->displaced_step_state.unavailable) |
2196 | { | |
2197 | /* The arch told us to not even try preparing another displaced step | |
2198 | for this inferior. Just leave the thread in THREADS_TO_STEP, it | |
2199 | will get moved to the global chain on scope exit. */ | |
2200 | continue; | |
2201 | } | |
2202 | ||
d8bbae6e SM |
2203 | if (tp->inf->thread_waiting_for_vfork_done != nullptr) |
2204 | { | |
2205 | /* When we stop all threads, handling a vfork, any thread in the step | |
2206 | over chain remains there. A user could also try to continue a | |
2207 | thread stopped at a breakpoint while another thread is waiting for | |
2208 | a vfork-done event. In any case, we don't want to start a step | |
2209 | over right now. */ | |
2210 | continue; | |
2211 | } | |
2212 | ||
187b041e SM |
2213 | /* Remove thread from the THREADS_TO_STEP chain. If anything goes wrong |
2214 | while we try to prepare the displaced step, we don't add it back to | |
2215 | the global step over chain. This is to avoid a thread staying in the | |
2216 | step over chain indefinitely if something goes wrong when resuming it. |
2217 | If the error is intermittent and it still needs a step over, it will | |
2218 | get enqueued again when we try to resume it normally. */ | |
8b6a69b2 | 2219 | threads_to_step.erase (threads_to_step.iterator_to (*tp)); |
c2829269 | 2220 | |
372316f1 PA |
2221 | step_what = thread_still_needs_step_over (tp); |
2222 | must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT) | |
2223 | || ((step_what & STEP_OVER_BREAKPOINT) | |
3fc8eb30 | 2224 | && !use_displaced_stepping (tp))); |
372316f1 PA |
2225 | |
2226 | /* We currently stop all threads of all processes to step-over | |
2227 | in-line. If we need to start a new in-line step-over, let | |
2228 | any pending displaced steps finish first. */ | |
187b041e SM |
2229 | if (must_be_in_line && displaced_step_in_progress_any_thread ()) |
2230 | { | |
2231 | global_thread_step_over_chain_enqueue (tp); | |
2232 | continue; | |
2233 | } | |
c2829269 | 2234 | |
372316f1 | 2235 | if (tp->control.trap_expected |
7846f3aa | 2236 | || tp->resumed () |
611841bb | 2237 | || tp->executing ()) |
ad53cd71 | 2238 | { |
f34652de | 2239 | internal_error ("[%s] has inconsistent state: " |
372316f1 | 2240 | "trap_expected=%d, resumed=%d, executing=%d\n", |
0fab7955 | 2241 | tp->ptid.to_string ().c_str (), |
4d9d9d04 | 2242 | tp->control.trap_expected, |
7846f3aa | 2243 | tp->resumed (), |
611841bb | 2244 | tp->executing ()); |
ad53cd71 | 2245 | } |
1c5cfe86 | 2246 | |
1eb8556f | 2247 | infrun_debug_printf ("resuming [%s] for step-over", |
0fab7955 | 2248 | tp->ptid.to_string ().c_str ()); |
4d9d9d04 PA |
2249 | |
2250 | /* keep_going_pass_signal skips the step-over if the breakpoint | |
2251 | is no longer inserted. In all-stop, we want to keep looking | |
2252 | for a thread that needs a step-over instead of resuming TP, | |
2253 | because we wouldn't be able to resume anything else until the | |
2254 | target stops again. In non-stop, the resume always resumes | |
2255 | only TP, so it's OK to let the thread resume freely. */ | |
fbea99ea | 2256 | if (!target_is_non_stop_p () && !step_what) |
4d9d9d04 | 2257 | continue; |
8550d3b3 | 2258 | |
00431a78 | 2259 | switch_to_thread (tp); |
aa563d16 TT |
2260 | execution_control_state ecs (tp); |
2261 | keep_going_pass_signal (&ecs); | |
1c5cfe86 | 2262 | |
aa563d16 | 2263 | if (!ecs.wait_some_more) |
4d9d9d04 | 2264 | error (_("Command aborted.")); |
1c5cfe86 | 2265 | |
187b041e SM |
2266 | /* If the thread's step over could not be initiated because no buffers |
2267 | were available, it was re-added to the global step over chain. */ | |
7846f3aa | 2268 | if (tp->resumed ()) |
187b041e SM |
2269 | { |
2270 | infrun_debug_printf ("[%s] was resumed.", | |
0fab7955 | 2271 | tp->ptid.to_string ().c_str ()); |
187b041e SM |
2272 | gdb_assert (!thread_is_in_step_over_chain (tp)); |
2273 | } | |
2274 | else | |
2275 | { | |
2276 | infrun_debug_printf ("[%s] was NOT resumed.", | |
0fab7955 | 2277 | tp->ptid.to_string ().c_str ()); |
187b041e SM |
2278 | gdb_assert (thread_is_in_step_over_chain (tp)); |
2279 | } | |
372316f1 PA |
2280 | |
2281 | /* If we started a new in-line step-over, we're done. */ | |
2282 | if (step_over_info_valid_p ()) | |
2283 | { | |
2284 | gdb_assert (tp->control.trap_expected); | |
187b041e SM |
2285 | started = true; |
2286 | break; | |
372316f1 PA |
2287 | } |
2288 | ||
fbea99ea | 2289 | if (!target_is_non_stop_p ()) |
4d9d9d04 PA |
2290 | { |
2291 | /* On all-stop, shouldn't have resumed unless we needed a | |
2292 | step over. */ | |
2293 | gdb_assert (tp->control.trap_expected | |
2294 | || tp->step_after_step_resume_breakpoint); | |
2295 | ||
2296 | /* With remote targets (at least), in all-stop, we can't | |
2297 | issue any further remote commands until the program stops | |
2298 | again. */ | |
187b041e SM |
2299 | started = true; |
2300 | break; | |
1c5cfe86 | 2301 | } |
c2829269 | 2302 | |
4d9d9d04 PA |
2303 | /* Either the thread no longer needed a step-over, or a new |
2304 | displaced stepping sequence started. Even in the latter | |
2305 | case, continue looking. Maybe we can also start another | |
2306 | displaced step on a thread of other process. */ | |
237fc4c9 | 2307 | } |
4d9d9d04 | 2308 | |
187b041e | 2309 | return started; |
237fc4c9 PA |
2310 | } |
2311 | ||
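/* A minimal self-contained sketch, not part of the original file, of
   the "steal the queue" pattern start_step_over uses above: move the
   whole work list into a local before iterating, so items that
   re-enqueue themselves land on the (now empty) global list for a
   later pass instead of making this pass loop forever.  All names
   here are made up for illustration.  */

#include <list>

static std::list<int> sketch_global_queue;

/* Stand-in for displaced_step_prepare: pretend even items succeed
   and odd items hit a transient failure.  */

static bool
sketch_try_process (int item)
{
  return (item % 2) == 0;
}

static void
sketch_drain_once ()
{
  std::list<int> todo;
  todo.swap (sketch_global_queue);	/* Steal; the global is now empty.  */

  while (!todo.empty ())
    {
      int item = todo.front ();
      todo.pop_front ();

      /* A transient failure re-enqueues onto SKETCH_GLOBAL_QUEUE;
	 since we iterate TODO, that cannot extend this pass.  This
	 mirrors the SCOPE_EXIT put-back in start_step_over.  */
      if (!sketch_try_process (item))
	sketch_global_queue.push_back (item);
    }
}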
5231c1fd PA |
2312 | /* Update global variables holding ptids to hold NEW_PTID if they were |
2313 | holding OLD_PTID. */ | |
2314 | static void | |
b161a60d SM |
2315 | infrun_thread_ptid_changed (process_stratum_target *target, |
2316 | ptid_t old_ptid, ptid_t new_ptid) | |
5231c1fd | 2317 | { |
b161a60d SM |
2318 | if (inferior_ptid == old_ptid |
2319 | && current_inferior ()->process_target () == target) | |
5231c1fd | 2320 | inferior_ptid = new_ptid; |
5231c1fd PA |
2321 | } |
2322 | ||
237fc4c9 | 2323 | \f |
c906108c | 2324 | |
53904c9e AC |
2325 | static const char schedlock_off[] = "off"; |
2326 | static const char schedlock_on[] = "on"; | |
2327 | static const char schedlock_step[] = "step"; | |
f2665db5 | 2328 | static const char schedlock_replay[] = "replay"; |
40478521 | 2329 | static const char *const scheduler_enums[] = { |
ef346e04 AC |
2330 | schedlock_off, |
2331 | schedlock_on, | |
2332 | schedlock_step, | |
f2665db5 | 2333 | schedlock_replay, |
03acd4d8 | 2334 | nullptr |
ef346e04 | 2335 | }; |
f2665db5 | 2336 | static const char *scheduler_mode = schedlock_replay; |
920d2a44 AC |
2337 | static void |
2338 | show_scheduler_mode (struct ui_file *file, int from_tty, | |
2339 | struct cmd_list_element *c, const char *value) | |
2340 | { | |
6cb06a8c TT |
2341 | gdb_printf (file, |
2342 | _("Mode for locking scheduler " | |
2343 | "during execution is \"%s\".\n"), | |
2344 | value); | |
920d2a44 | 2345 | } |
c906108c SS |
2346 | |
2347 | static void | |
eb4c3f4a | 2348 | set_schedlock_func (const char *args, int from_tty, struct cmd_list_element *c) |
c906108c | 2349 | { |
8a3ecb79 | 2350 | if (!target_can_lock_scheduler ()) |
eefe576e AC |
2351 | { |
2352 | scheduler_mode = schedlock_off; | |
d777bf0d SM |
2353 | error (_("Target '%s' cannot support this command."), |
2354 | target_shortname ()); | |
eefe576e | 2355 | } |
c906108c SS |
2356 | } |
2357 | ||
d4db2f36 PA |
2358 | /* True if execution commands resume all threads of all processes by |
2359 | default; otherwise, resume only threads of the current inferior | |
2360 | process. */ | |
491144b5 | 2361 | bool sched_multi = false; |
d4db2f36 | 2362 | |
22b11ba9 LS |
2363 | /* Try to set up for software single stepping. Return true if target_resume() |
2364 | should use hardware single step. | |
2facfe5c | 2365 | |
22b11ba9 | 2366 | GDBARCH the current gdbarch. */ |
2facfe5c | 2367 | |
c4464ade | 2368 | static bool |
22b11ba9 | 2369 | maybe_software_singlestep (struct gdbarch *gdbarch) |
2facfe5c | 2370 | { |
c4464ade | 2371 | bool hw_step = true; |
2facfe5c | 2372 | |
f02253f1 | 2373 | if (execution_direction == EXEC_FORWARD |
93f9a11f YQ |
2374 | && gdbarch_software_single_step_p (gdbarch)) |
2375 | hw_step = !insert_single_step_breakpoints (gdbarch); | |
2376 | ||
2facfe5c DD |
2377 | return hw_step; |
2378 | } | |
c906108c | 2379 | |
f3263aa4 PA |
2380 | /* See infrun.h. */ |
2381 | ||
09cee04b PA |
2382 | ptid_t |
2383 | user_visible_resume_ptid (int step) | |
2384 | { | |
f3263aa4 | 2385 | ptid_t resume_ptid; |
09cee04b | 2386 | |
09cee04b PA |
2387 | if (non_stop) |
2388 | { | |
2389 | /* With non-stop mode on, threads are always handled | |
2390 | individually. */ | |
2391 | resume_ptid = inferior_ptid; | |
2392 | } | |
2393 | else if ((scheduler_mode == schedlock_on) | |
03d46957 | 2394 | || (scheduler_mode == schedlock_step && step)) |
09cee04b | 2395 | { |
f3263aa4 PA |
2396 | /* User-settable 'scheduler' mode requires solo thread |
2397 | resume. */ | |
09cee04b PA |
2398 | resume_ptid = inferior_ptid; |
2399 | } | |
f2665db5 MM |
2400 | else if ((scheduler_mode == schedlock_replay) |
2401 | && target_record_will_replay (minus_one_ptid, execution_direction)) | |
2402 | { | |
2403 | /* User-settable 'scheduler' mode requires solo thread resume in replay | |
2404 | mode. */ | |
2405 | resume_ptid = inferior_ptid; | |
2406 | } | |
3df78436 AB |
2407 | else if (inferior_ptid != null_ptid |
2408 | && inferior_thread ()->control.in_cond_eval) | |
2409 | { | |
2410 | /* The inferior thread is evaluating a BP condition. Other threads | |
2411 | might be stopped or running and we do not want to change their | |
2412 | state, thus, resume only the current thread. */ | |
2413 | resume_ptid = inferior_ptid; | |
2414 | } | |
f3263aa4 PA |
2415 | else if (!sched_multi && target_supports_multi_process ()) |
2416 | { | |
2417 | /* Resume all threads of the current process (and none of other | |
2418 | processes). */ | |
e99b03dc | 2419 | resume_ptid = ptid_t (inferior_ptid.pid ()); |
f3263aa4 PA |
2420 | } |
2421 | else | |
2422 | { | |
2423 | /* Resume all threads of all processes. */ | |
2424 | resume_ptid = RESUME_ALL; | |
2425 | } | |
09cee04b PA |
2426 | |
2427 | return resume_ptid; | |
2428 | } | |
2429 | ||
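/* A minimal illustrative sketch, not part of the original file: the
   precedence implemented by user_visible_resume_ptid above, reduced
   to a pure function.  The enum and all parameter names are made up;
   they stand for the corresponding predicates in the real code.  */

enum resume_width_sketch
{
  WIDTH_ONE_THREAD,	/* resume only the current thread */
  WIDTH_ONE_PROCESS,	/* resume all threads of the current process */
  WIDTH_ALL		/* resume all threads of all processes */
};

static enum resume_width_sketch
compute_resume_width_sketch (bool non_stop_p, bool schedlock_solo_p,
			     bool replay_solo_p, bool cond_eval_p,
			     bool sched_multi_p, bool multi_process_p)
{
  /* Any condition that demands a solo-thread resume wins: non-stop
     mode, scheduler locking, replay scheduler locking, or evaluating
     a breakpoint condition.  */
  if (non_stop_p || schedlock_solo_p || replay_solo_p || cond_eval_p)
    return WIDTH_ONE_THREAD;

  /* Otherwise widen to the current process, or to everything.  */
  if (!sched_multi_p && multi_process_p)
    return WIDTH_ONE_PROCESS;

  return WIDTH_ALL;
}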
5b6d1e4f PA |
2430 | /* See infrun.h. */ |
2431 | ||
2432 | process_stratum_target * | |
2433 | user_visible_resume_target (ptid_t resume_ptid) | |
2434 | { | |
2435 | return (resume_ptid == minus_one_ptid && sched_multi | |
03acd4d8 | 2436 | ? nullptr |
5b6d1e4f PA |
2437 | : current_inferior ()->process_target ()); |
2438 | } | |
2439 | ||
bd9482bc PA |
2440 | /* Find a thread from the inferiors that we'll resume that is waiting |
2441 | for a vfork-done event. */ | |
2442 | ||
2443 | static thread_info * | |
2444 | find_thread_waiting_for_vfork_done () | |
2445 | { | |
2446 | gdb_assert (!target_is_non_stop_p ()); | |
2447 | ||
2448 | if (sched_multi) | |
2449 | { | |
2450 | for (inferior *inf : all_non_exited_inferiors ()) | |
2451 | if (inf->thread_waiting_for_vfork_done != nullptr) | |
2452 | return inf->thread_waiting_for_vfork_done; | |
2453 | } | |
2454 | else | |
2455 | { | |
2456 | inferior *cur_inf = current_inferior (); | |
2457 | if (cur_inf->thread_waiting_for_vfork_done != nullptr) | |
2458 | return cur_inf->thread_waiting_for_vfork_done; | |
2459 | } | |
2460 | return nullptr; | |
2461 | } | |
2462 | ||
fbea99ea PA |
2463 | /* Return a ptid representing the set of threads that we will resume, |
2464 | in the perspective of the target, assuming run control handling | |
2465 | does not require leaving some threads stopped (e.g., stepping past | |
2466 | breakpoint). USER_STEP indicates whether we're about to start the | |
2467 | target for a stepping command. */ | |
2468 | ||
2469 | static ptid_t | |
2470 | internal_resume_ptid (int user_step) | |
2471 | { | |
2472 | /* In non-stop, we always control threads individually. Note that | |
2473 | the target may always work in non-stop mode even with "set | |
2474 | non-stop off", in which case user_visible_resume_ptid could | |
2475 | return a wildcard ptid. */ | |
2476 | if (target_is_non_stop_p ()) | |
2477 | return inferior_ptid; | |
d8bbae6e SM |
2478 | |
2479 | /* The rest of the function assumes non-stop==off and | |
2480 | target-non-stop==off. | |
2481 | ||
2482 | If a thread is waiting for a vfork-done event, it means breakpoints are out | |
2483 | for this inferior (well, program space in fact). We don't want to resume | |
2484 | any thread other than the one waiting for vfork done, otherwise these other | |
2485 | threads could miss breakpoints. So if a thread in the resumption set is | |
2486 | waiting for a vfork-done event, resume only that thread. | |
2487 | ||
2488 | The resumption set width depends on whether schedule-multiple is on or off. | |
2489 | ||
2490 | Note that if the target_resume interface was more flexible, we could be | |
2491 | smarter here when schedule-multiple is on. For example, imagine 3 | |
2492 | inferiors with 2 threads each (1.1, 1.2, 2.1, 2.2, 3.1 and 3.2). Threads | |
2493 | 2.1 and 3.2 are both waiting for a vfork-done event. Then we could ask the | |
2494 | target(s) to resume: | |
2495 | ||
2496 | - All threads of inferior 1 | |
2497 | - Thread 2.1 | |
2498 | - Thread 3.2 | |
2499 | ||
2500 | Since we don't have that flexibility (we can only pass one ptid), just | |
2501 | resume the first thread waiting for a vfork-done event we find (e.g. thread | |
2502 | 2.1). */ | |
bd9482bc PA |
2503 | thread_info *thr = find_thread_waiting_for_vfork_done (); |
2504 | if (thr != nullptr) | |
d8bbae6e | 2505 | { |
bd9482bc PA |
2506 | /* If we have a thread that is waiting for a vfork-done event, |
2507 | then we should have switched to it earlier. Calling | |
2508 | target_resume with thread scope is only possible when the | |
2509 | current thread matches the thread scope. */ | |
2510 | gdb_assert (thr->ptid == inferior_ptid); | |
2511 | gdb_assert (thr->inf->process_target () | |
2512 | == inferior_thread ()->inf->process_target ()); | |
2513 | return thr->ptid; | |
d8bbae6e | 2514 | } |
d8bbae6e SM |
2515 | |
2516 | return user_visible_resume_ptid (user_step); | |
fbea99ea PA |
2517 | } |
2518 | ||
64ce06e4 PA |
2519 | /* Wrapper for target_resume, that handles infrun-specific |
2520 | bookkeeping. */ | |
2521 | ||
2522 | static void | |
c4464ade | 2523 | do_target_resume (ptid_t resume_ptid, bool step, enum gdb_signal sig) |
64ce06e4 PA |
2524 | { |
2525 | struct thread_info *tp = inferior_thread (); | |
2526 | ||
c65d6b55 PA |
2527 | gdb_assert (!tp->stop_requested); |
2528 | ||
64ce06e4 | 2529 | /* Install inferior's terminal modes. */ |
223ffa71 | 2530 | target_terminal::inferior (); |
64ce06e4 PA |
2531 | |
2532 | /* Avoid confusing the next resume, if the next stop/resume | |
2533 | happens to apply to another thread. */ | |
1edb66d8 | 2534 | tp->set_stop_signal (GDB_SIGNAL_0); |
64ce06e4 | 2535 | |
8f572e5c PA |
2536 | /* Advise target which signals may be handled silently. |
2537 | ||
2538 | If we have removed breakpoints because we are stepping over one | |
2539 | in-line (in any thread), we need to receive all signals to avoid | |
2540 | accidentally skipping a breakpoint during execution of a signal | |
2541 | handler. | |
2542 | ||
2543 | Likewise if we're displaced stepping, otherwise a trap for a | |
2544 | breakpoint in a signal handler might be confused with the | |
7def77a1 | 2545 | displaced step finishing. We don't make the displaced_step_finish |
8f572e5c PA |
2546 | distinguish the cases instead, because: |
2547 | ||
2548 | - a backtrace while stopped in the signal handler would show the | |
2549 | scratch pad as frame older than the signal handler, instead of | |
2550 | the real mainline code. | |
2551 | ||
2552 | - when the thread is later resumed, the signal handler would | |
2553 | return to the scratch pad area, which would no longer be | |
2554 | valid. */ | |
2555 | if (step_over_info_valid_p () | |
00431a78 | 2556 | || displaced_step_in_progress (tp->inf)) |
adc6a863 | 2557 | target_pass_signals ({}); |
64ce06e4 | 2558 | else |
adc6a863 | 2559 | target_pass_signals (signal_pass); |
64ce06e4 | 2560 | |
d8d96409 PA |
2561 | /* Request that the target report thread-{created,cloned,exited} |
2562 | events in the following situations: | |
65c459ab PA |
2563 | |
2564 | - If we are performing an in-line step-over-breakpoint, then we | |
2565 | will remove a breakpoint from the target and only run the | |
2566 | current thread. We don't want any new thread (spawned by the | |
d8d96409 PA |
2567 | step) to start running, as it might miss the breakpoint. We |
2568 | need to clear the step-over state if the stepped thread exits, | |
2569 | so we also enable thread-exit events. | |
65c459ab PA |
2570 | |
2571 | - If we are stepping over a breakpoint out of line (displaced | |
2572 | stepping) then we won't remove a breakpoint from the target, | |
2573 | but, if the step spawns a new clone thread, then we will need | |
2574 | to fixup the $pc address in the clone child too, so we need it | |
d8d96409 PA |
2575 | to start stopped. We need to release the displaced stepping |
2576 | buffer if the stepped thread exits, so we also enable | |
2577 | thread-exit events. | |
7ac958f2 PA |
2578 | |
2579 | - If scheduler-locking applies, threads that the current thread | |
2580 | spawns should remain halted. It's not strictly necessary to | |
2581 | enable thread-exit events in this case, but it doesn't hurt. | |
65c459ab PA |
2582 | */ |
2583 | if (step_over_info_valid_p () | |
7ac958f2 PA |
2584 | || displaced_step_in_progress_thread (tp) |
2585 | || schedlock_applies (tp)) | |
65c459ab | 2586 | { |
d8d96409 PA |
2587 | gdb_thread_options options |
2588 | = GDB_THREAD_OPTION_CLONE | GDB_THREAD_OPTION_EXIT; | |
65c459ab PA |
2589 | if (target_supports_set_thread_options (options)) |
2590 | tp->set_thread_options (options); | |
2591 | else | |
2592 | target_thread_events (true); | |
2593 | } | |
9488c327 PA |
2594 | else if (tp->thread_fsm () != nullptr) |
2595 | { | |
2596 | gdb_thread_options options = GDB_THREAD_OPTION_EXIT; | |
2597 | if (target_supports_set_thread_options (options)) | |
2598 | tp->set_thread_options (options); | |
2599 | else | |
2600 | target_thread_events (true); | |
2601 | } | |
7ac958f2 PA |
2602 | else |
2603 | { | |
2604 | if (target_supports_set_thread_options (0)) | |
2605 | tp->set_thread_options (0); | |
9488c327 PA |
2606 | else |
2607 | { | |
2608 | process_stratum_target *resume_target = tp->inf->process_target (); | |
2609 | if (!any_thread_needs_target_thread_events (resume_target, | |
2610 | resume_ptid)) | |
2611 | target_thread_events (false); | |
2612 | } | |
7ac958f2 | 2613 | } |
65c459ab PA |
2614 | |
2615 | /* If we're resuming more than one thread simultaneously, then any | |
2616 | thread other than the leader is being set to run free. Clear any | |
2617 | previous thread option for those threads. */ | |
2618 | if (resume_ptid != inferior_ptid && target_supports_set_thread_options (0)) | |
2619 | { | |
2620 | process_stratum_target *resume_target = tp->inf->process_target (); | |
2621 | for (thread_info *thr_iter : all_non_exited_threads (resume_target, | |
2622 | resume_ptid)) | |
2623 | if (thr_iter != tp) | |
2624 | thr_iter->set_thread_options (0); | |
2625 | } | |
2626 | ||
05d65a7a SM |
2627 | infrun_debug_printf ("resume_ptid=%s, step=%d, sig=%s", |
2628 | resume_ptid.to_string ().c_str (), | |
2629 | step, gdb_signal_to_symbol_string (sig)); | |
2630 | ||
64ce06e4 PA |
2631 | target_resume (resume_ptid, step, sig); |
2632 | } | |
2633 | ||
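/* Sketch of the per-thread option selection performed above
   (illustrative recap of the existing logic, not new behavior):

     if (step_over_info_valid_p ()
         || displaced_step_in_progress_thread (tp)
         || schedlock_applies (tp))
       options = GDB_THREAD_OPTION_CLONE | GDB_THREAD_OPTION_EXIT;
     else if (tp->thread_fsm () != nullptr)
       options = GDB_THREAD_OPTION_EXIT;
     else
       options = 0;

   with target_thread_events () used as a global fallback whenever the
   target cannot set options per thread.  */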
d930703d | 2634 | /* Resume the inferior. SIG is the signal to give the inferior |
71d378ae PA |
2635 | (GDB_SIGNAL_0 for none). Note: don't call this directly; instead |
2636 | call 'resume', which handles exceptions. */ | |
c906108c | 2637 | |
71d378ae PA |
2638 | static void |
2639 | resume_1 (enum gdb_signal sig) | |
c906108c | 2640 | { |
4e1c45ea | 2641 | struct thread_info *tp = inferior_thread (); |
9c742269 SM |
2642 | regcache *regcache = get_thread_regcache (tp); |
2643 | struct gdbarch *gdbarch = regcache->arch (); | |
b0f16a3e | 2644 | ptid_t resume_ptid; |
856e7dd6 PA |
2645 | /* This represents the user's step vs continue request. When |
2646 | deciding whether "set scheduler-locking step" applies, it's the | |
2647 | user's intention that counts. */ | |
2648 | const int user_step = tp->control.stepping_command; | |
64ce06e4 PA |
2649 | /* This represents what we'll actually request the target to do. |
2650 | This can decay from a step to a continue, if e.g., we need to | |
2651 | implement single-stepping with breakpoints (software | |
2652 | single-step). */ | |
c4464ade | 2653 | bool step; |
c7e8a53c | 2654 | |
c65d6b55 | 2655 | gdb_assert (!tp->stop_requested); |
c2829269 PA |
2656 | gdb_assert (!thread_is_in_step_over_chain (tp)); |
2657 | ||
1edb66d8 | 2658 | if (tp->has_pending_waitstatus ()) |
372316f1 | 2659 | { |
1eb8556f SM |
2660 | infrun_debug_printf |
2661 | ("thread %s has pending wait " | |
2662 | "status %s (currently_stepping=%d).", | |
0fab7955 | 2663 | tp->ptid.to_string ().c_str (), |
7dca2ea7 | 2664 | tp->pending_waitstatus ().to_string ().c_str (), |
1eb8556f | 2665 | currently_stepping (tp)); |
372316f1 | 2666 | |
5b6d1e4f | 2667 | tp->inf->process_target ()->threads_executing = true; |
7846f3aa | 2668 | tp->set_resumed (true); |
372316f1 PA |
2669 | |
2670 | /* FIXME: What should we do if we are supposed to resume this | |
2671 | thread with a signal? Maybe we should maintain a queue of | |
2672 | pending signals to deliver. */ | |
2673 | if (sig != GDB_SIGNAL_0) | |
2674 | { | |
fd7dcb94 | 2675 | warning (_("Couldn't deliver signal %s to %s."), |
a068643d | 2676 | gdb_signal_to_name (sig), |
0fab7955 | 2677 | tp->ptid.to_string ().c_str ()); |
372316f1 PA |
2678 | } |
2679 | ||
1edb66d8 | 2680 | tp->set_stop_signal (GDB_SIGNAL_0); |
372316f1 PA |
2681 | |
2682 | if (target_can_async_p ()) | |
9516f85a | 2683 | { |
4a570176 | 2684 | target_async (true); |
9516f85a AB |
2685 | /* Tell the event loop we have an event to process. */ |
2686 | mark_async_event_handler (infrun_async_inferior_event_token); | |
2687 | } | |
372316f1 PA |
2688 | return; |
2689 | } | |
2690 | ||
2691 | tp->stepped_breakpoint = 0; | |
2692 | ||
6b403daa PA |
2693 | /* Depends on stepped_breakpoint. */ |
2694 | step = currently_stepping (tp); | |
2695 | ||
6f5d514f | 2696 | if (current_inferior ()->thread_waiting_for_vfork_done != nullptr) |
74609e71 | 2697 | { |
48f9886d PA |
2698 | /* Don't try to single-step a vfork parent that is waiting for |
2699 | the child to get out of the shared memory region (by exec'ing | |
2700 | or exiting). This is particularly important on software | |
2701 | single-step archs, as the child process would trip on the | |
2702 | software single step breakpoint inserted for the parent | |
2703 | process. Since the parent will not actually execute any | |
2704 | instruction until the child is out of the shared region (such | |
2705 | are vfork's semantics), it is safe to simply continue it. | |
2706 | Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for | |
2707 | the parent, and tell it to `keep_going', which automatically | |
2708 | sets it stepping again. */ |
1eb8556f | 2709 | infrun_debug_printf ("resume: clear step"); |
c4464ade | 2710 | step = false; |
74609e71 YQ |
2711 | } |
2712 | ||
7ca9b62a TBA |
2713 | CORE_ADDR pc = regcache_read_pc (regcache); |
2714 | ||
1eb8556f SM |
2715 | infrun_debug_printf ("step=%d, signal=%s, trap_expected=%d, " |
2716 | "current thread [%s] at %s", | |
2717 | step, gdb_signal_to_symbol_string (sig), | |
2718 | tp->control.trap_expected, | |
0fab7955 | 2719 | inferior_ptid.to_string ().c_str (), |
1eb8556f | 2720 | paddress (gdbarch, pc)); |
c906108c | 2721 | |
f9582a22 | 2722 | const address_space *aspace = tp->inf->aspace.get (); |
74387712 | 2723 | |
c2c6d25f JM |
2724 | /* Normally, by the time we reach `resume', the breakpoints are either |
2725 | removed or inserted, as appropriate. The exception is if we're sitting | |
2726 | at a permanent breakpoint; we need to step over it, but permanent | |
2727 | breakpoints can't be removed. So we have to test for it here. */ | |
6c95b8df | 2728 | if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here) |
6d350bb5 | 2729 | { |
af48d08f PA |
2730 | if (sig != GDB_SIGNAL_0) |
2731 | { | |
2732 | /* We have a signal to pass to the inferior. The resume | |
2733 | may, or may not take us to the signal handler. If this | |
2734 | is a step, we'll need to stop in the signal handler, if | |
2735 | there's one, (if the target supports stepping into | |
2736 | handlers), or in the next mainline instruction, if | |
2737 | there's no handler. If this is a continue, we need to be | |
2738 | sure to run the handler with all breakpoints inserted. | |
2739 | In all cases, set a breakpoint at the current address | |
2740 | (where the handler returns to), and once that breakpoint | |
2741 | is hit, resume skipping the permanent breakpoint. If | |
2742 | that breakpoint isn't hit, then we've stepped into the | |
2743 | signal handler (or hit some other event). We'll delete | |
2744 | the step-resume breakpoint then. */ | |
2745 | ||
1eb8556f SM |
2746 | infrun_debug_printf ("resume: skipping permanent breakpoint, " |
2747 | "deliver signal first"); | |
af48d08f PA |
2748 | |
2749 | clear_step_over_info (); | |
2750 | tp->control.trap_expected = 0; | |
2751 | ||
03acd4d8 | 2752 | if (tp->control.step_resume_breakpoint == nullptr) |
af48d08f PA |
2753 | { |
2754 | /* Set a "high-priority" step-resume, as we don't want | |
2755 | user breakpoints at PC to trigger (again) when this | |
2756 | hits. */ | |
2757 | insert_hp_step_resume_breakpoint_at_frame (get_current_frame ()); | |
f5951b9f SM |
2758 | gdb_assert (tp->control.step_resume_breakpoint->first_loc () |
2759 | .permanent); | |
af48d08f PA |
2760 | |
2761 | tp->step_after_step_resume_breakpoint = step; | |
2762 | } | |
2763 | ||
2764 | insert_breakpoints (); | |
2765 | } | |
2766 | else | |
2767 | { | |
2768 | /* There's no signal to pass, we can go ahead and skip the | |
2769 | permanent breakpoint manually. */ | |
1eb8556f | 2770 | infrun_debug_printf ("skipping permanent breakpoint"); |
af48d08f PA |
2771 | gdbarch_skip_permanent_breakpoint (gdbarch, regcache); |
2772 | /* Update pc to reflect the new address from which we will | |
2773 | execute instructions. */ | |
2774 | pc = regcache_read_pc (regcache); | |
2775 | ||
2776 | if (step) | |
2777 | { | |
2778 | /* We've already advanced the PC, so the stepping part | |
2779 | is done. Now we need to arrange for a trap to be | |
2780 | reported to handle_inferior_event. Set a breakpoint | |
2781 | at the current PC, and run to it. Don't update | |
2782 | prev_pc, because if we end in | |
44a1ee51 PA |
2783 | switch_back_to_stepped_thread, we want the "expected |
2784 | thread advanced also" branch to be taken. IOW, we | |
2785 | don't want this thread to step further from PC | |
af48d08f | 2786 | (overstep). */ |
1ac806b8 | 2787 | gdb_assert (!step_over_info_valid_p ()); |
af48d08f PA |
2788 | insert_single_step_breakpoint (gdbarch, aspace, pc); |
2789 | insert_breakpoints (); | |
2790 | ||
fbea99ea | 2791 | resume_ptid = internal_resume_ptid (user_step); |
c4464ade | 2792 | do_target_resume (resume_ptid, false, GDB_SIGNAL_0); |
7846f3aa | 2793 | tp->set_resumed (true); |
af48d08f PA |
2794 | return; |
2795 | } | |
2796 | } | |
6d350bb5 | 2797 | } |
c2c6d25f | 2798 | |
c1e36e3e PA |
2799 | /* If we have a breakpoint to step over, make sure to do a single |
2800 | step only. Same if we have software watchpoints. */ | |
2801 | if (tp->control.trap_expected || bpstat_should_step ()) | |
2802 | tp->control.may_range_step = 0; | |
2803 | ||
7da6a5b9 LM |
2804 | /* If displaced stepping is enabled, step over breakpoints by executing a |
2805 | copy of the instruction at a different address. | |
237fc4c9 PA |
2806 | |
2807 | We can't use displaced stepping when we have a signal to deliver; | |
2808 | the comments for displaced_step_prepare explain why. The | |
2809 | comments in the handle_inferior event for dealing with 'random | |
74609e71 YQ |
2810 | signals' explain what we do instead. |
2811 | ||
2812 | We can't use displaced stepping while we are waiting for a |
2813 | vfork_done event either; displaced stepping breaks the vfork child |
2814 | in the same way a software single-step breakpoint would. */ |
3fc8eb30 PA |
2815 | if (tp->control.trap_expected |
2816 | && use_displaced_stepping (tp) | |
cb71640d | 2817 | && !step_over_info_valid_p () |
a493e3e2 | 2818 | && sig == GDB_SIGNAL_0 |
6f5d514f | 2819 | && current_inferior ()->thread_waiting_for_vfork_done == nullptr) |
237fc4c9 | 2820 | { |
bab37966 SM |
2821 | displaced_step_prepare_status prepare_status |
2822 | = displaced_step_prepare (tp); | |
fc1cf338 | 2823 | |
bab37966 | 2824 | if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE) |
d56b7306 | 2825 | { |
1eb8556f | 2826 | infrun_debug_printf ("Got placed in step-over queue"); |
4d9d9d04 PA |
2827 | |
2828 | tp->control.trap_expected = 0; | |
d56b7306 VP |
2829 | return; |
2830 | } | |
bab37966 | 2831 | else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_CANT) |
3fc8eb30 PA |
2832 | { |
2833 | /* Fallback to stepping over the breakpoint in-line. */ | |
2834 | ||
2835 | if (target_is_non_stop_p ()) | |
4f5539f0 | 2836 | stop_all_threads ("displaced stepping falling back on inline stepping"); |
3fc8eb30 | 2837 | |
74387712 SM |
2838 | set_step_over_info (aspace, regcache_read_pc (regcache), 0, |
2839 | tp->global_num); | |
3fc8eb30 | 2840 | |
22b11ba9 | 2841 | step = maybe_software_singlestep (gdbarch); |
3fc8eb30 PA |
2842 | |
2843 | insert_breakpoints (); | |
2844 | } | |
bab37966 | 2845 | else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_OK) |
3fc8eb30 | 2846 | { |
3fc8eb30 PA |
2847 | /* Update pc to reflect the new address from which we will |
2848 | execute instructions due to displaced stepping. */ | |
00431a78 | 2849 | pc = regcache_read_pc (get_thread_regcache (tp)); |
ca7781d2 | 2850 | |
40a53766 | 2851 | step = gdbarch_displaced_step_hw_singlestep (gdbarch); |
3fc8eb30 | 2852 | } |
bab37966 | 2853 | else |
557b4d76 SM |
2854 | gdb_assert_not_reached ("Invalid displaced_step_prepare_status " |
2855 | "value."); | |
237fc4c9 PA |
2856 | } |
2857 | ||
2facfe5c | 2858 | /* Do we need to do it the hard way, w/temp breakpoints? */ |
99e40580 | 2859 | else if (step) |
22b11ba9 | 2860 | step = maybe_software_singlestep (gdbarch); |
c906108c | 2861 | |
30852783 UW |
2862 | /* Currently, our software single-step implementation leads to different |
2863 | results than hardware single-stepping in one situation: when stepping | |
2864 | into delivering a signal which has an associated signal handler, | |
2865 | hardware single-step will stop at the first instruction of the handler, | |
2866 | while software single-step will simply skip execution of the handler. | |
2867 | ||
2868 | For now, this difference in behavior is accepted since there is no | |
2869 | easy way to actually implement single-stepping into a signal handler | |
2870 | without kernel support. | |
2871 | ||
2872 | However, there is one scenario where this difference leads to follow-on | |
2873 | problems: if we're stepping off a breakpoint by removing all breakpoints | |
2874 | and then single-stepping. In this case, the software single-step | |
2875 | behavior means that even if there is a *breakpoint* in the signal | |
2876 | handler, GDB still would not stop. | |
2877 | ||
2878 | Fortunately, we can at least fix this particular issue. We detect | |
2879 | here the case where we are about to deliver a signal while software | |
2880 | single-stepping with breakpoints removed. In this situation, we | |
2881 | revert the decisions to remove all breakpoints and insert single- | |
2882 | step breakpoints, and instead we install a step-resume breakpoint | |
2883 | at the current address, deliver the signal without stepping, and | |
2884 | once we arrive back at the step-resume breakpoint, actually step | |
2885 | over the breakpoint we originally wanted to step over. */ | |
34b7e8a6 | 2886 | if (thread_has_single_step_breakpoints_set (tp) |
6cc83d2a PA |
2887 | && sig != GDB_SIGNAL_0 |
2888 | && step_over_info_valid_p ()) | |
30852783 UW |
2889 | { |
2890 | /* If we have nested signals or a pending signal is delivered | |
7da6a5b9 | 2891 | immediately after a handler returns, we might already have |
30852783 UW |
2892 | a step-resume breakpoint set on the earlier handler. We cannot |
2893 | set another step-resume breakpoint; just continue on until the | |
2894 | original breakpoint is hit. */ | |
03acd4d8 | 2895 | if (tp->control.step_resume_breakpoint == nullptr) |
30852783 | 2896 | { |
2c03e5be | 2897 | insert_hp_step_resume_breakpoint_at_frame (get_current_frame ()); |
30852783 UW |
2898 | tp->step_after_step_resume_breakpoint = 1; |
2899 | } | |
2900 | ||
34b7e8a6 | 2901 | delete_single_step_breakpoints (tp); |
30852783 | 2902 | |
31e77af2 | 2903 | clear_step_over_info (); |
30852783 | 2904 | tp->control.trap_expected = 0; |
31e77af2 PA |
2905 | |
2906 | insert_breakpoints (); | |
30852783 UW |
2907 | } |
2908 | ||
b0f16a3e SM |
2909 | /* If STEP is set, it's a request to use hardware stepping |
2910 | facilities. But in that case, we should never | |
2911 | use singlestep breakpoint. */ | |
34b7e8a6 | 2912 | gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step)); |
dfcd3bfb | 2913 | |
fbea99ea | 2914 | /* Decide the set of threads to ask the target to resume. */ |
1946c4cc | 2915 | if (tp->control.trap_expected) |
b0f16a3e SM |
2916 | { |
2917 | /* We're allowing a thread to run past a breakpoint it has | |
1946c4cc YQ |
2918 | hit, either by single-stepping the thread with the breakpoint |
2919 | removed, or by displaced stepping, with the breakpoint inserted. | |
2920 | In the former case, we need to single-step only this thread, | |
2921 | and keep others stopped, as they can miss this breakpoint if | |
2922 | allowed to run. That's not really a problem for displaced | |
2923 | stepping, but, we still keep other threads stopped, in case | |
2924 | another thread is also stopped for a breakpoint waiting for | |
2925 | its turn in the displaced stepping queue. */ | |
b0f16a3e SM |
2926 | resume_ptid = inferior_ptid; |
2927 | } | |
fbea99ea PA |
2928 | else |
2929 | resume_ptid = internal_resume_ptid (user_step); | |
d4db2f36 | 2930 | |
7f5ef605 PA |
2931 | if (execution_direction != EXEC_REVERSE |
2932 | && step && breakpoint_inserted_here_p (aspace, pc)) | |
b0f16a3e | 2933 | { |
372316f1 PA |
2934 | /* There are two cases where we currently need to step a |
2935 | breakpoint instruction when we have a signal to deliver: | |
2936 | ||
2937 | - See handle_signal_stop where we handle random signals that | |
2938 | could take us out of the stepping range. Normally, in |
2939 | that case we end up continuing (instead of stepping) over the | |
7f5ef605 PA |
2940 | signal handler with a breakpoint at PC, but there are cases |
2941 | where we should _always_ single-step, even if we have a | |
2942 | step-resume breakpoint, like when a software watchpoint is | |
2943 | set. If single-stepping and delivering a signal at the |
2944 | same time would take us to the signal handler, then we could |
2945 | have removed the breakpoint at PC to step over it. However, | |
2946 | some hardware step targets (e.g., Mac OS) can't step |
2947 | into signal handlers, and for those, we need to leave the | |
2948 | breakpoint at PC inserted, as otherwise if the handler | |
2949 | recurses and executes PC again, it'll miss the breakpoint. | |
2950 | So we leave the breakpoint inserted anyway, but we need to | |
2951 | record that we tried to step a breakpoint instruction, so | |
372316f1 PA |
2952 | that adjust_pc_after_break doesn't end up confused. |
2953 | ||
dda83cd7 | 2954 | - In non-stop if we insert a breakpoint (e.g., a step-resume) |
372316f1 PA |
2955 | in one thread after another thread that was stepping had been |
2956 | momentarily paused for a step-over. When we re-resume the | |
2957 | stepping thread, it may be resumed from that address with a | |
2958 | breakpoint that hasn't trapped yet. Seen with | |
2959 | gdb.threads/non-stop-fair-events.exp, on targets that don't | |
2960 | do displaced stepping. */ | |
2961 | ||
1eb8556f | 2962 | infrun_debug_printf ("resume: [%s] stepped breakpoint", |
0fab7955 | 2963 | tp->ptid.to_string ().c_str ()); |
7f5ef605 PA |
2964 | |
2965 | tp->stepped_breakpoint = 1; | |
2966 | ||
b0f16a3e SM |
2967 | /* Most targets can step a breakpoint instruction, thus |
2968 | executing it normally. But if this one cannot, just | |
2969 | continue and we will hit it anyway. */ | |
7f5ef605 | 2970 | if (gdbarch_cannot_step_breakpoint (gdbarch)) |
c4464ade | 2971 | step = false; |
b0f16a3e | 2972 | } |
ef5cf84e | 2973 | |
b0f16a3e SM |
2974 | if (tp->control.may_range_step) |
2975 | { | |
2976 | /* If we're resuming a thread with the PC out of the step | |
2977 | range, then we're doing some nested/finer run control | |
2978 | operation, like stepping the thread out of the dynamic | |
2979 | linker or the displaced stepping scratch pad. We | |
2980 | shouldn't have allowed a range step then. */ | |
2981 | gdb_assert (pc_in_thread_step_range (pc, tp)); | |
2982 | } | |
c1e36e3e | 2983 | |
64ce06e4 | 2984 | do_target_resume (resume_ptid, step, sig); |
7846f3aa | 2985 | tp->set_resumed (true); |
c906108c | 2986 | } |
71d378ae PA |
2987 | |
2988 | /* Resume the inferior. SIG is the signal to give the inferior | |
2989 | (GDB_SIGNAL_0 for none). This is a wrapper around 'resume_1' that | |
2990 | rolls back state on error. */ | |
2991 | ||
aff4e175 | 2992 | static void |
71d378ae PA |
2993 | resume (gdb_signal sig) |
2994 | { | |
a70b8144 | 2995 | try |
71d378ae PA |
2996 | { |
2997 | resume_1 (sig); | |
2998 | } | |
230d2906 | 2999 | catch (const gdb_exception &ex) |
71d378ae PA |
3000 | { |
3001 | /* If resuming is being aborted for any reason, delete any | |
3002 | single-step breakpoint resume_1 may have created, to avoid | |
3003 | confusing the following resumption, and to avoid leaving | |
3004 | single-step breakpoints perturbing other threads, in case | |
3005 | we're running in non-stop mode. */ | |
3006 | if (inferior_ptid != null_ptid) | |
3007 | delete_single_step_breakpoints (inferior_thread ()); | |
eedc3f4f | 3008 | throw; |
71d378ae | 3009 | } |
71d378ae PA |
3010 | } |
3011 | ||
c906108c | 3012 | \f |
237fc4c9 | 3013 | /* Proceeding. */ |
c906108c | 3014 | |
4c2f2a79 PA |
3015 | /* See infrun.h. */ |
3016 | ||
3017 | /* Counter that tracks number of user visible stops. This can be used | |
3018 | to tell whether a command has proceeded the inferior past the | |
3019 | current location. This allows e.g., inferior function calls in | |
3020 | breakpoint commands to not interrupt the command list. When the | |
3021 | call finishes successfully, the inferior is standing at the same | |
3022 | breakpoint as if nothing happened (and so we don't call | |
3023 | normal_stop). */ | |
3024 | static ULONGEST current_stop_id; | |
3025 | ||
3026 | /* See infrun.h. */ | |
3027 | ||
3028 | ULONGEST | |
3029 | get_stop_id (void) | |
3030 | { | |
3031 | return current_stop_id; | |
3032 | } | |
3033 | ||
3034 | /* Called when we report a user visible stop. */ | |
3035 | ||
3036 | static void | |
3037 | new_stop_id (void) | |
3038 | { | |
3039 | current_stop_id++; | |
3040 | } | |
3041 | ||
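/* Hypothetical usage sketch -- the helper below is made up for
   illustration and is not a GDB API.  The stop id lets a caller tell
   whether a command proceeded the inferior past a user-visible stop:

     ULONGEST stop_id_before = get_stop_id ();
     run_some_command ();                  // hypothetical
     if (get_stop_id () != stop_id_before)
       {
         // A user-visible stop was reported in between.
       }
 */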
c906108c SS |
3042 | /* Clear out all variables saying what to do when inferior is continued. |
3043 | First do this, then set the ones you want, then call `proceed'. */ | |
3044 | ||
a7212384 UW |
3045 | static void |
3046 | clear_proceed_status_thread (struct thread_info *tp) | |
c906108c | 3047 | { |
0fab7955 | 3048 | infrun_debug_printf ("%s", tp->ptid.to_string ().c_str ()); |
d6b48e9c | 3049 | |
372316f1 PA |
3050 | /* If we're starting a new sequence, then the previous finished |
3051 | single-step is no longer relevant. */ | |
1edb66d8 | 3052 | if (tp->has_pending_waitstatus ()) |
372316f1 | 3053 | { |
1edb66d8 | 3054 | if (tp->stop_reason () == TARGET_STOPPED_BY_SINGLE_STEP) |
372316f1 | 3055 | { |
1eb8556f SM |
3056 | infrun_debug_printf ("pending event of %s was a finished step. " |
3057 | "Discarding.", | |
0fab7955 | 3058 | tp->ptid.to_string ().c_str ()); |
372316f1 | 3059 | |
1edb66d8 SM |
3060 | tp->clear_pending_waitstatus (); |
3061 | tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON); | |
372316f1 | 3062 | } |
1eb8556f | 3063 | else |
372316f1 | 3064 | { |
1eb8556f SM |
3065 | infrun_debug_printf |
3066 | ("thread %s has pending wait status %s (currently_stepping=%d).", | |
0fab7955 | 3067 | tp->ptid.to_string ().c_str (), |
7dca2ea7 | 3068 | tp->pending_waitstatus ().to_string ().c_str (), |
1eb8556f | 3069 | currently_stepping (tp)); |
372316f1 PA |
3070 | } |
3071 | } | |
3072 | ||
70509625 PA |
3073 | /* If this signal should not be seen by the program, give it zero. |
3074 | Used for debugging signals. */ | |
1edb66d8 SM |
3075 | if (!signal_pass_state (tp->stop_signal ())) |
3076 | tp->set_stop_signal (GDB_SIGNAL_0); | |
70509625 | 3077 | |
573269a8 | 3078 | tp->release_thread_fsm (); |
243a9253 | 3079 | |
16c381f0 JK |
3080 | tp->control.trap_expected = 0; |
3081 | tp->control.step_range_start = 0; | |
3082 | tp->control.step_range_end = 0; | |
c1e36e3e | 3083 | tp->control.may_range_step = 0; |
16c381f0 JK |
3084 | tp->control.step_frame_id = null_frame_id; |
3085 | tp->control.step_stack_frame_id = null_frame_id; | |
3086 | tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE; | |
03acd4d8 | 3087 | tp->control.step_start_function = nullptr; |
956bbb55 | 3088 | tp->stop_requested = false; |
4e1c45ea | 3089 | |
16c381f0 | 3090 | tp->control.stop_step = 0; |
32400beb | 3091 | |
b986eec5 CL |
3092 | tp->control.proceed_to_finish = 0; |
3093 | ||
856e7dd6 | 3094 | tp->control.stepping_command = 0; |
17b2616c | 3095 | |
a7212384 | 3096 | /* Discard any remaining commands or status from previous stop. */ |
16c381f0 | 3097 | bpstat_clear (&tp->control.stop_bpstat); |
a7212384 | 3098 | } |
32400beb | 3099 | |
7603ea6a SM |
3100 | /* Notify the current interpreter and observers that the target is about to |
3101 | proceed. */ | |
3102 | ||
3103 | static void | |
3104 | notify_about_to_proceed () | |
3105 | { | |
3106 | top_level_interpreter ()->on_about_to_proceed (); | |
3107 | gdb::observers::about_to_proceed.notify (); | |
3108 | } | |
3109 | ||
a7212384 | 3110 | void |
70509625 | 3111 | clear_proceed_status (int step) |
a7212384 | 3112 | { |
f2665db5 MM |
3113 | /* With scheduler-locking replay, stop replaying other threads if we're |
3114 | not replaying the user-visible resume ptid. | |
3115 | ||
3116 | This is a convenience feature to not require the user to explicitly | |
3117 | stop replaying the other threads. We're assuming that the user's | |
3118 | intent is to resume tracing the recorded process. */ | |
3119 | if (!non_stop && scheduler_mode == schedlock_replay | |
3120 | && target_record_is_replaying (minus_one_ptid) | |
3121 | && !target_record_will_replay (user_visible_resume_ptid (step), | |
3122 | execution_direction)) | |
3123 | target_record_stop_replaying (); | |
3124 | ||
08036331 | 3125 | if (!non_stop && inferior_ptid != null_ptid) |
6c95b8df | 3126 | { |
08036331 | 3127 | ptid_t resume_ptid = user_visible_resume_ptid (step); |
5b6d1e4f PA |
3128 | process_stratum_target *resume_target |
3129 | = user_visible_resume_target (resume_ptid); | |
70509625 PA |
3130 | |
3131 | /* In all-stop mode, delete the per-thread status of all threads | |
3132 | we're about to resume, implicitly and explicitly. */ | |
5b6d1e4f | 3133 | for (thread_info *tp : all_non_exited_threads (resume_target, resume_ptid)) |
08036331 | 3134 | clear_proceed_status_thread (tp); |
6c95b8df PA |
3135 | } |
3136 | ||
d7e15655 | 3137 | if (inferior_ptid != null_ptid) |
a7212384 UW |
3138 | { |
3139 | struct inferior *inferior; | |
3140 | ||
3141 | if (non_stop) | |
3142 | { | |
6c95b8df PA |
3143 | /* If in non-stop mode, only delete the per-thread status of |
3144 | the current thread. */ | |
a7212384 UW |
3145 | clear_proceed_status_thread (inferior_thread ()); |
3146 | } | |
6c95b8df | 3147 | |
d6b48e9c | 3148 | inferior = current_inferior (); |
16c381f0 | 3149 | inferior->control.stop_soon = NO_STOP_QUIETLY; |
4e1c45ea PA |
3150 | } |
3151 | ||
7603ea6a | 3152 | notify_about_to_proceed (); |
c906108c SS |
3153 | } |
3154 | ||
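/* Illustrative call pattern for the "clear, then set, then proceed"
   protocol described above (a sketch; which control fields are set
   varies by command, and START/END are placeholders):

     clear_proceed_status (0);
     tp->control.step_range_start = start;   // set only what you need
     tp->control.step_range_end = end;
     proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
 */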
99619bea PA |
3155 | /* Returns true if TP is still stopped at a breakpoint that needs |
3156 | stepping-over in order to make progress. If the breakpoint is gone | |
3157 | meanwhile, we can skip the whole step-over dance. */ | |
ea67f13b | 3158 | |
c4464ade | 3159 | static bool |
6c4cfb24 | 3160 | thread_still_needs_step_over_bp (struct thread_info *tp) |
99619bea PA |
3161 | { |
3162 | if (tp->stepping_over_breakpoint) | |
3163 | { | |
00431a78 | 3164 | struct regcache *regcache = get_thread_regcache (tp); |
99619bea | 3165 | |
f9582a22 | 3166 | if (breakpoint_here_p (tp->inf->aspace.get (), |
af48d08f PA |
3167 | regcache_read_pc (regcache)) |
3168 | == ordinary_breakpoint_here) | |
c4464ade | 3169 | return true; |
99619bea PA |
3170 | |
3171 | tp->stepping_over_breakpoint = 0; | |
3172 | } | |
3173 | ||
c4464ade | 3174 | return false; |
99619bea PA |
3175 | } |
3176 | ||
6c4cfb24 PA |
3177 | /* Check whether thread TP still needs to start a step-over in order |
3178 | to make progress when resumed. Returns a bitwise OR of enum |
3179 | step_over_what bits, indicating what needs to be stepped over. */ | |
3180 | ||
8d297bbf | 3181 | static step_over_what |
6c4cfb24 PA |
3182 | thread_still_needs_step_over (struct thread_info *tp) |
3183 | { | |
8d297bbf | 3184 | step_over_what what = 0; |
6c4cfb24 PA |
3185 | |
3186 | if (thread_still_needs_step_over_bp (tp)) | |
3187 | what |= STEP_OVER_BREAKPOINT; | |
3188 | ||
3189 | if (tp->stepping_over_watchpoint | |
9aed480c | 3190 | && !target_have_steppable_watchpoint ()) |
6c4cfb24 PA |
3191 | what |= STEP_OVER_WATCHPOINT; |
3192 | ||
3193 | return what; | |
3194 | } | |
3195 | ||
483805cf PA |
3196 | /* Returns true if scheduler locking applies to thread TP, taking |
3197 | into account whether TP is running a step/next-like command. */ |
3198 | ||
c4464ade | 3199 | static bool |
856e7dd6 | 3200 | schedlock_applies (struct thread_info *tp) |
483805cf PA |
3201 | { |
3202 | return (scheduler_mode == schedlock_on | |
3203 | || (scheduler_mode == schedlock_step | |
f2665db5 MM |
3204 | && tp->control.stepping_command) |
3205 | || (scheduler_mode == schedlock_replay | |
3206 | && target_record_will_replay (minus_one_ptid, | |
3207 | execution_direction))); | |
483805cf PA |
3208 | } |
3209 | ||
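/* In short (illustrative recap): scheduler locking applies when the
   mode is "on"; when the mode is "step" and TP is running a user
   stepping command; or when the mode is "replay" and the target is
   replaying.  */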
3df78436 AB |
3210 | /* When FORCE_P is false, set process_stratum_target::COMMIT_RESUMED_STATE |
3211 | in all target stacks that have threads executing and don't have threads | |
3212 | with pending events. | |
3213 | ||
3214 | When FORCE_P is true, set process_stratum_target::COMMIT_RESUMED_STATE | |
3215 | in all target stacks that have threads executing regardless of whether | |
3216 | there are pending events or not. | |
3217 | ||
3218 | Passing FORCE_P as false makes sense when GDB is going to wait for | |
3219 | events from all threads and will therefore spot the pending events. | |
3220 | However, if GDB is only going to wait for events from select threads | |
3221 | (i.e. when performing an inferior call) then a pending event on some | |
3222 | other thread will not be spotted, and if we fail to commit the resume | |
3223 | state for the thread performing the inferior call, then the inferior | |
3224 | call will never complete (or even start). */ | |
5b6d1e4f PA |
3225 | |
3226 | static void | |
3df78436 | 3227 | maybe_set_commit_resumed_all_targets (bool force_p) |
1192f124 | 3228 | { |
b4b1a226 SM |
3229 | scoped_restore_current_thread restore_thread; |
3230 | ||
1192f124 SM |
3231 | for (inferior *inf : all_non_exited_inferiors ()) |
3232 | { | |
3233 | process_stratum_target *proc_target = inf->process_target (); | |
3234 | ||
3235 | if (proc_target->commit_resumed_state) | |
3236 | { | |
3237 | /* We already set this in a previous iteration, via another | |
3238 | inferior sharing the process_stratum target. */ | |
3239 | continue; | |
3240 | } | |
3241 | ||
3242 | /* If the target has no resumed threads, it would be useless to | |
3243 | ask it to commit the resumed threads. */ | |
3244 | if (!proc_target->threads_executing) | |
3245 | { | |
3246 | infrun_debug_printf ("not requesting commit-resumed for target " | |
3247 | "%s, no resumed threads", | |
3248 | proc_target->shortname ()); | |
3249 | continue; | |
3250 | } | |
3251 | ||
3252 | /* As an optimization, if a thread from this target has some | |
3253 | status to report, handle it before requiring the target to | |
3254 | commit its resumed threads: handling the status might lead to | |
3255 | resuming more threads. */ | |
3df78436 | 3256 | if (!force_p && proc_target->has_resumed_with_pending_wait_status ()) |
1192f124 SM |
3257 | { |
3258 | infrun_debug_printf ("not requesting commit-resumed for target %s, a" | |
3259 | " thread has a pending waitstatus", | |
3260 | proc_target->shortname ()); | |
3261 | continue; | |
3262 | } | |
3263 | ||
b4b1a226 SM |
3264 | switch_to_inferior_no_thread (inf); |
3265 | ||
3df78436 | 3266 | if (!force_p && target_has_pending_events ()) |
b4b1a226 SM |
3267 | { |
3268 | infrun_debug_printf ("not requesting commit-resumed for target %s, " | |
3269 | "target has pending events", | |
3270 | proc_target->shortname ()); | |
3271 | continue; | |
3272 | } | |
3273 | ||
1192f124 SM |
3274 | infrun_debug_printf ("enabling commit-resumed for target %s", |
3275 | proc_target->shortname ()); | |
3276 | ||
3277 | proc_target->commit_resumed_state = true; | |
3278 | } | |
3279 | } | |
3280 | ||
3281 | /* See infrun.h. */ | |
3282 | ||
3283 | void | |
3284 | maybe_call_commit_resumed_all_targets () | |
5b6d1e4f PA |
3285 | { |
3286 | scoped_restore_current_thread restore_thread; | |
3287 | ||
1192f124 SM |
3288 | for (inferior *inf : all_non_exited_inferiors ()) |
3289 | { | |
3290 | process_stratum_target *proc_target = inf->process_target (); | |
3291 | ||
3292 | if (!proc_target->commit_resumed_state) | |
3293 | continue; | |
3294 | ||
3295 | switch_to_inferior_no_thread (inf); | |
3296 | ||
3297 | infrun_debug_printf ("calling commit_resumed for target %s", | |
3298 | proc_target->shortname ()); |
3299 | ||
3300 | target_commit_resumed (); | |
3301 | } | |
3302 | } | |
3303 | ||
3304 | /* To track nesting of scoped_disable_commit_resumed objects, ensuring | |
3305 | that only the outermost one attempts to re-enable | |
3306 | commit-resumed. */ | |
3307 | static bool enable_commit_resumed = true; | |
3308 | ||
3309 | /* See infrun.h. */ | |
3310 | ||
3311 | scoped_disable_commit_resumed::scoped_disable_commit_resumed | |
3312 | (const char *reason) | |
3313 | : m_reason (reason), | |
3314 | m_prev_enable_commit_resumed (enable_commit_resumed) | |
3315 | { | |
3316 | infrun_debug_printf ("reason=%s", m_reason); | |
3317 | ||
3318 | enable_commit_resumed = false; | |
5b6d1e4f PA |
3319 | |
3320 | for (inferior *inf : all_non_exited_inferiors ()) | |
1192f124 SM |
3321 | { |
3322 | process_stratum_target *proc_target = inf->process_target (); | |
5b6d1e4f | 3323 | |
1192f124 SM |
3324 | if (m_prev_enable_commit_resumed) |
3325 | { | |
3326 | /* This is the outermost instance: force all | |
3327 | COMMIT_RESUMED_STATE to false. */ | |
3328 | proc_target->commit_resumed_state = false; | |
3329 | } | |
3330 | else | |
3331 | { | |
3332 | /* This is not the outermost instance, we expect | |
3333 | COMMIT_RESUMED_STATE to have been cleared by the | |
3334 | outermost instance. */ | |
3335 | gdb_assert (!proc_target->commit_resumed_state); | |
3336 | } | |
3337 | } | |
3338 | } | |
3339 | ||
3340 | /* See infrun.h. */ | |
3341 | ||
3342 | void | |
3343 | scoped_disable_commit_resumed::reset () | |
3344 | { | |
3345 | if (m_reset) | |
3346 | return; | |
3347 | m_reset = true; | |
3348 | ||
3349 | infrun_debug_printf ("reason=%s", m_reason); | |
3350 | ||
3351 | gdb_assert (!enable_commit_resumed); | |
3352 | ||
3353 | enable_commit_resumed = m_prev_enable_commit_resumed; | |
3354 | ||
3355 | if (m_prev_enable_commit_resumed) | |
5b6d1e4f | 3356 | { |
1192f124 | 3357 | /* This is the outermost instance, re-enable |
287de656 | 3358 | COMMIT_RESUMED_STATE on the targets where it's possible. */ |
3df78436 | 3359 | maybe_set_commit_resumed_all_targets (false); |
1192f124 SM |
3360 | } |
3361 | else | |
3362 | { | |
3363 | /* This is not the outermost instance, we expect | |
3364 | COMMIT_RESUMED_STATE to still be false. */ | |
3365 | for (inferior *inf : all_non_exited_inferiors ()) | |
3366 | { | |
3367 | process_stratum_target *proc_target = inf->process_target (); | |
3368 | gdb_assert (!proc_target->commit_resumed_state); | |
3369 | } | |
3370 | } | |
3371 | } | |
3372 | ||
3373 | /* See infrun.h. */ | |
3374 | ||
3375 | scoped_disable_commit_resumed::~scoped_disable_commit_resumed () | |
3376 | { | |
3377 | reset (); | |
3378 | } | |
3379 | ||
3380 | /* See infrun.h. */ | |
3381 | ||
3382 | void | |
3383 | scoped_disable_commit_resumed::reset_and_commit () | |
3384 | { | |
3385 | reset (); | |
3386 | maybe_call_commit_resumed_all_targets (); | |
3387 | } | |
3388 | ||
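/* Illustrative RAII usage, mirroring the call in proceed further
   below (a sketch, not a prescribed pattern):

     {
       scoped_disable_commit_resumed disable_commit_resumed ("reason");
       ...resume and/or stop threads...
       disable_commit_resumed.reset_and_commit ();  // commit on success
     }

   If an exception escapes instead, the destructor runs plain reset,
   which restores the previous commit-resumed state but does not call
   target_commit_resumed.  */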
3389 | /* See infrun.h. */ | |
3390 | ||
3391 | scoped_enable_commit_resumed::scoped_enable_commit_resumed | |
3df78436 | 3392 | (const char *reason, bool force_p) |
1192f124 SM |
3393 | : m_reason (reason), |
3394 | m_prev_enable_commit_resumed (enable_commit_resumed) | |
3395 | { | |
3396 | infrun_debug_printf ("reason=%s", m_reason); | |
3397 | ||
3398 | if (!enable_commit_resumed) | |
3399 | { | |
3400 | enable_commit_resumed = true; | |
3401 | ||
3402 | /* Re-enable COMMIT_RESUMED_STATE on the targets where it's | |
3403 | possible. */ | |
3df78436 | 3404 | maybe_set_commit_resumed_all_targets (force_p); |
1192f124 SM |
3405 | |
3406 | maybe_call_commit_resumed_all_targets (); | |
3407 | } | |
3408 | } | |
3409 | ||
3410 | /* See infrun.h. */ | |
3411 | ||
3412 | scoped_enable_commit_resumed::~scoped_enable_commit_resumed () | |
3413 | { | |
3414 | infrun_debug_printf ("reason=%s", m_reason); | |
3415 | ||
3416 | gdb_assert (enable_commit_resumed); | |
3417 | ||
3418 | enable_commit_resumed = m_prev_enable_commit_resumed; | |
3419 | ||
3420 | if (!enable_commit_resumed) | |
3421 | { | |
3422 | /* Force all COMMIT_RESUMED_STATE back to false. */ | |
3423 | for (inferior *inf : all_non_exited_inferiors ()) | |
3424 | { | |
3425 | process_stratum_target *proc_target = inf->process_target (); | |
3426 | proc_target->commit_resumed_state = false; | |
3427 | } | |
5b6d1e4f PA |
3428 | } |
3429 | } | |
3430 | ||
2f4fcf00 PA |
3431 | /* Check that all the targets we're about to resume are in non-stop |
3432 | mode. Ideally, we'd only care whether all targets support | |
3433 | target-async, but we're not there yet. E.g., stop_all_threads | |
3434 | doesn't know how to handle all-stop targets. Also, the remote | |
3435 | protocol in all-stop mode is synchronous, irrespective of | |
3436 | target-async, which means that things like a breakpoint re-set | |
3437 | triggered by one target would try to read memory from all targets | |
3438 | and fail. */ | |
3439 | ||
3440 | static void | |
3441 | check_multi_target_resumption (process_stratum_target *resume_target) | |
3442 | { | |
3443 | if (!non_stop && resume_target == nullptr) | |
3444 | { | |
3445 | scoped_restore_current_thread restore_thread; | |
3446 | ||
3447 | /* This is used to track whether we're resuming more than one | |
3448 | target. */ | |
3449 | process_stratum_target *first_connection = nullptr; | |
3450 | ||
3451 | /* The first inferior we see with a target that does not work in | |
3452 | always-non-stop mode. */ | |
3453 | inferior *first_not_non_stop = nullptr; | |
3454 | ||
f058c521 | 3455 | for (inferior *inf : all_non_exited_inferiors ()) |
2f4fcf00 PA |
3456 | { |
3457 | switch_to_inferior_no_thread (inf); | |
3458 | ||
55f6301a | 3459 | if (!target_has_execution ()) |
2f4fcf00 PA |
3460 | continue; |
3461 | ||
3462 | process_stratum_target *proc_target | |
3463 | = current_inferior ()->process_target (); |
3464 | ||
3465 | if (!target_is_non_stop_p ()) | |
3466 | first_not_non_stop = inf; | |
3467 | ||
3468 | if (first_connection == nullptr) | |
3469 | first_connection = proc_target; | |
3470 | else if (first_connection != proc_target | |
3471 | && first_not_non_stop != nullptr) | |
3472 | { | |
3473 | switch_to_inferior_no_thread (first_not_non_stop); | |
3474 | ||
3475 | proc_target = current_inferior ()->process_target (); |
3476 | ||
3477 | error (_("Connection %d (%s) does not support " | |
3478 | "multi-target resumption."), | |
3479 | proc_target->connection_number, | |
3480 | make_target_connection_string (proc_target).c_str ()); | |
3481 | } | |
3482 | } | |
3483 | } | |
3484 | } | |
3485 | ||
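/* Example of the setup rejected above (illustrative): two live
   connections where one target only works in all-stop mode.  Resuming
   both at once would require talking to the all-stop connection while
   it is running, which its synchronous protocol cannot do, hence the
   "does not support multi-target resumption" error.  */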
e07d892c MS |
3486 | /* Helper function for `proceed`. Check if thread TP is suitable for |
3487 | resuming, and, if it is, switch to the thread and call | |
3488 | `keep_going_pass_signal`. If TP is not suitable for resuming then this | |
3489 | function will just return without switching threads. */ | |
3490 | ||
3491 | static void | |
3492 | proceed_resume_thread_checked (thread_info *tp) | |
3493 | { | |
3494 | if (!tp->inf->has_execution ()) | |
3495 | { | |
3496 | infrun_debug_printf ("[%s] target has no execution", | |
3497 | tp->ptid.to_string ().c_str ()); | |
3498 | return; | |
3499 | } | |
3500 | ||
3501 | if (tp->resumed ()) | |
3502 | { | |
3503 | infrun_debug_printf ("[%s] resumed", | |
3504 | tp->ptid.to_string ().c_str ()); | |
3505 | gdb_assert (tp->executing () || tp->has_pending_waitstatus ()); | |
3506 | return; | |
3507 | } | |
3508 | ||
3509 | if (thread_is_in_step_over_chain (tp)) | |
3510 | { | |
3511 | infrun_debug_printf ("[%s] needs step-over", | |
3512 | tp->ptid.to_string ().c_str ()); | |
3513 | return; | |
3514 | } | |
3515 | ||
3516 | /* When handling a vfork GDB removes all breakpoints from the program | |
b1e0126e AB |
3517 | space in which the vfork is being handled. If we are following the |
3518 | parent then GDB will set the thread_waiting_for_vfork_done member of | |
3519 | the parent inferior. In this case we should take care to only resume | |
3520 | the vfork parent thread, the kernel will hold this thread suspended | |
3521 | until the vfork child has exited or execd, at which point the parent | |
3522 | will be resumed and a VFORK_DONE event sent to GDB. */ | |
e07d892c MS |
3523 | if (tp->inf->thread_waiting_for_vfork_done != nullptr) |
3524 | { | |
3525 | if (target_is_non_stop_p ()) | |
3526 | { | |
3527 | /* For non-stop targets, regardless of whether GDB is using | |
3528 | all-stop or non-stop mode, threads are controlled | |
3529 | individually. | |
3530 | ||
3531 | When a thread is handling a vfork, breakpoints are removed | |
3532 | from the inferior (well, program space in fact), so it is | |
3533 | critical that we don't try to resume any thread other than the | |
3534 | vfork parent. */ | |
3535 | if (tp != tp->inf->thread_waiting_for_vfork_done) | |
3536 | { | |
3537 | infrun_debug_printf ("[%s] thread %s of this inferior is " | |
3538 | "waiting for vfork-done", | |
3539 | tp->ptid.to_string ().c_str (), | |
3540 | tp->inf->thread_waiting_for_vfork_done | |
3541 | ->ptid.to_string ().c_str ()); | |
3542 | return; | |
3543 | } | |
3544 | } | |
3545 | else | |
3546 | { | |
3547 | /* For all-stop targets, when we attempt to resume the inferior, | |
3548 | we will only resume the vfork parent thread, this is handled | |
3549 | in internal_resume_ptid. | |
3550 | ||
3551 | Additionally, we will always be called with the vfork parent | |
3552 | thread as the current thread (TP) thanks to follow_fork, as | |
3553 | such the following assertion should hold. | |
3554 | ||
3555 | Beyond this there is nothing more that needs to be done | |
3556 | here. */ | |
3557 | gdb_assert (tp == tp->inf->thread_waiting_for_vfork_done); | |
3558 | } | |
3559 | } | |
3560 | ||
b1e0126e AB |
3561 | /* When handling a vfork GDB removes all breakpoints from the program |
3562 | space in which the vfork is being handled. If we are following the | |
3563 | child then GDB will set vfork_child member of the vfork parent | |
3564 | inferior. Once the child has either exited or execd then GDB will | |
3565 | detach from the parent process. Until that point GDB should not | |
3566 | resume any thread in the parent process. */ | |
3567 | if (tp->inf->vfork_child != nullptr) | |
3568 | { | |
3569 | infrun_debug_printf ("[%s] thread is part of a vfork parent, child is %d", | |
3570 | tp->ptid.to_string ().c_str (), | |
3571 | tp->inf->vfork_child->pid); | |
3572 | return; | |
3573 | } | |
3574 | ||
e07d892c MS |
3575 | infrun_debug_printf ("resuming %s", |
3576 | tp->ptid.to_string ().c_str ()); | |
3577 | ||
3578 | execution_control_state ecs (tp); | |
3579 | switch_to_thread (tp); | |
3580 | keep_going_pass_signal (&ecs); | |
3581 | if (!ecs.wait_some_more) | |
3582 | error (_("Command aborted.")); | |
3583 | } | |
3584 | ||
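/* Condensed view of the checks above (illustrative recap): the thread
   is skipped if its inferior has no execution, if it is already
   resumed, if it is queued for a step-over, or if it is held back by
   a pending vfork (either as a sibling of the vfork-done waiter, or
   as part of a vfork parent whose child has not yet exited or execd).
   Otherwise we switch to it and call keep_going_pass_signal.  */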
c906108c SS |
3585 | /* Basic routine for continuing the program in various fashions. |
3586 | ||
3587 | ADDR is the address to resume at, or -1 for resume where stopped. | |
aff4e175 AB |
3588 | SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none, |
3589 | or GDB_SIGNAL_DEFAULT to act according to how it stopped. |
c906108c SS |
3590 | |
3591 | You should call clear_proceed_status before calling proceed. */ | |
3592 | ||
3593 | void | |
64ce06e4 | 3594 | proceed (CORE_ADDR addr, enum gdb_signal siggnal) |
c906108c | 3595 | { |
3ec3145c SM |
3596 | INFRUN_SCOPED_DEBUG_ENTER_EXIT; |
3597 | ||
e58b0e63 | 3598 | struct gdbarch *gdbarch; |
e58b0e63 | 3599 | CORE_ADDR pc; |
c906108c | 3600 | |
05e1cac2 AB |
3601 | /* If we're stopped at a fork/vfork, switch to either the parent or child |
3602 | thread as defined by the "set follow-fork-mode" command, or, if both | |
3603 | the parent and child are controlled by GDB, and schedule-multiple is | |
3604 | on, follow the child. If none of the above apply then we just proceed | |
e58b0e63 PA |
3605 | resuming the current thread. */ |
3606 | if (!follow_fork ()) | |
3607 | { | |
3608 | /* The target for some reason decided not to resume. */ | |
3609 | normal_stop (); | |
f148b27e | 3610 | if (target_can_async_p ()) |
b1a35af2 | 3611 | inferior_event_handler (INF_EXEC_COMPLETE); |
e58b0e63 PA |
3612 | return; |
3613 | } | |
3614 | ||
842951eb | 3615 | /* We'll update this if & when we switch to a new thread. */ |
a81871f7 | 3616 | update_previous_thread (); |
842951eb | 3617 | |
08036331 | 3618 | thread_info *cur_thr = inferior_thread (); |
b26b06dd AB |
3619 | infrun_debug_printf ("cur_thr = %s", cur_thr->ptid.to_string ().c_str ()); |
3620 | ||
9c742269 SM |
3621 | regcache *regcache = get_thread_regcache (cur_thr); |
3622 | gdbarch = regcache->arch (); | |
3623 | pc = regcache_read_pc_protected (regcache); | |
3624 | ||
99619bea | 3625 | /* Fill in with reasonable starting values. */ |
08036331 | 3626 | init_thread_stepping_state (cur_thr); |
99619bea | 3627 | |
08036331 | 3628 | gdb_assert (!thread_is_in_step_over_chain (cur_thr)); |
c2829269 | 3629 | |
5b6d1e4f PA |
3630 | ptid_t resume_ptid |
3631 | = user_visible_resume_ptid (cur_thr->control.stepping_command); | |
3632 | process_stratum_target *resume_target | |
3633 | = user_visible_resume_target (resume_ptid); | |
3634 | ||
2f4fcf00 PA |
3635 | check_multi_target_resumption (resume_target); |
3636 | ||
2acceee2 | 3637 | if (addr == (CORE_ADDR) -1) |
c906108c | 3638 | { |
f9582a22 | 3639 | const address_space *aspace = cur_thr->inf->aspace.get (); |
74387712 | 3640 | |
351031f2 AB |
3641 | if (cur_thr->stop_pc_p () |
3642 | && pc == cur_thr->stop_pc () | |
af48d08f | 3643 | && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here |
b2175913 | 3644 | && execution_direction != EXEC_REVERSE) |
3352ef37 AC |
3645 | /* There is a breakpoint at the address we will resume at, |
3646 | step one instruction before inserting breakpoints so that | |
3647 | we do not stop right away (and report a second hit at this | |
b2175913 MS |
3648 | breakpoint). |
3649 | ||
3650 | Note, we don't do this in reverse, because we won't | |
3651 | actually be executing the breakpoint insn anyway. | |
3652 | We'll be (un-)executing the previous instruction. */ | |
08036331 | 3653 | cur_thr->stepping_over_breakpoint = 1; |
515630c5 UW |
3654 | else if (gdbarch_single_step_through_delay_p (gdbarch) |
3655 | && gdbarch_single_step_through_delay (gdbarch, | |
3656 | get_current_frame ())) | |
3352ef37 AC |
3657 | /* We stepped onto an instruction that needs to be stepped |
3658 | again before re-inserting the breakpoint, do so. */ | |
08036331 | 3659 | cur_thr->stepping_over_breakpoint = 1; |
c906108c SS |
3660 | } |
3661 | else | |
3662 | { | |
515630c5 | 3663 | regcache_write_pc (regcache, addr); |
c906108c SS |
3664 | } |
3665 | ||
70509625 | 3666 | if (siggnal != GDB_SIGNAL_DEFAULT) |
1edb66d8 | 3667 | cur_thr->set_stop_signal (siggnal); |
70509625 | 3668 | |
4d9d9d04 PA |
3669 | /* If an exception is thrown from this point on, make sure to |
3670 | propagate GDB's knowledge of the executing state to the | |
3671 | frontend/user running state. */ | |
5b6d1e4f | 3672 | scoped_finish_thread_state finish_state (resume_target, resume_ptid); |
4d9d9d04 PA |
3673 | |
3674 | /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer | |
3675 | threads (e.g., we might need to set threads stepping over | |
3676 | breakpoints first), from the user/frontend's point of view, all | |
3677 | threads in RESUME_PTID are now running. Unless we're calling an | |
3678 | inferior function, as in that case we pretend the inferior | |
3679 | doesn't run at all. */ | |
08036331 | 3680 | if (!cur_thr->control.in_infcall) |
719546c4 | 3681 | set_running (resume_target, resume_ptid, true); |
17b2616c | 3682 | |
b26b06dd AB |
3683 | infrun_debug_printf ("addr=%s, signal=%s, resume_ptid=%s", |
3684 | paddress (gdbarch, addr), | |
3685 | gdb_signal_to_symbol_string (siggnal), | |
3686 | resume_ptid.to_string ().c_str ()); | |
527159b7 | 3687 | |
4d9d9d04 PA |
3688 | annotate_starting (); |
3689 | ||
3690 | /* Make sure that output from GDB appears before output from the | |
3691 | inferior. */ | |
3692 | gdb_flush (gdb_stdout); | |
3693 | ||
d930703d PA |
3694 | /* Since we've marked the inferior running, give it the terminal. A |
3695 | QUIT/Ctrl-C from here on is forwarded to the target (which can | |
3696 | still detect attempts to unblock a stuck connection with repeated | |
3697 | Ctrl-C from within target_pass_ctrlc). */ | |
3698 | target_terminal::inferior (); | |
3699 | ||
4d9d9d04 PA |
3700 | /* In a multi-threaded task we may select another thread and |
3701 | then continue or step. | |
3702 | ||
3703 | But if a thread that we're resuming had stopped at a breakpoint, | |
3704 | it will immediately cause another breakpoint stop without any | |
3705 | execution (i.e. it will report a breakpoint hit incorrectly). So | |
3706 | we must step over it first. | |
3707 | ||
3708 | Look for threads other than the current (TP) that reported a | |
3709 | breakpoint hit and haven't been resumed since. */ |
3710 | ||
3711 | /* If scheduler locking applies, we can avoid iterating over all | |
3712 | threads. */ | |
08036331 | 3713 | if (!non_stop && !schedlock_applies (cur_thr)) |
94cc34af | 3714 | { |
5b6d1e4f PA |
3715 | for (thread_info *tp : all_non_exited_threads (resume_target, |
3716 | resume_ptid)) | |
08036331 | 3717 | { |
f3f8ece4 PA |
3718 | switch_to_thread_no_regs (tp); |
3719 | ||
4d9d9d04 PA |
3720 | /* Ignore the current thread here. It's handled |
3721 | afterwards. */ | |
08036331 | 3722 | if (tp == cur_thr) |
4d9d9d04 | 3723 | continue; |
c906108c | 3724 | |
4d9d9d04 PA |
3725 | if (!thread_still_needs_step_over (tp)) |
3726 | continue; | |
3727 | ||
3728 | gdb_assert (!thread_is_in_step_over_chain (tp)); | |
c906108c | 3729 | |
1eb8556f | 3730 | infrun_debug_printf ("need to step-over [%s] first", |
0fab7955 | 3731 | tp->ptid.to_string ().c_str ()); |
99619bea | 3732 | |
28d5518b | 3733 | global_thread_step_over_chain_enqueue (tp); |
2adfaa28 | 3734 | } |
f3f8ece4 PA |
3735 | |
3736 | switch_to_thread (cur_thr); | |
30852783 UW |
3737 | } |
3738 | ||
4d9d9d04 PA |
3739 | /* Enqueue the current thread last, so that we move all other |
3740 | threads over their breakpoints first. */ | |
08036331 | 3741 | if (cur_thr->stepping_over_breakpoint) |
28d5518b | 3742 | global_thread_step_over_chain_enqueue (cur_thr); |
30852783 | 3743 | |
4d9d9d04 PA |
3744 | /* If the thread isn't started, we'll still need to set its prev_pc, |
3745 | so that switch_back_to_stepped_thread knows the thread hasn't | |
3746 | advanced. Must do this before resuming any thread, as in | |
3747 | all-stop/remote, once we resume we can't send any other packet | |
3748 | until the target stops again. */ | |
fc75c28b | 3749 | cur_thr->prev_pc = regcache_read_pc_protected (regcache); |
99619bea | 3750 | |
a9bc57b9 | 3751 | { |
1192f124 | 3752 | scoped_disable_commit_resumed disable_commit_resumed ("proceeding"); |
8bf10e2e | 3753 | bool step_over_started = start_step_over (); |
c906108c | 3754 | |
a9bc57b9 TT |
3755 | if (step_over_info_valid_p ()) |
3756 | { | |
3757 | /* Either this thread started a new in-line step over, or some | |
3758 | other thread was already doing one. In either case, don't | |
3759 | resume anything else until the step-over is finished. */ | |
3760 | } | |
8bf10e2e | 3761 | else if (step_over_started && !target_is_non_stop_p ()) |
a9bc57b9 TT |
3762 | { |
3763 | /* A new displaced stepping sequence was started. In all-stop, | |
3764 | we can't talk to the target anymore until it next stops. */ | |
3765 | } | |
3766 | else if (!non_stop && target_is_non_stop_p ()) | |
3767 | { | |
3ec3145c SM |
3768 | INFRUN_SCOPED_DEBUG_START_END |
3769 | ("resuming threads, all-stop-on-top-of-non-stop"); | |
3770 | ||
a9bc57b9 TT |
3771 | /* In all-stop, but the target is always in non-stop mode. |
3772 | Start all other threads that are implicitly resumed too. */ | |
5b6d1e4f PA |
3773 | for (thread_info *tp : all_non_exited_threads (resume_target, |
3774 | resume_ptid)) | |
3775 | { | |
3776 | switch_to_thread_no_regs (tp); | |
e07d892c | 3777 | proceed_resume_thread_checked (tp); |
d5f5a83a | 3778 | } |
a9bc57b9 | 3779 | } |
e07d892c MS |
3780 | else |
3781 | proceed_resume_thread_checked (cur_thr); | |
c906108c | 3782 | |
1192f124 SM |
3783 | disable_commit_resumed.reset_and_commit (); |
3784 | } | |
85ad3aaf | 3785 | |
731f534f | 3786 | finish_state.release (); |
c906108c | 3787 | |
873657b9 PA |
3788 | /* If we've switched threads above, switch back to the previously |
3789 | current thread. We don't want the user to see a different | |
3790 | selected thread. */ | |
3791 | switch_to_thread (cur_thr); | |
3792 | ||
0b333c5e PA |
3793 | /* Tell the event loop to wait for it to stop. If the target |
3794 | supports asynchronous execution, it'll do this from within | |
3795 | target_resume. */ | |
362646f5 | 3796 | if (!target_can_async_p ()) |
0b333c5e | 3797 | mark_async_event_handler (infrun_async_inferior_event_token); |
c906108c | 3798 | } |
c906108c SS |
3799 | \f |
3800 | ||
3801 | /* Start remote-debugging of a machine over a serial link. */ | |
96baa820 | 3802 | |
c906108c | 3803 | void |
8621d6a9 | 3804 | start_remote (int from_tty) |
c906108c | 3805 | { |
5b6d1e4f PA |
3806 | inferior *inf = current_inferior (); |
3807 | inf->control.stop_soon = STOP_QUIETLY_REMOTE; | |
43ff13b4 | 3808 | |
1777feb0 | 3809 | /* Always go on waiting for the target, regardless of the mode. */ |
6426a772 | 3810 | /* FIXME: cagney/1999-09-23: At present it isn't possible to |
7e73cedf | 3811 | indicate to wait_for_inferior that a target should time out if | |
6426a772 JM |
3812 | nothing is returned (instead of just blocking). Because of this, |
3813 | targets expecting an immediate response need to, internally, set | |
3814 | things up so that the target_wait() is forced to eventually | |
1777feb0 | 3815 | time out. */ | |
6426a772 JM |
3816 | /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to |
3817 | differentiate to its caller what the state of the target is after | |
3818 | the initial open has been performed. Here we're assuming that | |
3819 | the target has stopped. It should be possible to eventually have | |
3820 | target_open() return to the caller an indication that the target | |
3821 | is currently running and GDB state should be set to the same as | |
1777feb0 | 3822 | for an async run. */ |
5b6d1e4f | 3823 | wait_for_inferior (inf); |
8621d6a9 DJ |
3824 | |
3825 | /* Now that the inferior has stopped, do any bookkeeping like | |
3826 | loading shared libraries. We want to do this before normal_stop, | |
3827 | so that the displayed frame is up to date. */ | |
3cb6bc13 | 3828 | post_create_inferior (from_tty, true); |
8621d6a9 | 3829 | |
6426a772 | 3830 | normal_stop (); |
c906108c SS |
3831 | } |
3832 | ||
3833 | /* Initialize static vars when a new inferior begins. */ | |
3834 | ||
3835 | void | |
96baa820 | 3836 | init_wait_for_inferior (void) |
c906108c SS |
3837 | { |
3838 | /* These are meaningless until the first time through wait_for_inferior. */ | |
c906108c | 3839 | |
f5928702 | 3840 | breakpoint_init_inferior (current_inferior (), inf_starting); |
c906108c | 3841 | |
70509625 | 3842 | clear_proceed_status (0); |
9f976b41 | 3843 | |
ab1ddbcf | 3844 | nullify_last_target_wait_ptid (); |
237fc4c9 | 3845 | |
a81871f7 | 3846 | update_previous_thread (); |
c906108c | 3847 | } |
237fc4c9 | 3848 | |
c906108c | 3849 | \f |
488f131b | 3850 | |
ec9499be | 3851 | static void handle_inferior_event (struct execution_control_state *ecs); |
cd0fc7c3 | 3852 | |
568d6575 UW |
3853 | static void handle_step_into_function (struct gdbarch *gdbarch, |
3854 | struct execution_control_state *ecs); | |
3855 | static void handle_step_into_function_backward (struct gdbarch *gdbarch, | |
3856 | struct execution_control_state *ecs); | |
4f5d7f63 | 3857 | static void handle_signal_stop (struct execution_control_state *ecs); |
186c406b | 3858 | static void check_exception_resume (struct execution_control_state *, |
8480a37e | 3859 | const frame_info_ptr &); |
611c83ae | 3860 | |
bdc36728 | 3861 | static void end_stepping_range (struct execution_control_state *ecs); |
22bcd14b | 3862 | static void stop_waiting (struct execution_control_state *ecs); |
d4f3574e | 3863 | static void keep_going (struct execution_control_state *ecs); |
94c57d6a | 3864 | static void process_event_stop_test (struct execution_control_state *ecs); |
c4464ade | 3865 | static bool switch_back_to_stepped_thread (struct execution_control_state *ecs); |
104c1213 | 3866 | |
252fbfc8 PA |
3867 | /* This function is attached as a "thread_stop_requested" observer. |
3868 | Cleanup local state that assumed the PTID was to be resumed, and | |
3869 | report the stop to the frontend. */ | |
3870 | ||
2c0b251b | 3871 | static void |
252fbfc8 PA |
3872 | infrun_thread_stop_requested (ptid_t ptid) |
3873 | { | |
5b6d1e4f PA |
3874 | process_stratum_target *curr_target = current_inferior ()->process_target (); |
3875 | ||
c65d6b55 PA |
3876 | /* PTID was requested to stop. If the thread was already stopped, |
3877 | but the user/frontend doesn't know about that yet (e.g., the | |
3878 | thread had been temporarily paused for some step-over), set up | |
3879 | for reporting the stop now. */ | |
5b6d1e4f | 3880 | for (thread_info *tp : all_threads (curr_target, ptid)) |
08036331 PA |
3881 | { |
3882 | if (tp->state != THREAD_RUNNING) | |
3883 | continue; | |
611841bb | 3884 | if (tp->executing ()) |
08036331 | 3885 | continue; |
c65d6b55 | 3886 | |
08036331 PA |
3887 | /* Remove matching threads from the step-over queue, so |
3888 | start_step_over doesn't try to resume them | |
3889 | automatically. */ | |
3890 | if (thread_is_in_step_over_chain (tp)) | |
28d5518b | 3891 | global_thread_step_over_chain_remove (tp); |
c65d6b55 | 3892 | |
08036331 PA |
3893 | /* If the thread is stopped, but the user/frontend doesn't |
3894 | know about that yet, queue a pending event, as if the | |
3895 | thread had just stopped now. Unless the thread already had | |
3896 | a pending event. */ | |
1edb66d8 | 3897 | if (!tp->has_pending_waitstatus ()) |
08036331 | 3898 | { |
1edb66d8 | 3899 | target_waitstatus ws; |
183be222 | 3900 | ws.set_stopped (GDB_SIGNAL_0); |
1edb66d8 | 3901 | tp->set_pending_waitstatus (ws); |
08036331 | 3902 | } |
c65d6b55 | 3903 | |
08036331 PA |
3904 | /* Clear the inline-frame state, since we're re-processing the |
3905 | stop. */ | |
5b6d1e4f | 3906 | clear_inline_frame_state (tp); |
c65d6b55 | 3907 | |
08036331 PA |
3908 | /* If this thread was paused because some other thread was |
3909 | doing an inline-step over, let that finish first. Once | |
3910 | that happens, we'll restart all threads and consume pending | |
3911 | stop events then. */ | |
3912 | if (step_over_info_valid_p ()) | |
3913 | continue; | |
3914 | ||
3915 | /* Otherwise we can process the (new) pending event now. Set | |
3916 | it so this pending event is considered by | |
3917 | do_target_wait. */ | |
7846f3aa | 3918 | tp->set_resumed (true); |
08036331 | 3919 | } |
252fbfc8 PA |
3920 | } |
3921 | ||
0cbcdb96 PA |
3922 | /* Delete the step resume, single-step and longjmp/exception resume |
3923 | breakpoints of TP. */ | |
4e1c45ea | 3924 | |
0cbcdb96 PA |
3925 | static void |
3926 | delete_thread_infrun_breakpoints (struct thread_info *tp) | |
4e1c45ea | 3927 | { |
0cbcdb96 PA |
3928 | delete_step_resume_breakpoint (tp); |
3929 | delete_exception_resume_breakpoint (tp); | |
34b7e8a6 | 3930 | delete_single_step_breakpoints (tp); |
4e1c45ea PA |
3931 | } |
3932 | ||
0cbcdb96 PA |
3933 | /* If the target still has execution, call FUNC for each thread that |
3934 | just stopped. In all-stop, that's all the non-exited threads; in | |
3935 | non-stop, that's the current thread, only. */ | |
3936 | ||
3937 | typedef void (*for_each_just_stopped_thread_callback_func) | |
3938 | (struct thread_info *tp); | |
4e1c45ea PA |
3939 | |
3940 | static void | |
0cbcdb96 | 3941 | for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func) |
4e1c45ea | 3942 | { |
55f6301a | 3943 | if (!target_has_execution () || inferior_ptid == null_ptid) |
4e1c45ea PA |
3944 | return; |
3945 | ||
fbea99ea | 3946 | if (target_is_non_stop_p ()) |
4e1c45ea | 3947 | { |
0cbcdb96 PA |
3948 | /* If in non-stop mode, only the current thread stopped. */ |
3949 | func (inferior_thread ()); | |
4e1c45ea PA |
3950 | } |
3951 | else | |
0cbcdb96 | 3952 | { |
0cbcdb96 | 3953 | /* In all-stop mode, all threads have stopped. */ |
08036331 PA |
3954 | for (thread_info *tp : all_non_exited_threads ()) |
3955 | func (tp); | |
0cbcdb96 PA |
3956 | } |
3957 | } | |
3958 | ||
3959 | /* Delete the step resume and longjmp/exception resume breakpoints of | |
3960 | the threads that just stopped. */ | |
3961 | ||
3962 | static void | |
3963 | delete_just_stopped_threads_infrun_breakpoints (void) | |
3964 | { | |
3965 | for_each_just_stopped_thread (delete_thread_infrun_breakpoints); | |
34b7e8a6 PA |
3966 | } |
3967 | ||
3968 | /* Delete the single-step breakpoints of the threads that just | |
3969 | stopped. */ | |
7c16b83e | 3970 | |
34b7e8a6 PA |
3971 | static void |
3972 | delete_just_stopped_threads_single_step_breakpoints (void) | |
3973 | { | |
3974 | for_each_just_stopped_thread (delete_single_step_breakpoints); | |
4e1c45ea PA |
3975 | } |
3976 | ||
221e1a37 | 3977 | /* See infrun.h. */ |
223698f8 | 3978 | |
221e1a37 | 3979 | void |
223698f8 | 3980 | print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid, |
3559d599 TBA |
3981 | const struct target_waitstatus &ws, |
3982 | process_stratum_target *proc_target) | |
223698f8 | 3983 | { |
17e971f7 SM |
3984 | infrun_debug_printf ("target_wait (%s [%s], status) =", |
3985 | waiton_ptid.to_string ().c_str (), | |
e71daf80 | 3986 | target_pid_to_str (waiton_ptid).c_str ()); |
17e971f7 SM |
3987 | infrun_debug_printf (" %s [%s],", |
3988 | result_ptid.to_string ().c_str (), | |
e71daf80 | 3989 | target_pid_to_str (result_ptid).c_str ()); |
c272a98c | 3990 | infrun_debug_printf (" %s", ws.to_string ().c_str ()); |
3559d599 TBA |
3991 | |
3992 | if (proc_target != nullptr) | |
3993 | infrun_debug_printf (" from target %d (%s)", | |
3994 | proc_target->connection_number, | |
3995 | proc_target->shortname ()); | |
3996 | } | |
3997 | ||
3998 | /* Wrapper for print_target_wait_results above for convenience. */ | |
3999 | ||
4000 | static void | |
4001 | print_target_wait_results (ptid_t waiton_ptid, | |
4002 | const execution_control_state &ecs) | |
4003 | { | |
4004 | print_target_wait_results (waiton_ptid, ecs.ptid, ecs.ws, ecs.target); | |
223698f8 DE |
4005 | } |
4006 | ||
372316f1 PA |
4007 | /* Select a thread at random, out of those which are resumed and have |
4008 | had events. */ | |
4009 | ||
4010 | static struct thread_info * | |
5b6d1e4f | 4011 | random_pending_event_thread (inferior *inf, ptid_t waiton_ptid) |
372316f1 | 4012 | { |
71a23490 SM |
4013 | process_stratum_target *proc_target = inf->process_target (); |
4014 | thread_info *thread | |
4015 | = proc_target->random_resumed_with_pending_wait_status (inf, waiton_ptid); | |
08036331 | 4016 | |
71a23490 | 4017 | if (thread == nullptr) |
08036331 | 4018 | { |
71a23490 SM |
4019 | infrun_debug_printf ("None found."); |
4020 | return nullptr; | |
4021 | } | |
372316f1 | 4022 | |
0fab7955 | 4023 | infrun_debug_printf ("Found %s.", thread->ptid.to_string ().c_str ()); |
71a23490 SM |
4024 | gdb_assert (thread->resumed ()); |
4025 | gdb_assert (thread->has_pending_waitstatus ()); | |
372316f1 | 4026 | |
71a23490 | 4027 | return thread; |
372316f1 PA |
4028 | } |
4029 | ||
4030 | /* Wrapper for target_wait that first checks whether threads have | |
4031 | pending statuses to report before actually asking the target for | |
5b6d1e4f PA |
4032 | more events. INF is the inferior we're using to call target_wait |
4033 | on. */ | |
372316f1 PA |
4034 | |
4035 | static ptid_t | |
5b6d1e4f | 4036 | do_target_wait_1 (inferior *inf, ptid_t ptid, |
b60cea74 | 4037 | target_waitstatus *status, target_wait_flags options) |
372316f1 | 4038 | { |
372316f1 PA |
4039 | struct thread_info *tp; |
4040 | ||
24ed6739 AB |
4041 | /* We know that we are looking for an event in the target of inferior |
4042 | INF, but we don't know which thread the event might come from. As | |
4043 | such we want to make sure that INFERIOR_PTID is reset so that none of | |
4044 | the wait code relies on it - doing so is always a mistake. */ | |
4045 | switch_to_inferior_no_thread (inf); | |
4046 | ||
372316f1 PA |
4047 | /* First check if there is a resumed thread with a wait status |
4048 | pending. */ | |
d7e15655 | 4049 | if (ptid == minus_one_ptid || ptid.is_pid ()) |
372316f1 | 4050 | { |
5b6d1e4f | 4051 | tp = random_pending_event_thread (inf, ptid); |
372316f1 PA |
4052 | } |
4053 | else | |
4054 | { | |
1eb8556f | 4055 | infrun_debug_printf ("Waiting for specific thread %s.", |
0fab7955 | 4056 | ptid.to_string ().c_str ()); |
372316f1 PA |
4057 | |
4058 | /* We have a specific thread to check. */ | |
3c8af02f | 4059 | tp = inf->find_thread (ptid); |
03acd4d8 | 4060 | gdb_assert (tp != nullptr); |
1edb66d8 | 4061 | if (!tp->has_pending_waitstatus ()) |
03acd4d8 | 4062 | tp = nullptr; |
372316f1 PA |
4063 | } |
4064 | ||
03acd4d8 | 4065 | if (tp != nullptr |
1edb66d8 SM |
4066 | && (tp->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT |
4067 | || tp->stop_reason () == TARGET_STOPPED_BY_HW_BREAKPOINT)) | |
372316f1 | 4068 | { |
00431a78 | 4069 | struct regcache *regcache = get_thread_regcache (tp); |
ac7936df | 4070 | struct gdbarch *gdbarch = regcache->arch (); |
372316f1 PA |
4071 | CORE_ADDR pc; |
4072 | int discard = 0; | |
4073 | ||
4074 | pc = regcache_read_pc (regcache); | |
4075 | ||
1edb66d8 | 4076 | if (pc != tp->stop_pc ()) |
372316f1 | 4077 | { |
1eb8556f | 4078 | infrun_debug_printf ("PC of %s changed. was=%s, now=%s", |
0fab7955 | 4079 | tp->ptid.to_string ().c_str (), |
1edb66d8 | 4080 | paddress (gdbarch, tp->stop_pc ()), |
1eb8556f | 4081 | paddress (gdbarch, pc)); |
372316f1 PA |
4082 | discard = 1; |
4083 | } | |
f9582a22 | 4084 | else if (!breakpoint_inserted_here_p (tp->inf->aspace.get (), pc)) |
372316f1 | 4085 | { |
1eb8556f | 4086 | infrun_debug_printf ("previous breakpoint of %s, at %s gone", |
0fab7955 | 4087 | tp->ptid.to_string ().c_str (), |
1eb8556f | 4088 | paddress (gdbarch, pc)); |
372316f1 PA |
4089 | |
4090 | discard = 1; | |
4091 | } | |
4092 | ||
4093 | if (discard) | |
4094 | { | |
1eb8556f | 4095 | infrun_debug_printf ("pending event of %s cancelled.", |
0fab7955 | 4096 | tp->ptid.to_string ().c_str ()); |
372316f1 | 4097 | |
1edb66d8 SM |
4098 | tp->clear_pending_waitstatus (); |
4099 | target_waitstatus ws; | |
183be222 | 4100 | ws.set_spurious (); |
1edb66d8 SM |
4101 | tp->set_pending_waitstatus (ws); |
4102 | tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON); | |
372316f1 PA |
4103 | } |
4104 | } | |
4105 | ||
03acd4d8 | 4106 | if (tp != nullptr) |
372316f1 | 4107 | { |
1eb8556f | 4108 | infrun_debug_printf ("Using pending wait status %s for %s.", |
7dca2ea7 | 4109 | tp->pending_waitstatus ().to_string ().c_str (), |
0fab7955 | 4110 | tp->ptid.to_string ().c_str ()); |
372316f1 PA |
4111 | |
4112 | /* Now that we've selected our final event LWP, un-adjust its PC | |
4113 | if it was a software breakpoint (and the target doesn't | |
4114 | always adjust the PC itself). */ | |
1edb66d8 | 4115 | if (tp->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT |
372316f1 PA |
4116 | && !target_supports_stopped_by_sw_breakpoint ()) |
4117 | { | |
4118 | struct regcache *regcache; | |
4119 | struct gdbarch *gdbarch; | |
4120 | int decr_pc; | |
4121 | ||
00431a78 | 4122 | regcache = get_thread_regcache (tp); |
ac7936df | 4123 | gdbarch = regcache->arch (); |
372316f1 PA |
4124 | |
4125 | decr_pc = gdbarch_decr_pc_after_break (gdbarch); | |
4126 | if (decr_pc != 0) | |
4127 | { | |
4128 | CORE_ADDR pc; | |
4129 | ||
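/* Worked example with illustrative numbers: if the pending stop had
   recorded the already-adjusted breakpoint address 0x1000 and decr_pc
   is 1, we write back 0x1001 -- the raw PC the trap originally
   reported -- so the normal adjustment path (adjust_pc_after_break)
   can subtract decr_pc again without double-adjusting. */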
4130 | pc = regcache_read_pc (regcache); | |
4131 | regcache_write_pc (regcache, pc + decr_pc); | |
4132 | } | |
4133 | } | |
4134 | ||
1edb66d8 SM |
4135 | tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON); |
4136 | *status = tp->pending_waitstatus (); | |
4137 | tp->clear_pending_waitstatus (); | |
372316f1 PA |
4138 | |
4139 | /* Wake up the event loop again, until all pending events are | |
4140 | processed. */ | |
4141 | if (target_is_async_p ()) | |
4142 | mark_async_event_handler (infrun_async_inferior_event_token); | |
4143 | return tp->ptid; | |
4144 | } | |
4145 | ||
4146 | /* But if we don't find one, we'll have to wait. */ | |
4147 | ||
d3a07122 SM |
4148 | /* We can't ask a non-async target to do a non-blocking wait, so this will be |
4149 | a blocking wait. */ | |
71247709 | 4150 | if (!target_can_async_p ()) |
d3a07122 SM |
4151 | options &= ~TARGET_WNOHANG; |
4152 | ||
fb85cece | 4153 | return target_wait (ptid, status, options); |
372316f1 PA |
4154 | } |
4155 | ||
5b6d1e4f PA |
4156 | /* Wrapper for target_wait that first checks whether threads have |
4157 | pending statuses to report before actually asking the target for | |
b3e3a4c1 | 4158 | more events. Polls for events from all inferiors/targets. */ |
5b6d1e4f PA |
4159 | |
4160 | static bool | |
07505b61 AB |
4161 | do_target_wait (ptid_t wait_ptid, execution_control_state *ecs, |
4162 | target_wait_flags options) | |
5b6d1e4f PA |
4163 | { |
4164 | int num_inferiors = 0; | |
4165 | int random_selector; | |
4166 | ||
b3e3a4c1 SM |
4167 | /* For fairness, we pick the first inferior/target to poll at random |
4168 | out of all inferiors that may report events, and then continue | |
4169 | polling the rest of the inferior list starting from that one in a | |
4170 | circular fashion until the whole list is polled once. */ | |
5b6d1e4f | 4171 | |
3df78436 AB |
4172 | ptid_t wait_ptid_pid {wait_ptid.pid ()}; |
4173 | auto inferior_matches = [&wait_ptid_pid] (inferior *inf) | |
5b6d1e4f | 4174 | { |
07505b61 | 4175 | return (inf->process_target () != nullptr |
3df78436 | 4176 | && ptid_t (inf->pid).matches (wait_ptid_pid)); |
5b6d1e4f PA |
4177 | }; |
4178 | ||
b3e3a4c1 | 4179 | /* First see how many matching inferiors we have. */ |
5b6d1e4f PA |
4180 | for (inferior *inf : all_inferiors ()) |
4181 | if (inferior_matches (inf)) | |
4182 | num_inferiors++; | |
4183 | ||
4184 | if (num_inferiors == 0) | |
4185 | { | |
183be222 | 4186 | ecs->ws.set_ignore (); |
5b6d1e4f PA |
4187 | return false; |
4188 | } | |
4189 | ||
b3e3a4c1 | 4190 | /* Now randomly pick an inferior out of those that matched. */ |
5b6d1e4f PA |
4191 | random_selector = (int) |
4192 | ((num_inferiors * (double) rand ()) / (RAND_MAX + 1.0)); | |
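/* The scaling above maps rand (), uniform over [0, RAND_MAX], to an
   integer in [0, num_inferiors): e.g., with num_inferiors == 4 the
   quotient lies in [0.0, 4.0), so the cast yields 0..3 with roughly
   equal probability.  Illustrative sketch of the same trick in
   isolation (not GDB code; assumes only the C library):

	#include <stdlib.h>

	static int
	pick_uniform (int n)
	{
	  return (int) ((n * (double) rand ()) / (RAND_MAX + 1.0));
	}

   pick_uniform (n) returns a value in [0, n). */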
4193 | ||
1eb8556f SM |
4194 | if (num_inferiors > 1) |
4195 | infrun_debug_printf ("Found %d inferiors, starting at #%d", | |
4196 | num_inferiors, random_selector); | |
5b6d1e4f | 4197 | |
b3e3a4c1 | 4198 | /* Select the Nth inferior that matched. */ |
5b6d1e4f PA |
4199 | |
4200 | inferior *selected = nullptr; | |
4201 | ||
4202 | for (inferior *inf : all_inferiors ()) | |
4203 | if (inferior_matches (inf)) | |
4204 | if (random_selector-- == 0) | |
4205 | { | |
4206 | selected = inf; | |
4207 | break; | |
4208 | } | |
4209 | ||
b3e3a4c1 | 4210 | /* Now poll for events out of each of the matching inferior's |
5b6d1e4f PA |
4211 | targets, starting from the selected one. */ |
4212 | ||
4213 | auto do_wait = [&] (inferior *inf) | |
4214 | { | |
07505b61 | 4215 | ecs->ptid = do_target_wait_1 (inf, wait_ptid, &ecs->ws, options); |
5b6d1e4f | 4216 | ecs->target = inf->process_target (); |
183be222 | 4217 | return (ecs->ws.kind () != TARGET_WAITKIND_IGNORE); |
5b6d1e4f PA |
4218 | }; |
4219 | ||
b3e3a4c1 SM |
4220 | /* Needed in 'all-stop + target-non-stop' mode, because we end up |
4221 | here spuriously after the target is all stopped and we've already | |
5b6d1e4f PA |
4222 | reported the stop to the user, polling for events. */ |
4223 | scoped_restore_current_thread restore_thread; | |
4224 | ||
08bdefb5 PA |
4225 | intrusive_list_iterator<inferior> start |
4226 | = inferior_list.iterator_to (*selected); | |
4227 | ||
4228 | for (intrusive_list_iterator<inferior> it = start; | |
4229 | it != inferior_list.end (); | |
4230 | ++it) | |
4231 | { | |
4232 | inferior *inf = &*it; | |
4233 | ||
4234 | if (inferior_matches (inf) && do_wait (inf)) | |
5b6d1e4f | 4235 | return true; |
08bdefb5 | 4236 | } |
5b6d1e4f | 4237 | |
08bdefb5 PA |
4238 | for (intrusive_list_iterator<inferior> it = inferior_list.begin (); |
4239 | it != start; | |
4240 | ++it) | |
4241 | { | |
4242 | inferior *inf = &*it; | |
4243 | ||
4244 | if (inferior_matches (inf) && do_wait (inf)) | |
5b6d1e4f | 4245 | return true; |
08bdefb5 | 4246 | } |
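/* The two loops above make exactly one circular pass: with matching
   inferiors 1..4 and SELECTED == 3, the poll order is 3, 4, 1, 2,
   stopping at the first target that reports an event.  Illustrative
   sketch of the same scan over a plain array, with a hypothetical
   try_wait (not a GDB function):

	for (int i = s; i < n; i++)
	  if (try_wait (i))
	    return true;
	for (int i = 0; i < s; i++)
	  if (try_wait (i))
	    return true;
*/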
5b6d1e4f | 4247 | |
183be222 | 4248 | ecs->ws.set_ignore (); |
5b6d1e4f PA |
4249 | return false; |
4250 | } | |
4251 | ||
8ff53139 PA |
4252 | /* An event reported by wait_one. */ |
4253 | ||
4254 | struct wait_one_event | |
4255 | { | |
4256 | /* The target the event came out of. */ | |
4257 | process_stratum_target *target; | |
4258 | ||
4259 | /* The PTID the event was for. */ | |
4260 | ptid_t ptid; | |
4261 | ||
4262 | /* The waitstatus. */ | |
4263 | target_waitstatus ws; | |
4264 | }; | |
4265 | ||
4266 | static bool handle_one (const wait_one_event &event); | |
21d48304 | 4267 | static int finish_step_over (struct execution_control_state *ecs); |
8ff53139 | 4268 | |
24291992 PA |
4269 | /* Prepare and stabilize the inferior for detaching it. E.g., |
4270 | detaching while a thread is displaced stepping is a recipe for | |
4271 | crashing it, as nothing would readjust the PC out of the scratch | |
4272 | pad. */ | |
4273 | ||
4274 | void | |
4275 | prepare_for_detach (void) | |
4276 | { | |
4277 | struct inferior *inf = current_inferior (); | |
f2907e49 | 4278 | ptid_t pid_ptid = ptid_t (inf->pid); |
8ff53139 | 4279 | scoped_restore_current_thread restore_thread; |
24291992 | 4280 | |
9bcb1f16 | 4281 | scoped_restore restore_detaching = make_scoped_restore (&inf->detaching, true); |
24291992 | 4282 | |
8ff53139 PA |
4283 | /* Remove all threads of INF from the global step-over chain. We |
4284 | want to stop any ongoing step-over, not start any new one. */ | |
8b6a69b2 SM |
4285 | thread_step_over_list_safe_range range |
4286 | = make_thread_step_over_list_safe_range (global_thread_step_over_list); | |
4287 | ||
4288 | for (thread_info *tp : range) | |
4289 | if (tp->inf == inf) | |
4290 | { | |
4291 | infrun_debug_printf ("removing thread %s from global step over chain", | |
0fab7955 | 4292 | tp->ptid.to_string ().c_str ()); |
8ff53139 | 4293 | global_thread_step_over_chain_remove (tp); |
8b6a69b2 | 4294 | } |
24291992 | 4295 | |
ac7d717c PA |
4296 | /* If we were already in the middle of an inline step-over, and the |
4297 | thread stepping belongs to the inferior we're detaching, we need | |
4298 | to restart the threads of other inferiors. */ | |
4299 | if (step_over_info.thread != -1) | |
4300 | { | |
4301 | infrun_debug_printf ("inline step-over in-process while detaching"); | |
4302 | ||
4303 | thread_info *thr = find_thread_global_id (step_over_info.thread); | |
4304 | if (thr->inf == inf) | |
4305 | { | |
4306 | /* Since we removed threads of INF from the step-over chain, | |
4307 | we know this won't start a step-over for INF. */ | |
4308 | clear_step_over_info (); | |
4309 | ||
4310 | if (target_is_non_stop_p ()) | |
4311 | { | |
4312 | /* Start a new step-over in another thread if there's | |
4313 | one that needs it. */ | |
4314 | start_step_over (); | |
4315 | ||
4316 | /* Restart all other threads (except the | |
4317 | previously-stepping thread, since that one is still | |
4318 | running). */ | |
4319 | if (!step_over_info_valid_p ()) | |
4320 | restart_threads (thr); | |
4321 | } | |
4322 | } | |
4323 | } | |
4324 | ||
8ff53139 PA |
4325 | if (displaced_step_in_progress (inf)) |
4326 | { | |
4327 | infrun_debug_printf ("displaced-stepping in-process while detaching"); | |
24291992 | 4328 | |
8ff53139 | 4329 | /* Stop threads currently displaced stepping, aborting it. */ |
24291992 | 4330 | |
8ff53139 PA |
4331 | for (thread_info *thr : inf->non_exited_threads ()) |
4332 | { | |
4333 | if (thr->displaced_step_state.in_progress ()) | |
4334 | { | |
611841bb | 4335 | if (thr->executing ()) |
8ff53139 PA |
4336 | { |
4337 | if (!thr->stop_requested) | |
4338 | { | |
4339 | target_stop (thr->ptid); | |
4340 | thr->stop_requested = true; | |
4341 | } | |
4342 | } | |
4343 | else | |
7846f3aa | 4344 | thr->set_resumed (false); |
8ff53139 PA |
4345 | } |
4346 | } | |
24291992 | 4347 | |
8ff53139 PA |
4348 | while (displaced_step_in_progress (inf)) |
4349 | { | |
4350 | wait_one_event event; | |
24291992 | 4351 | |
8ff53139 PA |
4352 | event.target = inf->process_target (); |
4353 | event.ptid = do_target_wait_1 (inf, pid_ptid, &event.ws, 0); | |
24291992 | 4354 | |
8ff53139 | 4355 | if (debug_infrun) |
3559d599 TBA |
4356 | print_target_wait_results (pid_ptid, event.ptid, event.ws, |
4357 | event.target); | |
24291992 | 4358 | |
8ff53139 PA |
4359 | handle_one (event); |
4360 | } | |
24291992 | 4361 | |
8ff53139 PA |
4362 | /* It's OK to leave some of the threads of INF stopped, since |
4363 | they'll be detached shortly. */ | |
24291992 | 4364 | } |
24291992 PA |
4365 | } |
4366 | ||
e0c01ce6 PA |
4367 | /* If all-stop, but there exists a non-stop target, stop all threads |
4368 | now that we're presenting the stop to the user. */ | |
4369 | ||
4370 | static void | |
4371 | stop_all_threads_if_all_stop_mode () | |
4372 | { | |
4373 | if (!non_stop && exists_non_stop_target ()) | |
4374 | stop_all_threads ("presenting stop to user in all-stop"); | |
4375 | } | |
4376 | ||
cd0fc7c3 | 4377 | /* Wait for control to return from inferior to debugger. |
ae123ec6 | 4378 | |
cd0fc7c3 SS |
4379 | If inferior gets a signal, we may decide to start it up again |
4380 | instead of returning. That is why there is a loop in this function. | |
4381 | When this function actually returns it means the inferior | |
4382 | should be left stopped and GDB should read more commands. */ | |
4383 | ||
5b6d1e4f PA |
4384 | static void |
4385 | wait_for_inferior (inferior *inf) | |
cd0fc7c3 | 4386 | { |
1eb8556f | 4387 | infrun_debug_printf ("wait_for_inferior ()"); |
527159b7 | 4388 | |
4c41382a | 4389 | SCOPE_EXIT { delete_just_stopped_threads_infrun_breakpoints (); }; |
cd0fc7c3 | 4390 | |
e6f5c25b PA |
4391 | /* If an error happens while handling the event, propagate GDB's |
4392 | knowledge of the executing state to the frontend/user running | |
4393 | state. */ | |
5b6d1e4f PA |
4394 | scoped_finish_thread_state finish_state |
4395 | (inf->process_target (), minus_one_ptid); | |
e6f5c25b | 4396 | |
c906108c SS |
4397 | while (1) |
4398 | { | |
aa563d16 | 4399 | execution_control_state ecs; |
29f49a6a | 4400 | |
ec9499be | 4401 | overlay_cache_invalid = 1; |
ec9499be | 4402 | |
f15cb84a YQ |
4403 | /* Flush target cache before starting to handle each event. |
4404 | Target was running and cache could be stale. This is just a | |
4405 | heuristic. Running threads may modify target memory, but we | |
4406 | don't get any event. */ | |
41336620 | 4407 | target_dcache_invalidate (current_program_space->aspace); |
f15cb84a | 4408 | |
aa563d16 TT |
4409 | ecs.ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs.ws, 0); |
4410 | ecs.target = inf->process_target (); | |
c906108c | 4411 | |
f00150c9 | 4412 | if (debug_infrun) |
3559d599 | 4413 | print_target_wait_results (minus_one_ptid, ecs); |
f00150c9 | 4414 | |
cd0fc7c3 | 4415 | /* Now figure out what to do with the result of the wait. */ | |
aa563d16 | 4416 | handle_inferior_event (&ecs); |
c906108c | 4417 | |
aa563d16 | 4418 | if (!ecs.wait_some_more) |
cd0fc7c3 SS |
4419 | break; |
4420 | } | |
4e1c45ea | 4421 | |
e0c01ce6 PA |
4422 | stop_all_threads_if_all_stop_mode (); |
4423 | ||
e6f5c25b | 4424 | /* No error, don't finish the state yet. */ |
731f534f | 4425 | finish_state.release (); |
cd0fc7c3 | 4426 | } |
c906108c | 4427 | |
d3d4baed PA |
4428 | /* Cleanup that reinstalls the readline callback handler, if the |
4429 | target is running in the background. If while handling the target | |
4430 | event something triggered a secondary prompt, like e.g., a | |
4431 | pagination prompt, we'll have removed the callback handler (see | |
4432 | gdb_readline_wrapper_line). Need to do this as we go back to the | |
4433 | event loop, ready to process further input. Note this has no | |
4434 | effect if the handler hasn't actually been removed, because calling | |
4435 | rl_callback_handler_install resets the line buffer, thus losing | |
4436 | input. */ | |
4437 | ||
4438 | static void | |
d238133d | 4439 | reinstall_readline_callback_handler_cleanup () |
d3d4baed | 4440 | { |
3b12939d PA |
4441 | struct ui *ui = current_ui; |
4442 | ||
4443 | if (!ui->async) | |
6c400b59 PA |
4444 | { |
4445 | /* We're not going back to the top level event loop yet. Don't | |
4446 | install the readline callback, as it'd prep the terminal, | |
4447 | readline-style (raw, noecho) (e.g., --batch). We'll install | |
4448 | it the next time the prompt is displayed, when we're ready | |
4449 | for input. */ | |
4450 | return; | |
4451 | } | |
4452 | ||
3b12939d | 4453 | if (ui->command_editing && ui->prompt_state != PROMPT_BLOCKED) |
d3d4baed PA |
4454 | gdb_rl_callback_handler_reinstall (); |
4455 | } | |
4456 | ||
243a9253 | 4457 | /* Clean up the FSMs of threads that are now stopped. In non-stop, |
7730e5c6 PA |
4458 | that's just the event thread. In all-stop, that's all threads. In |
4459 | all-stop, threads that had a pending exit no longer have a reason | |
4460 | to be around, as their FSMs/commands are canceled, so we delete | |
4461 | them. This avoids "info threads" listing such threads as if they | |
4462 | were alive (and failing to read their registers), the user being | |
4463 | able to select and resume them (and that failing), etc. */ | |
243a9253 PA |
4464 | |
4465 | static void | |
4466 | clean_up_just_stopped_threads_fsms (struct execution_control_state *ecs) | |
4467 | { | |
22517040 SM |
4468 | /* The first clean_up call below assumes the event thread is the current |
4469 | one. */ | |
4470 | if (ecs->event_thread != nullptr) | |
4471 | gdb_assert (ecs->event_thread == inferior_thread ()); | |
4472 | ||
573269a8 LS |
4473 | if (ecs->event_thread != nullptr |
4474 | && ecs->event_thread->thread_fsm () != nullptr) | |
4475 | ecs->event_thread->thread_fsm ()->clean_up (ecs->event_thread); | |
243a9253 PA |
4476 | |
4477 | if (!non_stop) | |
4478 | { | |
22517040 SM |
4479 | scoped_restore_current_thread restore_thread; |
4480 | ||
7730e5c6 | 4481 | for (thread_info *thr : all_threads_safe ()) |
dda83cd7 | 4482 | { |
7730e5c6 | 4483 | if (thr->state == THREAD_EXITED) |
243a9253 | 4484 | continue; |
7730e5c6 | 4485 | |
243a9253 PA |
4486 | if (thr == ecs->event_thread) |
4487 | continue; | |
4488 | ||
7730e5c6 PA |
4489 | if (thr->thread_fsm () != nullptr) |
4490 | { | |
4491 | switch_to_thread (thr); | |
4492 | thr->thread_fsm ()->clean_up (thr); | |
4493 | } | |
4494 | ||
4495 | /* As we are cancelling the command/FSM of this thread, | |
4496 | whatever was the reason we needed to report a thread | |
4497 | exited event to the user, that reason is gone. Delete | |
4498 | the thread, so that the user doesn't see it in the thread | |
4499 | list, the next proceed doesn't try to resume it, etc. */ | |
4500 | if (thr->has_pending_waitstatus () | |
4501 | && (thr->pending_waitstatus ().kind () | |
4502 | == TARGET_WAITKIND_THREAD_EXITED)) | |
4503 | delete_thread (thr); | |
243a9253 | 4504 | } |
243a9253 PA |
4505 | } |
4506 | } | |
4507 | ||
3b12939d PA |
4508 | /* Helper for all_uis_check_sync_execution_done that works on the |
4509 | current UI. */ | |
4510 | ||
4511 | static void | |
4512 | check_curr_ui_sync_execution_done (void) | |
4513 | { | |
4514 | struct ui *ui = current_ui; | |
4515 | ||
4516 | if (ui->prompt_state == PROMPT_NEEDED | |
4517 | && ui->async | |
4518 | && !gdb_in_secondary_prompt_p (ui)) | |
4519 | { | |
223ffa71 | 4520 | target_terminal::ours (); |
c3d321de | 4521 | top_level_interpreter ()->on_sync_execution_done (); |
8f7f9b3a | 4522 | ui->register_file_handler (); |
3b12939d PA |
4523 | } |
4524 | } | |
4525 | ||
4526 | /* See infrun.h. */ | |
4527 | ||
4528 | void | |
4529 | all_uis_check_sync_execution_done (void) | |
4530 | { | |
0e454242 | 4531 | SWITCH_THRU_ALL_UIS () |
3b12939d PA |
4532 | { |
4533 | check_curr_ui_sync_execution_done (); | |
4534 | } | |
4535 | } | |
4536 | ||
a8836c93 PA |
4537 | /* See infrun.h. */ |
4538 | ||
4539 | void | |
4540 | all_uis_on_sync_execution_starting (void) | |
4541 | { | |
0e454242 | 4542 | SWITCH_THRU_ALL_UIS () |
a8836c93 PA |
4543 | { |
4544 | if (current_ui->prompt_state == PROMPT_NEEDED) | |
4545 | async_disable_stdin (); | |
4546 | } | |
4547 | } | |
4548 | ||
0ace6ace PA |
4549 | /* A quit_handler callback installed while we're handling inferior |
4550 | events. */ | |
4551 | ||
4552 | static void | |
4553 | infrun_quit_handler () | |
4554 | { | |
4555 | if (target_terminal::is_ours ()) | |
4556 | { | |
4557 | /* Do nothing. | |
4558 | ||
4559 | default_quit_handler would throw a quit in this case, but if | |
4560 | we're handling an event while we have the terminal, it means | |
4561 | the target is running a background execution command, and | |
4562 | thus when users press Ctrl-C, they're wanting to interrupt | |
4563 | whatever command they were executing in the command line. | |
4564 | E.g.: | |
4565 | ||
4566 | (gdb) c& | |
4567 | (gdb) foo bar whatever<ctrl-c> | |
4568 | ||
4569 | That Ctrl-C should clear the input line, not interrupt event | |
4570 | handling if it happens that the user types Ctrl-C at just the | |
4571 | "wrong" time! | |
4572 | ||
4573 | It's as-if background event handling was handled by a | |
4574 | separate background thread. | |
4575 | ||
4576 | To be clear, the Ctrl-C is not lost -- it will be processed | |
4577 | by the next QUIT call once we're out of fetch_inferior_event | |
4578 | again. */ | |
4579 | } | |
4580 | else | |
4581 | { | |
4582 | if (check_quit_flag ()) | |
4583 | target_pass_ctrlc (); | |
4584 | } | |
4585 | } | |
4586 | ||
1777feb0 | 4587 | /* Asynchronous version of wait_for_inferior. It is called by the |
43ff13b4 | 4588 | event loop whenever a change of state is detected on the file |
1777feb0 MS |
4589 | descriptor corresponding to the target. It can be called more than |
4590 | once to complete a single execution command. In such cases we need | |
4591 | to keep the state in a global variable ECSS. If it is the last time | |
a474d7c2 PA |
4592 | that this function is called for a single execution command, then |
4593 | report to the user that the inferior has stopped, and do the | |
1777feb0 | 4594 | necessary cleanups. */ |
43ff13b4 JM |
4595 | |
4596 | void | |
b1a35af2 | 4597 | fetch_inferior_event () |
43ff13b4 | 4598 | { |
3ec3145c SM |
4599 | INFRUN_SCOPED_DEBUG_ENTER_EXIT; |
4600 | ||
aa563d16 | 4601 | execution_control_state ecs; |
0f641c01 | 4602 | int cmd_done = 0; |
43ff13b4 | 4603 | |
c61db772 PA |
4604 | /* Events are always processed with the main UI as current UI. This |
4605 | way, warnings, debug output, etc. are always consistently sent to | |
4606 | the main console. */ | |
4b6749b9 | 4607 | scoped_restore save_ui = make_scoped_restore (¤t_ui, main_ui); |
c61db772 | 4608 | |
b78b3a29 TBA |
4609 | /* Temporarily disable pagination. Otherwise, the user would be |
4610 | given an option to press 'q' to quit, which would cause an early | |
4611 | exit and could leave GDB in a half-baked state. */ | |
4612 | scoped_restore save_pagination | |
4613 | = make_scoped_restore (&pagination_enabled, false); | |
4614 | ||
0ace6ace PA |
4615 | /* Install a quit handler that does nothing if we have the terminal |
4616 | (meaning the target is running a background execution command), | |
4617 | so that Ctrl-C never interrupts GDB before the event is fully | |
4618 | handled. */ | |
4619 | scoped_restore restore_quit_handler | |
4620 | = make_scoped_restore (&quit_handler, infrun_quit_handler); | |
4621 | ||
141cd158 PA |
4622 | /* Make sure a SIGINT does not interrupt an extension language while |
4623 | we're handling an event. That could interrupt a Python unwinder | |
4624 | or a Python observer or some such. A Ctrl-C should either be | |
4625 | forwarded to the inferior if the inferior has the terminal, or, | |
4626 | if GDB has the terminal, should interrupt the command the user is | |
4627 | typing in the CLI. */ | |
4628 | scoped_disable_cooperative_sigint_handling restore_coop_sigint; | |
4629 | ||
d3d4baed | 4630 | /* End up with readline processing input, if necessary. */ |
d238133d TT |
4631 | { |
4632 | SCOPE_EXIT { reinstall_readline_callback_handler_cleanup (); }; | |
4633 | ||
4634 | /* We're handling a live event, so make sure we're doing live | |
4635 | debugging. If we're looking at traceframes while the target is | |
4636 | running, we're going to need to get back to that mode after | |
4637 | handling the event. */ | |
6b09f134 | 4638 | std::optional<scoped_restore_current_traceframe> maybe_restore_traceframe; |
d238133d TT |
4639 | if (non_stop) |
4640 | { | |
4641 | maybe_restore_traceframe.emplace (); | |
4642 | set_current_traceframe (-1); | |
4643 | } | |
43ff13b4 | 4644 | |
873657b9 PA |
4645 | /* The user/frontend should not notice a thread switch due to |
4646 | internal events. Make sure we revert to the user selected | |
4647 | thread and frame after handling the event and running any | |
4648 | breakpoint commands. */ | |
4649 | scoped_restore_current_thread restore_thread; | |
d238133d TT |
4650 | |
4651 | overlay_cache_invalid = 1; | |
4652 | /* Flush target cache before starting to handle each event. Target | |
4653 | was running and cache could be stale. This is just a heuristic. | |
4654 | Running threads may modify target memory, but we don't get any | |
4655 | event. */ | |
41336620 | 4656 | target_dcache_invalidate (current_program_space->aspace); |
d238133d TT |
4657 | |
4658 | scoped_restore save_exec_dir | |
4659 | = make_scoped_restore (&execution_direction, | |
4660 | target_execution_direction ()); | |
4661 | ||
1192f124 SM |
4662 | /* Allow targets to pause their resumed threads while we handle |
4663 | the event. */ | |
4664 | scoped_disable_commit_resumed disable_commit_resumed ("handling event"); | |
4665 | ||
3df78436 AB |
4666 | /* Is the current thread performing an inferior function call as part |
4667 | of a breakpoint condition evaluation? */ | |
4668 | bool in_cond_eval = (inferior_ptid != null_ptid | |
4669 | && inferior_thread ()->control.in_cond_eval); | |
4670 | ||
4671 | /* If the thread is in the middle of the condition evaluation, wait for | |
4672 | an event from the current thread. Otherwise, wait for an event from | |
4673 | any thread. */ | |
4674 | ptid_t waiton_ptid = in_cond_eval ? inferior_ptid : minus_one_ptid; | |
4675 | ||
4676 | if (!do_target_wait (waiton_ptid, &ecs, TARGET_WNOHANG)) | |
1192f124 SM |
4677 | { |
4678 | infrun_debug_printf ("do_target_wait returned no event"); | |
4679 | disable_commit_resumed.reset_and_commit (); | |
4680 | return; | |
4681 | } | |
5b6d1e4f | 4682 | |
aa563d16 | 4683 | gdb_assert (ecs.ws.kind () != TARGET_WAITKIND_IGNORE); |
5b6d1e4f | 4684 | |
9145fd43 SM |
4685 | /* Switch to the inferior that generated the event, so we can do |
4686 | target calls. If the event was not associated to a ptid, just switch to the event's target. */ | |
4687 | if (ecs.ptid != null_ptid | |
4688 | && ecs.ptid != minus_one_ptid) | |
4689 | switch_to_inferior_no_thread (find_inferior_ptid (ecs.target, ecs.ptid)); | |
4690 | else | |
4691 | switch_to_target_no_thread (ecs.target); | |
d238133d TT |
4692 | |
4693 | if (debug_infrun) | |
3559d599 | 4694 | print_target_wait_results (minus_one_ptid, ecs); |
d238133d TT |
4695 | |
4696 | /* If an error happens while handling the event, propagate GDB's | |
4697 | knowledge of the executing state to the frontend/user running | |
4698 | state. */ | |
aa563d16 TT |
4699 | ptid_t finish_ptid = !target_is_non_stop_p () ? minus_one_ptid : ecs.ptid; |
4700 | scoped_finish_thread_state finish_state (ecs.target, finish_ptid); | |
d238133d | 4701 | |
979a0d13 | 4702 | /* These get executed before the scoped_restore_current_thread above, | |
d238133d TT |
4703 | so they still apply to the thread which has thrown the exception. */ | |
4704 | auto defer_bpstat_clear | |
4705 | = make_scope_exit (bpstat_clear_actions); | |
4706 | auto defer_delete_threads | |
4707 | = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints); | |
4708 | ||
b1c0ab20 AB |
4709 | int stop_id = get_stop_id (); |
4710 | ||
d238133d | 4711 | /* Now figure out what to do with the result of the result. */ |
aa563d16 | 4712 | handle_inferior_event (&ecs); |
d238133d | 4713 | |
aa563d16 | 4714 | if (!ecs.wait_some_more) |
d238133d | 4715 | { |
aa563d16 | 4716 | struct inferior *inf = find_inferior_ptid (ecs.target, ecs.ptid); |
758cb810 | 4717 | bool should_stop = true; |
aa563d16 | 4718 | struct thread_info *thr = ecs.event_thread; |
d6b48e9c | 4719 | |
d238133d | 4720 | delete_just_stopped_threads_infrun_breakpoints (); |
f107f563 | 4721 | |
573269a8 LS |
4722 | if (thr != nullptr && thr->thread_fsm () != nullptr) |
4723 | should_stop = thr->thread_fsm ()->should_stop (thr); | |
243a9253 | 4724 | |
d238133d TT |
4725 | if (!should_stop) |
4726 | { | |
aa563d16 | 4727 | keep_going (&ecs); |
d238133d TT |
4728 | } |
4729 | else | |
4730 | { | |
46e3ed7f | 4731 | bool should_notify_stop = true; |
8dd08de7 | 4732 | bool proceeded = false; |
1840d81a | 4733 | |
3df78436 AB |
4734 | /* If the thread that stopped just completed an inferior |
4735 | function call as part of a condition evaluation, then we | |
4736 | don't want to stop all the other threads. */ | |
4737 | if (ecs.event_thread == nullptr | |
4738 | || !ecs.event_thread->control.in_cond_eval) | |
4739 | stop_all_threads_if_all_stop_mode (); | |
e0c01ce6 | 4740 | |
aa563d16 | 4741 | clean_up_just_stopped_threads_fsms (&ecs); |
243a9253 | 4742 | |
b1c0ab20 AB |
4743 | if (stop_id != get_stop_id ()) |
4744 | { | |
4745 | /* If the stop-id has changed then a stop has already been | |
4746 | presented to the user in handle_inferior_event, this is | |
4747 | likely a failed inferior call. As the stop has already | |
4748 | been announced then we should not notify again. | |
4749 | ||
4750 | Also, if the prompt state is not PROMPT_NEEDED then GDB | |
4751 | will not be ready for user input after this function. */ | |
4752 | should_notify_stop = false; | |
4753 | gdb_assert (current_ui->prompt_state == PROMPT_NEEDED); | |
4754 | } | |
4755 | else if (thr != nullptr && thr->thread_fsm () != nullptr) | |
573269a8 LS |
4756 | should_notify_stop |
4757 | = thr->thread_fsm ()->should_notify_stop (); | |
388a7084 | 4758 | |
d238133d TT |
4759 | if (should_notify_stop) |
4760 | { | |
4761 | /* We may not find an inferior if this was a process exit. */ | |
03acd4d8 | 4762 | if (inf == nullptr || inf->control.stop_soon == NO_STOP_QUIETLY) |
d238133d TT |
4763 | proceeded = normal_stop (); |
4764 | } | |
243a9253 | 4765 | |
3df78436 | 4766 | if (!proceeded && !in_cond_eval) |
d238133d | 4767 | { |
b1a35af2 | 4768 | inferior_event_handler (INF_EXEC_COMPLETE); |
d238133d TT |
4769 | cmd_done = 1; |
4770 | } | |
873657b9 PA |
4771 | |
4772 | /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the | |
4773 | previously selected thread is gone. We have two | |
4774 | choices - switch to no thread selected, or restore the | |
4775 | previously selected thread (now exited). We chose the | |
4776 | latter, just because that's what GDB used to do. After | |
4777 | this, "info threads" says "The current thread <Thread | |
4778 | ID 2> has terminated." instead of "No thread | |
4779 | selected.". */ | |
4780 | if (!non_stop | |
4781 | && cmd_done | |
aa563d16 | 4782 | && ecs.ws.kind () != TARGET_WAITKIND_NO_RESUMED) |
873657b9 | 4783 | restore_thread.dont_restore (); |
d238133d TT |
4784 | } |
4785 | } | |
4f8d22e3 | 4786 | |
d238133d TT |
4787 | defer_delete_threads.release (); |
4788 | defer_bpstat_clear.release (); | |
29f49a6a | 4789 | |
d238133d TT |
4790 | /* No error, don't finish the thread states yet. */ |
4791 | finish_state.release (); | |
731f534f | 4792 | |
1192f124 SM |
4793 | disable_commit_resumed.reset_and_commit (); |
4794 | ||
d238133d TT |
4795 | /* This scope is used to ensure that readline callbacks are |
4796 | reinstalled here. */ | |
4797 | } | |
4f8d22e3 | 4798 | |
152a1749 SM |
4799 | /* Handling this event might have caused some inferiors to become prunable. |
4800 | For example, the exit of an inferior that was automatically added. Try | |
4801 | to get rid of them. Keeping those around slows things down linearly. | |
4802 | ||
4803 | Note that this never removes the current inferior. Therefore, call this | |
4804 | after RESTORE_THREAD went out of scope, in case the event inferior (which was | |
4805 | temporarily made the current inferior) is meant to be deleted. | |
4806 | ||
4807 | Call this before all_uis_check_sync_execution_done, so that notifications about | |
4808 | removed inferiors appear before the prompt. */ | |
4809 | prune_inferiors (); | |
4810 | ||
3b12939d PA |
4811 | /* If a UI was in sync execution mode, and now isn't, restore its |
4812 | prompt (a synchronous execution command has finished, and we're | |
4813 | ready for input). */ | |
4814 | all_uis_check_sync_execution_done (); | |
0f641c01 PA |
4815 | |
4816 | if (cmd_done | |
0f641c01 | 4817 | && exec_done_display_p |
00431a78 PA |
4818 | && (inferior_ptid == null_ptid |
4819 | || inferior_thread ()->state != THREAD_RUNNING)) | |
6cb06a8c | 4820 | gdb_printf (_("completed.\n")); |
43ff13b4 JM |
4821 | } |
4822 | ||
29734269 SM |
4823 | /* See infrun.h. */ |
4824 | ||
edb3359d | 4825 | void |
8480a37e | 4826 | set_step_info (thread_info *tp, const frame_info_ptr &frame, |
29734269 | 4827 | struct symtab_and_line sal) |
edb3359d | 4828 | { |
29734269 SM |
4829 | /* This can be removed once this function no longer implicitly relies on the |
4830 | inferior_ptid value. */ | |
4831 | gdb_assert (inferior_ptid == tp->ptid); | |
edb3359d | 4832 | |
16c381f0 JK |
4833 | tp->control.step_frame_id = get_frame_id (frame); |
4834 | tp->control.step_stack_frame_id = get_stack_frame_id (frame); | |
edb3359d DJ |
4835 | |
4836 | tp->current_symtab = sal.symtab; | |
4837 | tp->current_line = sal.line; | |
c8353d68 AB |
4838 | |
4839 | infrun_debug_printf | |
4840 | ("symtab = %s, line = %d, step_frame_id = %s, step_stack_frame_id = %s", | |
b7e07722 PA |
4841 | tp->current_symtab != nullptr ? tp->current_symtab->filename : "<null>", |
4842 | tp->current_line, | |
c8353d68 AB |
4843 | tp->control.step_frame_id.to_string ().c_str (), |
4844 | tp->control.step_stack_frame_id.to_string ().c_str ()); | |
edb3359d DJ |
4845 | } |
4846 | ||
0d1e5fa7 PA |
4847 | /* Clear context switchable stepping state. */ |
4848 | ||
4849 | void | |
4e1c45ea | 4850 | init_thread_stepping_state (struct thread_info *tss) |
0d1e5fa7 | 4851 | { |
7f5ef605 | 4852 | tss->stepped_breakpoint = 0; |
0d1e5fa7 | 4853 | tss->stepping_over_breakpoint = 0; |
963f9c80 | 4854 | tss->stepping_over_watchpoint = 0; |
0d1e5fa7 | 4855 | tss->step_after_step_resume_breakpoint = 0; |
cd0fc7c3 SS |
4856 | } |
4857 | ||
ab1ddbcf | 4858 | /* See infrun.h. */ |
c32c64b7 | 4859 | |
6efcd9a8 | 4860 | void |
5b6d1e4f | 4861 | set_last_target_status (process_stratum_target *target, ptid_t ptid, |
183be222 | 4862 | const target_waitstatus &status) |
c32c64b7 | 4863 | { |
5b6d1e4f | 4864 | target_last_proc_target = target; |
c32c64b7 DE |
4865 | target_last_wait_ptid = ptid; |
4866 | target_last_waitstatus = status; | |
4867 | } | |
4868 | ||
ab1ddbcf | 4869 | /* See infrun.h. */ |
e02bc4cc DS |
4870 | |
4871 | void | |
5b6d1e4f PA |
4872 | get_last_target_status (process_stratum_target **target, ptid_t *ptid, |
4873 | target_waitstatus *status) | |
e02bc4cc | 4874 | { |
5b6d1e4f PA |
4875 | if (target != nullptr) |
4876 | *target = target_last_proc_target; | |
ab1ddbcf PA |
4877 | if (ptid != nullptr) |
4878 | *ptid = target_last_wait_ptid; | |
4879 | if (status != nullptr) | |
4880 | *status = target_last_waitstatus; | |
e02bc4cc DS |
4881 | } |
4882 | ||
ab1ddbcf PA |
4883 | /* See infrun.h. */ |
4884 | ||
ac264b3b MS |
4885 | void |
4886 | nullify_last_target_wait_ptid (void) | |
4887 | { | |
5b6d1e4f | 4888 | target_last_proc_target = nullptr; |
ac264b3b | 4889 | target_last_wait_ptid = minus_one_ptid; |
ab1ddbcf | 4890 | target_last_waitstatus = {}; |
ac264b3b MS |
4891 | } |
4892 | ||
dcf4fbde | 4893 | /* Switch thread contexts. */ |
dd80620e MS |
4894 | |
4895 | static void | |
00431a78 | 4896 | context_switch (execution_control_state *ecs) |
dd80620e | 4897 | { |
1eb8556f | 4898 | if (ecs->ptid != inferior_ptid |
5b6d1e4f PA |
4899 | && (inferior_ptid == null_ptid |
4900 | || ecs->event_thread != inferior_thread ())) | |
fd48f117 | 4901 | { |
1eb8556f | 4902 | infrun_debug_printf ("Switching context from %s to %s", |
0fab7955 SM |
4903 | inferior_ptid.to_string ().c_str (), |
4904 | ecs->ptid.to_string ().c_str ()); | |
fd48f117 DJ |
4905 | } |
4906 | ||
00431a78 | 4907 | switch_to_thread (ecs->event_thread); |
dd80620e MS |
4908 | } |
4909 | ||
d8dd4d5f PA |
4910 | /* If the target can't tell whether we've hit breakpoints |
4911 | (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP, | |
4912 | check whether that could have been caused by a breakpoint. If so, | |
4913 | adjust the PC, per gdbarch_decr_pc_after_break. */ | |
4914 | ||
4fa8626c | 4915 | static void |
d8dd4d5f | 4916 | adjust_pc_after_break (struct thread_info *thread, |
c272a98c | 4917 | const target_waitstatus &ws) |
4fa8626c | 4918 | { |
24a73cce UW |
4919 | struct regcache *regcache; |
4920 | struct gdbarch *gdbarch; | |
118e6252 | 4921 | CORE_ADDR breakpoint_pc, decr_pc; |
4fa8626c | 4922 | |
4fa8626c DJ |
4923 | /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If |
4924 | we aren't, just return. | |
9709f61c DJ |
4925 | |
4926 | We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not | |
b798847d UW |
4927 | affected by gdbarch_decr_pc_after_break. Other waitkinds which are |
4928 | implemented by software breakpoints should be handled through the normal | |
4929 | breakpoint layer. | |
8fb3e588 | 4930 | |
4fa8626c DJ |
4931 | NOTE drow/2004-01-31: On some targets, breakpoints may generate |
4932 | different signals (SIGILL or SIGEMT for instance), but it is less | |
4933 | clear where the PC is pointing afterwards. It may not match | |
b798847d UW |
4934 | gdbarch_decr_pc_after_break. I don't know any specific target that |
4935 | generates these signals at breakpoints (the code has been in GDB since at | |
4936 | least 1992) so I cannot guess how to handle them here. | |
8fb3e588 | 4937 | |
e6cf7916 UW |
4938 | In earlier versions of GDB, a target with |
4939 | gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a | |
b798847d UW |
4940 | watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any |
4941 | target with both of these set in GDB history, and it seems unlikely to be | |
4942 | correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */ | |
4fa8626c | 4943 | |
c272a98c | 4944 | if (ws.kind () != TARGET_WAITKIND_STOPPED) |
4fa8626c DJ |
4945 | return; |
4946 | ||
c272a98c | 4947 | if (ws.sig () != GDB_SIGNAL_TRAP) |
4fa8626c DJ |
4948 | return; |
4949 | ||
4058b839 PA |
4950 | /* In reverse execution, when a breakpoint is hit, the instruction |
4951 | under it has already been de-executed. The reported PC always | |
4952 | points at the breakpoint address, so adjusting it further would | |
4953 | be wrong. E.g., consider this case on a decr_pc_after_break == 1 | |
4954 | architecture: | |
4955 | ||
4956 | B1 0x08000000 : INSN1 | |
4957 | B2 0x08000001 : INSN2 | |
4958 | 0x08000002 : INSN3 | |
4959 | PC -> 0x08000003 : INSN4 | |
4960 | ||
4961 | Say you're stopped at 0x08000003 as above. Reverse continuing | |
4962 | from that point should hit B2 as below. Reading the PC when the | |
4963 | SIGTRAP is reported should read 0x08000001 and INSN2 should have | |
4964 | been de-executed already. | |
4965 | ||
4966 | B1 0x08000000 : INSN1 | |
4967 | B2 PC -> 0x08000001 : INSN2 | |
4968 | 0x08000002 : INSN3 | |
4969 | 0x08000003 : INSN4 | |
4970 | ||
4971 | We can't apply the same logic as for forward execution, because | |
4972 | we would wrongly adjust the PC to 0x08000000, since there's a | |
4973 | breakpoint at PC - 1. We'd then report a hit on B1, although | |
4974 | INSN1 hadn't been de-executed yet. Doing nothing is the correct | |
ac51afb5 | 4975 | behavior. */ |
4058b839 PA |
4976 | if (execution_direction == EXEC_REVERSE) |
4977 | return; | |
4978 | ||
1cf4d951 PA |
4979 | /* If the target can tell whether the thread hit a SW breakpoint, |
4980 | trust it. Targets that can tell also adjust the PC | |
4981 | themselves. */ | |
4982 | if (target_supports_stopped_by_sw_breakpoint ()) | |
4983 | return; | |
4984 | ||
4985 | /* Note that relying on whether a breakpoint is planted in memory to | |
4986 | determine this can fail. E.g,. the breakpoint could have been | |
4987 | removed since. Or the thread could have been told to step an | |
4988 | instruction the size of a breakpoint instruction, and only | |
4989 | _after_ was a breakpoint inserted at its address. */ | |
4990 | ||
24a73cce UW |
4991 | /* If this target does not decrement the PC after breakpoints, then |
4992 | we have nothing to do. */ | |
00431a78 | 4993 | regcache = get_thread_regcache (thread); |
ac7936df | 4994 | gdbarch = regcache->arch (); |
118e6252 | 4995 | |
527a273a | 4996 | decr_pc = gdbarch_decr_pc_after_break (gdbarch); |
118e6252 | 4997 | if (decr_pc == 0) |
24a73cce UW |
4998 | return; |
4999 | ||
f9582a22 | 5000 | const address_space *aspace = thread->inf->aspace.get (); |
6c95b8df | 5001 | |
8aad930b AC |
5002 | /* Find the location where (if we've hit a breakpoint) the |
5003 | breakpoint would be. */ | |
118e6252 | 5004 | breakpoint_pc = regcache_read_pc (regcache) - decr_pc; |
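/* Worked example, illustrative numbers only: on a target whose
   breakpoint instruction leaves the PC just past itself (decr_pc == 1,
   as with an x86-style int3), a trap reported at PC 0x1001 gives
   breakpoint_pc == 0x1000 -- the address the breakpoint was planted
   at. */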
8aad930b | 5005 | |
1cf4d951 PA |
5006 | /* If the target can't tell whether a software breakpoint triggered, |
5007 | fallback to figuring it out based on breakpoints we think were | |
5008 | inserted in the target, and on whether the thread was stepped or | |
5009 | continued. */ | |
5010 | ||
1c5cfe86 PA |
5011 | /* Check whether there actually is a software breakpoint inserted at |
5012 | that location. | |
5013 | ||
5014 | If in non-stop mode, a race condition is possible where we've | |
5015 | removed a breakpoint, but stop events for that breakpoint were | |
5016 | already queued and arrive later. To suppress those spurious | |
5017 | SIGTRAPs, we keep a list of such breakpoint locations for a bit, | |
1cf4d951 PA |
5018 | and retire them after a number of stop events are reported. Note |
5019 | this is a heuristic and can thus get confused. The real fix is | |
5020 | to get the "stopped by SW BP and needs adjustment" info out of | |
5021 | the target/kernel (and thus never reach here; see above). */ | |
6c95b8df | 5022 | if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc) |
fbea99ea PA |
5023 | || (target_is_non_stop_p () |
5024 | && moribund_breakpoint_here_p (aspace, breakpoint_pc))) | |
8aad930b | 5025 | { |
6b09f134 | 5026 | std::optional<scoped_restore_tmpl<int>> restore_operation_disable; |
abbb1732 | 5027 | |
8213266a | 5028 | if (record_full_is_used ()) |
07036511 TT |
5029 | restore_operation_disable.emplace |
5030 | (record_full_gdb_operation_disable_set ()); | |
96429cc8 | 5031 | |
1c0fdd0e UW |
5032 | /* When using hardware single-step, a SIGTRAP is reported for both |
5033 | a completed single-step and a software breakpoint. Need to | |
5034 | differentiate between the two, as the latter needs adjusting | |
5035 | but the former does not. | |
5036 | ||
5037 | The SIGTRAP can be due to a completed hardware single-step only if | |
5038 | - we didn't insert software single-step breakpoints | |
1c0fdd0e UW |
5039 | - this thread is currently being stepped |
5040 | ||
5041 | If either of these conditions does not hold, we must have stopped | |
5042 | due to hitting a software breakpoint, and have to back up to the | |
5043 | breakpoint address. | |
5044 | ||
5045 | As a special case, we could have hardware single-stepped a | |
5046 | software breakpoint. In this case (prev_pc == breakpoint_pc), | |
5047 | we also need to back up to the breakpoint address. */ | |
5048 | ||
d8dd4d5f PA |
5049 | if (thread_has_single_step_breakpoints_set (thread) |
5050 | || !currently_stepping (thread) | |
5051 | || (thread->stepped_breakpoint | |
5052 | && thread->prev_pc == breakpoint_pc)) | |
515630c5 | 5053 | regcache_write_pc (regcache, breakpoint_pc); |
8aad930b | 5054 | } |
4fa8626c DJ |
5055 | } |
5056 | ||
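/* Editor's illustration (not part of infrun.c).  A minimal,
   self-contained sketch of the adjustment decided above, with
   hypothetical stand-ins (fake_thread, BP_AT) for the regcache and
   breakpoint-table queries: after a software breakpoint trap on a
   target with a nonzero decr_pc, the PC is rolled back to the
   breakpoint address unless the trap was a completed hardware
   single-step.  */

#include <cstdint>
#include <cstdio>

struct fake_thread
{
  uint64_t pc;             /* PC as reported by the target.  */
  uint64_t prev_pc;        /* PC before the last resume.  */
  bool stepping;           /* Thread was being stepped.  */
  bool stepped_breakpoint; /* We hardware-stepped a breakpoint insn.  */
  bool sw_step_bps;        /* Software single-step bps were in use.  */
};

/* Return the PC to report.  BP_AT answers "is a software breakpoint
   inserted at this address?".  */

static uint64_t
adjusted_pc (const fake_thread &t, unsigned decr_pc,
             bool (*bp_at) (uint64_t))
{
  if (decr_pc == 0)
    return t.pc;            /* Target doesn't decrement the PC.  */
  uint64_t breakpoint_pc = t.pc - decr_pc;
  if (!bp_at (breakpoint_pc))
    return t.pc;            /* No breakpoint there; nothing to do.  */
  if (t.sw_step_bps || !t.stepping
      || (t.stepped_breakpoint && t.prev_pc == breakpoint_pc))
    return breakpoint_pc;   /* Roll back to the breakpoint address.  */
  return t.pc;              /* Completed hardware single-step.  */
}

int
main ()
{
  auto bp_at = [] (uint64_t addr) { return addr == 0x08000000; };
  /* Continued into a breakpoint at 0x08000000 on a target where
     decr_pc == 1 (e.g. x86): the reported 0x08000001 is rolled back.  */
  fake_thread t = { 0x08000001, 0, false, false, false };
  printf ("%#lx\n", (unsigned long) adjusted_pc (t, 1, bp_at));
  return 0;
}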
c4464ade | 5057 | static bool |
8480a37e | 5058 | stepped_in_from (const frame_info_ptr &initial_frame, frame_id step_frame_id) |
edb3359d | 5059 | { |
8480a37e SM |
5060 | frame_info_ptr frame = initial_frame; |
5061 | ||
edb3359d | 5062 | for (frame = get_prev_frame (frame); |
03acd4d8 | 5063 | frame != nullptr; |
edb3359d DJ |
5064 | frame = get_prev_frame (frame)) |
5065 | { | |
a0cbd650 | 5066 | if (get_frame_id (frame) == step_frame_id) |
c4464ade SM |
5067 | return true; |
5068 | ||
edb3359d DJ |
5069 | if (get_frame_type (frame) != INLINE_FRAME) |
5070 | break; | |
5071 | } | |
5072 | ||
c4464ade | 5073 | return false; |
edb3359d DJ |
5074 | } |
5075 | ||
4a4c04f1 BE |
5076 | /* Look for an inline frame that is marked for skip. |
5077 | If PREV_FRAME is TRUE, start at the previous frame, | |
5078 | otherwise start at the current frame. Stop at the | |
5079 | first non-inline frame, or at the frame where the | |
5080 | step started. */ | |
5081 | ||
5082 | static bool | |
5083 | inline_frame_is_marked_for_skip (bool prev_frame, struct thread_info *tp) | |
5084 | { | |
bd2b40ac | 5085 | frame_info_ptr frame = get_current_frame (); |
4a4c04f1 BE |
5086 | |
5087 | if (prev_frame) | |
5088 | frame = get_prev_frame (frame); | |
5089 | ||
03acd4d8 | 5090 | for (; frame != nullptr; frame = get_prev_frame (frame)) |
4a4c04f1 | 5091 | { |
03acd4d8 | 5092 | const char *fn = nullptr; |
4a4c04f1 BE |
5093 | symtab_and_line sal; |
5094 | struct symbol *sym; | |
5095 | ||
a0cbd650 | 5096 | if (get_frame_id (frame) == tp->control.step_frame_id) |
4a4c04f1 BE |
5097 | break; |
5098 | if (get_frame_type (frame) != INLINE_FRAME) | |
5099 | break; | |
5100 | ||
5101 | sal = find_frame_sal (frame); | |
5102 | sym = get_frame_function (frame); | |
5103 | ||
03acd4d8 | 5104 | if (sym != nullptr) |
4a4c04f1 BE |
5105 | fn = sym->print_name (); |
5106 | ||
5107 | if (sal.line != 0 | |
5108 | && function_name_is_marked_for_skip (fn, sal)) | |
5109 | return true; | |
5110 | } | |
5111 | ||
5112 | return false; | |
5113 | } | |
5114 | ||
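/* Editor's illustration (not part of infrun.c).  A standalone sketch
   of the walk above, with hypothetical stand-ins (fake_frame, the
   MARKED_FOR_SKIP predicate) for GDB's frame and skip machinery: scan
   outward through inline frames only, stopping at the frame the step
   started in or at the first non-inline frame.  */

#include <string>
#include <vector>

struct fake_frame
{
  bool is_inline;       /* get_frame_type () == INLINE_FRAME.  */
  bool is_step_frame;   /* Frame id matches the step's frame id.  */
  std::string function;
};

/* OUTWARD_CHAIN is ordered from the innermost frame outward.  */

static bool
any_inline_frame_skipped (const std::vector<fake_frame> &outward_chain,
                          bool (*marked_for_skip) (const std::string &))
{
  for (const fake_frame &f : outward_chain)
    {
      if (f.is_step_frame)
        break;                  /* Reached where the step started.  */
      if (!f.is_inline)
        break;                  /* First real (non-inline) frame.  */
      if (marked_for_skip (f.function))
        return true;
    }
  return false;
}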
c65d6b55 PA |
5115 | /* If the event thread has the stop requested flag set, pretend it |
5116 | stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to | |
5117 | target_stop). */ | |
5118 | ||
5119 | static bool | |
5120 | handle_stop_requested (struct execution_control_state *ecs) | |
5121 | { | |
5122 | if (ecs->event_thread->stop_requested) | |
5123 | { | |
183be222 | 5124 | ecs->ws.set_stopped (GDB_SIGNAL_0); |
c65d6b55 PA |
5125 | handle_signal_stop (ecs); |
5126 | return true; | |
5127 | } | |
5128 | return false; | |
5129 | } | |
5130 | ||
a96d9b2e | 5131 | /* Auxiliary function that handles syscall entry/return events. |
c4464ade SM |
5132 | It returns true if the inferior should keep going (and GDB |
5133 | should ignore the event), or false if the event deserves to be | |
a96d9b2e | 5134 | processed. */ |
ca2163eb | 5135 | |
c4464ade | 5136 | static bool |
ca2163eb | 5137 | handle_syscall_event (struct execution_control_state *ecs) |
a96d9b2e | 5138 | { |
ca2163eb | 5139 | struct regcache *regcache; |
ca2163eb PA |
5140 | int syscall_number; |
5141 | ||
00431a78 | 5142 | context_switch (ecs); |
ca2163eb | 5143 | |
00431a78 | 5144 | regcache = get_thread_regcache (ecs->event_thread); |
183be222 | 5145 | syscall_number = ecs->ws.syscall_number (); |
1edb66d8 | 5146 | ecs->event_thread->set_stop_pc (regcache_read_pc (regcache)); |
ca2163eb | 5147 | |
f087eb27 | 5148 | if (catch_syscall_enabled () |
9fe3819e | 5149 | && catching_syscall_number (syscall_number)) |
a96d9b2e | 5150 | { |
1eb8556f | 5151 | infrun_debug_printf ("syscall number=%d", syscall_number); |
a96d9b2e | 5152 | |
16c381f0 | 5153 | ecs->event_thread->control.stop_bpstat |
f9582a22 | 5154 | = bpstat_stop_status_nowatch (ecs->event_thread->inf->aspace.get (), |
d37e0847 PA |
5155 | ecs->event_thread->stop_pc (), |
5156 | ecs->event_thread, ecs->ws); | |
ab04a2af | 5157 | |
c65d6b55 | 5158 | if (handle_stop_requested (ecs)) |
c4464ade | 5159 | return false; |
c65d6b55 | 5160 | |
ce12b012 | 5161 | if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat)) |
ca2163eb PA |
5162 | { |
5163 | /* Catchpoint hit. */ | |
c4464ade | 5164 | return false; |
ca2163eb | 5165 | } |
a96d9b2e | 5166 | } |
ca2163eb | 5167 | |
c65d6b55 | 5168 | if (handle_stop_requested (ecs)) |
c4464ade | 5169 | return false; |
c65d6b55 | 5170 | |
ca2163eb | 5171 | /* If no catchpoint triggered for this, then keep going. */ |
ca2163eb | 5172 | keep_going (ecs); |
c4464ade SM |
5173 | |
5174 | return true; | |
a96d9b2e SDJ |
5175 | } |
5176 | ||
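/* Editor's illustration (not part of infrun.c; the flags are
   hypothetical stand-ins for the catchpoint and stop-request queries
   above).  The function's contract reduced to one decision: the
   inferior keeps going unless a stop was explicitly requested or a
   syscall catchpoint actually causes a stop.  */

static bool
syscall_event_keeps_going (bool stop_requested, bool catching_this_syscall,
                           bool bpstat_causes_stop)
{
  if (stop_requested)
    return false;       /* Report it as a GDB_SIGNAL_0 stop.  */
  if (catching_this_syscall && bpstat_causes_stop)
    return false;       /* Catchpoint hit: report it.  */
  return true;          /* Nothing interesting: keep going.  */
}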
7e324e48 GB |
5177 | /* Lazily fill in the execution_control_state's stop_func_* fields. */ |
5178 | ||
5179 | static void | |
5180 | fill_in_stop_func (struct gdbarch *gdbarch, | |
5181 | struct execution_control_state *ecs) | |
5182 | { | |
5183 | if (!ecs->stop_func_filled_in) | |
5184 | { | |
98a617f8 | 5185 | const block *block; |
fe830662 | 5186 | const general_symbol_info *gsi; |
98a617f8 | 5187 | |
7e324e48 GB |
5188 | /* Don't care about return value; stop_func_start and stop_func_name |
5189 | will both be 0 if it doesn't work. */ | |
1edb66d8 | 5190 | find_pc_partial_function_sym (ecs->event_thread->stop_pc (), |
fe830662 TT |
5191 | &gsi, |
5192 | &ecs->stop_func_start, | |
5193 | &ecs->stop_func_end, | |
5194 | &block); | |
5195 | ecs->stop_func_name = gsi == nullptr ? nullptr : gsi->print_name (); | |
98a617f8 KB |
5196 | |
5197 | /* The call to find_pc_partial_function_sym, above, will set | |
5198 | stop_func_start and stop_func_end to the start and end | |
5199 | of the range containing the stop pc. If this range | |
5200 | contains the entry pc for the block (which is always the | |
5201 | case for contiguous blocks), advance stop_func_start past | |
5202 | the function's start offset and entrypoint. Note that | |
5203 | stop_func_start is NOT advanced when in a range of a | |
5204 | non-contiguous block that does not contain the entry pc. */ | |
5205 | if (block != nullptr | |
6395b628 SM |
5206 | && ecs->stop_func_start <= block->entry_pc () |
5207 | && block->entry_pc () < ecs->stop_func_end) | |
98a617f8 KB |
5208 | { |
5209 | ecs->stop_func_start | |
5210 | += gdbarch_deprecated_function_start_offset (gdbarch); | |
5211 | ||
2a8339b7 CL |
5212 | /* PowerPC functions have a Local Entry Point (LEP) and a Global |
5213 | Entry Point (GEP). There is only one Entry Point (GEP = LEP) for | |
5214 | other architectures. */ | |
5215 | ecs->stop_func_alt_start = ecs->stop_func_start; | |
5216 | ||
98a617f8 KB |
5217 | if (gdbarch_skip_entrypoint_p (gdbarch)) |
5218 | ecs->stop_func_start | |
5219 | = gdbarch_skip_entrypoint (gdbarch, ecs->stop_func_start); | |
5220 | } | |
591a12a1 | 5221 | |
7e324e48 GB |
5222 | ecs->stop_func_filled_in = 1; |
5223 | } | |
5224 | } | |
5225 | ||
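/* Editor's illustration (not part of infrun.c; the addresses and the
   8-byte offset are hypothetical).  The range check used above: START
   is advanced past the start offset only when the block's entry PC
   lies inside [START, END), i.e. when this address range is the one
   containing the function's entry point.  Ranges of a non-contiguous
   function that do not contain the entry PC are left untouched.  */

#include <cstdint>
#include <cstdio>

static uint64_t
maybe_advance_start (uint64_t start, uint64_t end, uint64_t entry_pc,
                     uint64_t start_offset)
{
  if (start <= entry_pc && entry_pc < end)
    return start + start_offset;        /* Range holds the entry point.  */
  return start;                         /* Non-entry range: untouched.  */
}

int
main ()
{
  /* Contiguous function at [0x1000, 0x1100) with entry 0x1000.  */
  printf ("%#lx\n", (unsigned long) maybe_advance_start (0x1000, 0x1100,
                                                         0x1000, 8));
  /* Cold partition [0x5000, 0x5040) of the same function: no change.  */
  printf ("%#lx\n", (unsigned long) maybe_advance_start (0x5000, 0x5040,
                                                         0x1000, 8));
  return 0;
}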
4f5d7f63 | 5226 | |
00431a78 | 5227 | /* Return the STOP_SOON field of the inferior pointed at by ECS. */ |
4f5d7f63 PA |
5228 | |
5229 | static enum stop_kind | |
00431a78 | 5230 | get_inferior_stop_soon (execution_control_state *ecs) |
4f5d7f63 | 5231 | { |
5b6d1e4f | 5232 | struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid); |
4f5d7f63 | 5233 | |
03acd4d8 | 5234 | gdb_assert (inf != nullptr); |
4f5d7f63 PA |
5235 | return inf->control.stop_soon; |
5236 | } | |
5237 | ||
5b6d1e4f PA |
5238 | /* Poll for one event out of the current target. Store the resulting |
5239 | waitstatus in WS, and return the event ptid. Does not block. */ | |
372316f1 PA |
5240 | |
5241 | static ptid_t | |
5b6d1e4f | 5242 | poll_one_curr_target (struct target_waitstatus *ws) |
372316f1 PA |
5243 | { |
5244 | ptid_t event_ptid; | |
372316f1 PA |
5245 | |
5246 | overlay_cache_invalid = 1; | |
5247 | ||
5248 | /* Flush target cache before starting to handle each event. | |
5249 | Target was running and cache could be stale. This is just a | |
5250 | heuristic. Running threads may modify target memory, but we | |
5251 | don't get any event. */ | |
41336620 | 5252 | target_dcache_invalidate (current_program_space->aspace); |
372316f1 | 5253 | |
fb85cece | 5254 | event_ptid = target_wait (minus_one_ptid, ws, TARGET_WNOHANG); |
372316f1 PA |
5255 | |
5256 | if (debug_infrun) | |
3559d599 TBA |
5257 | print_target_wait_results (minus_one_ptid, event_ptid, *ws, |
5258 | current_inferior ()->process_target ()); | |
372316f1 PA |
5259 | |
5260 | return event_ptid; | |
5261 | } | |
5262 | ||
5b6d1e4f PA |
5263 | /* Wait for one event out of any target. */ |
5264 | ||
5265 | static wait_one_event | |
5266 | wait_one () | |
5267 | { | |
5268 | while (1) | |
5269 | { | |
5270 | for (inferior *inf : all_inferiors ()) | |
5271 | { | |
5272 | process_stratum_target *target = inf->process_target (); | |
03acd4d8 | 5273 | if (target == nullptr |
5b6d1e4f PA |
5274 | || !target->is_async_p () |
5275 | || !target->threads_executing) | |
5276 | continue; | |
5277 | ||
5278 | switch_to_inferior_no_thread (inf); | |
5279 | ||
5280 | wait_one_event event; | |
5281 | event.target = target; | |
5282 | event.ptid = poll_one_curr_target (&event.ws); | |
5283 | ||
183be222 | 5284 | if (event.ws.kind () == TARGET_WAITKIND_NO_RESUMED) |
5b6d1e4f PA |
5285 | { |
5286 | /* If nothing is resumed, remove the target from the | |
5287 | event loop. */ | |
4a570176 | 5288 | target_async (false); |
5b6d1e4f | 5289 | } |
183be222 | 5290 | else if (event.ws.kind () != TARGET_WAITKIND_IGNORE) |
5b6d1e4f PA |
5291 | return event; |
5292 | } | |
5293 | ||
5294 | /* Block waiting for some event. */ | |
5295 | ||
5296 | fd_set readfds; | |
5297 | int nfds = 0; | |
5298 | ||
5299 | FD_ZERO (&readfds); | |
5300 | ||
5301 | for (inferior *inf : all_inferiors ()) | |
5302 | { | |
5303 | process_stratum_target *target = inf->process_target (); | |
03acd4d8 | 5304 | if (target == nullptr |
5b6d1e4f PA |
5305 | || !target->is_async_p () |
5306 | || !target->threads_executing) | |
5307 | continue; | |
5308 | ||
5309 | int fd = target->async_wait_fd (); | |
5310 | FD_SET (fd, &readfds); | |
5311 | if (nfds <= fd) | |
5312 | nfds = fd + 1; | |
5313 | } | |
5314 | ||
5315 | if (nfds == 0) | |
5316 | { | |
5317 | /* No waitable targets left. All must be stopped. */ | |
d828dbed PA |
5318 | infrun_debug_printf ("no waitable targets left"); |
5319 | ||
183be222 SM |
5320 | target_waitstatus ws; |
5321 | ws.set_no_resumed (); | |
03acd4d8 | 5322 | return {nullptr, minus_one_ptid, std::move (ws)}; |
5b6d1e4f PA |
5323 | } |
5324 | ||
5325 | QUIT; | |
5326 | ||
03acd4d8 | 5327 | int numfds = interruptible_select (nfds, &readfds, 0, nullptr, 0); |
5b6d1e4f PA |
5328 | if (numfds < 0) |
5329 | { | |
5330 | if (errno == EINTR) | |
5331 | continue; | |
5332 | else | |
5333 | perror_with_name ("interruptible_select"); | |
5334 | } | |
5335 | } | |
5336 | } | |
5337 | ||
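/* Editor's illustration (not part of infrun.c).  A standalone POSIX
   sketch of the same poll-then-block pattern wait_one uses, with
   plain file descriptors standing in for targets; the real code polls
   each target via target_wait with TARGET_WNOHANG and blocks in
   interruptible_select.  Every async source is first polled without
   blocking, and only if none has an event do we block in select () on
   all of their descriptors at once.  */

#include <sys/select.h>
#include <unistd.h>
#include <cstdio>
#include <vector>

struct event_source
{
  int fd;       /* Readable when an event is pending.  */
  bool async;   /* Mirrors target->is_async_p ().  */
};

/* Return the fd of a source with a pending event, blocking if needed,
   or -1 when no source is waitable (all stopped).  */

static int
wait_one_sketch (const std::vector<event_source> &sources)
{
  while (true)
    {
      fd_set readfds;
      FD_ZERO (&readfds);
      int nfds = 0;
      for (const event_source &s : sources)
        {
          if (!s.async)
            continue;
          FD_SET (s.fd, &readfds);
          if (nfds <= s.fd)
            nfds = s.fd + 1;
        }
      if (nfds == 0)
        return -1;              /* No waitable sources left.  */

      /* Non-blocking poll across all sources first ...  */
      fd_set ready = readfds;
      timeval poll_now = { 0, 0 };
      int n = select (nfds, &ready, nullptr, nullptr, &poll_now);
      if (n <= 0)
        {
          /* ... and only then a blocking wait for some event.  */
          ready = readfds;
          n = select (nfds, &ready, nullptr, nullptr, nullptr);
          if (n < 0)
            continue;           /* EINTR and friends: retry.  */
        }
      for (const event_source &s : sources)
        if (s.async && FD_ISSET (s.fd, &ready))
          return s.fd;
    }
}

int
main ()
{
  int p[2];
  if (pipe (p) != 0)
    return 1;
  (void) write (p[1], "x", 1);  /* Queue one event.  */
  std::vector<event_source> srcs = { { p[0], true } };
  printf ("event on fd %d\n", wait_one_sketch (srcs));
  return 0;
}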
372316f1 PA |
5338 | /* Save the thread's event and stop reason to process it later. */ |
5339 | ||
5340 | static void | |
c272a98c | 5341 | save_waitstatus (struct thread_info *tp, const target_waitstatus &ws) |
372316f1 | 5342 | { |
96bbe3ef | 5343 | infrun_debug_printf ("saving status %s for %s", |
c272a98c | 5344 | ws.to_string ().c_str (), |
96bbe3ef | 5345 | tp->ptid.to_string ().c_str ()); |
372316f1 PA |
5346 | |
5347 | /* Record for later. */ | |
c272a98c | 5348 | tp->set_pending_waitstatus (ws); |
372316f1 | 5349 | |
c272a98c SM |
5350 | if (ws.kind () == TARGET_WAITKIND_STOPPED |
5351 | && ws.sig () == GDB_SIGNAL_TRAP) | |
372316f1 | 5352 | { |
89ba430c | 5353 | struct regcache *regcache = get_thread_regcache (tp); |
f9582a22 | 5354 | const address_space *aspace = tp->inf->aspace.get (); |
372316f1 PA |
5355 | CORE_ADDR pc = regcache_read_pc (regcache); |
5356 | ||
c272a98c | 5357 | adjust_pc_after_break (tp, tp->pending_waitstatus ()); |
372316f1 | 5358 | |
18493a00 PA |
5359 | scoped_restore_current_thread restore_thread; |
5360 | switch_to_thread (tp); | |
5361 | ||
5362 | if (target_stopped_by_watchpoint ()) | |
1edb66d8 | 5363 | tp->set_stop_reason (TARGET_STOPPED_BY_WATCHPOINT); |
372316f1 | 5364 | else if (target_supports_stopped_by_sw_breakpoint () |
18493a00 | 5365 | && target_stopped_by_sw_breakpoint ()) |
1edb66d8 | 5366 | tp->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT); |
372316f1 | 5367 | else if (target_supports_stopped_by_hw_breakpoint () |
18493a00 | 5368 | && target_stopped_by_hw_breakpoint ()) |
1edb66d8 | 5369 | tp->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT); |
372316f1 | 5370 | else if (!target_supports_stopped_by_hw_breakpoint () |
1edb66d8 SM |
5371 | && hardware_breakpoint_inserted_here_p (aspace, pc)) |
5372 | tp->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT); | |
372316f1 | 5373 | else if (!target_supports_stopped_by_sw_breakpoint () |
1edb66d8 SM |
5374 | && software_breakpoint_inserted_here_p (aspace, pc)) |
5375 | tp->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT); | |
372316f1 PA |
5376 | else if (!thread_has_single_step_breakpoints_set (tp) |
5377 | && currently_stepping (tp)) | |
1edb66d8 | 5378 | tp->set_stop_reason (TARGET_STOPPED_BY_SINGLE_STEP); |
372316f1 PA |
5379 | } |
5380 | } | |
5381 | ||
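/* Editor's illustration (not part of infrun.c; the boolean flags are
   hypothetical stand-ins for the target_* queries above).  When the
   target cannot report the stop reason itself, a SIGTRAP is
   classified from what GDB knows about inserted breakpoints and
   stepping state, in this exact fallback order.  */

#include <cstdio>

enum class stop_reason
{
  none, watchpoint, sw_breakpoint, hw_breakpoint, single_step
};

struct trap_context
{
  bool stopped_by_watchpoint;
  bool sw_bp_supported, sw_bp_reported, sw_bp_inserted_at_pc;
  bool hw_bp_supported, hw_bp_reported, hw_bp_inserted_at_pc;
  bool has_ss_breakpoints, stepping;
};

static stop_reason
classify_sigtrap (const trap_context &c)
{
  if (c.stopped_by_watchpoint)
    return stop_reason::watchpoint;
  if (c.sw_bp_supported && c.sw_bp_reported)
    return stop_reason::sw_breakpoint;
  if (c.hw_bp_supported && c.hw_bp_reported)
    return stop_reason::hw_breakpoint;
  /* Target can't classify: infer from what we inserted ourselves.  */
  if (!c.hw_bp_supported && c.hw_bp_inserted_at_pc)
    return stop_reason::hw_breakpoint;
  if (!c.sw_bp_supported && c.sw_bp_inserted_at_pc)
    return stop_reason::sw_breakpoint;
  if (!c.has_ss_breakpoints && c.stepping)
    return stop_reason::single_step;
  return stop_reason::none;
}

int
main ()
{
  trap_context c = {};
  c.stepping = true;            /* Plain hardware single-step.  */
  printf ("%d\n", (int) classify_sigtrap (c));  /* 4 == single_step.  */
  return 0;
}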
293b3ebc TBA |
5382 | /* Mark the non-executing threads accordingly. In all-stop, all |
5383 | threads of all processes are stopped when we get any event | |
5384 | reported. In non-stop mode, only the event thread stops. */ | |
5385 | ||
5386 | static void | |
5387 | mark_non_executing_threads (process_stratum_target *target, | |
5388 | ptid_t event_ptid, | |
183be222 | 5389 | const target_waitstatus &ws) |
293b3ebc TBA |
5390 | { |
5391 | ptid_t mark_ptid; | |
5392 | ||
5393 | if (!target_is_non_stop_p ()) | |
5394 | mark_ptid = minus_one_ptid; | |
183be222 SM |
5395 | else if (ws.kind () == TARGET_WAITKIND_SIGNALLED |
5396 | || ws.kind () == TARGET_WAITKIND_EXITED) | |
293b3ebc TBA |
5397 | { |
5398 | /* If we're handling a process exit in non-stop mode, even | |
5399 | though threads haven't been deleted yet, one would think | |
5400 | that there is nothing to do, as threads of the dead process | |
5401 | will be soon deleted, and threads of any other process were | |
5402 | left running. However, on some targets, threads survive a | |
5403 | process exit event. E.g., for the "checkpoint" command, | |
5404 | when the current checkpoint/fork exits, linux-fork.c | |
5405 | automatically switches to another fork from within | |
5406 | target_mourn_inferior, by associating the same | |
5407 | inferior/thread to another fork. We haven't mourned yet at | |
5408 | this point, but we must mark any threads left in the | |
5409 | process as not-executing so that finish_thread_state marks | |
5410 | them stopped (in the user's perspective) if/when we present | |
5411 | the stop to the user. */ | |
5412 | mark_ptid = ptid_t (event_ptid.pid ()); | |
5413 | } | |
5414 | else | |
5415 | mark_ptid = event_ptid; | |
5416 | ||
5417 | set_executing (target, mark_ptid, false); | |
5418 | ||
5419 | /* Likewise the resumed flag. */ | |
5420 | set_resumed (target, mark_ptid, false); | |
5421 | } | |
5422 | ||
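/* Editor's illustration (not part of infrun.c; fake_ptid is a
   hypothetical stand-in for ptid_t).  Which threads the function
   above marks non-executing: everything in all-stop; the whole
   exiting process for exit/signal events in non-stop; otherwise just
   the event thread.  */

#include <cstdio>

enum class wait_kind { stopped, exited, signalled };

struct fake_ptid
{
  int pid;      /* -1: every process.  */
  int tid;      /* -1: every thread of the process.  */
};

static fake_ptid
mark_scope (bool non_stop, wait_kind kind, fake_ptid event_ptid)
{
  if (!non_stop)
    return { -1, -1 };                  /* All-stop: everything stopped.  */
  if (kind == wait_kind::exited || kind == wait_kind::signalled)
    return { event_ptid.pid, -1 };      /* Threads may survive the exit
                                           event; mark the whole process.  */
  return event_ptid;                    /* Just the event thread.  */
}

int
main ()
{
  fake_ptid scope = mark_scope (true, wait_kind::exited, { 42, 7 });
  printf ("pid=%d tid=%d\n", scope.pid, scope.tid);
  return 0;
}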
d758e62c PA |
5423 | /* Handle one event after stopping threads. If the eventing thread |
5424 | reports back any interesting event, we leave it pending. If the | |
5425 | eventing thread was in the middle of a displaced step, we | |
8ff53139 PA |
5426 | cancel/finish it, and unless the thread's inferior is being |
5427 | detached, put the thread back in the step-over chain. Returns true | |
5428 | if there are no resumed threads left in the target (thus there's no | |
5429 | point in waiting further), false otherwise. */ | |
d758e62c PA |
5430 | |
5431 | static bool | |
5432 | handle_one (const wait_one_event &event) | |
5433 | { | |
5434 | infrun_debug_printf | |
7dca2ea7 | 5435 | ("%s %s", event.ws.to_string ().c_str (), |
0fab7955 | 5436 | event.ptid.to_string ().c_str ()); |
d758e62c | 5437 | |
183be222 | 5438 | if (event.ws.kind () == TARGET_WAITKIND_NO_RESUMED) |
d758e62c PA |
5439 | { |
5440 | /* All resumed threads exited. */ | |
5441 | return true; | |
5442 | } | |
183be222 SM |
5443 | else if (event.ws.kind () == TARGET_WAITKIND_THREAD_EXITED |
5444 | || event.ws.kind () == TARGET_WAITKIND_EXITED | |
5445 | || event.ws.kind () == TARGET_WAITKIND_SIGNALLED) | |
d758e62c PA |
5446 | { |
5447 | /* One thread/process exited/signalled. */ | |
5448 | ||
5449 | thread_info *t = nullptr; | |
5450 | ||
5451 | /* The target may have reported just a pid. If so, try | |
5452 | the first non-exited thread. */ | |
5453 | if (event.ptid.is_pid ()) | |
5454 | { | |
5455 | int pid = event.ptid.pid (); | |
5456 | inferior *inf = find_inferior_pid (event.target, pid); | |
5457 | for (thread_info *tp : inf->non_exited_threads ()) | |
5458 | { | |
5459 | t = tp; | |
5460 | break; | |
5461 | } | |
5462 | ||
5463 | /* If there is no available thread, the event would | |
5464 | have to be appended to a per-inferior event list, | |
5465 | which does not exist (and if it did, we'd have | |
5466 | to adjust the run control commands to be able to | |
5467 | resume such an inferior). We assert here instead | |
5468 | of going into an infinite loop. */ | |
5469 | gdb_assert (t != nullptr); | |
5470 | ||
5471 | infrun_debug_printf | |
0fab7955 | 5472 | ("using %s", t->ptid.to_string ().c_str ()); |
d758e62c PA |
5473 | } |
5474 | else | |
5475 | { | |
9213a6d7 | 5476 | t = event.target->find_thread (event.ptid); |
d758e62c PA |
5477 | /* Check if this is the first time we see this thread. |
5478 | Don't bother adding if it individually exited. */ | |
5479 | if (t == nullptr | |
183be222 | 5480 | && event.ws.kind () != TARGET_WAITKIND_THREAD_EXITED) |
d758e62c PA |
5481 | t = add_thread (event.target, event.ptid); |
5482 | } | |
5483 | ||
5484 | if (t != nullptr) | |
5485 | { | |
5486 | /* Set the threads as non-executing to avoid | |
5487 | another stop attempt on them. */ | |
5488 | switch_to_thread_no_regs (t); | |
5489 | mark_non_executing_threads (event.target, event.ptid, | |
5490 | event.ws); | |
c272a98c | 5491 | save_waitstatus (t, event.ws); |
d758e62c | 5492 | t->stop_requested = false; |
21d48304 PA |
5493 | |
5494 | if (event.ws.kind () == TARGET_WAITKIND_THREAD_EXITED) | |
5495 | { | |
5496 | if (displaced_step_finish (t, event.ws) | |
5497 | != DISPLACED_STEP_FINISH_STATUS_OK) | |
5498 | { | |
5499 | gdb_assert_not_reached ("displaced_step_finish on " | |
5500 | "exited thread failed"); | |
5501 | } | |
5502 | } | |
d758e62c PA |
5503 | } |
5504 | } | |
5505 | else | |
5506 | { | |
9213a6d7 | 5507 | thread_info *t = event.target->find_thread (event.ptid); |
03acd4d8 | 5508 | if (t == nullptr) |
d758e62c PA |
5509 | t = add_thread (event.target, event.ptid); |
5510 | ||
956bbb55 | 5511 | t->stop_requested = false; |
611841bb | 5512 | t->set_executing (false); |
7846f3aa | 5513 | t->set_resumed (false); |
d758e62c PA |
5514 | t->control.may_range_step = 0; |
5515 | ||
5516 | /* This may be the first time we see the inferior report | |
5517 | a stop. */ | |
3db13541 | 5518 | if (t->inf->needs_setup) |
d758e62c PA |
5519 | { |
5520 | switch_to_thread_no_regs (t); | |
5521 | setup_inferior (0); | |
5522 | } | |
5523 | ||
183be222 SM |
5524 | if (event.ws.kind () == TARGET_WAITKIND_STOPPED |
5525 | && event.ws.sig () == GDB_SIGNAL_0) | |
d758e62c PA |
5526 | { |
5527 | /* We caught the event that we intended to catch, so | |
1edb66d8 | 5528 | there's no event to save as pending. */ |
d758e62c | 5529 | |
58c01087 | 5530 | if (displaced_step_finish (t, event.ws) |
d758e62c PA |
5531 | == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED) |
5532 | { | |
5533 | /* Add it back to the step-over queue. */ | |
5534 | infrun_debug_printf | |
5535 | ("displaced-step of %s canceled", | |
0fab7955 | 5536 | t->ptid.to_string ().c_str ()); |
d758e62c PA |
5537 | |
5538 | t->control.trap_expected = 0; | |
8ff53139 PA |
5539 | if (!t->inf->detaching) |
5540 | global_thread_step_over_chain_enqueue (t); | |
d758e62c PA |
5541 | } |
5542 | } | |
5543 | else | |
5544 | { | |
d758e62c PA |
5545 | struct regcache *regcache; |
5546 | ||
5547 | infrun_debug_printf | |
96bbe3ef | 5548 | ("target_wait %s, saving status for %s", |
7dca2ea7 | 5549 | event.ws.to_string ().c_str (), |
96bbe3ef | 5550 | t->ptid.to_string ().c_str ()); |
d758e62c PA |
5551 | |
5552 | /* Record for later. */ | |
c272a98c | 5553 | save_waitstatus (t, event.ws); |
d758e62c | 5554 | |
58c01087 | 5555 | if (displaced_step_finish (t, event.ws) |
d758e62c PA |
5556 | == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED) |
5557 | { | |
5558 | /* Add it back to the step-over queue. */ | |
5559 | t->control.trap_expected = 0; | |
8ff53139 PA |
5560 | if (!t->inf->detaching) |
5561 | global_thread_step_over_chain_enqueue (t); | |
d758e62c PA |
5562 | } |
5563 | ||
5564 | regcache = get_thread_regcache (t); | |
1edb66d8 | 5565 | t->set_stop_pc (regcache_read_pc (regcache)); |
d758e62c PA |
5566 | |
5567 | infrun_debug_printf ("saved stop_pc=%s for %s " | |
5568 | "(currently_stepping=%d)", | |
99d9c3b9 SM |
5569 | paddress (current_inferior ()->arch (), |
5570 | t->stop_pc ()), | |
0fab7955 | 5571 | t->ptid.to_string ().c_str (), |
d758e62c PA |
5572 | currently_stepping (t)); |
5573 | } | |
5574 | } | |
5575 | ||
5576 | return false; | |
5577 | } | |
5578 | ||
d828dbed PA |
5579 | /* Helper for stop_all_threads. wait_one waits for events until it |
5580 | sees a TARGET_WAITKIND_NO_RESUMED event. When it sees one, it | |
5581 | disables target_async for the target to stop waiting for events | |
5582 | from it. TARGET_WAITKIND_NO_RESUMED can be delayed, though; | |
5583 | consider debugging against gdbserver: | |
5584 | ||
5585 | #1 - Threads 1-5 are running, and thread 1 hits a breakpoint. | |
5586 | ||
5587 | #2 - gdb processes the breakpoint hit for thread 1, stops all | |
5588 | threads, and steps thread 1 over the breakpoint. While | |
5589 | stopping threads, some other threads reported interesting | |
5590 | events, which were left pending in the threads' objects | |
5591 | (infrun's queue). | |
5592 | ||
5593 | #3 - Thread 1 exits (it stepped an exit syscall), and gdbserver | |
5594 | reports the thread exit for thread 1. The event ends up in | |
5595 | remote's stop reply queue. | |
5596 | ||
5597 | #4 - That was the last resumed thread, so gdbserver reports | |
5598 | no-resumed, and that event also ends up in remote's stop | |
5599 | reply queue, queued after the thread exit from #3. | |
5600 | ||
5601 | #5 - gdb processes the thread exit event, which finishes the | |
5602 | step-over, and so gdb restarts all threads (threads with | |
5603 | pending events are left marked resumed, but aren't set | |
5604 | executing). The no-resumed event is still left pending in | |
5605 | the remote stop reply queue. | |
5606 | ||
5607 | #6 - Since there are now resumed threads with pending breakpoint | |
5608 | hits, gdb picks one at random to process next. | |
5609 | ||
5610 | #7 - gdb picks the breakpoint hit for thread 2 this time, and that | |
5611 | breakpoint also needs to be stepped over, so gdb stops all | |
5612 | threads again. | |
5613 | ||
5614 | #8 - stop_all_threads counts the number of expected stops and calls | |
5615 | wait_one once for each. | |
5616 | ||
5617 | #9 - The first wait_one call collects the no-resumed event from #4 | |
5618 | above. | |
5619 | ||
5620 | #10 - Seeing the no-resumed event, wait_one disables target async | |
5621 | for the remote target, to stop waiting for events from it. | |
5622 | wait_one from here on always returns no-resumed directly | |
5623 | without reaching the target. | |
5624 | ||
5625 | #11 - stop_all_threads still hasn't seen all the stops it expects, | |
5626 | so it does another pass. | |
5627 | ||
5628 | #12 - Since the remote target is not async (disabled in #10), | |
5629 | wait_one doesn't wait on it, so it won't see the expected | |
5630 | stops, and instead returns no-resumed directly. | |
5631 | ||
5632 | #13 - stop_all_threads still hasn't seen all the stops, so it | |
5633 | does another pass. Go to #12, looping forever. | |
5634 | ||
5635 | To handle this, we explicitly (re-)enable target async on all | |
5636 | targets that can async, every time stop_all_threads goes to wait | |
5637 | for the expected stops. */ | |
5638 | ||
5639 | static void | |
5640 | reenable_target_async () | |
5641 | { | |
5642 | for (inferior *inf : all_inferiors ()) | |
5643 | { | |
5644 | process_stratum_target *target = inf->process_target (); | |
5645 | if (target != nullptr | |
5646 | && target->threads_executing | |
5647 | && target->can_async_p () | |
5648 | && !target->is_async_p ()) | |
5649 | { | |
5650 | switch_to_inferior_no_thread (inf); | |
5651 | target_async (true); | |
5652 | } | |
5653 | } | |
5654 | } | |
5655 | ||
6efcd9a8 | 5656 | /* See infrun.h. */ |
372316f1 | 5657 | |
6efcd9a8 | 5658 | void |
148cf134 | 5659 | stop_all_threads (const char *reason, inferior *inf) |
372316f1 PA |
5660 | { |
5661 | /* We may need multiple passes to discover all threads. */ | |
5662 | int pass; | |
5663 | int iterations = 0; | |
372316f1 | 5664 | |
53cccef1 | 5665 | gdb_assert (exists_non_stop_target ()); |
372316f1 | 5666 | |
148cf134 SM |
5667 | INFRUN_SCOPED_DEBUG_START_END ("reason=%s, inf=%d", reason, |
5668 | inf != nullptr ? inf->num : -1); | |
372316f1 | 5669 | |
1f9d9e32 AB |
5670 | infrun_debug_show_threads ("non-exited threads", |
5671 | all_non_exited_threads ()); | |
5672 | ||
00431a78 | 5673 | scoped_restore_current_thread restore_thread; |
372316f1 | 5674 | |
148cf134 | 5675 | /* Enable thread events on relevant targets. */ |
6ad82919 TBA |
5676 | for (auto *target : all_non_exited_process_targets ()) |
5677 | { | |
148cf134 SM |
5678 | if (inf != nullptr && inf->process_target () != target) |
5679 | continue; | |
5680 | ||
6ad82919 TBA |
5681 | switch_to_target_no_thread (target); |
5682 | target_thread_events (true); | |
5683 | } | |
5684 | ||
5685 | SCOPE_EXIT | |
5686 | { | |
148cf134 | 5687 | /* Disable thread events on relevant targets. */ |
6ad82919 TBA |
5688 | for (auto *target : all_non_exited_process_targets ()) |
5689 | { | |
148cf134 SM |
5690 | if (inf != nullptr && inf->process_target () != target) |
5691 | continue; | |
5692 | ||
6ad82919 TBA |
5693 | switch_to_target_no_thread (target); |
5694 | target_thread_events (false); | |
5695 | } | |
5696 | ||
17417fb0 | 5697 | /* Use debug_prefixed_printf directly to get a meaningful function |
dda83cd7 | 5698 | name. */ |
6ad82919 | 5699 | if (debug_infrun) |
17417fb0 | 5700 | debug_prefixed_printf ("infrun", "stop_all_threads", "done"); |
6ad82919 | 5701 | }; |
65706a29 | 5702 | |
372316f1 PA |
5703 | /* Request threads to stop, and then wait for the stops. Because |
5704 | threads we already know about can spawn more threads while we're | |
5705 | trying to stop them, and we only learn about new threads when we | |
5706 | update the thread list, do this in a loop, and keep iterating | |
5707 | until two passes find no threads that need to be stopped. */ | |
5708 | for (pass = 0; pass < 2; pass++, iterations++) | |
5709 | { | |
1eb8556f | 5710 | infrun_debug_printf ("pass=%d, iterations=%d", pass, iterations); |
372316f1 PA |
5711 | while (1) |
5712 | { | |
29d6859f | 5713 | int waits_needed = 0; |
372316f1 | 5714 | |
a05575d3 TBA |
5715 | for (auto *target : all_non_exited_process_targets ()) |
5716 | { | |
148cf134 SM |
5717 | if (inf != nullptr && inf->process_target () != target) |
5718 | continue; | |
5719 | ||
a05575d3 TBA |
5720 | switch_to_target_no_thread (target); |
5721 | update_thread_list (); | |
5722 | } | |
372316f1 PA |
5723 | |
5724 | /* Go through all threads looking for threads that we need | |
5725 | to tell the target to stop. */ | |
08036331 | 5726 | for (thread_info *t : all_non_exited_threads ()) |
372316f1 | 5727 | { |
148cf134 SM |
5728 | if (inf != nullptr && t->inf != inf) |
5729 | continue; | |
5730 | ||
53cccef1 TBA |
5731 | /* For a single-target setting with an all-stop target, |
5732 | we would not even arrive here. For a multi-target | |
5733 | setting, until GDB is able to handle a mixture of | |
5734 | all-stop and non-stop targets, simply skip all-stop | |
5735 | targets' threads. This should be fine due to the | |
5736 | protection of 'check_multi_target_resumption'. */ | |
5737 | ||
5738 | switch_to_thread_no_regs (t); | |
5739 | if (!target_is_non_stop_p ()) | |
5740 | continue; | |
5741 | ||
611841bb | 5742 | if (t->executing ()) |
372316f1 PA |
5743 | { |
5744 | /* If already stopping, don't request a stop again. | |
5745 | We just haven't seen the notification yet. */ | |
5746 | if (!t->stop_requested) | |
5747 | { | |
1eb8556f | 5748 | infrun_debug_printf (" %s executing, need stop", |
0fab7955 | 5749 | t->ptid.to_string ().c_str ()); |
372316f1 | 5750 | target_stop (t->ptid); |
956bbb55 | 5751 | t->stop_requested = true; |
372316f1 PA |
5752 | } |
5753 | else | |
5754 | { | |
1eb8556f | 5755 | infrun_debug_printf (" %s executing, already stopping", |
0fab7955 | 5756 | t->ptid.to_string ().c_str ()); |
372316f1 PA |
5757 | } |
5758 | ||
5759 | if (t->stop_requested) | |
29d6859f | 5760 | waits_needed++; |
372316f1 PA |
5761 | } |
5762 | else | |
5763 | { | |
1eb8556f | 5764 | infrun_debug_printf (" %s not executing", |
0fab7955 | 5765 | t->ptid.to_string ().c_str ()); |
372316f1 PA |
5766 | |
5767 | /* The thread may be not executing, but still be | |
5768 | resumed with a pending status to process. */ | |
7846f3aa | 5769 | t->set_resumed (false); |
372316f1 PA |
5770 | } |
5771 | } | |
5772 | ||
29d6859f | 5773 | if (waits_needed == 0) |
372316f1 PA |
5774 | break; |
5775 | ||
5776 | /* If we find new threads on the second iteration, start | |
5777 | over. We want to see two iterations in a row with all | |
5778 | threads stopped. */ | |
5779 | if (pass > 0) | |
5780 | pass = -1; | |
5781 | ||
d828dbed PA |
5782 | reenable_target_async (); |
5783 | ||
29d6859f | 5784 | for (int i = 0; i < waits_needed; i++) |
c29705b7 | 5785 | { |
29d6859f | 5786 | wait_one_event event = wait_one (); |
d758e62c PA |
5787 | if (handle_one (event)) |
5788 | break; | |
372316f1 PA |
5789 | } |
5790 | } | |
5791 | } | |
372316f1 PA |
5792 | } |
5793 | ||
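/* Editor's illustration (not part of infrun.c; fake_thread and the
   stop_events queue are hypothetical stand-ins for thread_info and
   wait_one, which really blocks until an event arrives).  It shows
   the shape of the loop above: keep requesting stops and consuming
   stop events, and only finish after two consecutive passes find no
   thread that still needs stopping, since threads being stopped can
   spawn new threads in the meantime.  */

#include <cstdio>
#include <deque>
#include <vector>

struct fake_thread
{
  bool executing = true;
  bool stop_requested = false;
};

static void
stop_all_sketch (std::vector<fake_thread> &threads,
                 std::deque<fake_thread *> &stop_events)
{
  for (int pass = 0; pass < 2; pass++)
    {
      while (true)
        {
          int waits_needed = 0;
          for (fake_thread &t : threads)
            if (t.executing)
              {
                if (!t.stop_requested)
                  {
                    t.stop_requested = true;     /* target_stop.  */
                    stop_events.push_back (&t);  /* Stop arrives later.  */
                  }
                waits_needed++;
              }
          if (waits_needed == 0)
            break;
          if (pass > 0)
            pass = -1;          /* Found work on the second pass: redo.  */
          for (int i = 0; i < waits_needed && !stop_events.empty (); i++)
            {
              fake_thread *t = stop_events.front ();  /* ~ wait_one.  */
              stop_events.pop_front ();
              t->executing = false;
            }
        }
    }
}

int
main ()
{
  std::vector<fake_thread> threads (3);
  std::deque<fake_thread *> events;
  stop_all_sketch (threads, events);
  for (const fake_thread &t : threads)
    printf ("executing=%d\n", (int) t.executing);
  return 0;
}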
21d48304 PA |
5794 | /* Handle a TARGET_WAITKIND_NO_RESUMED event. Return true if we |
5795 | handled the event and should continue waiting. Return false if we | |
5796 | should stop and report the event to the user. */ | |
f4836ba9 | 5797 | |
c4464ade | 5798 | static bool |
f4836ba9 PA |
5799 | handle_no_resumed (struct execution_control_state *ecs) |
5800 | { | |
3b12939d | 5801 | if (target_can_async_p ()) |
f4836ba9 | 5802 | { |
c4464ade | 5803 | bool any_sync = false; |
f4836ba9 | 5804 | |
2dab0c7b | 5805 | for (ui *ui : all_uis ()) |
3b12939d PA |
5806 | { |
5807 | if (ui->prompt_state == PROMPT_BLOCKED) | |
5808 | { | |
c4464ade | 5809 | any_sync = true; |
3b12939d PA |
5810 | break; |
5811 | } | |
5812 | } | |
5813 | if (!any_sync) | |
5814 | { | |
5815 | /* There were no unwaited-for children left in the target, but, | |
5816 | we're not synchronously waiting for events either. Just | |
5817 | ignore. */ | |
5818 | ||
1eb8556f | 5819 | infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED (ignoring: bg)"); |
3b12939d | 5820 | prepare_to_wait (ecs); |
c4464ade | 5821 | return true; |
3b12939d | 5822 | } |
f4836ba9 PA |
5823 | } |
5824 | ||
5825 | /* Otherwise, if we were running a synchronous execution command, we | |
5826 | may need to cancel it and give the user back the terminal. | |
5827 | ||
5828 | In non-stop mode, the target can't tell whether we've already | |
5829 | consumed previous stop events, so it can end up sending us a | |
5830 | no-resumed event like so: | |
5831 | ||
5832 | #0 - thread 1 is left stopped | |
5833 | ||
5834 | #1 - thread 2 is resumed and hits breakpoint | |
dda83cd7 | 5835 | -> TARGET_WAITKIND_STOPPED |
f4836ba9 PA |
5836 | |
5837 | #2 - thread 3 is resumed and exits | |
dda83cd7 | 5838 | this is the last resumed thread, so |
f4836ba9 PA |
5839 | -> TARGET_WAITKIND_NO_RESUMED |
5840 | ||
5841 | #3 - gdb processes stop for thread 2 and decides to re-resume | |
dda83cd7 | 5842 | it. |
f4836ba9 PA |
5843 | |
5844 | #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event. | |
dda83cd7 | 5845 | thread 2 is now resumed, so the event should be ignored. |
f4836ba9 PA |
5846 | |
5847 | IOW, if the stop for thread 2 doesn't end a foreground command, | |
5848 | then we need to ignore the following TARGET_WAITKIND_NO_RESUMED | |
5849 | event. But it could be that the event meant that thread 2 itself | |
5850 | (or whatever other thread was the last resumed thread) exited. | |
5851 | ||
5852 | To address this we refresh the thread list and check whether we | |
5853 | have resumed threads _now_. In the example above, this removes | |
5854 | thread 3 from the thread list. If thread 2 was re-resumed, we | |
5855 | ignore this event. If we find no thread resumed, then we cancel | |
7d3badc6 PA |
5856 | the synchronous command and show "no unwaited-for children
5857 | left" to the user. */ | |
f4836ba9 | 5858 | |
d6cc5d98 | 5859 | inferior *curr_inf = current_inferior (); |
7d3badc6 | 5860 | |
d6cc5d98 | 5861 | scoped_restore_current_thread restore_thread; |
1e864019 | 5862 | update_thread_list (); |
d6cc5d98 PA |
5863 | |
5864 | /* If: | |
5865 | ||
5866 | - the current target has no thread executing, and | |
5867 | - the current inferior is native, and | |
5868 | - the current inferior is the one which has the terminal, and | |
5869 | - we did nothing, | |
5870 | ||
5871 | then a Ctrl-C from this point on would remain stuck in the | |
5872 | kernel, until a thread resumes and dequeues it. That would | |
5873 | result in the GDB CLI not reacting to Ctrl-C, not able to | |
5874 | interrupt the program. To address this, if the current inferior | |
5875 | no longer has any thread executing, we give the terminal to some | |
5876 | other inferior that has at least one thread executing. */ | |
5877 | bool swap_terminal = true; | |
5878 | ||
5879 | /* Whether to ignore this TARGET_WAITKIND_NO_RESUMED event, or | |
5880 | whether to report it to the user. */ | |
5881 | bool ignore_event = false; | |
7d3badc6 PA |
5882 | |
5883 | for (thread_info *thread : all_non_exited_threads ()) | |
f4836ba9 | 5884 | { |
611841bb | 5885 | if (swap_terminal && thread->executing ()) |
d6cc5d98 PA |
5886 | { |
5887 | if (thread->inf != curr_inf) | |
5888 | { | |
5889 | target_terminal::ours (); | |
5890 | ||
5891 | switch_to_thread (thread); | |
5892 | target_terminal::inferior (); | |
5893 | } | |
5894 | swap_terminal = false; | |
5895 | } | |
5896 | ||
4d772ea2 | 5897 | if (!ignore_event && thread->resumed ()) |
f4836ba9 | 5898 | { |
7d3badc6 PA |
5899 | /* Either there were no unwaited-for children left in the |
5900 | target at some point, but there are now, or some target | |
5901 | other than the eventing one has unwaited-for children | |
5902 | left. Just ignore. */ | |
1eb8556f SM |
5903 | infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED " |
5904 | "(ignoring: found resumed)"); | |
d6cc5d98 PA |
5905 | |
5906 | ignore_event = true; | |
f4836ba9 | 5907 | } |
d6cc5d98 PA |
5908 | |
5909 | if (ignore_event && !swap_terminal) | |
5910 | break; | |
5911 | } | |
5912 | ||
5913 | if (ignore_event) | |
5914 | { | |
5915 | switch_to_inferior_no_thread (curr_inf); | |
5916 | prepare_to_wait (ecs); | |
c4464ade | 5917 | return true; |
f4836ba9 PA |
5918 | } |
5919 | ||
5920 | /* Go ahead and report the event. */ | |
c4464ade | 5921 | return false; |
f4836ba9 PA |
5922 | } |
5923 | ||
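/* Editor's illustration (not part of infrun.c; thread_state is a
   hypothetical stand-in for thread_info).  One pass over the
   refreshed thread list answers the two questions above: which
   inferior should own the terminal (the first one still executing),
   and whether the no-resumed event should be ignored (some thread is
   resumed again).  */

#include <cstdio>
#include <vector>

struct thread_state
{
  int inferior;
  bool executing;
  bool resumed;
};

struct no_resumed_decision
{
  bool ignore_event = false;  /* Some thread is resumed: ignore.  */
  int terminal_owner;         /* Inferior that should own the terminal.  */
};

static no_resumed_decision
decide (const std::vector<thread_state> &threads, int current_inferior)
{
  no_resumed_decision d;
  d.terminal_owner = current_inferior;
  bool swap_terminal = true;
  for (const thread_state &t : threads)
    {
      if (swap_terminal && t.executing)
        {
          d.terminal_owner = t.inferior;
          swap_terminal = false;
        }
      if (!d.ignore_event && t.resumed)
        d.ignore_event = true;
      if (d.ignore_event && !swap_terminal)
        break;                  /* Both answers known: stop early.  */
    }
  return d;
}

int
main ()
{
  std::vector<thread_state> threads = { { 1, false, false },
                                        { 2, true, true } };
  no_resumed_decision d = decide (threads, 1);
  printf ("ignore=%d terminal=%d\n", (int) d.ignore_event,
          d.terminal_owner);
  return 0;
}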
21d48304 PA |
5924 | /* Handle a TARGET_WAITKIND_THREAD_EXITED event. Return true if we |
5925 | handled the event and should continue waiting. Return false if we | |
5926 | should stop and report the event to the user. */ | |
5927 | ||
5928 | static bool | |
5929 | handle_thread_exited (execution_control_state *ecs) | |
5930 | { | |
5931 | context_switch (ecs); | |
5932 | ||
5933 | /* Clear these so we don't re-start the thread stepping over a | |
5934 | breakpoint/watchpoint. */ | |
5935 | ecs->event_thread->stepping_over_breakpoint = 0; | |
5936 | ecs->event_thread->stepping_over_watchpoint = 0; | |
5937 | ||
9488c327 PA |
5938 | /* If the thread had an FSM, then abort the command. But only after |
5939 | finishing the step over, as in non-stop mode, aborting this | |
5940 | thread's command should not interfere with other threads. We | |
5941 | must check this before finish_step_over, however, which may | |
5942 | update the thread list and delete the event thread. */ | |
5943 | bool abort_cmd = (ecs->event_thread->thread_fsm () != nullptr); | |
5944 | ||
45fd40cf PA |
5945 | /* Mark the thread exited right now, because finish_step_over may |
5946 | update the thread list and that may delete the thread silently | |
5947 | (depending on target), while we always want to emit the "[Thread | |
5948 | ... exited]" notification. Don't actually delete the thread yet, | |
5949 | because we need to pass its pointer down to finish_step_over. */ | |
5950 | set_thread_exited (ecs->event_thread); | |
5951 | ||
21d48304 PA |
5952 | /* Maybe the thread was doing a step-over, if so release |
5953 | resources and start any further pending step-overs. | |
5954 | ||
5955 | If we are on a non-stop target and the thread was doing an | |
5956 | in-line step, this also restarts the other threads. */ | |
5957 | int ret = finish_step_over (ecs); | |
5958 | ||
5959 | /* finish_step_over returns true if it moves ecs' wait status | |
5960 | back into the thread, so that we go handle another pending | |
5961 | event before this one. But we know it never does that if | |
5962 | the event thread has exited. */ | |
5963 | gdb_assert (ret == 0); | |
5964 | ||
9488c327 PA |
5965 | if (abort_cmd) |
5966 | { | |
d0b59149 PA |
5967 | /* We're stopping for the thread exit event. Switch to the |
5968 | event thread again, as finish_step_over may have switched | |
5969 | threads. */ | |
5970 | switch_to_thread (ecs->event_thread); | |
9488c327 PA |
5971 | ecs->event_thread = nullptr; |
5972 | return false; | |
5973 | } | |
5974 | ||
21d48304 PA |
5975 | /* If finish_step_over started a new in-line step-over, don't |
5976 | try to restart anything else. */ | |
5977 | if (step_over_info_valid_p ()) | |
5978 | { | |
5979 | delete_thread (ecs->event_thread); | |
5980 | return true; | |
5981 | } | |
5982 | ||
5983 | /* Maybe we are on an all-stop target and we got this event | |
5984 | while doing a step-like command on another thread. If so, | |
5985 | go back to doing that. If this thread was stepping, | |
5986 | switch_back_to_stepped_thread will consider that the thread | |
5987 | was interrupted mid-step and will try to keep stepping it. We | |
5988 | don't want that; the thread is gone. So clear the proceed | |
5989 | status so it doesn't do that. */ | |
5990 | clear_proceed_status_thread (ecs->event_thread); | |
5991 | if (switch_back_to_stepped_thread (ecs)) | |
5992 | { | |
5993 | delete_thread (ecs->event_thread); | |
5994 | return true; | |
5995 | } | |
5996 | ||
5997 | inferior *inf = ecs->event_thread->inf; | |
5998 | bool slock_applies = schedlock_applies (ecs->event_thread); | |
5999 | ||
6000 | delete_thread (ecs->event_thread); | |
6001 | ecs->event_thread = nullptr; | |
6002 | ||
6003 | /* Continue handling the event as if we had gotten a | |
6004 | TARGET_WAITKIND_NO_RESUMED. */ | |
6005 | auto handle_as_no_resumed = [ecs] () | |
6006 | { | |
6007 | /* handle_no_resumed doesn't really look at the event kind, but | |
6008 | normal_stop does. */ | |
6009 | ecs->ws.set_no_resumed (); | |
6010 | ecs->event_thread = nullptr; | |
6011 | ecs->ptid = minus_one_ptid; | |
6012 | ||
6013 | /* Re-record the last target status. */ | |
6014 | set_last_target_status (ecs->target, ecs->ptid, ecs->ws); | |
6015 | ||
6016 | return handle_no_resumed (ecs); | |
6017 | }; | |
6018 | ||
6019 | /* If we are on an all-stop target, the target has stopped all | |
6020 | threads to report the event. We don't actually want to | |
6021 | stop, so restart the threads. */ | |
6022 | if (!target_is_non_stop_p ()) | |
6023 | { | |
6024 | if (slock_applies) | |
6025 | { | |
6026 | /* Since the target is !non-stop, then everything is stopped | |
6027 | at this point, and we can't assume we'll get further | |
6028 | events until we resume the target again. Handle this | |
6029 | event like if it were a TARGET_WAITKIND_NO_RESUMED. Note | |
6030 | this refreshes the thread list and checks whether there | |
6031 | are other resumed threads before deciding whether to | |
6032 | print "no-unwaited-for left". This is important because | |
6033 | the user could have done: | |
6034 | ||
6035 | (gdb) set scheduler-locking on | |
6036 | (gdb) thread 1 | |
6037 | (gdb) c& | |
6038 | (gdb) thread 2 | |
6039 | (gdb) c | |
6040 | ||
6041 | ... and only one of the threads exited. */ | |
6042 | return handle_as_no_resumed (); | |
6043 | } | |
6044 | else | |
6045 | { | |
6046 | /* Switch to the first non-exited thread we can find, and | |
6047 | resume. */ | |
6048 | auto range = inf->non_exited_threads (); | |
6049 | if (range.begin () == range.end ()) | |
6050 | { | |
6051 | /* Looks like the target reported a | |
6052 | TARGET_WAITKIND_THREAD_EXITED for its last known | |
6053 | thread. */ | |
6054 | return handle_as_no_resumed (); | |
6055 | } | |
6056 | thread_info *non_exited_thread = *range.begin (); | |
6057 | switch_to_thread (non_exited_thread); | |
6058 | insert_breakpoints (); | |
6059 | resume (GDB_SIGNAL_0); | |
6060 | } | |
6061 | } | |
6062 | ||
6063 | prepare_to_wait (ecs); | |
6064 | return true; | |
6065 | } | |
6066 | ||
05ba8510 PA |
6067 | /* Given an execution control state that has been freshly filled in by |
6068 | an event from the inferior, figure out what it means and take | |
6069 | appropriate action. | |
6070 | ||
6071 | The alternatives are: | |
6072 | ||
22bcd14b | 6073 | 1) stop_waiting and return; to really stop and return to the |
05ba8510 PA |
6074 | debugger. |
6075 | ||
6076 | 2) keep_going and return; to wait for the next event (set | |
6077 | ecs->event_thread->stepping_over_breakpoint to 1 to single step | |
6078 | once). */ | |
c906108c | 6079 | |
ec9499be | 6080 | static void |
595915c1 | 6081 | handle_inferior_event (struct execution_control_state *ecs) |
cd0fc7c3 | 6082 | { |
595915c1 TT |
6083 | /* Make sure that all temporary struct value objects that were |
6084 | created during the handling of the event get deleted at the | |
6085 | end. */ | |
6086 | scoped_value_mark free_values; | |
6087 | ||
7dca2ea7 | 6088 | infrun_debug_printf ("%s", ecs->ws.to_string ().c_str ()); |
c29705b7 | 6089 | |
183be222 | 6090 | if (ecs->ws.kind () == TARGET_WAITKIND_IGNORE) |
28736962 PA |
6091 | { |
6092 | /* We had an event in the inferior, but we are not interested in | |
6093 | handling it at this level. The lower layers have already | |
6094 | done what needs to be done, if anything. | |
6095 | ||
6096 | One of the possible circumstances for this is when the | |
6097 | inferior produces output for the console. The inferior has | |
6098 | not stopped, and we are ignoring the event. Another possible | |
6099 | circumstance is any event which the lower level knows will be | |
6100 | reported multiple times without an intervening resume. */ | |
28736962 PA |
6101 | prepare_to_wait (ecs); |
6102 | return; | |
6103 | } | |
6104 | ||
183be222 | 6105 | if (ecs->ws.kind () == TARGET_WAITKIND_NO_RESUMED |
f4836ba9 PA |
6106 | && handle_no_resumed (ecs)) |
6107 | return; | |
0e5bf2a8 | 6108 | |
5b6d1e4f PA |
6109 | /* Cache the last target/ptid/waitstatus. */ |
6110 | set_last_target_status (ecs->target, ecs->ptid, ecs->ws); | |
e02bc4cc | 6111 | |
ca005067 | 6112 | /* Always clear state belonging to the previous time we stopped. */ |
aa7d318d | 6113 | stop_stack_dummy = STOP_NONE; |
ca005067 | 6114 | |
183be222 | 6115 | if (ecs->ws.kind () == TARGET_WAITKIND_NO_RESUMED) |
0e5bf2a8 PA |
6116 | { |
6117 | /* No unwaited-for children left. IOW, all resumed children | |
6118 | have exited. */ | |
22bcd14b | 6119 | stop_waiting (ecs); |
0e5bf2a8 PA |
6120 | return; |
6121 | } | |
6122 | ||
183be222 SM |
6123 | if (ecs->ws.kind () != TARGET_WAITKIND_EXITED |
6124 | && ecs->ws.kind () != TARGET_WAITKIND_SIGNALLED) | |
359f5fe6 | 6125 | { |
9213a6d7 | 6126 | ecs->event_thread = ecs->target->find_thread (ecs->ptid); |
359f5fe6 | 6127 | /* If it's a new thread, add it to the thread database. */ |
03acd4d8 | 6128 | if (ecs->event_thread == nullptr) |
5b6d1e4f | 6129 | ecs->event_thread = add_thread (ecs->target, ecs->ptid); |
c1e36e3e PA |
6130 | |
6131 | /* Disable range stepping. If the next step request could use a | |
6132 | range, this will be end up re-enabled then. */ | |
6133 | ecs->event_thread->control.may_range_step = 0; | |
359f5fe6 | 6134 | } |
88ed393a JK |
6135 | |
6136 | /* Dependent on valid ECS->EVENT_THREAD. */ | |
c272a98c | 6137 | adjust_pc_after_break (ecs->event_thread, ecs->ws); |
88ed393a JK |
6138 | |
6139 | /* Dependent on the current PC value modified by adjust_pc_after_break. */ | |
6140 | reinit_frame_cache (); | |
6141 | ||
28736962 PA |
6142 | breakpoint_retire_moribund (); |
6143 | ||
2b009048 DJ |
6144 | /* First, distinguish signals caused by the debugger from signals |
6145 | that have to do with the program's own actions. Note that | |
6146 | breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending | |
6147 | on the operating system version. Here we detect when a SIGILL or | |
6148 | SIGEMT is really a breakpoint and change it to SIGTRAP. We do | |
6149 | something similar for SIGSEGV, since a SIGSEGV will be generated | |
6150 | when we're trying to execute a breakpoint instruction on a | |
6151 | non-executable stack. This happens for call dummy breakpoints | |
6152 | for architectures like SPARC that place call dummies on the | |
6153 | stack. */ | |
183be222 SM |
6154 | if (ecs->ws.kind () == TARGET_WAITKIND_STOPPED |
6155 | && (ecs->ws.sig () == GDB_SIGNAL_ILL | |
6156 | || ecs->ws.sig () == GDB_SIGNAL_SEGV | |
6157 | || ecs->ws.sig () == GDB_SIGNAL_EMT)) | |
2b009048 | 6158 | { |
00431a78 | 6159 | struct regcache *regcache = get_thread_regcache (ecs->event_thread); |
de0a0249 | 6160 | |
f9582a22 | 6161 | if (breakpoint_inserted_here_p (ecs->event_thread->inf->aspace.get (), |
de0a0249 UW |
6162 | regcache_read_pc (regcache))) |
6163 | { | |
1eb8556f | 6164 | infrun_debug_printf ("Treating signal as SIGTRAP"); |
183be222 | 6165 | ecs->ws.set_stopped (GDB_SIGNAL_TRAP); |
de0a0249 | 6166 | } |
2b009048 DJ |
6167 | } |
6168 | ||
293b3ebc | 6169 | mark_non_executing_threads (ecs->target, ecs->ptid, ecs->ws); |
8c90c137 | 6170 | |
183be222 | 6171 | switch (ecs->ws.kind ()) |
488f131b JB |
6172 | { |
6173 | case TARGET_WAITKIND_LOADED: | |
72d383bb SM |
6174 | { |
6175 | context_switch (ecs); | |
6176 | /* Ignore gracefully during startup of the inferior, as it might | |
6177 | be the shell which has just loaded some objects, otherwise | |
6178 | add the symbols for the newly loaded objects. Also ignore at | |
6179 | the beginning of an attach or remote session; we will query | |
6180 | the full list of libraries once the connection is | |
6181 | established. */ | |
6182 | ||
6183 | stop_kind stop_soon = get_inferior_stop_soon (ecs); | |
6184 | if (stop_soon == NO_STOP_QUIETLY) | |
6185 | { | |
6186 | struct regcache *regcache; | |
edcc5120 | 6187 | |
72d383bb | 6188 | regcache = get_thread_regcache (ecs->event_thread); |
edcc5120 | 6189 | |
72d383bb | 6190 | handle_solib_event (); |
ab04a2af | 6191 | |
9279eb5c | 6192 | ecs->event_thread->set_stop_pc (regcache_read_pc (regcache)); |
f9582a22 | 6193 | address_space *aspace = ecs->event_thread->inf->aspace.get (); |
72d383bb | 6194 | ecs->event_thread->control.stop_bpstat |
f9582a22 | 6195 | = bpstat_stop_status_nowatch (aspace, |
d37e0847 PA |
6196 | ecs->event_thread->stop_pc (), |
6197 | ecs->event_thread, ecs->ws); | |
c65d6b55 | 6198 | |
72d383bb | 6199 | if (handle_stop_requested (ecs)) |
94c57d6a | 6200 | return; |
488f131b | 6201 | |
72d383bb SM |
6202 | if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat)) |
6203 | { | |
6204 | /* A catchpoint triggered. */ | |
6205 | process_event_stop_test (ecs); | |
6206 | return; | |
6207 | } | |
55409f9d | 6208 | |
72d383bb SM |
6209 | /* If requested, stop when the dynamic linker notifies |
6210 | gdb of events. This allows the user to get control | |
6211 | and place breakpoints in initializer routines for | |
6212 | dynamically loaded objects (among other things). */ | |
1edb66d8 | 6213 | ecs->event_thread->set_stop_signal (GDB_SIGNAL_0); |
72d383bb SM |
6214 | if (stop_on_solib_events) |
6215 | { | |
6216 | /* Make sure we print "Stopped due to solib-event" in | |
6217 | normal_stop. */ | |
6218 | stop_print_frame = true; | |
b0f4b84b | 6219 | |
72d383bb SM |
6220 | stop_waiting (ecs); |
6221 | return; | |
6222 | } | |
6223 | } | |
b0f4b84b | 6224 | |
72d383bb SM |
6225 | /* If we are skipping through a shell, or through shared library |
6226 | loading that we aren't interested in, resume the program. If | |
6227 | we're running the program normally, also resume. */ | |
6228 | if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY) | |
6229 | { | |
6230 | /* Loading of shared libraries might have changed breakpoint | |
6231 | addresses. Make sure new breakpoints are inserted. */ | |
6232 | if (stop_soon == NO_STOP_QUIETLY) | |
6233 | insert_breakpoints (); | |
6234 | resume (GDB_SIGNAL_0); | |
6235 | prepare_to_wait (ecs); | |
6236 | return; | |
6237 | } | |
5c09a2c5 | 6238 | |
72d383bb SM |
6239 | /* But stop if we're attaching or setting up a remote |
6240 | connection. */ | |
6241 | if (stop_soon == STOP_QUIETLY_NO_SIGSTOP | |
6242 | || stop_soon == STOP_QUIETLY_REMOTE) | |
6243 | { | |
6244 | infrun_debug_printf ("quietly stopped"); | |
6245 | stop_waiting (ecs); | |
6246 | return; | |
6247 | } | |
6248 | ||
f34652de | 6249 | internal_error (_("unhandled stop_soon: %d"), (int) stop_soon); |
72d383bb | 6250 | } |
c5aa993b | 6251 | |
488f131b | 6252 | case TARGET_WAITKIND_SPURIOUS: |
c65d6b55 PA |
6253 | if (handle_stop_requested (ecs)) |
6254 | return; | |
00431a78 | 6255 | context_switch (ecs); |
64ce06e4 | 6256 | resume (GDB_SIGNAL_0); |
488f131b JB |
6257 | prepare_to_wait (ecs); |
6258 | return; | |
c5aa993b | 6259 | |
65706a29 | 6260 | case TARGET_WAITKIND_THREAD_CREATED: |
c65d6b55 PA |
6261 | if (handle_stop_requested (ecs)) |
6262 | return; | |
00431a78 | 6263 | context_switch (ecs); |
65706a29 PA |
6264 | if (!switch_back_to_stepped_thread (ecs)) |
6265 | keep_going (ecs); | |
6266 | return; | |
6267 | ||
21d48304 PA |
6268 | case TARGET_WAITKIND_THREAD_EXITED: |
6269 | if (handle_thread_exited (ecs)) | |
6270 | return; | |
6271 | stop_waiting (ecs); | |
6272 | break; | |
6273 | ||
488f131b | 6274 | case TARGET_WAITKIND_EXITED: |
940c3c06 | 6275 | case TARGET_WAITKIND_SIGNALLED: |
18493a00 PA |
6276 | { |
6277 | /* Depending on the system, ecs->ptid may point to a thread or | |
6278 | to a process. On some targets, target_mourn_inferior may | |
6279 | need to have access to the just-exited thread. That is the | |
6280 | case of GNU/Linux's "checkpoint" support, for example. | |
6281 | Call the switch_to_xxx routine as appropriate. */ | |
9213a6d7 | 6282 | thread_info *thr = ecs->target->find_thread (ecs->ptid); |
18493a00 PA |
6283 | if (thr != nullptr) |
6284 | switch_to_thread (thr); | |
6285 | else | |
6286 | { | |
6287 | inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid); | |
6288 | switch_to_inferior_no_thread (inf); | |
6289 | } | |
6290 | } | |
6c95b8df | 6291 | handle_vfork_child_exec_or_exit (0); |
223ffa71 | 6292 | target_terminal::ours (); /* Must do this before mourn anyway. */ |
488f131b | 6293 | |
0c557179 SDJ |
6294 | /* Clear any previous state of convenience variables. */ | |
6295 | clear_exit_convenience_vars (); | |
6296 | ||
183be222 | 6297 | if (ecs->ws.kind () == TARGET_WAITKIND_EXITED) |
940c3c06 PA |
6298 | { |
6299 | /* Record the exit code in the convenience variable $_exitcode, so | |
6300 | that the user can inspect this again later. */ | |
6301 | set_internalvar_integer (lookup_internalvar ("_exitcode"), | |
183be222 | 6302 | (LONGEST) ecs->ws.exit_status ()); |
940c3c06 PA |
6303 | |
6304 | /* Also record this in the inferior itself. */ | |
30220b46 | 6305 | current_inferior ()->has_exit_code = true; |
183be222 | 6306 | current_inferior ()->exit_code = (LONGEST) ecs->ws.exit_status (); |
8cf64490 | 6307 | |
98eb56a4 | 6308 | /* Support the --return-child-result option. */ |
183be222 | 6309 | return_child_result_value = ecs->ws.exit_status (); |
98eb56a4 | 6310 | |
bf64d1d5 | 6311 | interps_notify_exited (ecs->ws.exit_status ()); |
940c3c06 PA |
6312 | } |
6313 | else | |
0c557179 | 6314 | { |
27b1f19f | 6315 | struct gdbarch *gdbarch = current_inferior ()->arch (); |
0c557179 SDJ |
6316 | |
6317 | if (gdbarch_gdb_signal_to_target_p (gdbarch)) | |
6318 | { | |
6319 | /* Set the value of the internal variable $_exitsignal, | |
6320 | which holds the signal uncaught by the inferior. */ | |
6321 | set_internalvar_integer (lookup_internalvar ("_exitsignal"), | |
6322 | gdbarch_gdb_signal_to_target (gdbarch, | |
183be222 | 6323 | ecs->ws.sig ())); |
0c557179 SDJ |
6324 | } |
6325 | else | |
6326 | { | |
6327 | /* We don't have access to the target's method used for | |
6328 | converting between signal numbers (GDB's internal | |
6329 | representation <-> target's representation). | |
6330 | Therefore, we cannot do a good job at displaying this | |
6331 | information to the user. It's better to just warn | |
6332 | her about it (if infrun debugging is enabled), and | |
6333 | give up. */ | |
1eb8556f SM |
6334 | infrun_debug_printf ("Cannot fill $_exitsignal with the correct " |
6335 | "signal number."); | |
0c557179 SDJ |
6336 | } |
6337 | ||
d6bd2ef5 | 6338 | interps_notify_signal_exited (ecs->ws.sig ()); |
0c557179 | 6339 | } |
8cf64490 | 6340 | |
488f131b | 6341 | gdb_flush (gdb_stdout); |
bc1e6c81 | 6342 | target_mourn_inferior (inferior_ptid); |
c4464ade | 6343 | stop_print_frame = false; |
22bcd14b | 6344 | stop_waiting (ecs); |
488f131b | 6345 | return; |
c5aa993b | 6346 | |
488f131b | 6347 | case TARGET_WAITKIND_FORKED: |
deb3b17b | 6348 | case TARGET_WAITKIND_VFORKED: |
0d36baa9 PA |
6349 | case TARGET_WAITKIND_THREAD_CLONED: |
6350 | ||
6351 | displaced_step_finish (ecs->event_thread, ecs->ws); | |
6352 | ||
6353 | /* Start a new step-over in another thread if there's one that | |
6354 | needs it. */ | |
6355 | start_step_over (); | |
e2d96639 | 6356 | |
00431a78 | 6357 | context_switch (ecs); |
5a2901d9 | 6358 | |
b242c3c2 PA |
6359 | /* Immediately detach breakpoints from the child before there's |
6360 | any chance of letting the user delete breakpoints from the | |
6361 | breakpoint lists. If we don't do this early, it's easy to | |
6362 | leave leftover traps in the child, viz: "break foo; catch | |
6363 | fork; c; <fork>; del; c; <child calls foo>". We only follow | |
6364 | the fork on the last `continue', and by that time the | |
6365 | breakpoint at "foo" is long gone from the breakpoint table. | |
6366 | If we vforked, then we don't need to unpatch here, since both | |
6367 | parent and child are sharing the same memory pages; we'll | |
6368 | need to unpatch at follow/detach time instead to be certain | |
6369 | that new breakpoints added between catchpoint hit time and | |
6370 | vfork follow are detached. */ | |
0d36baa9 | 6371 | if (ecs->ws.kind () == TARGET_WAITKIND_FORKED) |
b242c3c2 | 6372 | { |
b242c3c2 PA |
6373 | /* This won't actually modify the breakpoint list, but will |
6374 | physically remove the breakpoints from the child. */ | |
183be222 | 6375 | detach_breakpoints (ecs->ws.child_ptid ()); |
b242c3c2 PA |
6376 | } |
6377 | ||
34b7e8a6 | 6378 | delete_just_stopped_threads_single_step_breakpoints (); |
d03285ec | 6379 | |
e58b0e63 PA |
6380 | /* In case the event is caught by a catchpoint, remember that |
6381 | the event is to be followed at the next resume of the thread, | |
6382 | and not immediately. */ | |
6383 | ecs->event_thread->pending_follow = ecs->ws; | |
6384 | ||
1edb66d8 SM |
6385 | ecs->event_thread->set_stop_pc |
6386 | (regcache_read_pc (get_thread_regcache (ecs->event_thread))); | |
675bf4cb | 6387 | |
16c381f0 | 6388 | ecs->event_thread->control.stop_bpstat |
f9582a22 | 6389 | = bpstat_stop_status_nowatch (ecs->event_thread->inf->aspace.get (), |
d37e0847 PA |
6390 | ecs->event_thread->stop_pc (), |
6391 | ecs->event_thread, ecs->ws); | |
675bf4cb | 6392 | |
c65d6b55 PA |
6393 | if (handle_stop_requested (ecs)) |
6394 | return; | |
6395 | ||
ce12b012 PA |
6396 | /* If no catchpoint triggered for this, then keep going. Note |
6397 | that we're interested in knowing whether the bpstat actually | 
6398 | causes a stop, not just if it may explain the signal. Software | 
6399 | watchpoints, for example, always appear in the bpstat. */ | |
6400 | if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat)) | |
04e68871 | 6401 | { |
5ab2fbf1 | 6402 | bool follow_child |
0d36baa9 PA |
6403 | = (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED |
6404 | && follow_fork_mode_string == follow_fork_mode_child); | |
e58b0e63 | 6405 | |
1edb66d8 | 6406 | ecs->event_thread->set_stop_signal (GDB_SIGNAL_0); |
e58b0e63 | 6407 | |
5b6d1e4f PA |
6408 | process_stratum_target *targ |
6409 | = ecs->event_thread->inf->process_target (); | |
6410 | ||
0d36baa9 PA |
6411 | bool should_resume; |
6412 | if (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED) | |
6413 | should_resume = follow_fork (); | |
6414 | else | |
6415 | { | |
6416 | should_resume = true; | |
6417 | inferior *inf = ecs->event_thread->inf; | |
6418 | inf->top_target ()->follow_clone (ecs->ws.child_ptid ()); | |
6419 | ecs->event_thread->pending_follow.set_spurious (); | |
6420 | } | |
e58b0e63 | 6421 | |
5b6d1e4f PA |
6422 | /* Note that one of these may be an invalid pointer, |
6423 | depending on detach_fork. */ | |
00431a78 | 6424 | thread_info *parent = ecs->event_thread; |
9213a6d7 | 6425 | thread_info *child = targ->find_thread (ecs->ws.child_ptid ()); |
6c95b8df | 6426 | |
a2077e25 PA |
6427 | /* At this point, the parent is marked running, and the |
6428 | child is marked stopped. */ | |
6429 | ||
6430 | /* If not resuming the parent, mark it stopped. */ | |
0d36baa9 PA |
6431 | if (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED |
6432 | && follow_child && !detach_fork && !non_stop && !sched_multi) | |
00431a78 | 6433 | parent->set_running (false); |
a2077e25 PA |
6434 | |
6435 | /* If resuming the child, mark it running. */ | |
7ac958f2 PA |
6436 | if ((ecs->ws.kind () == TARGET_WAITKIND_THREAD_CLONED |
6437 | && !schedlock_applies (ecs->event_thread)) | |
6438 | || (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED | |
6439 | && (follow_child | |
6440 | || (!detach_fork && (non_stop || sched_multi))))) | |
00431a78 | 6441 | child->set_running (true); |
a2077e25 | 6442 | |
6c95b8df | 6443 | /* In non-stop mode, also resume the other branch. */ |
0d36baa9 | 6444 | if ((ecs->ws.kind () == TARGET_WAITKIND_THREAD_CLONED |
7ac958f2 PA |
6445 | && target_is_non_stop_p () |
6446 | && !schedlock_applies (ecs->event_thread)) | |
6447 | || (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED | |
6448 | && (!detach_fork && (non_stop | |
6449 | || (sched_multi | |
6450 | && target_is_non_stop_p ()))))) | |
6c95b8df PA |
6451 | { |
6452 | if (follow_child) | |
6453 | switch_to_thread (parent); | |
6454 | else | |
6455 | switch_to_thread (child); | |
6456 | ||
6457 | ecs->event_thread = inferior_thread (); | |
6458 | ecs->ptid = inferior_ptid; | |
6459 | keep_going (ecs); | |
6460 | } | |
6461 | ||
6462 | if (follow_child) | |
6463 | switch_to_thread (child); | |
6464 | else | |
6465 | switch_to_thread (parent); | |
6466 | ||
e58b0e63 PA |
6467 | ecs->event_thread = inferior_thread (); |
6468 | ecs->ptid = inferior_ptid; | |
6469 | ||
6470 | if (should_resume) | |
27f9f649 SM |
6471 | { |
6472 | /* Never call switch_back_to_stepped_thread if we are waiting for | |
287de656 | 6473 | vfork-done (waiting for an external vfork child to exec or |
27f9f649 SM |
6474 | exit). We will resume only the vforking thread for the purpose |
6475 | of collecting the vfork-done event, and we will restart any | |
6476 | step once the critical shared address space window is done. */ | |
6477 | if ((!follow_child | |
6478 | && detach_fork | |
6479 | && parent->inf->thread_waiting_for_vfork_done != nullptr) | |
6480 | || !switch_back_to_stepped_thread (ecs)) | |
6481 | keep_going (ecs); | |
6482 | } | |
e58b0e63 | 6483 | else |
22bcd14b | 6484 | stop_waiting (ecs); |
04e68871 DJ |
6485 | return; |
6486 | } | |
94c57d6a PA |
6487 | process_event_stop_test (ecs); |
6488 | return; | |
488f131b | 6489 | |
6c95b8df PA |
6490 | case TARGET_WAITKIND_VFORK_DONE: |
6491 | /* Done with the shared memory region. Re-insert breakpoints in | |
6492 | the parent, and keep going. */ | |
6493 | ||
00431a78 | 6494 | context_switch (ecs); |
6c95b8df | 6495 | |
d8bbae6e SM |
6496 | handle_vfork_done (ecs->event_thread); |
6497 | gdb_assert (inferior_thread () == ecs->event_thread); | |
c65d6b55 PA |
6498 | |
6499 | if (handle_stop_requested (ecs)) | |
6500 | return; | |
6501 | ||
27f9f649 SM |
6502 | if (!switch_back_to_stepped_thread (ecs)) |
6503 | { | |
6504 | gdb_assert (inferior_thread () == ecs->event_thread); | |
6505 | /* This also takes care of reinserting breakpoints in the | |
6506 | previously locked inferior. */ | |
6507 | keep_going (ecs); | |
6508 | } | |
6c95b8df PA |
6509 | return; |
6510 | ||
488f131b | 6511 | case TARGET_WAITKIND_EXECD: |
488f131b | 6512 | |
cbd2b4e3 PA |
6513 | /* Note we can't read registers yet (the stop_pc), because we |
6514 | don't yet know the inferior's post-exec architecture. | |
6515 | 'stop_pc' is explicitly read below instead. */ | |
00431a78 | 6516 | switch_to_thread_no_regs (ecs->event_thread); |
5a2901d9 | 6517 | |
6c95b8df PA |
6518 | /* Do whatever is necessary to the parent branch of the vfork. */ |
6519 | handle_vfork_child_exec_or_exit (1); | |
6520 | ||
795e548f | 6521 | /* This causes the eventpoints and symbol table to be reset. |
dda83cd7 SM |
6522 | Must do this now, before trying to determine whether to |
6523 | stop. */ | |
183be222 | 6524 | follow_exec (inferior_ptid, ecs->ws.execd_pathname ()); |
795e548f | 6525 | |
17d8546e DB |
6526 | /* In follow_exec we may have deleted the original thread and |
6527 | created a new one. Make sure that the event thread is the | |
6528 | execd thread for that case (this is a nop otherwise). */ | |
6529 | ecs->event_thread = inferior_thread (); | |
6530 | ||
1edb66d8 SM |
6531 | ecs->event_thread->set_stop_pc |
6532 | (regcache_read_pc (get_thread_regcache (ecs->event_thread))); | |
ecdc3a72 | 6533 | |
16c381f0 | 6534 | ecs->event_thread->control.stop_bpstat |
f9582a22 | 6535 | = bpstat_stop_status_nowatch (ecs->event_thread->inf->aspace.get (), |
d37e0847 PA |
6536 | ecs->event_thread->stop_pc (), |
6537 | ecs->event_thread, ecs->ws); | |
795e548f | 6538 | |
c65d6b55 PA |
6539 | if (handle_stop_requested (ecs)) |
6540 | return; | |
6541 | ||
04e68871 | 6542 | /* If no catchpoint triggered for this, then keep going. */ |
ce12b012 | 6543 | if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat)) |
04e68871 | 6544 | { |
1edb66d8 | 6545 | ecs->event_thread->set_stop_signal (GDB_SIGNAL_0); |
04e68871 DJ |
6546 | keep_going (ecs); |
6547 | return; | |
6548 | } | |
94c57d6a PA |
6549 | process_event_stop_test (ecs); |
6550 | return; | |
488f131b | 6551 | |
b4dc5ffa | 6552 | /* Be careful not to try to gather much state about a thread |
dda83cd7 | 6553 | that's in a syscall. It's frequently a losing proposition. */ |
488f131b | 6554 | case TARGET_WAITKIND_SYSCALL_ENTRY: |
1777feb0 | 6555 | /* Getting the current syscall number. */ |
94c57d6a PA |
6556 | if (handle_syscall_event (ecs) == 0) |
6557 | process_event_stop_test (ecs); | |
6558 | return; | |
c906108c | 6559 | |
488f131b | 6560 | /* Before examining the threads further, step this thread to |
dda83cd7 SM |
6561 | get it entirely out of the syscall. (We get notice of the |
6562 | event when the thread is just on the verge of exiting a | |
6563 | syscall. Stepping one instruction seems to get it back | |
6564 | into user code.) */ | |
488f131b | 6565 | case TARGET_WAITKIND_SYSCALL_RETURN: |
94c57d6a PA |
6566 | if (handle_syscall_event (ecs) == 0) |
6567 | process_event_stop_test (ecs); | |
6568 | return; | |
c906108c | 6569 | |
488f131b | 6570 | case TARGET_WAITKIND_STOPPED: |
4f5d7f63 PA |
6571 | handle_signal_stop (ecs); |
6572 | return; | |
c906108c | 6573 | |
b2175913 MS |
6574 | case TARGET_WAITKIND_NO_HISTORY: |
6575 | /* Reverse execution: target ran out of history info. */ | |
eab402df | 6576 | |
d1988021 | 6577 | /* Switch to the stopped thread. */ |
00431a78 | 6578 | context_switch (ecs); |
1eb8556f | 6579 | infrun_debug_printf ("stopped"); |
d1988021 | 6580 | |
34b7e8a6 | 6581 | delete_just_stopped_threads_single_step_breakpoints (); |
1edb66d8 SM |
6582 | ecs->event_thread->set_stop_pc |
6583 | (regcache_read_pc (get_thread_regcache (inferior_thread ()))); | |
c65d6b55 PA |
6584 | |
6585 | if (handle_stop_requested (ecs)) | |
6586 | return; | |
6587 | ||
2e5dbfab | 6588 | interps_notify_no_history (); |
22bcd14b | 6589 | stop_waiting (ecs); |
b2175913 | 6590 | return; |
488f131b | 6591 | } |
4f5d7f63 PA |
6592 | } |
6593 | ||
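
   The switch just closed is the dispatch core of handle_inferior_event:
   each TARGET_WAITKIND_* value is mapped to its follow-up action, and the
   fork/clone arm above also decides which side of a fork is left running.
   Below is a standalone sketch of that run/stop bookkeeping, with toy flag
   names mirroring the GDB settings involved; it is an illustration, not
   GDB code.

   #include <cstdio>

   /* Toy stand-ins for the settings consulted by the FORKED path.  */
   struct fork_policy
   {
     bool follow_child;   /* "set follow-fork-mode child" */
     bool detach_fork;    /* "set detach-on-fork on" */
     bool non_stop;       /* "set non-stop on" */
     bool sched_multi;    /* "set schedule-multiple on" */
   };

   /* Mirrors the set_running calls in the TARGET_WAITKIND_FORKED arm.  */
   static void
   fork_marks (const fork_policy &p, bool &parent_running,
	       bool &child_running)
   {
     /* At the catchpoint, the parent is marked running and the child
	is marked stopped.  */
     parent_running = true;
     child_running = false;

     /* If not resuming the parent, mark it stopped.  */
     if (p.follow_child && !p.detach_fork && !p.non_stop && !p.sched_multi)
       parent_running = false;

     /* If resuming the child, mark it running.  */
     if (p.follow_child || (!p.detach_fork && (p.non_stop || p.sched_multi)))
       child_running = true;
   }

   int
   main ()
   {
     bool parent, child;
     fork_marks ({true, false, false, false}, parent, child);
     std::printf ("parent=%d child=%d\n", parent, child);
     return 0;
   }
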
372316f1 | 6594 | /* Restart threads back to what they were trying to do back when we |
148cf134 SM |
6595 | paused them (because of an in-line step-over or vfork, for example). |
6596 | The EVENT_THREAD thread is ignored (not restarted). | |
6597 | ||
6598 | If INF is non-nullptr, only resume threads from INF. */ | |
4d9d9d04 PA |
6599 | |
6600 | static void | |
148cf134 | 6601 | restart_threads (struct thread_info *event_thread, inferior *inf) |
372316f1 | 6602 | { |
148cf134 SM |
6603 | INFRUN_SCOPED_DEBUG_START_END ("event_thread=%s, inf=%d", |
6604 | event_thread->ptid.to_string ().c_str (), | |
6605 | inf != nullptr ? inf->num : -1); | |
6606 | ||
2b718529 LS |
6607 | gdb_assert (!step_over_info_valid_p ()); |
6608 | ||
372316f1 PA |
6609 | /* In case the instruction just stepped spawned a new thread. */ |
6610 | update_thread_list (); | |
6611 | ||
08036331 | 6612 | for (thread_info *tp : all_non_exited_threads ()) |
372316f1 | 6613 | { |
148cf134 SM |
6614 | if (inf != nullptr && tp->inf != inf) |
6615 | continue; | |
6616 | ||
ac7d717c PA |
6617 | if (tp->inf->detaching) |
6618 | { | |
6619 | infrun_debug_printf ("restart threads: [%s] inferior detaching", | |
0fab7955 | 6620 | tp->ptid.to_string ().c_str ()); |
ac7d717c PA |
6621 | continue; |
6622 | } | |
6623 | ||
f3f8ece4 PA |
6624 | switch_to_thread_no_regs (tp); |
6625 | ||
372316f1 PA |
6626 | if (tp == event_thread) |
6627 | { | |
1eb8556f | 6628 | infrun_debug_printf ("restart threads: [%s] is event thread", |
0fab7955 | 6629 | tp->ptid.to_string ().c_str ()); |
372316f1 PA |
6630 | continue; |
6631 | } | |
6632 | ||
6633 | if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall)) | |
6634 | { | |
1eb8556f | 6635 | infrun_debug_printf ("restart threads: [%s] not meant to be running", |
0fab7955 | 6636 | tp->ptid.to_string ().c_str ()); |
372316f1 PA |
6637 | continue; |
6638 | } | |
6639 | ||
7846f3aa | 6640 | if (tp->resumed ()) |
372316f1 | 6641 | { |
1eb8556f | 6642 | infrun_debug_printf ("restart threads: [%s] resumed", |
0fab7955 | 6643 | tp->ptid.to_string ().c_str ()); |
611841bb | 6644 | gdb_assert (tp->executing () || tp->has_pending_waitstatus ()); |
372316f1 PA |
6645 | continue; |
6646 | } | |
6647 | ||
6648 | if (thread_is_in_step_over_chain (tp)) | |
6649 | { | |
1eb8556f | 6650 | infrun_debug_printf ("restart threads: [%s] needs step-over", |
0fab7955 | 6651 | tp->ptid.to_string ().c_str ()); |
7846f3aa | 6652 | gdb_assert (!tp->resumed ()); |
372316f1 PA |
6653 | continue; |
6654 | } | |
6655 | ||
6656 | ||
1edb66d8 | 6657 | if (tp->has_pending_waitstatus ()) |
372316f1 | 6658 | { |
1eb8556f | 6659 | infrun_debug_printf ("restart threads: [%s] has pending status", |
0fab7955 | 6660 | tp->ptid.to_string ().c_str ()); |
7846f3aa | 6661 | tp->set_resumed (true); |
372316f1 PA |
6662 | continue; |
6663 | } | |
6664 | ||
c65d6b55 PA |
6665 | gdb_assert (!tp->stop_requested); |
6666 | ||
372316f1 PA |
6667 | /* If some thread needs to start a step-over at this point, it |
6668 | should still be in the step-over queue, and thus skipped | |
6669 | above. */ | |
6670 | if (thread_still_needs_step_over (tp)) | |
6671 | { | |
f34652de | 6672 | internal_error ("thread [%s] needs a step-over, but not in " |
372316f1 | 6673 | "step-over queue\n", |
0fab7955 | 6674 | tp->ptid.to_string ().c_str ()); |
372316f1 PA |
6675 | } |
6676 | ||
6677 | if (currently_stepping (tp)) | |
6678 | { | |
1eb8556f | 6679 | infrun_debug_printf ("restart threads: [%s] was stepping", |
0fab7955 | 6680 | tp->ptid.to_string ().c_str ()); |
372316f1 PA |
6681 | keep_going_stepped_thread (tp); |
6682 | } | |
6683 | else | |
6684 | { | |
1eb8556f | 6685 | infrun_debug_printf ("restart threads: [%s] continuing", |
0fab7955 | 6686 | tp->ptid.to_string ().c_str ()); |
aa563d16 | 6687 | execution_control_state ecs (tp); |
00431a78 | 6688 | switch_to_thread (tp); |
aa563d16 | 6689 | keep_going_pass_signal (&ecs); |
372316f1 PA |
6690 | } |
6691 | } | |
6692 | } | |
6693 | ||
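
   restart_threads above is essentially a filter chain: every non-exited
   thread is classified in a fixed order, and only the threads that pass
   every test are resumed.  A toy model of the same classification order
   follows; the types and field names are hypothetical, not GDB's.

   #include <cstdio>
   #include <vector>

   struct toy_thread
   {
     int num;
     bool detaching, meant_to_run, resumed, needs_step_over,
	  has_pending_status, was_stepping;
   };

   /* Same skip order as the loop in restart_threads.  */
   static const char *
   restart_action (const toy_thread &t, int event_thread)
   {
     if (t.detaching) return "skip: inferior detaching";
     if (t.num == event_thread) return "skip: event thread";
     if (!t.meant_to_run) return "skip: not meant to be running";
     if (t.resumed) return "skip: already resumed";
     if (t.needs_step_over) return "skip: queued for step-over";
     if (t.has_pending_status) return "mark resumed; event consumed later";
     return t.was_stepping ? "keep stepping" : "continue";
   }

   int
   main ()
   {
     std::vector<toy_thread> threads
       = { {1, false, true, false, false, false, true},
	   {2, false, true, false, false, false, false} };
     for (const auto &t : threads)
       std::printf ("thread %d: %s\n", t.num, restart_action (t, 1));
     return 0;
   }
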
6694 | /* Callback for iterate_over_threads. Find a resumed thread that has | |
6695 | a pending waitstatus. */ | |
6696 | ||
58984e4a TT |
6697 | static bool |
6698 | resumed_thread_with_pending_status (struct thread_info *tp) | |
372316f1 | 6699 | { |
1edb66d8 | 6700 | return tp->resumed () && tp->has_pending_waitstatus (); |
372316f1 PA |
6701 | } |
6702 | ||
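
   The predicate above is handed to iterate_over_threads to find the first
   resumed thread with an unconsumed event.  For readers more used to the
   standard library, the equivalent scan looks like this (toy thread type,
   not GDB's thread_info):

   #include <algorithm>
   #include <vector>

   struct toy_thread { bool resumed, pending; };

   static const toy_thread *
   first_resumed_with_pending (const std::vector<toy_thread> &threads)
   {
     auto it = std::find_if (threads.begin (), threads.end (),
			     [] (const toy_thread &t)
			     { return t.resumed && t.pending; });
     return it == threads.end () ? nullptr : &*it;
   }

   int
   main ()
   {
     std::vector<toy_thread> ts = { {true, false}, {true, true} };
     return first_resumed_with_pending (ts) != nullptr ? 0 : 1;
   }
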
6703 | /* Called when we get an event that may finish an in-line or | |
6704 | out-of-line (displaced stepping) step-over started previously. | |
6705 | Return true if the event is processed and we should go back to the | |
6706 | event loop; false if the caller should continue processing the | |
6707 | event. */ | |
6708 | ||
6709 | static int | |
4d9d9d04 PA |
6710 | finish_step_over (struct execution_control_state *ecs) |
6711 | { | |
58c01087 | 6712 | displaced_step_finish (ecs->event_thread, ecs->ws); |
4d9d9d04 | 6713 | |
c4464ade | 6714 | bool had_step_over_info = step_over_info_valid_p (); |
372316f1 PA |
6715 | |
6716 | if (had_step_over_info) | |
4d9d9d04 PA |
6717 | { |
6718 | /* If we're stepping over a breakpoint with all threads locked, | |
6719 | then only the thread that was stepped should be reporting | |
6720 | back an event. */ | |
6721 | gdb_assert (ecs->event_thread->control.trap_expected); | |
6722 | ||
21d48304 | 6723 | update_thread_events_after_step_over (ecs->event_thread, ecs->ws); |
65c459ab | 6724 | |
c65d6b55 | 6725 | clear_step_over_info (); |
4d9d9d04 PA |
6726 | } |
6727 | ||
fbea99ea | 6728 | if (!target_is_non_stop_p ()) |
372316f1 | 6729 | return 0; |
4d9d9d04 PA |
6730 | |
6731 | /* Start a new step-over in another thread if there's one that | |
6732 | needs it. */ | |
6733 | start_step_over (); | |
372316f1 PA |
6734 | |
6735 | /* If we were stepping over a breakpoint before, and haven't started | |
6736 | a new in-line step-over sequence, then restart all other threads | |
6737 | (except the event thread). We can't do this in all-stop, as then | |
6738 | e.g., we wouldn't be able to issue any other remote packet until | |
6739 | these other threads stop. */ | |
6740 | if (had_step_over_info && !step_over_info_valid_p ()) | |
6741 | { | |
6742 | struct thread_info *pending; | |
6743 | ||
6744 | /* If we only have threads with pending statuses, the restart | |
6745 | below won't restart any thread and so nothing re-inserts the | |
6746 | breakpoint we just stepped over. But we need it inserted | |
6747 | when we later process the pending events, otherwise if | |
6748 | another thread has a pending event for this breakpoint too, | |
6749 | we'd discard its event (because the breakpoint that | |
6750 | originally caused the event was no longer inserted). */ | |
00431a78 | 6751 | context_switch (ecs); |
372316f1 PA |
6752 | insert_breakpoints (); |
6753 | ||
6754 | restart_threads (ecs->event_thread); | |
6755 | ||
6756 | /* If we have events pending, go through handle_inferior_event | |
6757 | again, picking up a pending event at random. This avoids | |
6758 | thread starvation. */ | |
6759 | ||
6760 | /* But not if we just stepped over a watchpoint in order to let | |
6761 | the instruction execute so we can evaluate its expression. | |
6762 | The set of watchpoints that triggered is recorded in the | |
6763 | breakpoint objects themselves (see bp->watchpoint_triggered). | |
6764 | If we processed another event first, that other event could | |
6765 | clobber this info. */ | |
6766 | if (ecs->event_thread->stepping_over_watchpoint) | |
6767 | return 0; | |
6768 | ||
21d48304 PA |
6769 | /* The code below is meant to avoid one thread hogging the event |
6770 | loop by doing constant in-line step overs. If the stepping | |
6771 | thread exited, there's no risk for this to happen, so we can | |
6772 | safely let our caller process the event immediately. */ | |
6773 | if (ecs->ws.kind () == TARGET_WAITKIND_THREAD_EXITED) | |
6774 | return 0; | |
6775 | ||
58984e4a | 6776 | pending = iterate_over_threads (resumed_thread_with_pending_status); |
03acd4d8 | 6777 | if (pending != nullptr) |
372316f1 PA |
6778 | { |
6779 | struct thread_info *tp = ecs->event_thread; | |
6780 | struct regcache *regcache; | |
6781 | ||
1eb8556f SM |
6782 | infrun_debug_printf ("found resumed threads with " |
6783 | "pending events, saving status"); | |
372316f1 PA |
6784 | |
6785 | gdb_assert (pending != tp); | |
6786 | ||
6787 | /* Record the event thread's event for later. */ | |
c272a98c | 6788 | save_waitstatus (tp, ecs->ws); |
372316f1 PA |
6789 | /* This was cleared early, by handle_inferior_event. Set it |
6790 | so this pending event is considered by | |
6791 | do_target_wait. */ | |
7846f3aa | 6792 | tp->set_resumed (true); |
372316f1 | 6793 | |
611841bb | 6794 | gdb_assert (!tp->executing ()); |
372316f1 | 6795 | |
00431a78 | 6796 | regcache = get_thread_regcache (tp); |
1edb66d8 | 6797 | tp->set_stop_pc (regcache_read_pc (regcache)); |
372316f1 | 6798 | |
1eb8556f SM |
6799 | infrun_debug_printf ("saved stop_pc=%s for %s " |
6800 | "(currently_stepping=%d)", | |
99d9c3b9 SM |
6801 | paddress (current_inferior ()->arch (), |
6802 | tp->stop_pc ()), | |
0fab7955 | 6803 | tp->ptid.to_string ().c_str (), |
1eb8556f | 6804 | currently_stepping (tp)); |
372316f1 PA |
6805 | |
6806 | /* This in-line step-over finished; clear this so we won't | |
6807 | start a new one. This is what handle_signal_stop would | |
6808 | do, if we returned false. */ | |
6809 | tp->stepping_over_breakpoint = 0; | |
6810 | ||
6811 | /* Wake up the event loop again. */ | |
6812 | mark_async_event_handler (infrun_async_inferior_event_token); | |
6813 | ||
6814 | prepare_to_wait (ecs); | |
6815 | return 1; | |
6816 | } | |
6817 | } | |
6818 | ||
6819 | return 0; | |
4d9d9d04 PA |
6820 | } |
6821 | ||
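
   The tail of finish_step_over is an anti-starvation measure: instead of
   servicing the thread that just finished its step-over, its event is
   saved next to the other pending ones, and the next wait picks among all
   of them.  A toy model of that idea follows (deliberately simplified;
   GDB stores the status on the thread itself rather than in a queue).

   #include <cstdio>
   #include <random>
   #include <vector>

   int
   main ()
   {
     std::vector<int> threads_with_pending_events = {1, 2, 3};
     int just_reported = 4;

     /* Save the reporter's event alongside the others...  */
     threads_with_pending_events.push_back (just_reported);

     /* ...and let the next wait pick any pending thread, so thread 4
	cannot monopolize the event loop with back-to-back step-overs.  */
     std::mt19937 rng (std::random_device {} ());
     std::uniform_int_distribution<size_t>
       pick (0, threads_with_pending_events.size () - 1);
     std::printf ("next event from thread %d\n",
		  threads_with_pending_events[pick (rng)]);
     return 0;
   }
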
3f75a984 SM |
6822 | /* See infrun.h. */ |
6823 | ||
6824 | void | |
6825 | notify_signal_received (gdb_signal sig) | |
6826 | { | |
6827 | interps_notify_signal_received (sig); | |
6828 | gdb::observers::signal_received.notify (sig); | |
6829 | } | |
6830 | ||
87829267 SM |
6831 | /* See infrun.h. */ |
6832 | ||
6833 | void | |
6834 | notify_normal_stop (bpstat *bs, int print_frame) | |
6835 | { | |
6836 | interps_notify_normal_stop (bs, print_frame); | |
6837 | gdb::observers::normal_stop.notify (bs, print_frame); | |
6838 | } | |
6839 | ||
77cd03e2 SM |
6840 | /* See infrun.h. */ |
6841 | ||
6842 | void notify_user_selected_context_changed (user_selected_what selection) | |
6843 | { | |
6844 | interps_notify_user_selected_context_changed (selection); | |
6845 | gdb::observers::user_selected_context_changed.notify (selection); | |
6846 | } | |
6847 | ||
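
   Each notify_* helper above does the same two-stage fan-out: interpreters
   first, then the matching observable.  The sketch below shows the shape
   of that pattern with a stand-in observable type; gdbsupport/observable.h
   differs in detail (tokens, dependencies), so treat this as an
   approximation rather than GDB's API.

   #include <cstdio>
   #include <functional>
   #include <vector>

   /* Minimal stand-in for gdb::observers::observable.  */
   template<typename... Args>
   struct toy_observable
   {
     std::vector<std::function<void (Args...)>> observers;

     void attach (std::function<void (Args...)> f)
     { observers.push_back (std::move (f)); }

     void notify (Args... args) const
     {
       for (const auto &f : observers)
	 f (args...);
     }
   };

   static toy_observable<int> signal_received;

   static void
   notify_signal_received_toy (int sig)
   {
     /* Interpreters first...  */
     std::printf ("interps: signal %d\n", sig);
     /* ...then the observers.  */
     signal_received.notify (sig);
   }

   int
   main ()
   {
     signal_received.attach ([] (int s)
			     { std::printf ("observer: %d\n", s); });
     notify_signal_received_toy (11);
     return 0;
   }
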
4f5d7f63 PA |
6848 | /* Come here when the program has stopped with a signal. */ |
6849 | ||
6850 | static void | |
6851 | handle_signal_stop (struct execution_control_state *ecs) | |
6852 | { | |
bd2b40ac | 6853 | frame_info_ptr frame; |
4f5d7f63 PA |
6854 | struct gdbarch *gdbarch; |
6855 | int stopped_by_watchpoint; | |
6856 | enum stop_kind stop_soon; | |
6857 | int random_signal; | |
c906108c | 6858 | |
183be222 | 6859 | gdb_assert (ecs->ws.kind () == TARGET_WAITKIND_STOPPED); |
f0407826 | 6860 | |
183be222 | 6861 | ecs->event_thread->set_stop_signal (ecs->ws.sig ()); |
c65d6b55 | 6862 | |
f0407826 DE |
6863 | /* Do we need to clean up the state of a thread that has |
6864 | completed a displaced single-step? (Doing so usually affects | |
6865 | the PC, so do it here, before we set stop_pc.) */ | |
372316f1 PA |
6866 | if (finish_step_over (ecs)) |
6867 | return; | |
f0407826 DE |
6868 | |
6869 | /* If we either finished a single-step or hit a breakpoint, but | |
6870 | the user wanted this thread to be stopped, pretend we got a | |
6871 | SIG0 (generic unsignaled stop). */ | |
6872 | if (ecs->event_thread->stop_requested | |
1edb66d8 SM |
6873 | && ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP) |
6874 | ecs->event_thread->set_stop_signal (GDB_SIGNAL_0); | |
237fc4c9 | 6875 | |
1edb66d8 SM |
6876 | ecs->event_thread->set_stop_pc |
6877 | (regcache_read_pc (get_thread_regcache (ecs->event_thread))); | |
488f131b | 6878 | |
2ab76a18 PA |
6879 | context_switch (ecs); |
6880 | ||
6881 | if (deprecated_context_hook) | |
6882 | deprecated_context_hook (ecs->event_thread->global_num); | |
6883 | ||
527159b7 | 6884 | if (debug_infrun) |
237fc4c9 | 6885 | { |
00431a78 | 6886 | struct regcache *regcache = get_thread_regcache (ecs->event_thread); |
b926417a | 6887 | struct gdbarch *reg_gdbarch = regcache->arch (); |
7f82dfc7 | 6888 | |
1edb66d8 SM |
6889 | infrun_debug_printf |
6890 | ("stop_pc=%s", paddress (reg_gdbarch, ecs->event_thread->stop_pc ())); | |
d92524f1 | 6891 | if (target_stopped_by_watchpoint ()) |
237fc4c9 | 6892 | { |
dda83cd7 | 6893 | CORE_ADDR addr; |
abbb1732 | 6894 | |
1eb8556f | 6895 | infrun_debug_printf ("stopped by watchpoint"); |
237fc4c9 | 6896 | |
328d42d8 SM |
6897 | if (target_stopped_data_address (current_inferior ()->top_target (), |
6898 | &addr)) | |
1eb8556f | 6899 | infrun_debug_printf ("stopped data address=%s", |
dda83cd7 SM |
6900 | paddress (reg_gdbarch, addr)); |
6901 | else | |
1eb8556f | 6902 | infrun_debug_printf ("(no data address available)"); |
237fc4c9 PA |
6903 | } |
6904 | } | |
527159b7 | 6905 | |
36fa8042 PA |
6906 | /* This is originated from start_remote(), start_inferior() and |
6907 | shared libraries hook functions. */ | |
00431a78 | 6908 | stop_soon = get_inferior_stop_soon (ecs); |
36fa8042 PA |
6909 | if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE) |
6910 | { | |
1eb8556f | 6911 | infrun_debug_printf ("quietly stopped"); |
c4464ade | 6912 | stop_print_frame = true; |
22bcd14b | 6913 | stop_waiting (ecs); |
36fa8042 PA |
6914 | return; |
6915 | } | |
6916 | ||
36fa8042 PA |
6917 | /* This originates from attach_command(). We need to overwrite |
6918 | the stop_signal here, because some kernels don't ignore a | |
6919 | SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call. | |
6920 | See more comments in inferior.h. On the other hand, if we | |
6921 | get a non-SIGSTOP, report it to the user - assume the backend | |
6922 | will handle the SIGSTOP if it should show up later. | |
6923 | ||
6924 | Also consider that the attach is complete when we see a | |
6925 | SIGTRAP. Some systems (e.g. Windows), and stubs supporting | |
6926 | target extended-remote report it instead of a SIGSTOP | |
6927 | (e.g. gdbserver). We already rely on SIGTRAP being our | |
6928 | signal, so this is no exception. | |
6929 | ||
6930 | Also consider that the attach is complete when we see a | |
6931 | GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell | |
6932 | the target to stop all threads of the inferior, in case the | |
6933 | low level attach operation doesn't stop them implicitly. If | |
6934 | they weren't stopped implicitly, then the stub will report a | |
6935 | GDB_SIGNAL_0, meaning: stopped for no particular reason | |
6936 | other than GDB's request. */ | |
6937 | if (stop_soon == STOP_QUIETLY_NO_SIGSTOP | |
1edb66d8 SM |
6938 | && (ecs->event_thread->stop_signal () == GDB_SIGNAL_STOP |
6939 | || ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP | |
6940 | || ecs->event_thread->stop_signal () == GDB_SIGNAL_0)) | |
36fa8042 | 6941 | { |
c4464ade | 6942 | stop_print_frame = true; |
22bcd14b | 6943 | stop_waiting (ecs); |
1edb66d8 | 6944 | ecs->event_thread->set_stop_signal (GDB_SIGNAL_0); |
36fa8042 PA |
6945 | return; |
6946 | } | |
6947 | ||
568d6575 UW |
6948 | /* At this point, get hold of the now-current thread's frame. */ |
6949 | frame = get_current_frame (); | |
6950 | gdbarch = get_frame_arch (frame); | |
6951 | ||
2adfaa28 | 6952 | /* Pull the single step breakpoints out of the target. */ |
1edb66d8 | 6953 | if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP) |
488f131b | 6954 | { |
af48d08f | 6955 | struct regcache *regcache; |
af48d08f | 6956 | CORE_ADDR pc; |
2adfaa28 | 6957 | |
00431a78 | 6958 | regcache = get_thread_regcache (ecs->event_thread); |
f9582a22 | 6959 | const address_space *aspace = ecs->event_thread->inf->aspace.get (); |
8b86c959 | 6960 | |
af48d08f | 6961 | pc = regcache_read_pc (regcache); |
34b7e8a6 | 6962 | |
af48d08f PA |
6963 | /* However, before doing so, if this single-step breakpoint was |
6964 | actually for another thread, set this thread up for moving | |
6965 | past it. */ | |
6966 | if (!thread_has_single_step_breakpoint_here (ecs->event_thread, | |
6967 | aspace, pc)) | |
6968 | { | |
6969 | if (single_step_breakpoint_inserted_here_p (aspace, pc)) | |
2adfaa28 | 6970 | { |
1eb8556f SM |
6971 | infrun_debug_printf ("[%s] hit another thread's single-step " |
6972 | "breakpoint", | |
0fab7955 | 6973 | ecs->ptid.to_string ().c_str ()); |
af48d08f PA |
6974 | ecs->hit_singlestep_breakpoint = 1; |
6975 | } | |
6976 | } | |
6977 | else | |
6978 | { | |
1eb8556f | 6979 | infrun_debug_printf ("[%s] hit its single-step breakpoint", |
0fab7955 | 6980 | ecs->ptid.to_string ().c_str ()); |
2adfaa28 | 6981 | } |
488f131b | 6982 | } |
af48d08f | 6983 | delete_just_stopped_threads_single_step_breakpoints (); |
c906108c | 6984 | |
1edb66d8 | 6985 | if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP |
963f9c80 PA |
6986 | && ecs->event_thread->control.trap_expected |
6987 | && ecs->event_thread->stepping_over_watchpoint) | |
d983da9c DJ |
6988 | stopped_by_watchpoint = 0; |
6989 | else | |
c272a98c | 6990 | stopped_by_watchpoint = watchpoints_triggered (ecs->ws); |
d983da9c DJ |
6991 | |
6992 | /* If necessary, step over this watchpoint. We'll be back to display | |
6993 | it in a moment. */ | |
6994 | if (stopped_by_watchpoint | |
9aed480c | 6995 | && (target_have_steppable_watchpoint () |
568d6575 | 6996 | || gdbarch_have_nonsteppable_watchpoint (gdbarch))) |
488f131b | 6997 | { |
488f131b | 6998 | /* At this point, we are stopped at an instruction which has |
dda83cd7 SM |
6999 | attempted to write to a piece of memory under control of |
7000 | a watchpoint. The instruction hasn't actually executed | |
7001 | yet. If we were to evaluate the watchpoint expression | |
7002 | now, we would get the old value, and therefore no change | |
7003 | would seem to have occurred. | |
7004 | ||
7005 | In order to make watchpoints work `right', we really need | |
7006 | to complete the memory write, and then evaluate the | |
7007 | watchpoint expression. We do this by single-stepping the | |
d983da9c DJ |
7008 | target. |
7009 | ||
7f89fd65 | 7010 | It may not be necessary to disable the watchpoint to step over |
d983da9c DJ |
7011 | it. For example, the PA can (with some kernel cooperation) |
7012 | single step over a watchpoint without disabling the watchpoint. | |
7013 | ||
7014 | It is far more common to need to disable a watchpoint to step | |
7015 | the inferior over it. If we have non-steppable watchpoints, | |
7016 | we must disable the current watchpoint; it's simplest to | |
963f9c80 PA |
7017 | disable all watchpoints. |
7018 | ||
7019 | Any breakpoint at PC must also be stepped over -- if there's | |
7020 | one, it will have already triggered before the watchpoint | |
7021 | triggered, and we either already reported it to the user, or | |
7022 | it didn't cause a stop and we called keep_going. In either | |
7023 | case, if there was a breakpoint at PC, we must be trying to | |
7024 | step past it. */ | |
7025 | ecs->event_thread->stepping_over_watchpoint = 1; | |
7026 | keep_going (ecs); | |
488f131b JB |
7027 | return; |
7028 | } | |
7029 | ||
4e1c45ea | 7030 | ecs->event_thread->stepping_over_breakpoint = 0; |
963f9c80 | 7031 | ecs->event_thread->stepping_over_watchpoint = 0; |
16c381f0 JK |
7032 | bpstat_clear (&ecs->event_thread->control.stop_bpstat); |
7033 | ecs->event_thread->control.stop_step = 0; | |
c4464ade | 7034 | stop_print_frame = true; |
488f131b | 7035 | stopped_by_random_signal = 0; |
313f3b21 | 7036 | bpstat *stop_chain = nullptr; |
488f131b | 7037 | |
edb3359d DJ |
7038 | /* Hide inlined functions starting here, unless we just performed stepi or |
7039 | nexti. After stepi and nexti, always show the innermost frame (not any | |
7040 | inline function call sites). */ | |
16c381f0 | 7041 | if (ecs->event_thread->control.step_range_end != 1) |
0574c78f | 7042 | { |
f9582a22 | 7043 | const address_space *aspace = ecs->event_thread->inf->aspace.get (); |
0574c78f GB |
7044 | |
7045 | /* skip_inline_frames is expensive, so we avoid it if we can | |
7046 | determine that the address is one where functions cannot have | |
7047 | been inlined. This improves performance with inferiors that | |
7048 | load a lot of shared libraries, because the solib event | |
7049 | breakpoint is defined as the address of a function (i.e. not | |
7050 | inline). Note that we have to check the previous PC as well | |
7051 | as the current one to catch cases when we have just | |
7052 | single-stepped off a breakpoint prior to reinstating it. | |
7053 | Note that we're assuming that the code we single-step to is | |
7054 | not inline, but that's not definitive: there's nothing | |
7055 | preventing the event breakpoint function from containing | |
7056 | inlined code, and the single-step ending up there. If the | |
7057 | user had set a breakpoint on that inlined code, the missing | |
7058 | skip_inline_frames call would break things. Fortunately | |
7059 | that's an extremely unlikely scenario. */ | |
f2ffa92b | 7060 | if (!pc_at_non_inline_function (aspace, |
1edb66d8 | 7061 | ecs->event_thread->stop_pc (), |
c272a98c | 7062 | ecs->ws) |
1edb66d8 | 7063 | && !(ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP |
a210c238 MR |
7064 | && ecs->event_thread->control.trap_expected |
7065 | && pc_at_non_inline_function (aspace, | |
7066 | ecs->event_thread->prev_pc, | |
c272a98c | 7067 | ecs->ws))) |
1c5a993e | 7068 | { |
f2ffa92b | 7069 | stop_chain = build_bpstat_chain (aspace, |
1edb66d8 | 7070 | ecs->event_thread->stop_pc (), |
c272a98c | 7071 | ecs->ws); |
00431a78 | 7072 | skip_inline_frames (ecs->event_thread, stop_chain); |
1c5a993e | 7073 | } |
0574c78f | 7074 | } |
edb3359d | 7075 | |
1edb66d8 | 7076 | if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP |
16c381f0 | 7077 | && ecs->event_thread->control.trap_expected |
568d6575 | 7078 | && gdbarch_single_step_through_delay_p (gdbarch) |
4e1c45ea | 7079 | && currently_stepping (ecs->event_thread)) |
3352ef37 | 7080 | { |
b50d7442 | 7081 | /* We're trying to step off a breakpoint. Turns out that we're |
3352ef37 | 7082 | also on an instruction that needs to be stepped multiple |
1777feb0 | 7083 | times before it's been fully executed. E.g., architectures | 
3352ef37 AC |
7084 | with a delay slot. It needs to be stepped twice, once for |
7085 | the instruction and once for the delay slot. */ | |
7086 | int step_through_delay | |
568d6575 | 7087 | = gdbarch_single_step_through_delay (gdbarch, frame); |
abbb1732 | 7088 | |
1eb8556f SM |
7089 | if (step_through_delay) |
7090 | infrun_debug_printf ("step through delay"); | |
7091 | ||
16c381f0 JK |
7092 | if (ecs->event_thread->control.step_range_end == 0 |
7093 | && step_through_delay) | |
3352ef37 AC |
7094 | { |
7095 | /* The user issued a continue when stopped at a breakpoint. | |
7096 | Set up for another trap and get out of here. */ | |
dda83cd7 SM |
7097 | ecs->event_thread->stepping_over_breakpoint = 1; |
7098 | keep_going (ecs); | |
7099 | return; | |
3352ef37 AC |
7100 | } |
7101 | else if (step_through_delay) | |
7102 | { | |
7103 | /* The user issued a step when stopped at a breakpoint. | |
7104 | Maybe we should stop, maybe we should not - the delay | |
7105 | slot *might* correspond to a line of source. In any | |
ca67fcb8 VP |
7106 | case, don't decide that here, just set |
7107 | ecs->stepping_over_breakpoint, making sure we | |
7108 | single-step again before breakpoints are re-inserted. */ | |
4e1c45ea | 7109 | ecs->event_thread->stepping_over_breakpoint = 1; |
3352ef37 AC |
7110 | } |
7111 | } | |
7112 | ||
ab04a2af TT |
7113 | /* See if there is a breakpoint/watchpoint/catchpoint/etc. that |
7114 | handles this event. */ | |
7115 | ecs->event_thread->control.stop_bpstat | |
f9582a22 | 7116 | = bpstat_stop_status (ecs->event_thread->inf->aspace.get (), |
1edb66d8 | 7117 | ecs->event_thread->stop_pc (), |
c272a98c | 7118 | ecs->event_thread, ecs->ws, stop_chain); |
db82e815 | 7119 | |
ab04a2af TT |
7120 | /* Following in case break condition called a |
7121 | function. */ | |
c4464ade | 7122 | stop_print_frame = true; |
73dd234f | 7123 | |
ab04a2af TT |
7124 | /* This is where we handle "moribund" watchpoints. Unlike |
7125 | software breakpoints traps, hardware watchpoint traps are | |
7126 | always distinguishable from random traps. If no high-level | |
7127 | watchpoint is associated with the reported stop data address | |
7128 | anymore, then the bpstat does not explain the signal --- | |
7129 | simply make sure to ignore it if `stopped_by_watchpoint' is | |
7130 | set. */ | |
7131 | ||
1edb66d8 | 7132 | if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP |
47591c29 | 7133 | && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat, |
427cd150 | 7134 | GDB_SIGNAL_TRAP) |
ab04a2af | 7135 | && stopped_by_watchpoint) |
1eb8556f SM |
7136 | { |
7137 | infrun_debug_printf ("no user watchpoint explains watchpoint SIGTRAP, " | |
7138 | "ignoring"); | |
7139 | } | |
73dd234f | 7140 | |
bac7d97b | 7141 | /* NOTE: cagney/2003-03-29: These checks for a random signal |
ab04a2af TT |
7142 | at one stage in the past included checks for an inferior |
7143 | function call's call dummy's return breakpoint. The original | |
7144 | comment, that went with the test, read: | |
03cebad2 | 7145 | |
ab04a2af TT |
7146 | ``End of a stack dummy. Some systems (e.g. Sony NEWS) give | 
7147 | another signal besides SIGTRAP, so check here as well as | |
7148 | above.'' | |
73dd234f | 7149 | |
ab04a2af TT |
7150 | If someone ever tries to get call dummies on a | 
7151 | non-executable stack to work (where the target would stop | |
7152 | with something like a SIGSEGV), then those tests might need | |
7153 | to be re-instated. Given, however, that the tests were only | |
7154 | enabled when momentary breakpoints were not being used, I | |
7155 | suspect that it won't be the case. | |
488f131b | 7156 | |
ab04a2af TT |
7157 | NOTE: kettenis/2004-02-05: Indeed such checks don't seem to |
7158 | be necessary for call dummies on a non-executable stack on | |
7159 | SPARC. */ | |
488f131b | 7160 | |
bac7d97b | 7161 | /* See if the breakpoints module can explain the signal. */ |
47591c29 PA |
7162 | random_signal |
7163 | = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat, | |
1edb66d8 | 7164 | ecs->event_thread->stop_signal ()); |
bac7d97b | 7165 | |
1cf4d951 PA |
7166 | /* Maybe this was a trap for a software breakpoint that has since |
7167 | been removed. */ | |
7168 | if (random_signal && target_stopped_by_sw_breakpoint ()) | |
7169 | { | |
5133a315 | 7170 | if (gdbarch_program_breakpoint_here_p (gdbarch, |
1edb66d8 | 7171 | ecs->event_thread->stop_pc ())) |
1cf4d951 PA |
7172 | { |
7173 | struct regcache *regcache; | |
7174 | int decr_pc; | |
7175 | ||
7176 | /* Re-adjust PC to what the program would see if GDB was not | |
7177 | debugging it. */ | |
00431a78 | 7178 | regcache = get_thread_regcache (ecs->event_thread); |
527a273a | 7179 | decr_pc = gdbarch_decr_pc_after_break (gdbarch); |
1cf4d951 PA |
7180 | if (decr_pc != 0) |
7181 | { | |
6b09f134 | 7182 | std::optional<scoped_restore_tmpl<int>> |
07036511 | 7183 | restore_operation_disable; |
1cf4d951 PA |
7184 | |
7185 | if (record_full_is_used ()) | |
07036511 TT |
7186 | restore_operation_disable.emplace |
7187 | (record_full_gdb_operation_disable_set ()); | |
1cf4d951 | 7188 | |
f2ffa92b | 7189 | regcache_write_pc (regcache, |
1edb66d8 | 7190 | ecs->event_thread->stop_pc () + decr_pc); |
1cf4d951 PA |
7191 | } |
7192 | } | |
7193 | else | |
7194 | { | |
7195 | /* A delayed software breakpoint event. Ignore the trap. */ | |
1eb8556f | 7196 | infrun_debug_printf ("delayed software breakpoint trap, ignoring"); |
1cf4d951 PA |
7197 | random_signal = 0; |
7198 | } | |
7199 | } | |
7200 | ||
7201 | /* Maybe this was a trap for a hardware breakpoint/watchpoint that | |
7202 | has since been removed. */ | |
7203 | if (random_signal && target_stopped_by_hw_breakpoint ()) | |
7204 | { | |
7205 | /* A delayed hardware breakpoint event. Ignore the trap. */ | |
1eb8556f SM |
7206 | infrun_debug_printf ("delayed hardware breakpoint/watchpoint " |
7207 | "trap, ignoring"); | |
1cf4d951 PA |
7208 | random_signal = 0; |
7209 | } | |
7210 | ||
bac7d97b PA |
7211 | /* If not, perhaps stepping/nexting can. */ |
7212 | if (random_signal) | |
1edb66d8 | 7213 | random_signal = !(ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP |
bac7d97b | 7214 | && currently_stepping (ecs->event_thread)); |
ab04a2af | 7215 | |
2adfaa28 PA |
7216 | /* Perhaps the thread hit a single-step breakpoint of _another_ |
7217 | thread. Single-step breakpoints are transparent to the | |
7218 | breakpoints module. */ | |
7219 | if (random_signal) | |
7220 | random_signal = !ecs->hit_singlestep_breakpoint; | |
7221 | ||
bac7d97b PA |
7222 | /* No? Perhaps we got a moribund watchpoint. */ |
7223 | if (random_signal) | |
7224 | random_signal = !stopped_by_watchpoint; | |
ab04a2af | 7225 | |
c65d6b55 PA |
7226 | /* Always stop if the user explicitly requested this thread to |
7227 | remain stopped. */ | |
7228 | if (ecs->event_thread->stop_requested) | |
7229 | { | |
7230 | random_signal = 1; | |
1eb8556f | 7231 | infrun_debug_printf ("user-requested stop"); |
c65d6b55 PA |
7232 | } |
7233 | ||
488f131b JB |
7234 | /* For the program's own signals, act according to |
7235 | the signal handling tables. */ | |
7236 | ||
ce12b012 | 7237 | if (random_signal) |
488f131b JB |
7238 | { |
7239 | /* Signal not for debugging purposes. */ | |
1edb66d8 | 7240 | enum gdb_signal stop_signal = ecs->event_thread->stop_signal (); |
488f131b | 7241 | |
1eb8556f SM |
7242 | infrun_debug_printf ("random signal (%s)", |
7243 | gdb_signal_to_symbol_string (stop_signal)); | |
527159b7 | 7244 | |
488f131b JB |
7245 | stopped_by_random_signal = 1; |
7246 | ||
252fbfc8 PA |
7247 | /* Always stop on signals if we're either just gaining control |
7248 | of the program, or the user explicitly requested this thread | |
7249 | to remain stopped. */ | |
d6b48e9c | 7250 | if (stop_soon != NO_STOP_QUIETLY |
252fbfc8 | 7251 | || ecs->event_thread->stop_requested |
1edb66d8 | 7252 | || signal_stop_state (ecs->event_thread->stop_signal ())) |
488f131b | 7253 | { |
22bcd14b | 7254 | stop_waiting (ecs); |
488f131b JB |
7255 | return; |
7256 | } | |
b57bacec PA |
7257 | |
7258 | /* Notify observers the signal has "handle print" set. Note we | |
7259 | returned early above if stopping; normal_stop handles the | |
7260 | printing in that case. */ | |
1edb66d8 | 7261 | if (signal_print[ecs->event_thread->stop_signal ()]) |
b57bacec PA |
7262 | { |
7263 | /* The signal table tells us to print about this signal. */ | |
223ffa71 | 7264 | target_terminal::ours_for_output (); |
3f75a984 | 7265 | notify_signal_received (ecs->event_thread->stop_signal ()); |
223ffa71 | 7266 | target_terminal::inferior (); |
b57bacec | 7267 | } |
488f131b JB |
7268 | |
7269 | /* Clear the signal if it should not be passed. */ | |
1edb66d8 SM |
7270 | if (signal_program[ecs->event_thread->stop_signal ()] == 0) |
7271 | ecs->event_thread->set_stop_signal (GDB_SIGNAL_0); | |
488f131b | 7272 | |
1edb66d8 | 7273 | if (ecs->event_thread->prev_pc == ecs->event_thread->stop_pc () |
16c381f0 | 7274 | && ecs->event_thread->control.trap_expected |
03acd4d8 | 7275 | && ecs->event_thread->control.step_resume_breakpoint == nullptr) |
68f53502 AC |
7276 | { |
7277 | /* We were just starting a new sequence, attempting to | |
7278 | single-step off of a breakpoint and expecting a SIGTRAP. | |
237fc4c9 | 7279 | Instead this signal arrives. This signal will take us out |
68f53502 AC |
7280 | of the stepping range so GDB needs to remember to, when |
7281 | the signal handler returns, resume stepping off that | |
7282 | breakpoint. */ | |
7283 | /* To simplify things, "continue" is forced to use the same | |
7284 | code paths as single-step - set a breakpoint at the | |
7285 | signal return address and then, once hit, step off that | |
7286 | breakpoint. */ | |
1eb8556f | 7287 | infrun_debug_printf ("signal arrived while stepping over breakpoint"); |
d3169d93 | 7288 | |
2c03e5be | 7289 | insert_hp_step_resume_breakpoint_at_frame (frame); |
4e1c45ea | 7290 | ecs->event_thread->step_after_step_resume_breakpoint = 1; |
2455069d UW |
7291 | /* Reset trap_expected to ensure breakpoints are re-inserted. */ |
7292 | ecs->event_thread->control.trap_expected = 0; | |
d137e6dc PA |
7293 | |
7294 | /* If we were nexting/stepping some other thread, switch to | |
7295 | it, so that we don't continue it, losing control. */ | |
7296 | if (!switch_back_to_stepped_thread (ecs)) | |
7297 | keep_going (ecs); | |
9d799f85 | 7298 | return; |
68f53502 | 7299 | } |
9d799f85 | 7300 | |
1edb66d8 SM |
7301 | if (ecs->event_thread->stop_signal () != GDB_SIGNAL_0 |
7302 | && (pc_in_thread_step_range (ecs->event_thread->stop_pc (), | |
f2ffa92b | 7303 | ecs->event_thread) |
e5f8a7cc | 7304 | || ecs->event_thread->control.step_range_end == 1) |
a0cbd650 TT |
7305 | && (get_stack_frame_id (frame) |
7306 | == ecs->event_thread->control.step_stack_frame_id) | |
03acd4d8 | 7307 | && ecs->event_thread->control.step_resume_breakpoint == nullptr) |
d303a6c7 AC |
7308 | { |
7309 | /* The inferior is about to take a signal that will take it | |
7310 | out of the single step range. Set a breakpoint at the | |
7311 | current PC (which is presumably where the signal handler | |
7312 | will eventually return) and then allow the inferior to | |
7313 | run free. | |
7314 | ||
7315 | Note that this is only needed for a signal delivered | |
7316 | while in the single-step range. Nested signals aren't a | |
7317 | problem as they eventually all return. */ | |
1eb8556f | 7318 | infrun_debug_printf ("signal may take us out of single-step range"); |
237fc4c9 | 7319 | |
372316f1 | 7320 | clear_step_over_info (); |
2c03e5be | 7321 | insert_hp_step_resume_breakpoint_at_frame (frame); |
e5f8a7cc | 7322 | ecs->event_thread->step_after_step_resume_breakpoint = 1; |
2455069d UW |
7323 | /* Reset trap_expected to ensure breakpoints are re-inserted. */ |
7324 | ecs->event_thread->control.trap_expected = 0; | |
9d799f85 AC |
7325 | keep_going (ecs); |
7326 | return; | |
d303a6c7 | 7327 | } |
9d799f85 | 7328 | |
85102364 | 7329 | /* Note: step_resume_breakpoint may be non-NULL. This occurs |
9d799f85 AC |
7330 | when either there's a nested signal, or when there's a |
7331 | pending signal enabled just as the signal handler returns | |
7332 | (leaving the inferior at the step-resume-breakpoint without | |
7333 | actually executing it). Either way continue until the | |
7334 | breakpoint is really hit. */ | |
c447ac0b PA |
7335 | |
7336 | if (!switch_back_to_stepped_thread (ecs)) | |
7337 | { | |
1eb8556f | 7338 | infrun_debug_printf ("random signal, keep going"); |
c447ac0b PA |
7339 | |
7340 | keep_going (ecs); | |
7341 | } | |
7342 | return; | |
488f131b | 7343 | } |
94c57d6a PA |
7344 | |
7345 | process_event_stop_test (ecs); | |
7346 | } | |
7347 | ||
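
   Much of handle_signal_stop is spent deciding whether the stop is a
   "random" signal, i.e. one not explained by breakpoints or stepping
   state.  The checks form a short-circuit chain; the condensed model
   below keeps their order but replaces the real queries with toy flags
   (it also folds the delayed-breakpoint PC re-adjustment into a single
   flag), so it is a summary, not the implementation.

   #include <cstdio>

   struct stop_facts
   {
     bool bpstat_explains;	  /* a breakpoint explains the signal */
     bool delayed_sw_breakpoint;  /* trap from a removed sw breakpoint */
     bool delayed_hw_breakpoint;  /* ...or a removed hw break/watchpoint */
     bool trap_while_stepping;	  /* SIGTRAP while single-stepping */
     bool other_thread_sstep_bp;  /* another thread's sstep breakpoint */
     bool moribund_watchpoint;	  /* watchpoint logic claims the trap */
     bool user_stop_requested;	  /* an explicit stop request wins */
   };

   static bool
   signal_is_random (const stop_facts &f)
   {
     bool random_signal = !f.bpstat_explains;
     if (random_signal && f.delayed_sw_breakpoint) random_signal = false;
     if (random_signal && f.delayed_hw_breakpoint) random_signal = false;
     if (random_signal) random_signal = !f.trap_while_stepping;
     if (random_signal) random_signal = !f.other_thread_sstep_bp;
     if (random_signal) random_signal = !f.moribund_watchpoint;
     if (f.user_stop_requested) random_signal = true;
     return random_signal;
   }

   int
   main ()
   {
     stop_facts f = {};
     f.trap_while_stepping = true;
     std::printf ("random=%d\n", signal_is_random (f));	/* prints 0 */
     return 0;
   }
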
fe6356de CL |
7348 | /* Return the address for the beginning of the line. */ |
7349 | ||
7350 | CORE_ADDR | |
7351 | update_line_range_start (CORE_ADDR pc, struct execution_control_state *ecs) | |
7352 | { | |
7353 | /* The line table may have multiple entries for the same source code line. | |
7354 | Given the PC, check the line table and return the PC that corresponds | |
7355 | to the line table entry for the source line that PC is in. */ | |
7356 | CORE_ADDR start_line_pc = ecs->event_thread->control.step_range_start; | |
7357 | std::optional<CORE_ADDR> real_range_start; | |
7358 | ||
7359 | /* Call find_line_range_start to get the smallest address among | 
7360 | the multiple line-table entries for the same source line. */ | 
7361 | real_range_start = find_line_range_start (pc); | |
7362 | ||
7363 | if (real_range_start.has_value ()) | |
7364 | start_line_pc = *real_range_start; | |
7365 | ||
7366 | return start_line_pc; | |
7367 | } | |
7368 | ||
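
   A standalone sketch of the job update_line_range_start delegates to
   find_line_range_start: when one source line has several line-table
   entries, the step range should begin at the smallest PC recorded for
   that line.  Types here are toys, not GDB's linetable structures.

   #include <algorithm>
   #include <cstdio>
   #include <optional>
   #include <vector>

   struct toy_linetable_entry { int line; unsigned long pc; };

   static std::optional<unsigned long>
   line_range_start (const std::vector<toy_linetable_entry> &table,
		     unsigned long pc)
   {
     /* Find the entry containing PC (largest entry PC not above PC)...  */
     const toy_linetable_entry *hit = nullptr;
     for (const auto &e : table)
       if (e.pc <= pc && (hit == nullptr || e.pc > hit->pc))
	 hit = &e;
     if (hit == nullptr)
       return {};

     /* ...then the smallest PC among all entries for that line.  */
     unsigned long start = hit->pc;
     for (const auto &e : table)
       if (e.line == hit->line)
	 start = std::min (start, e.pc);
     return start;
   }

   int
   main ()
   {
     /* Line 42 is split into two ranges starting at 0x1000 and 0x1020.  */
     std::vector<toy_linetable_entry> t
       = { {42, 0x1000}, {43, 0x1010}, {42, 0x1020} };
     if (auto s = line_range_start (t, 0x1024))
       std::printf ("line range starts at 0x%lx\n", *s);
     return 0;
   }
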
5266f5c2 LS |
7369 | namespace { |
7370 | ||
7371 | /* Helper class for process_event_stop_test implementing lazy evaluation. */ | |
7372 | template<typename T> | |
7373 | class lazy_loader | |
7374 | { | |
7375 | using fetcher_t = std::function<T ()>; | |
7376 | ||
7377 | public: | |
7378 | explicit lazy_loader (fetcher_t &&f) : m_loader (std::move (f)) | |
7379 | { } | |
7380 | ||
7381 | T &operator* () | |
7382 | { | |
7383 | if (!m_value.has_value ()) | |
7384 | m_value.emplace (m_loader ()); | |
7385 | return m_value.value (); | |
7386 | } | |
7387 | ||
7388 | T *operator-> () | |
7389 | { | |
7390 | return &**this; | |
7391 | } | |
7392 | ||
7393 | private: | |
7394 | std::optional<T> m_value; | |
7395 | fetcher_t m_loader; | |
7396 | }; | |
7397 | ||
7398 | } | |
7399 | ||
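
   A usage sketch for lazy_loader, assuming the template above is in scope
   (in a standalone file you would copy it together with <functional> and
   <optional>): the fetcher runs at most once, on first dereference, which
   is why process_event_stop_test can declare a lazy frame id and pay for
   get_frame_id only on the paths that actually consult it.

   #include <cstdio>

   int
   main ()
   {
     int computations = 0;
     lazy_loader<int> value ([&] () { ++computations; return 42; });

     /* Nothing computed yet; the first dereference runs the fetcher,
	the second reuses the cached result.  */
     std::printf ("%d %d\n", *value, *value);
     std::printf ("computations=%d\n", computations);	/* prints 1 */
     return 0;
   }
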
94c57d6a PA |
7400 | /* Come here when we've got some debug event / signal we can explain |
7401 | (IOW, not a random signal), and test whether it should cause a | |
7402 | stop, or whether we should resume the inferior (transparently). | |
7403 | E.g., could be a breakpoint whose condition evaluates false; we | |
7404 | could be still stepping within the line; etc. */ | |
7405 | ||
7406 | static void | |
7407 | process_event_stop_test (struct execution_control_state *ecs) | |
7408 | { | |
7409 | struct symtab_and_line stop_pc_sal; | |
bd2b40ac | 7410 | frame_info_ptr frame; |
94c57d6a | 7411 | struct gdbarch *gdbarch; |
cdaa5b73 PA |
7412 | CORE_ADDR jmp_buf_pc; |
7413 | struct bpstat_what what; | |
94c57d6a | 7414 | |
cdaa5b73 | 7415 | /* Handle cases caused by hitting a breakpoint. */ |
611c83ae | 7416 | |
cdaa5b73 PA |
7417 | frame = get_current_frame (); |
7418 | gdbarch = get_frame_arch (frame); | |
fcf3daef | 7419 | |
cdaa5b73 | 7420 | what = bpstat_what (ecs->event_thread->control.stop_bpstat); |
611c83ae | 7421 | |
cdaa5b73 PA |
7422 | if (what.call_dummy) |
7423 | { | |
7424 | stop_stack_dummy = what.call_dummy; | |
7425 | } | |
186c406b | 7426 | |
243a9253 PA |
7427 | /* A few breakpoint types have callbacks associated (e.g., |
7428 | bp_jit_event). Run them now. */ | |
7429 | bpstat_run_callbacks (ecs->event_thread->control.stop_bpstat); | |
7430 | ||
bf2813af GL |
7431 | /* Shorthand to make if statements smaller. */ |
7432 | struct frame_id original_frame_id | |
7433 | = ecs->event_thread->control.step_frame_id; | |
5266f5c2 LS |
7434 | lazy_loader<frame_id> curr_frame_id |
7435 | ([] () { return get_frame_id (get_current_frame ()); }); | |
bf2813af | 7436 | |
cdaa5b73 PA |
7437 | switch (what.main_action) |
7438 | { | |
7439 | case BPSTAT_WHAT_SET_LONGJMP_RESUME: | |
7440 | /* If we hit the breakpoint at longjmp while stepping, we | |
7441 | install a momentary breakpoint at the target of the | |
7442 | jmp_buf. */ | |
186c406b | 7443 | |
1eb8556f | 7444 | infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME"); |
186c406b | 7445 | |
cdaa5b73 | 7446 | ecs->event_thread->stepping_over_breakpoint = 1; |
611c83ae | 7447 | |
cdaa5b73 PA |
7448 | if (what.is_longjmp) |
7449 | { | |
7450 | struct value *arg_value; | |
7451 | ||
7452 | /* If we set the longjmp breakpoint via a SystemTap probe, | |
7453 | then use it to extract the arguments. The destination PC | |
7454 | is the third argument to the probe. */ | |
7455 | arg_value = probe_safe_evaluate_at_pc (frame, 2); | |
7456 | if (arg_value) | |
8fa0c4f8 AA |
7457 | { |
7458 | jmp_buf_pc = value_as_address (arg_value); | |
7459 | jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc); | |
7460 | } | |
cdaa5b73 PA |
7461 | else if (!gdbarch_get_longjmp_target_p (gdbarch) |
7462 | || !gdbarch_get_longjmp_target (gdbarch, | |
7463 | frame, &jmp_buf_pc)) | |
e2e4d78b | 7464 | { |
1eb8556f SM |
7465 | infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME " |
7466 | "(!gdbarch_get_longjmp_target)"); | |
cdaa5b73 PA |
7467 | keep_going (ecs); |
7468 | return; | |
e2e4d78b | 7469 | } |
e2e4d78b | 7470 | |
cdaa5b73 PA |
7471 | /* Insert a breakpoint at resume address. */ |
7472 | insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc); | |
7473 | } | |
7474 | else | |
7475 | check_exception_resume (ecs, frame); | |
7476 | keep_going (ecs); | |
7477 | return; | |
e81a37f7 | 7478 | |
cdaa5b73 PA |
7479 | case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME: |
7480 | { | |
bd2b40ac | 7481 | frame_info_ptr init_frame; |
e81a37f7 | 7482 | |
cdaa5b73 | 7483 | /* There are several cases to consider. |
c906108c | 7484 | |
cdaa5b73 PA |
7485 | 1. The initiating frame no longer exists. In this case we |
7486 | must stop, because the exception or longjmp has gone too | |
7487 | far. | |
2c03e5be | 7488 | |
cdaa5b73 PA |
7489 | 2. The initiating frame exists, and is the same as the |
7490 | current frame. We stop, because the exception or longjmp | |
7491 | has been caught. | |
2c03e5be | 7492 | |
cdaa5b73 PA |
7493 | 3. The initiating frame exists and is different from the |
7494 | current frame. This means the exception or longjmp has | |
7495 | been caught beneath the initiating frame, so keep going. | |
c906108c | 7496 | |
cdaa5b73 PA |
7497 | 4. longjmp breakpoint has been placed just to protect |
7498 | against stale dummy frames and user is not interested in | |
7499 | stopping around longjmps. */ | |
c5aa993b | 7500 | |
1eb8556f | 7501 | infrun_debug_printf ("BPSTAT_WHAT_CLEAR_LONGJMP_RESUME"); |
c5aa993b | 7502 | |
cdaa5b73 | 7503 | gdb_assert (ecs->event_thread->control.exception_resume_breakpoint |
03acd4d8 | 7504 | != nullptr); |
cdaa5b73 | 7505 | delete_exception_resume_breakpoint (ecs->event_thread); |
c5aa993b | 7506 | |
cdaa5b73 PA |
7507 | if (what.is_longjmp) |
7508 | { | |
b67a2c6f | 7509 | check_longjmp_breakpoint_for_call_dummy (ecs->event_thread); |
c5aa993b | 7510 | |
cdaa5b73 | 7511 | if (!frame_id_p (ecs->event_thread->initiating_frame)) |
e5ef252a | 7512 | { |
cdaa5b73 PA |
7513 | /* Case 4. */ |
7514 | keep_going (ecs); | |
7515 | return; | |
e5ef252a | 7516 | } |
cdaa5b73 | 7517 | } |
c5aa993b | 7518 | |
cdaa5b73 | 7519 | init_frame = frame_find_by_id (ecs->event_thread->initiating_frame); |
527159b7 | 7520 | |
cdaa5b73 PA |
7521 | if (init_frame) |
7522 | { | |
5266f5c2 | 7523 | if (*curr_frame_id == ecs->event_thread->initiating_frame) |
cdaa5b73 PA |
7524 | { |
7525 | /* Case 2. Fall through. */ | |
7526 | } | |
7527 | else | |
7528 | { | |
7529 | /* Case 3. */ | |
7530 | keep_going (ecs); | |
7531 | return; | |
7532 | } | |
68f53502 | 7533 | } |
488f131b | 7534 | |
cdaa5b73 PA |
7535 | /* For Cases 1 and 2, remove the step-resume breakpoint, if it |
7536 | exists. */ | |
7537 | delete_step_resume_breakpoint (ecs->event_thread); | |
e5ef252a | 7538 | |
bdc36728 | 7539 | end_stepping_range (ecs); |
cdaa5b73 PA |
7540 | } |
7541 | return; | |
e5ef252a | 7542 | |
cdaa5b73 | 7543 | case BPSTAT_WHAT_SINGLE: |
1eb8556f | 7544 | infrun_debug_printf ("BPSTAT_WHAT_SINGLE"); |
cdaa5b73 PA |
7545 | ecs->event_thread->stepping_over_breakpoint = 1; |
7546 | /* Still need to check other stuff, at least the case where we | |
7547 | are stepping and step out of the right range. */ | |
7548 | break; | |
e5ef252a | 7549 | |
cdaa5b73 | 7550 | case BPSTAT_WHAT_STEP_RESUME: |
1eb8556f | 7551 | infrun_debug_printf ("BPSTAT_WHAT_STEP_RESUME"); |
b22548dd | 7552 | |
b986eec5 CL |
7553 | delete_step_resume_breakpoint (ecs->event_thread); |
7554 | if (ecs->event_thread->control.proceed_to_finish | |
7555 | && execution_direction == EXEC_REVERSE) | |
cdaa5b73 PA |
7556 | { |
7557 | struct thread_info *tp = ecs->event_thread; | |
b22548dd | 7558 | |
b986eec5 CL |
7559 | /* We are finishing a function in reverse, and just hit the |
7560 | step-resume breakpoint at the start address of the | |
7561 | function, and we're almost there -- just need to back up | |
7562 | by one more single-step, which should take us back to the | |
7563 | function call. */ | |
7564 | tp->control.step_range_start = tp->control.step_range_end = 1; | |
7565 | keep_going (ecs); | |
7566 | return; | |
7567 | } | |
7568 | fill_in_stop_func (gdbarch, ecs); | |
7569 | if (ecs->event_thread->stop_pc () == ecs->stop_func_start | |
7570 | && execution_direction == EXEC_REVERSE) | |
7571 | { | |
7572 | /* We are stepping over a function call in reverse, and just | |
7573 | hit the step-resume breakpoint at the start address of | |
7574 | the function. Go back to single-stepping, which should | |
7575 | take us back to the function call. */ | |
7576 | ecs->event_thread->stepping_over_breakpoint = 1; | |
cdaa5b73 PA |
7577 | keep_going (ecs); |
7578 | return; | |
7579 | } | |
7580 | break; | |
e5ef252a | 7581 | |
cdaa5b73 | 7582 | case BPSTAT_WHAT_STOP_NOISY: |
1eb8556f | 7583 | infrun_debug_printf ("BPSTAT_WHAT_STOP_NOISY"); |
c4464ade | 7584 | stop_print_frame = true; |
e5ef252a | 7585 | |
33bf4c5c | 7586 | /* Assume the thread stopped for a breakpoint. We'll still check |
99619bea PA |
7587 | whether a/the breakpoint is there when the thread is next |
7588 | resumed. */ | |
7589 | ecs->event_thread->stepping_over_breakpoint = 1; | |
e5ef252a | 7590 | |
22bcd14b | 7591 | stop_waiting (ecs); |
cdaa5b73 | 7592 | return; |
e5ef252a | 7593 | |
cdaa5b73 | 7594 | case BPSTAT_WHAT_STOP_SILENT: |
1eb8556f | 7595 | infrun_debug_printf ("BPSTAT_WHAT_STOP_SILENT"); |
c4464ade | 7596 | stop_print_frame = false; |
e5ef252a | 7597 | |
33bf4c5c | 7598 | /* Assume the thread stopped for a breakpoint. We'll still check |
99619bea PA |
7599 | whether a/the breakpoint is there when the thread is next |
7600 | resumed. */ | |
7601 | ecs->event_thread->stepping_over_breakpoint = 1; | |
22bcd14b | 7602 | stop_waiting (ecs); |
cdaa5b73 PA |
7603 | return; |
7604 | ||
7605 | case BPSTAT_WHAT_HP_STEP_RESUME: | |
1eb8556f | 7606 | infrun_debug_printf ("BPSTAT_WHAT_HP_STEP_RESUME"); |
cdaa5b73 PA |
7607 | |
7608 | delete_step_resume_breakpoint (ecs->event_thread); | |
7609 | if (ecs->event_thread->step_after_step_resume_breakpoint) | |
7610 | { | |
7611 | /* Back when the step-resume breakpoint was inserted, we | |
7612 | were trying to single-step off a breakpoint. Go back to | |
7613 | doing that. */ | |
7614 | ecs->event_thread->step_after_step_resume_breakpoint = 0; | |
7615 | ecs->event_thread->stepping_over_breakpoint = 1; | |
7616 | keep_going (ecs); | |
7617 | return; | |
e5ef252a | 7618 | } |
cdaa5b73 PA |
7619 | break; |
7620 | ||
7621 | case BPSTAT_WHAT_KEEP_CHECKING: | |
7622 | break; | |
e5ef252a | 7623 | } |
c906108c | 7624 | |
af48d08f PA |
7625 | /* If we stepped a permanent breakpoint and we had a high priority |
7626 | step-resume breakpoint for the address we stepped, but we didn't | |
7627 | hit it, then we must have stepped into the signal handler. The | |
7628 | step-resume was only necessary to catch the case of _not_ | |
7629 | stepping into the handler, so delete it, and fall through to | |
7630 | checking whether the step finished. */ | |
7631 | if (ecs->event_thread->stepped_breakpoint) | |
7632 | { | |
7633 | struct breakpoint *sr_bp | |
7634 | = ecs->event_thread->control.step_resume_breakpoint; | |
7635 | ||
03acd4d8 | 7636 | if (sr_bp != nullptr |
f5951b9f | 7637 | && sr_bp->first_loc ().permanent |
af48d08f | 7638 | && sr_bp->type == bp_hp_step_resume |
f5951b9f | 7639 | && sr_bp->first_loc ().address == ecs->event_thread->prev_pc) |
af48d08f | 7640 | { |
1eb8556f | 7641 | infrun_debug_printf ("stepped permanent breakpoint, stopped in handler"); |
af48d08f PA |
7642 | delete_step_resume_breakpoint (ecs->event_thread); |
7643 | ecs->event_thread->step_after_step_resume_breakpoint = 0; | |
7644 | } | |
7645 | } | |
7646 | ||
cdaa5b73 PA |
7647 | /* We come here if we hit a breakpoint but should not stop for it. |
7648 | Possibly we also were stepping and should stop for that. So fall | |
7649 | through and test for stepping. But, if not stepping, do not | |
7650 | stop. */ | |
c906108c | 7651 | |
a7212384 UW |
7652 | /* In all-stop mode, if we're currently stepping but have stopped in |
7653 | some other thread, we need to switch back to the stepped thread. */ | |
c447ac0b PA |
7654 | if (switch_back_to_stepped_thread (ecs)) |
7655 | return; | |
776f04fa | 7656 | |
8358c15c | 7657 | if (ecs->event_thread->control.step_resume_breakpoint) |
488f131b | 7658 | { |
1eb8556f | 7659 | infrun_debug_printf ("step-resume breakpoint is inserted"); |
527159b7 | 7660 | |
488f131b | 7661 | /* Having a step-resume breakpoint overrides anything |
dda83cd7 SM |
7662 | else having to do with stepping commands until |
7663 | that breakpoint is reached. */ | |
488f131b JB |
7664 | keep_going (ecs); |
7665 | return; | |
7666 | } | |
c5aa993b | 7667 | |
16c381f0 | 7668 | if (ecs->event_thread->control.step_range_end == 0) |
488f131b | 7669 | { |
1eb8556f | 7670 | infrun_debug_printf ("no stepping, continue"); |
488f131b | 7671 | /* Likewise if we aren't even stepping. */ |
488f131b JB |
7672 | keep_going (ecs); |
7673 | return; | |
7674 | } | |
c5aa993b | 7675 | |
7e324e48 | 7676 | fill_in_stop_func (gdbarch, ecs); |
4b7703ad | 7677 | |
488f131b | 7678 | /* If stepping through a line, keep going if still within it. |
c906108c | 7679 | |
488f131b JB |
7680 | Note that step_range_end is the address of the first instruction |
7681 | beyond the step range, and NOT the address of the last instruction | |
31410e84 MS |
7682 | within it! |
7683 | ||
7684 | Note also that during reverse execution, we may be stepping | |
7685 | through a function epilogue and therefore must detect when | |
7686 | the current-frame changes in the middle of a line. */ | |
7687 | ||
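/* A sketch of the half-open convention just described, assuming
   pc_in_thread_step_range follows its documented semantics:

     step_range_start <= stop_pc && stop_pc < step_range_end

   so a PC exactly equal to STEP_RANGE_END has already left the
   stepped line.  */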
1edb66d8 | 7688 | if (pc_in_thread_step_range (ecs->event_thread->stop_pc (), |
f2ffa92b | 7689 | ecs->event_thread) |
31410e84 | 7690 | && (execution_direction != EXEC_REVERSE |
5266f5c2 | 7691 | || *curr_frame_id == original_frame_id)) |
488f131b | 7692 | { |
1eb8556f SM |
7693 | infrun_debug_printf |
7694 | ("stepping inside range [%s-%s]", | |
7695 | paddress (gdbarch, ecs->event_thread->control.step_range_start), | |
7696 | paddress (gdbarch, ecs->event_thread->control.step_range_end)); | |
b2175913 | 7697 | |
c1e36e3e PA |
7698 | /* Tentatively re-enable range stepping; `resume' disables it if |
7699 | necessary (e.g., if we're stepping over a breakpoint or we | |
7700 | have software watchpoints). */ | |
7701 | ecs->event_thread->control.may_range_step = 1; | |
7702 | ||
b2175913 MS |
7703 | /* When stepping backward, stop at beginning of line range |
7704 | (unless it's the function entry point, in which case | |
7705 | keep going back to the call point). */ | |
1edb66d8 | 7706 | CORE_ADDR stop_pc = ecs->event_thread->stop_pc (); |
16c381f0 | 7707 | if (stop_pc == ecs->event_thread->control.step_range_start |
15d2b36c | 7708 | && stop_pc != ecs->stop_func_start |
b2175913 | 7709 | && execution_direction == EXEC_REVERSE) |
bdc36728 | 7710 | end_stepping_range (ecs); |
b2175913 MS |
7711 | else |
7712 | keep_going (ecs); | |
7713 | ||
488f131b JB |
7714 | return; |
7715 | } | |
c5aa993b | 7716 | |
488f131b | 7717 | /* We stepped out of the stepping range. */ |
c906108c | 7718 | |
488f131b | 7719 | /* If we are stepping at the source level and entered the runtime |
388a8562 MS |
7720 | loader dynamic symbol resolution code... |
7721 | ||
7722 | EXEC_FORWARD: we keep on single stepping until we exit the run | |
7723 | time loader code and reach the callee's address. | |
7724 | ||
7725 | EXEC_REVERSE: we've already executed the callee (backward), and | |
7726 | the runtime loader code is handled just like any other | |
7727 | undebuggable function call. Now we need only keep stepping | |
7728 | backward through the trampoline code, and that's handled further | |
7729 | down, so there is nothing for us to do here. */ | |
7730 | ||
7731 | if (execution_direction != EXEC_REVERSE | |
16c381f0 | 7732 | && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE |
be6276e0 | 7733 | && in_solib_dynsym_resolve_code (ecs->event_thread->stop_pc ()) |
bafcc335 LS |
7734 | && (ecs->event_thread->control.step_start_function == nullptr |
7735 | || !in_solib_dynsym_resolve_code ( | |
7736 | ecs->event_thread->control.step_start_function->value_block () | |
7737 | ->entry_pc ()))) | |
488f131b | 7738 | { |
4c8c40e6 | 7739 | CORE_ADDR pc_after_resolver = |
1edb66d8 | 7740 | gdbarch_skip_solib_resolver (gdbarch, ecs->event_thread->stop_pc ()); |
c906108c | 7741 | |
1eb8556f | 7742 | infrun_debug_printf ("stepped into dynsym resolve code"); |
527159b7 | 7743 | |
488f131b JB |
7744 | if (pc_after_resolver) |
7745 | { | |
7746 | /* Set up a step-resume breakpoint at the address | |
7747 | indicated by SKIP_SOLIB_RESOLVER. */ | |
51abb421 | 7748 | symtab_and_line sr_sal; |
488f131b | 7749 | sr_sal.pc = pc_after_resolver; |
6c95b8df | 7750 | sr_sal.pspace = get_frame_program_space (frame); |
488f131b | 7751 | |
a6d9a66e UW |
7752 | insert_step_resume_breakpoint_at_sal (gdbarch, |
7753 | sr_sal, null_frame_id); | |
c5aa993b | 7754 | } |
c906108c | 7755 | |
488f131b JB |
7756 | keep_going (ecs); |
7757 | return; | |
7758 | } | |
c906108c | 7759 | |
1d509aa6 MM |
7760 | /* Step through an indirect branch thunk. */ |
7761 | if (ecs->event_thread->control.step_over_calls != STEP_OVER_NONE | |
f2ffa92b | 7762 | && gdbarch_in_indirect_branch_thunk (gdbarch, |
1edb66d8 | 7763 | ecs->event_thread->stop_pc ())) |
1d509aa6 | 7764 | { |
1eb8556f | 7765 | infrun_debug_printf ("stepped into indirect branch thunk"); |
1d509aa6 MM |
7766 | keep_going (ecs); |
7767 | return; | |
7768 | } | |
7769 | ||
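/* For example (illustration only): with retpoline mitigations, a
   "step" that lands in a compiler-generated thunk such as
   __x86_indirect_thunk_rax keeps going until the thunk branches to
   its real target.  */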
16c381f0 JK |
7770 | if (ecs->event_thread->control.step_range_end != 1 |
7771 | && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE | |
7772 | || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL) | |
568d6575 | 7773 | && get_frame_type (frame) == SIGTRAMP_FRAME) |
488f131b | 7774 | { |
1eb8556f | 7775 | infrun_debug_printf ("stepped into signal trampoline"); |
42edda50 | 7776 | /* The inferior, while doing a "step" or "next", has ended up in |
dda83cd7 SM |
7777 | a signal trampoline (either by a signal being delivered or by |
7778 | the signal handler returning). Just single-step until the | |
7779 | inferior leaves the trampoline (either by calling the handler | |
7780 | or returning). */ | |
488f131b JB |
7781 | keep_going (ecs); |
7782 | return; | |
7783 | } | |
c906108c | 7784 | |
14132e89 MR |
7785 | /* If we're in the return path from a shared library trampoline, |
7786 | we want to proceed through the trampoline when stepping. */ | |
7787 | /* macro/2012-04-25: This needs to come before the subroutine | |
7788 | call check below as on some targets return trampolines look | |
7789 | like subroutine calls (MIPS16 return thunks). */ | |
7790 | if (gdbarch_in_solib_return_trampoline (gdbarch, | |
1edb66d8 | 7791 | ecs->event_thread->stop_pc (), |
f2ffa92b | 7792 | ecs->stop_func_name) |
14132e89 MR |
7793 | && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE) |
7794 | { | |
7795 | /* Determine where this trampoline returns. */ | |
1edb66d8 | 7796 | CORE_ADDR stop_pc = ecs->event_thread->stop_pc (); |
f2ffa92b PA |
7797 | CORE_ADDR real_stop_pc |
7798 | = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc); | |
14132e89 | 7799 | |
1eb8556f | 7800 | infrun_debug_printf ("stepped into solib return tramp"); |
14132e89 MR |
7801 | |
7802 | /* Only proceed through if we know where it's going. */ | |
7803 | if (real_stop_pc) | |
7804 | { | |
7805 | /* And put the step-breakpoint there and go until there. */ | |
51abb421 | 7806 | symtab_and_line sr_sal; |
14132e89 MR |
7807 | sr_sal.pc = real_stop_pc; |
7808 | sr_sal.section = find_pc_overlay (sr_sal.pc); | |
7809 | sr_sal.pspace = get_frame_program_space (frame); | |
7810 | ||
7811 | /* Do not specify what the fp should be when we stop since | |
7812 | on some machines the prologue is where the new fp value | |
7813 | is established. */ | |
7814 | insert_step_resume_breakpoint_at_sal (gdbarch, | |
7815 | sr_sal, null_frame_id); | |
7816 | ||
7817 | /* Restart without fiddling with the step ranges or | |
7818 | other state. */ | |
7819 | keep_going (ecs); | |
7820 | return; | |
7821 | } | |
7822 | } | |
7823 | ||
c17eaafe DJ |
7824 | /* Check for subroutine calls. The check for the current frame |
7825 | equalling the step ID is not necessary - the check of the | |
7826 | previous frame's ID is sufficient - but it is a common case and | |
7827 | cheaper than checking the previous frame's ID. | |
14e60db5 | 7828 | |
a0cbd650 | 7829 | NOTE: frame_id::operator== will never report two invalid frame IDs as |
14e60db5 DJ |
7830 | being equal, so to get into this block, both the current and |
7831 | previous frame must have valid frame IDs. */ | |
005ca36a JB |
7832 | /* The outer_frame_id check is a heuristic to detect stepping |
7833 | through startup code. If we step over an instruction which | |
7834 | sets the stack pointer from an invalid value to a valid value, | |
7835 | we may detect that as a subroutine call from the mythical | |
7836 | "outermost" function. This could be fixed by marking | |
7837 | outermost frames as !stack_p,code_p,special_p. Then the | |
7838 | initial outermost frame, before sp was valid, would | |
a0cbd650 | 7839 | have code_addr == &_start. See the comment in frame_id::operator== |
005ca36a | 7840 | for more. */ |
1bd70cb9 CL |
7841 | |
7842 | /* We want "nexti" to step into, not over, signal handlers invoked | |
7843 | by the kernel, therefore this subroutine check should not trigger | |
7844 | for a signal handler invocation. On most platforms, this is already | |
7845 | not the case, as the kernel puts a signal trampoline frame onto the | |
7846 | stack to handle proper return after the handler, and therefore at this | |
7847 | point, the current frame is a grandchild of the step frame, not a | |
7848 | child. However, on some platforms, the kernel actually uses a | |
7849 | trampoline to handle *invocation* of the handler. In that case, | |
7850 | when executing the first instruction of the trampoline, this check | |
7851 | would erroneously detect the trampoline invocation as a subroutine | |
7852 | call. Fix this by checking for SIGTRAMP_FRAME. */ | |
a0cbd650 TT |
7853 | if ((get_stack_frame_id (frame) |
7854 | != ecs->event_thread->control.step_stack_frame_id) | |
1bd70cb9 | 7855 | && get_frame_type (frame) != SIGTRAMP_FRAME |
354f8d0a | 7856 | && ((frame_unwind_caller_id (frame) |
a0cbd650 TT |
7857 | == ecs->event_thread->control.step_stack_frame_id) |
7858 | && ((ecs->event_thread->control.step_stack_frame_id | |
7859 | != outer_frame_id) | |
885eeb5b | 7860 | || (ecs->event_thread->control.step_start_function |
1edb66d8 | 7861 | != find_pc_function (ecs->event_thread->stop_pc ()))))) |
488f131b | 7862 | { |
1edb66d8 | 7863 | CORE_ADDR stop_pc = ecs->event_thread->stop_pc (); |
95918acb | 7864 | CORE_ADDR real_stop_pc; |
8fb3e588 | 7865 | |
1eb8556f | 7866 | infrun_debug_printf ("stepped into subroutine"); |
527159b7 | 7867 | |
b7a084be | 7868 | if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE) |
95918acb AC |
7869 | { |
7870 | /* I presume that step_over_calls is only 0 when we're | |
7871 | supposed to be stepping at the assembly language level | |
7872 | ("stepi"). Just stop. */ | |
388a8562 | 7873 | /* And this works the same backward as frontward. MVS */ |
bdc36728 | 7874 | end_stepping_range (ecs); |
95918acb AC |
7875 | return; |
7876 | } | |
8fb3e588 | 7877 | |
388a8562 MS |
7878 | /* Reverse stepping through solib trampolines. */ |
7879 | ||
7880 | if (execution_direction == EXEC_REVERSE | |
16c381f0 | 7881 | && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE |
388a8562 MS |
7882 | && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc) |
7883 | || (ecs->stop_func_start == 0 | |
7884 | && in_solib_dynsym_resolve_code (stop_pc)))) | |
7885 | { | |
7886 | /* Any solib trampoline code can be handled in reverse | |
7887 | by simply continuing to single-step. We have already | |
7888 | executed the solib function (backwards), and a few | |
7889 | steps will take us back through the trampoline to the | |
7890 | caller. */ | |
7891 | keep_going (ecs); | |
7892 | return; | |
7893 | } | |
7894 | ||
16c381f0 | 7895 | if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL) |
8567c30f | 7896 | { |
b2175913 MS |
7897 | /* We're doing a "next". |
7898 | ||
7899 | Normal (forward) execution: set a breakpoint at the | |
7900 | callee's return address (the address at which the caller | |
7901 | will resume). | |
7902 | ||
7903 | Reverse (backward) execution: set the step-resume |
7904 | breakpoint at the start of the function that we just | |
7905 | stepped into (backwards), and continue to there. When we | |
6130d0b7 | 7906 | get there, we'll need to single-step back to the caller. */ |
b2175913 MS |
7907 | |
7908 | if (execution_direction == EXEC_REVERSE) | |
7909 | { | |
acf9414f JK |
7910 | /* If we're already at the start of the function, we've either |
7911 | just stepped backward into a single instruction function, | |
7912 | or stepped back out of a signal handler to the first instruction | |
7913 | of the function. Just keep going, which will single-step back | |
7914 | to the caller. */ | |
58c48e72 | 7915 | if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0) |
acf9414f | 7916 | { |
acf9414f | 7917 | /* Normal function call return (static or dynamic). */ |
51abb421 | 7918 | symtab_and_line sr_sal; |
acf9414f JK |
7919 | sr_sal.pc = ecs->stop_func_start; |
7920 | sr_sal.pspace = get_frame_program_space (frame); | |
7921 | insert_step_resume_breakpoint_at_sal (gdbarch, | |
1f3e37e0 | 7922 | sr_sal, get_stack_frame_id (frame)); |
acf9414f | 7923 | } |
b2175913 MS |
7924 | } |
7925 | else | |
568d6575 | 7926 | insert_step_resume_breakpoint_at_caller (frame); |
b2175913 | 7927 | |
8567c30f AC |
7928 | keep_going (ecs); |
7929 | return; | |
7930 | } | |
a53c66de | 7931 | |
95918acb | 7932 | /* If we are in a function call trampoline (a stub between the |
dda83cd7 SM |
7933 | calling routine and the real function), locate the real |
7934 | function. That's what tells us (a) whether we want to step | |
7935 | into it at all, and (b) what prologue we want to run to the | |
7936 | end of, if we do step into it. */ | |
568d6575 | 7937 | real_stop_pc = skip_language_trampoline (frame, stop_pc); |
95918acb | 7938 | if (real_stop_pc == 0) |
568d6575 | 7939 | real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc); |
95918acb AC |
7940 | if (real_stop_pc != 0) |
7941 | ecs->stop_func_start = real_stop_pc; | |
8fb3e588 | 7942 | |
db5f024e | 7943 | if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc)) |
1b2bfbb9 | 7944 | { |
51abb421 | 7945 | symtab_and_line sr_sal; |
1b2bfbb9 | 7946 | sr_sal.pc = ecs->stop_func_start; |
6c95b8df | 7947 | sr_sal.pspace = get_frame_program_space (frame); |
1b2bfbb9 | 7948 | |
a6d9a66e UW |
7949 | insert_step_resume_breakpoint_at_sal (gdbarch, |
7950 | sr_sal, null_frame_id); | |
8fb3e588 AC |
7951 | keep_going (ecs); |
7952 | return; | |
1b2bfbb9 RC |
7953 | } |
7954 | ||
95918acb | 7955 | /* If we have line number information for the function we are |
1bfeeb0f JL |
7956 | thinking of stepping into and the function isn't on the skip |
7957 | list, step into it. | |
95918acb | 7958 | |
dda83cd7 SM |
7959 | If there are several symtabs at that PC (e.g. with include |
7960 | files), we just want to know whether *any* of them have line |
7961 | numbers. find_pc_line handles this. */ | |
95918acb AC |
7962 | { |
7963 | struct symtab_and_line tmp_sal; | |
8fb3e588 | 7964 | |
95918acb | 7965 | tmp_sal = find_pc_line (ecs->stop_func_start, 0); |
2b914b52 | 7966 | if (tmp_sal.line != 0 |
85817405 | 7967 | && !function_name_is_marked_for_skip (ecs->stop_func_name, |
4a4c04f1 BE |
7968 | tmp_sal) |
7969 | && !inline_frame_is_marked_for_skip (true, ecs->event_thread)) | |
95918acb | 7970 | { |
b2175913 | 7971 | if (execution_direction == EXEC_REVERSE) |
568d6575 | 7972 | handle_step_into_function_backward (gdbarch, ecs); |
b2175913 | 7973 | else |
568d6575 | 7974 | handle_step_into_function (gdbarch, ecs); |
95918acb AC |
7975 | return; |
7976 | } | |
7977 | } | |
7978 | ||
7979 | /* If we have no line number and the step-stop-if-no-debug is | |
dda83cd7 SM |
7980 | set, we stop the step so that the user has a chance to switch |
7981 | to assembly mode. */ |
16c381f0 | 7982 | if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE |
078130d0 | 7983 | && step_stop_if_no_debug) |
95918acb | 7984 | { |
bdc36728 | 7985 | end_stepping_range (ecs); |
95918acb AC |
7986 | return; |
7987 | } | |
7988 | ||
b2175913 MS |
7989 | if (execution_direction == EXEC_REVERSE) |
7990 | { | |
acf9414f JK |
7991 | /* If we're already at the start of the function, we've either just |
7992 | stepped backward into a single instruction function without line | |
7993 | number info, or stepped back out of a signal handler to the first | |
7994 | instruction of the function without line number info. Just keep | |
7995 | going, which will single-step back to the caller. */ | |
7996 | if (ecs->stop_func_start != stop_pc) | |
7997 | { | |
7998 | /* Set a breakpoint at callee's start address. | |
7999 | From there we can step once and be back in the caller. */ | |
51abb421 | 8000 | symtab_and_line sr_sal; |
acf9414f JK |
8001 | sr_sal.pc = ecs->stop_func_start; |
8002 | sr_sal.pspace = get_frame_program_space (frame); | |
8003 | insert_step_resume_breakpoint_at_sal (gdbarch, | |
8004 | sr_sal, null_frame_id); | |
8005 | } | |
b2175913 MS |
8006 | } |
8007 | else | |
8008 | /* Set a breakpoint at callee's return address (the address | |
8009 | at which the caller will resume). */ | |
568d6575 | 8010 | insert_step_resume_breakpoint_at_caller (frame); |
b2175913 | 8011 | |
95918acb | 8012 | keep_going (ecs); |
488f131b | 8013 | return; |
488f131b | 8014 | } |
c906108c | 8015 | |
fdd654f3 MS |
8016 | /* Reverse stepping through solib trampolines. */ |
8017 | ||
8018 | if (execution_direction == EXEC_REVERSE | |
16c381f0 | 8019 | && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE) |
fdd654f3 | 8020 | { |
1edb66d8 | 8021 | CORE_ADDR stop_pc = ecs->event_thread->stop_pc (); |
f2ffa92b | 8022 | |
fdd654f3 MS |
8023 | if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc) |
8024 | || (ecs->stop_func_start == 0 | |
8025 | && in_solib_dynsym_resolve_code (stop_pc))) | |
8026 | { | |
8027 | /* Any solib trampoline code can be handled in reverse | |
8028 | by simply continuing to single-step. We have already | |
8029 | executed the solib function (backwards), and a few | |
8030 | steps will take us back through the trampoline to the | |
8031 | caller. */ | |
8032 | keep_going (ecs); | |
8033 | return; | |
8034 | } | |
8035 | else if (in_solib_dynsym_resolve_code (stop_pc)) | |
8036 | { | |
8037 | /* Stepped backward into the solib dynsym resolver. | |
8038 | Set a breakpoint at its start and continue, then | |
8039 | one more step will take us out. */ | |
51abb421 | 8040 | symtab_and_line sr_sal; |
fdd654f3 | 8041 | sr_sal.pc = ecs->stop_func_start; |
9d1807c3 | 8042 | sr_sal.pspace = get_frame_program_space (frame); |
fdd654f3 MS |
8043 | insert_step_resume_breakpoint_at_sal (gdbarch, |
8044 | sr_sal, null_frame_id); | |
8045 | keep_going (ecs); | |
8046 | return; | |
8047 | } | |
8048 | } | |
8049 | ||
8c95582d AB |
8050 | /* This always returns the sal for the inner-most frame when we are in a |
8051 | stack of inlined frames, even if GDB actually believes that it is in a | |
8052 | more outer frame. This is checked for below by calls to | |
8053 | inline_skipped_frames. */ | |
1edb66d8 | 8054 | stop_pc_sal = find_pc_line (ecs->event_thread->stop_pc (), 0); |
7ed0fe66 | 8055 | |
1b2bfbb9 RC |
8056 | /* NOTE: tausq/2004-05-24: This if block used to be done before all |
8057 | the trampoline processing logic, however, there are some trampolines | |
8058 | that have no names, so we should do trampoline handling first. */ | |
16c381f0 | 8059 | if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE |
03acd4d8 | 8060 | && ecs->stop_func_name == nullptr |
2afb61aa | 8061 | && stop_pc_sal.line == 0) |
1b2bfbb9 | 8062 | { |
1eb8556f | 8063 | infrun_debug_printf ("stepped into undebuggable function"); |
527159b7 | 8064 | |
1b2bfbb9 | 8065 | /* The inferior just stepped into, or returned to, an |
dda83cd7 SM |
8066 | undebuggable function (where there is no debugging information |
8067 | and no line number corresponding to the address where the | |
8068 | inferior stopped). Since we want to skip this kind of code, | |
8069 | we keep going until the inferior returns from this | |
8070 | function - unless the user has asked us not to (via | |
8071 | set step-mode) or we no longer know how to get back | |
8072 | to the call site. */ | |
14e60db5 | 8073 | if (step_stop_if_no_debug |
c7ce8faa | 8074 | || !frame_id_p (frame_unwind_caller_id (frame))) |
1b2bfbb9 RC |
8075 | { |
8076 | /* If we have no line number and the step-stop-if-no-debug | |
8077 | is set, we stop the step so that the user has a chance to | |
8078 | switch to assembly mode. */ |
bdc36728 | 8079 | end_stepping_range (ecs); |
1b2bfbb9 RC |
8080 | return; |
8081 | } | |
8082 | else | |
8083 | { | |
8084 | /* Set a breakpoint at callee's return address (the address | |
8085 | at which the caller will resume). */ | |
568d6575 | 8086 | insert_step_resume_breakpoint_at_caller (frame); |
1b2bfbb9 RC |
8087 | keep_going (ecs); |
8088 | return; | |
8089 | } | |
8090 | } | |
8091 | ||
2a8339b7 CL |
8092 | if (execution_direction == EXEC_REVERSE |
8093 | && ecs->event_thread->control.proceed_to_finish | |
8094 | && ecs->event_thread->stop_pc () >= ecs->stop_func_alt_start | |
8095 | && ecs->event_thread->stop_pc () < ecs->stop_func_start) | |
8096 | { | |
8097 | /* We are executing the reverse-finish command. | |
8098 | The system may support multiple entry points, and we are finishing a |
3bfdcabb | 8099 | function in reverse. If we are between the entry points, single-step |
2a8339b7 CL |
8100 | back to the alternate entry point. If we are at the alternate entry |
8101 | point -- just need to back up by one more single-step, which | |
8102 | should take us back to the function call. */ | |
8103 | ecs->event_thread->control.step_range_start | |
8104 | = ecs->event_thread->control.step_range_end = 1; | |
8105 | keep_going (ecs); | |
8106 | return; | |
8107 | ||
8108 | } | |
8109 | ||
16c381f0 | 8110 | if (ecs->event_thread->control.step_range_end == 1) |
1b2bfbb9 RC |
8111 | { |
8112 | /* It is stepi or nexti. We always want to stop stepping after | |
dda83cd7 | 8113 | one instruction. */ |
1eb8556f | 8114 | infrun_debug_printf ("stepi/nexti"); |
bdc36728 | 8115 | end_stepping_range (ecs); |
1b2bfbb9 RC |
8116 | return; |
8117 | } | |
8118 | ||
2afb61aa | 8119 | if (stop_pc_sal.line == 0) |
488f131b JB |
8120 | { |
8121 | /* We have no line number information. That means to stop | |
dda83cd7 SM |
8122 | stepping (does this always happen right after one instruction, |
8123 | when we do "s" in a function with no line numbers, | |
8124 | or can this happen as a result of a return or longjmp?). */ | |
1eb8556f | 8125 | infrun_debug_printf ("line number info"); |
bdc36728 | 8126 | end_stepping_range (ecs); |
488f131b JB |
8127 | return; |
8128 | } | |
c906108c | 8129 | |
5d9887ff BE |
8130 | /* Handle the case when subroutines have multiple ranges. When we step |
8131 | from one part to the next part of the same subroutine, all subroutine | |
8132 | levels that begin here are skipped again. Compensate for this by |
8133 | removing all skipped subroutines, which were already executing from | |
8134 | the user's perspective. */ | |
8135 | ||
8136 | if (get_stack_frame_id (frame) | |
8137 | == ecs->event_thread->control.step_stack_frame_id | |
8138 | && inline_skipped_frames (ecs->event_thread) > 0 | |
8139 | && ecs->event_thread->control.step_frame_id.artificial_depth > 0 | |
8140 | && ecs->event_thread->control.step_frame_id.code_addr_p) | |
8141 | { | |
8142 | int depth = 0; | |
8143 | const struct block *prev | |
8144 | = block_for_pc (ecs->event_thread->control.step_frame_id.code_addr); | |
8145 | const struct block *curr = block_for_pc (ecs->event_thread->stop_pc ()); | |
8146 | while (curr != nullptr && !curr->contains (prev)) | |
8147 | { | |
8148 | if (curr->inlined_p ()) | |
8149 | depth++; | |
8150 | else if (curr->function () != nullptr) | |
8151 | break; | |
8152 | curr = curr->superblock (); | |
8153 | } | |
8154 | while (inline_skipped_frames (ecs->event_thread) > depth) | |
8155 | step_into_inline_frame (ecs->event_thread); | |
8156 | } | |
8157 | ||
edb3359d DJ |
8158 | /* Look for "calls" to inlined functions, part one. If the inline |
8159 | frame machinery detected some skipped call sites, we have entered | |
8160 | a new inline function. */ | |
8161 | ||
5266f5c2 | 8162 | if ((*curr_frame_id == original_frame_id) |
00431a78 | 8163 | && inline_skipped_frames (ecs->event_thread)) |
edb3359d | 8164 | { |
1eb8556f | 8165 | infrun_debug_printf ("stepped into inlined function"); |
edb3359d | 8166 | |
354f8d0a | 8167 | symtab_and_line call_sal = find_frame_sal (frame); |
edb3359d | 8168 | |
16c381f0 | 8169 | if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL) |
edb3359d DJ |
8170 | { |
8171 | /* For "step", we're going to stop. But if the call site | |
8172 | for this inlined function is on the same source line as | |
8173 | we were previously stepping, go down into the function | |
8174 | first. Otherwise stop at the call site. */ | |
8175 | ||
8176 | if (call_sal.line == ecs->event_thread->current_line | |
8177 | && call_sal.symtab == ecs->event_thread->current_symtab) | |
4a4c04f1 BE |
8178 | { |
8179 | step_into_inline_frame (ecs->event_thread); | |
8180 | if (inline_frame_is_marked_for_skip (false, ecs->event_thread)) | |
8181 | { | |
8182 | keep_going (ecs); | |
8183 | return; | |
8184 | } | |
8185 | } | |
edb3359d | 8186 | |
bdc36728 | 8187 | end_stepping_range (ecs); |
edb3359d DJ |
8188 | return; |
8189 | } | |
8190 | else | |
8191 | { | |
8192 | /* For "next", we should stop at the call site if it is on a | |
8193 | different source line. Otherwise continue through the | |
8194 | inlined function. */ | |
8195 | if (call_sal.line == ecs->event_thread->current_line | |
8196 | && call_sal.symtab == ecs->event_thread->current_symtab) | |
8197 | keep_going (ecs); | |
8198 | else | |
bdc36728 | 8199 | end_stepping_range (ecs); |
edb3359d DJ |
8200 | return; |
8201 | } | |
8202 | } | |
8203 | ||
8204 | /* Look for "calls" to inlined functions, part two. If we are still | |
8205 | in the same real function we were stepping through, but we have | |
8206 | to go further up to find the exact frame ID, we are stepping | |
8207 | through a more inlined call beyond its call site. */ | |
8208 | ||
354f8d0a | 8209 | if (get_frame_type (frame) == INLINE_FRAME |
5266f5c2 | 8210 | && (*curr_frame_id != original_frame_id) |
354f8d0a | 8211 | && stepped_in_from (frame, original_frame_id)) |
edb3359d | 8212 | { |
1eb8556f | 8213 | infrun_debug_printf ("stepping through inlined function"); |
edb3359d | 8214 | |
4a4c04f1 BE |
8215 | if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL |
8216 | || inline_frame_is_marked_for_skip (false, ecs->event_thread)) | |
edb3359d DJ |
8217 | keep_going (ecs); |
8218 | else | |
bdc36728 | 8219 | end_stepping_range (ecs); |
edb3359d DJ |
8220 | return; |
8221 | } | |
8222 | ||
8c95582d | 8223 | bool refresh_step_info = true; |
1edb66d8 | 8224 | if ((ecs->event_thread->stop_pc () == stop_pc_sal.pc) |
4e1c45ea | 8225 | && (ecs->event_thread->current_line != stop_pc_sal.line |
24b21115 | 8226 | || ecs->event_thread->current_symtab != stop_pc_sal.symtab)) |
488f131b | 8227 | { |
ebde6f2d TV |
8228 | /* We are at a different line. */ |
8229 | ||
8c95582d AB |
8230 | if (stop_pc_sal.is_stmt) |
8231 | { | |
fe6356de CL |
8232 | if (execution_direction == EXEC_REVERSE) |
8233 | { | |
8234 | /* We are stepping backwards; make sure we have reached the |
8235 | beginning of the line. */ | |
8236 | CORE_ADDR stop_pc = ecs->event_thread->stop_pc (); | |
8237 | CORE_ADDR start_line_pc | |
8238 | = update_line_range_start (stop_pc, ecs); | |
8239 | ||
8240 | if (stop_pc != start_line_pc) | |
8241 | { | |
8242 | /* Have not reached the beginning of the source code line. | |
8243 | Set a step range. Execution should stop in any function | |
8244 | calls we execute back into before reaching the beginning | |
8245 | of the line. */ | |
8246 | ecs->event_thread->control.step_range_start | |
8247 | = start_line_pc; | |
8248 | ecs->event_thread->control.step_range_end = stop_pc; | |
8249 | set_step_info (ecs->event_thread, frame, stop_pc_sal); | |
8250 | keep_going (ecs); | |
8251 | return; | |
8252 | } | |
8253 | } | |
8254 | ||
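/* Worked example with illustrative addresses: if the stepped line
   occupies [0x1000, 0x1010) and reverse execution stopped at 0x1008,
   START_LINE_PC is 0x1000, so the code above sets the step range to
   [0x1000, 0x1008) and keeps going backward until the beginning of
   the line is reached.  */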
ebde6f2d TV |
8255 | /* We are at the start of a statement. |
8256 | ||
8257 | So stop. Note that we don't stop if we step into the middle of a | |
8258 | statement. That is said to make things like for (;;) statements | |
8259 | work better. */ | |
1eb8556f | 8260 | infrun_debug_printf ("stepped to a different line"); |
8c95582d AB |
8261 | end_stepping_range (ecs); |
8262 | return; | |
8263 | } | |
5266f5c2 | 8264 | else if (*curr_frame_id == original_frame_id) |
8c95582d | 8265 | { |
ebde6f2d TV |
8266 | /* We are not at the start of a statement, and we have not changed |
8267 | frame. | |
8268 | ||
8269 | We ignore this line table entry, and continue stepping forward, | |
8c95582d AB |
8270 | looking for a better place to stop. */ |
8271 | refresh_step_info = false; | |
1eb8556f SM |
8272 | infrun_debug_printf ("stepped to a different line, but " |
8273 | "it's not the start of a statement"); | |
8c95582d | 8274 | } |
ebde6f2d TV |
8275 | else |
8276 | { | |
8277 | /* We are not at the start of a statement, and we have changed frame. |
8278 | ||
8279 | We ignore this line table entry, and continue stepping forward, | |
8280 | looking for a better place to stop. Keep refresh_step_info at | |
8281 | true to note that the frame has changed, but ignore the line | |
8282 | number to make sure we don't ignore a subsequent entry with the | |
8283 | same line number. */ | |
8284 | stop_pc_sal.line = 0; | |
8285 | infrun_debug_printf ("stepped to a different frame, but " | |
8286 | "it's not the start of a statement"); | |
8287 | } | |
488f131b | 8288 | } |
a0780848 GL |
8289 | |
8290 | if (execution_direction == EXEC_REVERSE | |
5266f5c2 LS |
8291 | && *curr_frame_id != original_frame_id |
8292 | && original_frame_id.code_addr_p && curr_frame_id->code_addr_p | |
8293 | && original_frame_id.code_addr == curr_frame_id->code_addr) | |
bf2813af GL |
8294 | { |
8295 | /* If we enter here, we're leaving a recursive function call. In this | |
8296 | situation, we shouldn't refresh the step information, because if we | |
8297 | do, we'll lose the frame_id of when we started stepping, and then |
8298 | GDB will not know that it needs to print frame information. */ |
8299 | refresh_step_info = false; | |
8300 | infrun_debug_printf ("reverse stepping, left a recursive call, don't " | |
8301 | "update step info so we remember we left a frame"); | |
8302 | } | |
c906108c | 8303 | |
488f131b | 8304 | /* We aren't done stepping. |
c906108c | 8305 | |
488f131b JB |
8306 | Optimize by setting the stepping range to the line. |
8307 | (We might not be in the original line, but if we entered a | |
8308 | new line in mid-statement, we continue stepping. This makes | |
8c95582d AB |
8309 | things like for(;;) statements work better.) |
8310 | ||
8311 | If we entered a SAL that indicates a non-statement line table entry, | |
8312 | then we update the stepping range, but we don't update the step info, | |
8313 | which includes things like the line number we are stepping away from. | |
8314 | This means we will stop when we find a line table entry that is marked | |
8315 | as is-statement, even if it matches the non-statement one we just | |
8316 | stepped into. */ | |
c906108c | 8317 | |
16c381f0 JK |
8318 | ecs->event_thread->control.step_range_start = stop_pc_sal.pc; |
8319 | ecs->event_thread->control.step_range_end = stop_pc_sal.end; | |
c1e36e3e | 8320 | ecs->event_thread->control.may_range_step = 1; |
c8353d68 AB |
8321 | infrun_debug_printf |
8322 | ("updated step range, start = %s, end = %s, may_range_step = %d", | |
8323 | paddress (gdbarch, ecs->event_thread->control.step_range_start), | |
8324 | paddress (gdbarch, ecs->event_thread->control.step_range_end), | |
8325 | ecs->event_thread->control.may_range_step); | |
8c95582d AB |
8326 | if (refresh_step_info) |
8327 | set_step_info (ecs->event_thread, frame, stop_pc_sal); | |
488f131b | 8328 | |
1eb8556f | 8329 | infrun_debug_printf ("keep going"); |
fe6356de CL |
8330 | |
8331 | if (execution_direction == EXEC_REVERSE) | |
8332 | { | |
8333 | CORE_ADDR stop_pc = ecs->event_thread->stop_pc (); | |
8334 | ||
8335 | /* Make sure the step range starts at the beginning of the line. */ |
8336 | if (stop_pc != ecs->event_thread->control.step_range_start) | |
8337 | ecs->event_thread->control.step_range_start | |
8338 | = update_line_range_start (stop_pc, ecs); | |
8339 | } | |
8340 | ||
488f131b | 8341 | keep_going (ecs); |
104c1213 JM |
8342 | } |
8343 | ||
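/* A condensed sketch of the idiom used repeatedly above; this helper
   is hypothetical and for illustration only, it is not part of
   infrun.c.  Park a step-resume breakpoint at ADDR and let the
   inferior run to it, instead of single-stepping through
   uninteresting code such as trampolines or the dynamic linker's
   resolver.  */

static void
skip_to_address_example (struct gdbarch *gdbarch,
			 const frame_info_ptr &frame,
			 CORE_ADDR addr, struct execution_control_state *ecs)
{
  symtab_and_line sr_sal;

  sr_sal.pc = addr;
  sr_sal.section = find_pc_overlay (addr);
  sr_sal.pspace = get_frame_program_space (frame);

  /* null_frame_id: stop at the first hit of ADDR, whatever the frame
     turns out to be at that point.  */
  insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);

  /* Restart without fiddling with the step ranges or other state.  */
  keep_going (ecs);
}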
408f6686 PA |
8344 | static bool restart_stepped_thread (process_stratum_target *resume_target, |
8345 | ptid_t resume_ptid); | |
8346 | ||
c447ac0b PA |
8347 | /* In all-stop mode, if we're currently stepping but have stopped in |
8348 | some other thread, we may need to switch back to the stepped | |
8349 | thread. Returns true if we set the inferior running, false if we left |
8350 | it stopped (and the event needs further processing). */ | |
8351 | ||
c4464ade | 8352 | static bool |
c447ac0b PA |
8353 | switch_back_to_stepped_thread (struct execution_control_state *ecs) |
8354 | { | |
fbea99ea | 8355 | if (!target_is_non_stop_p ()) |
c447ac0b | 8356 | { |
99619bea PA |
8357 | /* If any thread is blocked on some internal breakpoint, and we |
8358 | simply need to step over that breakpoint to get it going | |
8359 | again, do that first. */ | |
8360 | ||
8361 | /* However, if we see an event for the stepping thread, then we | |
8362 | know all other threads have been moved past their breakpoints | |
8363 | already. Let the caller check whether the step is finished, | |
8364 | etc., before deciding to move it past a breakpoint. */ | |
8365 | if (ecs->event_thread->control.step_range_end != 0) | |
c4464ade | 8366 | return false; |
99619bea PA |
8367 | |
8368 | /* Check if the current thread is blocked on an incomplete | |
8369 | step-over, interrupted by a random signal. */ | |
8370 | if (ecs->event_thread->control.trap_expected | |
1edb66d8 | 8371 | && ecs->event_thread->stop_signal () != GDB_SIGNAL_TRAP) |
c447ac0b | 8372 | { |
1eb8556f SM |
8373 | infrun_debug_printf |
8374 | ("need to finish step-over of [%s]", | |
0fab7955 | 8375 | ecs->event_thread->ptid.to_string ().c_str ()); |
99619bea | 8376 | keep_going (ecs); |
c4464ade | 8377 | return true; |
99619bea | 8378 | } |
2adfaa28 | 8379 | |
99619bea PA |
8380 | /* Check if the current thread is blocked by a single-step |
8381 | breakpoint of another thread. */ | |
8382 | if (ecs->hit_singlestep_breakpoint) | |
8383 | { | |
1eb8556f | 8384 | infrun_debug_printf ("need to step [%s] over single-step breakpoint", |
0fab7955 | 8385 | ecs->ptid.to_string ().c_str ()); |
99619bea | 8386 | keep_going (ecs); |
c4464ade | 8387 | return true; |
99619bea PA |
8388 | } |
8389 | ||
4d9d9d04 PA |
8390 | /* If this thread needs yet another step-over (e.g., stepping |
8391 | through a delay slot), do it first before moving on to | |
8392 | another thread. */ | |
8393 | if (thread_still_needs_step_over (ecs->event_thread)) | |
8394 | { | |
1eb8556f SM |
8395 | infrun_debug_printf |
8396 | ("thread [%s] still needs step-over", | |
0fab7955 | 8397 | ecs->event_thread->ptid.to_string ().c_str ()); |
4d9d9d04 | 8398 | keep_going (ecs); |
c4464ade | 8399 | return true; |
4d9d9d04 | 8400 | } |
70509625 | 8401 | |
483805cf PA |
8402 | /* If scheduler locking applies even if not stepping, there's no |
8403 | need to walk over threads. Above we've checked whether the | |
8404 | current thread is stepping. If some other thread not the | |
8405 | event thread is stepping, then it must be that scheduler | |
8406 | locking is not in effect. */ | |
856e7dd6 | 8407 | if (schedlock_applies (ecs->event_thread)) |
c4464ade | 8408 | return false; |
483805cf | 8409 | |
4d9d9d04 PA |
8410 | /* Otherwise, we no longer expect a trap in the current thread. |
8411 | Clear the trap_expected flag before switching back -- this is | |
8412 | what keep_going does as well, if we call it. */ | |
8413 | ecs->event_thread->control.trap_expected = 0; | |
8414 | ||
8415 | /* Likewise, clear the signal if it should not be passed. */ | |
1edb66d8 SM |
8416 | if (!signal_program[ecs->event_thread->stop_signal ()]) |
8417 | ecs->event_thread->set_stop_signal (GDB_SIGNAL_0); | |
4d9d9d04 | 8418 | |
408f6686 | 8419 | if (restart_stepped_thread (ecs->target, ecs->ptid)) |
4d9d9d04 PA |
8420 | { |
8421 | prepare_to_wait (ecs); | |
c4464ade | 8422 | return true; |
4d9d9d04 PA |
8423 | } |
8424 | ||
408f6686 PA |
8425 | switch_to_thread (ecs->event_thread); |
8426 | } | |
4d9d9d04 | 8427 | |
408f6686 PA |
8428 | return false; |
8429 | } | |
f3f8ece4 | 8430 | |
408f6686 PA |
8431 | /* Look for the thread that was stepping, and resume it. |
8432 | RESUME_TARGET / RESUME_PTID indicate the set of threads the caller | |
8433 | is resuming. Return true if a thread was started, false | |
8434 | otherwise. */ | |
483805cf | 8435 | |
408f6686 PA |
8436 | static bool |
8437 | restart_stepped_thread (process_stratum_target *resume_target, | |
8438 | ptid_t resume_ptid) | |
8439 | { | |
8440 | /* Do all pending step-overs before actually proceeding with | |
8441 | step/next/etc. */ | |
8442 | if (start_step_over ()) | |
8443 | return true; | |
483805cf | 8444 | |
408f6686 PA |
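/* First pass: prefer a thread that was interrupted in the middle of
   a step-over, since it must finish that step-over before anything
   else can make progress.  */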
8445 | for (thread_info *tp : all_threads_safe ()) |
8446 | { | |
8447 | if (tp->state == THREAD_EXITED) | |
8448 | continue; | |
8449 | ||
1edb66d8 | 8450 | if (tp->has_pending_waitstatus ()) |
408f6686 | 8451 | continue; |
483805cf | 8452 | |
408f6686 PA |
8453 | /* Ignore threads of processes the caller is not |
8454 | resuming. */ | |
8455 | if (!sched_multi | |
8456 | && (tp->inf->process_target () != resume_target | |
8457 | || tp->inf->pid != resume_ptid.pid ())) | |
8458 | continue; | |
483805cf | 8459 | |
408f6686 PA |
8460 | if (tp->control.trap_expected) |
8461 | { | |
8462 | infrun_debug_printf ("switching back to stepped thread (step-over)"); | |
483805cf | 8463 | |
408f6686 PA |
8464 | if (keep_going_stepped_thread (tp)) |
8465 | return true; | |
99619bea | 8466 | } |
408f6686 PA |
8467 | } |
8468 | ||
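/* Second pass: no thread was found mid step-over, so look for the
   thread that was stepping a source range and resume that one.  */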
8469 | for (thread_info *tp : all_threads_safe ()) | |
8470 | { | |
8471 | if (tp->state == THREAD_EXITED) | |
8472 | continue; | |
8473 | ||
1edb66d8 | 8474 | if (tp->has_pending_waitstatus ()) |
408f6686 | 8475 | continue; |
99619bea | 8476 | |
408f6686 PA |
8477 | /* Ignore threads of processes the caller is not |
8478 | resuming. */ | |
8479 | if (!sched_multi | |
8480 | && (tp->inf->process_target () != resume_target | |
8481 | || tp->inf->pid != resume_ptid.pid ())) | |
8482 | continue; | |
8483 | ||
8484 | /* Did we find the stepping thread? */ | |
8485 | if (tp->control.step_range_end) | |
99619bea | 8486 | { |
408f6686 | 8487 | infrun_debug_printf ("switching back to stepped thread (stepping)"); |
c447ac0b | 8488 | |
408f6686 PA |
8489 | if (keep_going_stepped_thread (tp)) |
8490 | return true; | |
2ac7589c PA |
8491 | } |
8492 | } | |
2adfaa28 | 8493 | |
c4464ade | 8494 | return false; |
2ac7589c | 8495 | } |
2adfaa28 | 8496 | |
408f6686 PA |
8497 | /* See infrun.h. */ |
8498 | ||
8499 | void | |
8500 | restart_after_all_stop_detach (process_stratum_target *proc_target) | |
8501 | { | |
8502 | /* Note we don't check target_is_non_stop_p() here, because the | |
8503 | current inferior may no longer have a process_stratum target | |
8504 | pushed, as we just detached. */ | |
8505 | ||
8506 | /* See if we have a THREAD_RUNNING thread that needs to be |
8507 | re-resumed. If we have any thread that is already executing, |
8508 | then we don't need to resume the target -- it has already been |
8509 | resumed. With the remote target (in all-stop), it's even | |
8510 | impossible to issue another resumption if the target is already | |
8511 | resumed, until the target reports a stop. */ | |
8512 | for (thread_info *thr : all_threads (proc_target)) | |
8513 | { | |
8514 | if (thr->state != THREAD_RUNNING) | |
8515 | continue; | |
8516 | ||
8517 | /* If we have any thread that is already executing, then we | |
8518 | don't need to resume the target -- it has already been |
8519 | resumed. */ | |
611841bb | 8520 | if (thr->executing ()) |
408f6686 PA |
8521 | return; |
8522 | ||
8523 | /* If we have a pending event to process, skip resuming the | |
8524 | target and go straight to processing it. */ | |
1edb66d8 | 8525 | if (thr->resumed () && thr->has_pending_waitstatus ()) |
408f6686 PA |
8526 | return; |
8527 | } | |
8528 | ||
8529 | /* Alright, we need to re-resume the target. If a thread was | |
8530 | stepping, we need to restart it stepping. */ | |
8531 | if (restart_stepped_thread (proc_target, minus_one_ptid)) | |
8532 | return; | |
8533 | ||
8534 | /* Otherwise, find the first THREAD_RUNNING thread and resume | |
8535 | it. */ | |
8536 | for (thread_info *thr : all_threads (proc_target)) | |
8537 | { | |
8538 | if (thr->state != THREAD_RUNNING) | |
8539 | continue; | |
8540 | ||
aa563d16 | 8541 | execution_control_state ecs (thr); |
408f6686 PA |
8542 | switch_to_thread (thr); |
8543 | keep_going (&ecs); | |
8544 | return; | |
8545 | } | |
8546 | } | |
8547 | ||
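/* A hedged usage sketch of restart_after_all_stop_detach; this caller
   is hypothetical and for illustration only (the real callers live in
   the detach paths).  The process target must be saved before
   detaching, because the detached inferior no longer has one pushed
   afterwards.  */

static void
detach_and_restart_example (inferior *inf, int from_tty)
{
  process_stratum_target *proc_target = inf->process_target ();

  target_detach (inf, from_tty);
  restart_after_all_stop_detach (proc_target);
}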
2ac7589c PA |
8548 | /* Set a previously stepped thread back to stepping. Returns true on |
8549 | success, false if the resume is not possible (e.g., the thread | |
8550 | vanished). */ | |
8551 | ||
c4464ade | 8552 | static bool |
2ac7589c PA |
8553 | keep_going_stepped_thread (struct thread_info *tp) |
8554 | { | |
bd2b40ac | 8555 | frame_info_ptr frame; |
2adfaa28 | 8556 | |
2ac7589c PA |
8557 | /* If the stepping thread exited, then don't try to switch back and |
8558 | resume it, which could fail in several different ways depending | |
8559 | on the target. Instead, just keep going. | |
2adfaa28 | 8560 | |
2ac7589c PA |
8561 | We can find a stepping dead thread in the thread list in two |
8562 | cases: | |
2adfaa28 | 8563 | |
2ac7589c PA |
8564 | - The target supports thread exit events, and when the target |
8565 | tries to delete the thread from the thread list, inferior_ptid | |
8566 | pointed at the exiting thread. In such case, calling | |
8567 | delete_thread does not really remove the thread from the list; | |
8568 | instead, the thread is left listed, with 'exited' state. | |
64ce06e4 | 8569 | |
2ac7589c PA |
8570 | - The target's debug interface does not support thread exit |
8571 | events, and so we have no idea whatsoever if the previously | |
8572 | stepping thread is still alive. For that reason, we need to | |
8573 | synchronously query the target now. */ | |
2adfaa28 | 8574 | |
00431a78 | 8575 | if (tp->state == THREAD_EXITED || !target_thread_alive (tp->ptid)) |
2ac7589c | 8576 | { |
1eb8556f SM |
8577 | infrun_debug_printf ("not resuming previously stepped thread, it has " |
8578 | "vanished"); | |
2ac7589c | 8579 | |
00431a78 | 8580 | delete_thread (tp); |
c4464ade | 8581 | return false; |
c447ac0b | 8582 | } |
2ac7589c | 8583 | |
1eb8556f | 8584 | infrun_debug_printf ("resuming previously stepped thread"); |
2ac7589c | 8585 | |
aa563d16 | 8586 | execution_control_state ecs (tp); |
00431a78 | 8587 | switch_to_thread (tp); |
2ac7589c | 8588 | |
1edb66d8 | 8589 | tp->set_stop_pc (regcache_read_pc (get_thread_regcache (tp))); |
2ac7589c | 8590 | frame = get_current_frame (); |
2ac7589c PA |
8591 | |
8592 | /* If the PC of the thread we were trying to single-step has | |
8593 | changed, then that thread has trapped or been signaled, but the | |
8594 | event has not been reported to GDB yet. Re-poll the target | |
8595 | looking for this particular thread's event (i.e. temporarily | |
8596 | enable schedlock) by: | |
8597 | ||
8598 | - setting a break at the current PC | |
8599 | - resuming that particular thread, only (by setting trap | |
8600 | expected) | |
8601 | ||
8602 | This prevents us continuously moving the single-step breakpoint | |
8603 | forward, one instruction at a time, overstepping. */ | |
8604 | ||
1edb66d8 | 8605 | if (tp->stop_pc () != tp->prev_pc) |
2ac7589c PA |
8606 | { |
8607 | ptid_t resume_ptid; | |
8608 | ||
1eb8556f | 8609 | infrun_debug_printf ("expected thread advanced also (%s -> %s)", |
99d9c3b9 SM |
8610 | paddress (current_inferior ()->arch (), tp->prev_pc), |
8611 | paddress (current_inferior ()->arch (), | |
8612 | tp->stop_pc ())); | |
2ac7589c PA |
8613 | |
8614 | /* Clear the info of the previous step-over, as it's no longer | |
8615 | valid (if the thread was trying to step over a breakpoint, it | |
8616 | has already succeeded). It's what keep_going would do too, | |
8617 | if we called it. Do this before trying to insert the sss | |
8618 | breakpoint, otherwise if we were previously trying to step | |
8619 | over this exact address in another thread, the breakpoint is | |
8620 | skipped. */ | |
8621 | clear_step_over_info (); | |
8622 | tp->control.trap_expected = 0; | |
8623 | ||
8624 | insert_single_step_breakpoint (get_frame_arch (frame), | |
8625 | get_frame_address_space (frame), | |
1edb66d8 | 8626 | tp->stop_pc ()); |
2ac7589c | 8627 | |
7846f3aa | 8628 | tp->set_resumed (true); |
fbea99ea | 8629 | resume_ptid = internal_resume_ptid (tp->control.stepping_command); |
c4464ade | 8630 | do_target_resume (resume_ptid, false, GDB_SIGNAL_0); |
2ac7589c PA |
8631 | } |
8632 | else | |
8633 | { | |
1eb8556f | 8634 | infrun_debug_printf ("expected thread still hasn't advanced"); |
2ac7589c | 8635 | |
aa563d16 | 8636 | keep_going_pass_signal (&ecs); |
2ac7589c | 8637 | } |
c4464ade SM |
8638 | |
8639 | return true; | |
c447ac0b PA |
8640 | } |
8641 | ||
8b061563 PA |
8642 | /* Is thread TP in the middle of (software or hardware) |
8643 | single-stepping? (Note the result of this function must never be | |
8644 | passed directly as target_resume's STEP parameter.) */ | |
104c1213 | 8645 | |
c4464ade | 8646 | static bool |
b3444185 | 8647 | currently_stepping (struct thread_info *tp) |
a7212384 | 8648 | { |
8358c15c | 8649 | return ((tp->control.step_range_end |
03acd4d8 | 8650 | && tp->control.step_resume_breakpoint == nullptr) |
8358c15c | 8651 | || tp->control.trap_expected |
af48d08f | 8652 | || tp->stepped_breakpoint |
8358c15c | 8653 | || bpstat_should_step ()); |
a7212384 UW |
8654 | } |
8655 | ||
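/* An illustration of the distinction drawn in the comment above: a
   thread doing "next" over a call counts as "currently stepping" even
   while it is resumed with STEP == false, running freely to the
   step-resume breakpoint planted at the caller's resume address.
   Hence the result must not be fed straight to target_resume.  */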
b2175913 MS |
8656 | /* Inferior has stepped into a subroutine call with source code that |
8657 | we should not step over. Step to the first line of code in |
8658 | it. */ | |
c2c6d25f JM |
8659 | |
8660 | static void | |
568d6575 UW |
8661 | handle_step_into_function (struct gdbarch *gdbarch, |
8662 | struct execution_control_state *ecs) | |
c2c6d25f | 8663 | { |
7e324e48 GB |
8664 | fill_in_stop_func (gdbarch, ecs); |
8665 | ||
f2ffa92b | 8666 | compunit_symtab *cust |
1edb66d8 | 8667 | = find_pc_compunit_symtab (ecs->event_thread->stop_pc ()); |
03acd4d8 | 8668 | if (cust != nullptr && cust->language () != language_asm) |
46a62268 YQ |
8669 | ecs->stop_func_start |
8670 | = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start); | |
c2c6d25f | 8671 | |
51abb421 | 8672 | symtab_and_line stop_func_sal = find_pc_line (ecs->stop_func_start, 0); |
c2c6d25f JM |
8673 | /* Use the step_resume_break to step until the end of the prologue, |
8674 | even if that involves jumps (as it seems to on the vax under | |
8675 | 4.2). */ | |
8676 | /* If the prologue ends in the middle of a source line, continue to | |
8677 | the end of that source line (if it is still within the function). | |
8678 | Otherwise, just go to end of prologue. */ | |
2afb61aa PA |
8679 | if (stop_func_sal.end |
8680 | && stop_func_sal.pc != ecs->stop_func_start | |
8681 | && stop_func_sal.end < ecs->stop_func_end) | |
8682 | ecs->stop_func_start = stop_func_sal.end; | |
c2c6d25f | 8683 | |
2dbd5e30 KB |
8684 | /* Architectures which require breakpoint adjustment might not be able |
8685 | to place a breakpoint at the computed address. If so, the test | |
8686 | ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust | |
8687 | ecs->stop_func_start to an address at which a breakpoint may be | |
8688 | legitimately placed. | |
8fb3e588 | 8689 | |
2dbd5e30 KB |
8690 | Note: kevinb/2004-01-19: On FR-V, if this adjustment is not |
8691 | made, GDB will enter an infinite loop when stepping through | |
8692 | optimized code consisting of VLIW instructions which contain | |
8693 | subinstructions corresponding to different source lines. On | |
8694 | FR-V, it's not permitted to place a breakpoint on any but the | |
8695 | first subinstruction of a VLIW instruction. When a breakpoint is | |
8696 | set, GDB will adjust the breakpoint address to the beginning of | |
8697 | the VLIW instruction. Thus, we need to make the corresponding | |
8698 | adjustment here when computing the stop address. */ | |
8fb3e588 | 8699 | |
568d6575 | 8700 | if (gdbarch_adjust_breakpoint_address_p (gdbarch)) |
2dbd5e30 KB |
8701 | { |
8702 | ecs->stop_func_start | |
568d6575 | 8703 | = gdbarch_adjust_breakpoint_address (gdbarch, |
8fb3e588 | 8704 | ecs->stop_func_start); |
2dbd5e30 KB |
8705 | } |
8706 | ||
1edb66d8 | 8707 | if (ecs->stop_func_start == ecs->event_thread->stop_pc ()) |
c2c6d25f JM |
8708 | { |
8709 | /* We are already there: stop now. */ | |
bdc36728 | 8710 | end_stepping_range (ecs); |
c2c6d25f JM |
8711 | return; |
8712 | } | |
8713 | else | |
8714 | { | |
8715 | /* Put the step-breakpoint there and go until there. */ | |
51abb421 | 8716 | symtab_and_line sr_sal; |
c2c6d25f JM |
8717 | sr_sal.pc = ecs->stop_func_start; |
8718 | sr_sal.section = find_pc_overlay (ecs->stop_func_start); | |
6c95b8df | 8719 | sr_sal.pspace = get_frame_program_space (get_current_frame ()); |
44cbf7b5 | 8720 | |
c2c6d25f | 8721 | /* Do not specify what the fp should be when we stop since on |
dda83cd7 SM |
8722 | some machines the prologue is where the new fp value is |
8723 | established. */ | |
a6d9a66e | 8724 | insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id); |
c2c6d25f JM |
8725 | |
8726 | /* And make sure stepping stops right away then. */ | |
16c381f0 | 8727 | ecs->event_thread->control.step_range_end |
dda83cd7 | 8728 | = ecs->event_thread->control.step_range_start; |
c2c6d25f JM |
8729 | } |
8730 | keep_going (ecs); | |
8731 | } | |
d4f3574e | 8732 | |
b2175913 MS |
8733 | /* Inferior has stepped backward into a subroutine call with source |
8734 | code that we should not step over. Step to the beginning of the |
8735 | last line of code in it. */ | |
8736 | ||
8737 | static void | |
568d6575 UW |
8738 | handle_step_into_function_backward (struct gdbarch *gdbarch, |
8739 | struct execution_control_state *ecs) | |
b2175913 | 8740 | { |
43f3e411 | 8741 | struct compunit_symtab *cust; |
167e4384 | 8742 | struct symtab_and_line stop_func_sal; |
b2175913 | 8743 | |
7e324e48 GB |
8744 | fill_in_stop_func (gdbarch, ecs); |
8745 | ||
1edb66d8 | 8746 | cust = find_pc_compunit_symtab (ecs->event_thread->stop_pc ()); |
03acd4d8 | 8747 | if (cust != nullptr && cust->language () != language_asm) |
46a62268 YQ |
8748 | ecs->stop_func_start |
8749 | = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start); | |
b2175913 | 8750 | |
1edb66d8 | 8751 | stop_func_sal = find_pc_line (ecs->event_thread->stop_pc (), 0); |
b2175913 MS |
8752 | |
8753 | /* OK, we're just going to keep stepping here. */ | |
1edb66d8 | 8754 | if (stop_func_sal.pc == ecs->event_thread->stop_pc ()) |
b2175913 MS |
8755 | { |
8756 | /* We're there already. Just stop stepping now. */ | |
bdc36728 | 8757 | end_stepping_range (ecs); |
b2175913 MS |
8758 | } |
8759 | else | |
8760 | { | |
8761 | /* Else just reset the step range and keep going. | |
8762 | No step-resume breakpoint; they don't work for |
8763 | epilogues, which can have multiple entry paths. */ | |
16c381f0 JK |
8764 | ecs->event_thread->control.step_range_start = stop_func_sal.pc; |
8765 | ecs->event_thread->control.step_range_end = stop_func_sal.end; | |
b2175913 MS |
8766 | keep_going (ecs); |
8767 | } | |
8768 | return; | |
8769 | } | |
8770 | ||
d3169d93 | 8771 | /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID. |
44cbf7b5 AC |
8772 | This is used both to step into functions and to skip over code. */ |
8773 | ||
8774 | static void | |
2c03e5be PA |
8775 | insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch, |
8776 | struct symtab_and_line sr_sal, | |
8777 | struct frame_id sr_id, | |
8778 | enum bptype sr_type) | |
44cbf7b5 | 8779 | { |
611c83ae PA |
8780 | /* There should never be more than one step-resume or longjmp-resume |
8781 | breakpoint per thread, so we should never be setting a new | |
44cbf7b5 | 8782 | step_resume_breakpoint when one is already active. */ |
03acd4d8 | 8783 | gdb_assert (inferior_thread ()->control.step_resume_breakpoint == nullptr); |
2c03e5be | 8784 | gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume); |
d3169d93 | 8785 | |
1eb8556f SM |
8786 | infrun_debug_printf ("inserting step-resume breakpoint at %s", |
8787 | paddress (gdbarch, sr_sal.pc)); | |
d3169d93 | 8788 | |
8358c15c | 8789 | inferior_thread ()->control.step_resume_breakpoint |
454dafbd | 8790 | = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type).release (); |
2c03e5be PA |
8791 | } |
8792 | ||
9da8c2a0 | 8793 | void |
2c03e5be PA |
8794 | insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch, |
8795 | struct symtab_and_line sr_sal, | |
8796 | struct frame_id sr_id) | |
8797 | { | |
8798 | insert_step_resume_breakpoint_at_sal_1 (gdbarch, | |
8799 | sr_sal, sr_id, | |
8800 | bp_step_resume); | |
44cbf7b5 | 8801 | } |
7ce450bd | 8802 | |
2c03e5be PA |
8803 | /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc. |
8804 | This is used to skip a potential signal handler. | |
7ce450bd | 8805 | |
14e60db5 DJ |
8806 | This is called with the interrupted function's frame. The signal |
8807 | handler, when it returns, will resume the interrupted function at | |
8808 | RETURN_FRAME.pc. */ | |
d303a6c7 AC |
8809 | |
8810 | static void | |
8480a37e | 8811 | insert_hp_step_resume_breakpoint_at_frame (const frame_info_ptr &return_frame) |
d303a6c7 | 8812 | { |
03acd4d8 | 8813 | gdb_assert (return_frame != nullptr); |
d303a6c7 | 8814 | |
51abb421 PA |
8815 | struct gdbarch *gdbarch = get_frame_arch (return_frame); |
8816 | ||
8817 | symtab_and_line sr_sal; | |
568d6575 | 8818 | sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame)); |
d303a6c7 | 8819 | sr_sal.section = find_pc_overlay (sr_sal.pc); |
6c95b8df | 8820 | sr_sal.pspace = get_frame_program_space (return_frame); |
d303a6c7 | 8821 | |
2c03e5be PA |
8822 | insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal, |
8823 | get_stack_frame_id (return_frame), | |
8824 | bp_hp_step_resume); | |
d303a6c7 AC |
8825 | } |
8826 | ||
2c03e5be PA |
8827 | /* Insert a "step-resume breakpoint" at the previous frame's PC. This |
8828 | is used to skip a function after stepping into it (for "next" or if | |
8829 | the called function has no debugging information). | |
14e60db5 DJ |
8830 | |
8831 | The current function has almost always been reached by single | |
8832 | stepping a call or return instruction. NEXT_FRAME belongs to the | |
8833 | current function, and the breakpoint will be set at the caller's | |
8834 | resume address. | |
8835 | ||
8836 | This is a separate function rather than reusing | |
2c03e5be | 8837 | insert_hp_step_resume_breakpoint_at_frame in order to avoid |
14e60db5 | 8838 | get_prev_frame, which may stop prematurely (see the implementation |
c7ce8faa | 8839 | of frame_unwind_caller_id for an example). */ |
14e60db5 DJ |
8840 | |
8841 | static void | |
8480a37e | 8842 | insert_step_resume_breakpoint_at_caller (const frame_info_ptr &next_frame) |
14e60db5 | 8843 | { |
14e60db5 DJ |
8844 | /* We shouldn't have gotten here if we don't know where the call site |
8845 | is. */ | |
c7ce8faa | 8846 | gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame))); |
14e60db5 | 8847 | |
51abb421 | 8848 | struct gdbarch *gdbarch = frame_unwind_caller_arch (next_frame); |
14e60db5 | 8849 | |
51abb421 | 8850 | symtab_and_line sr_sal; |
c7ce8faa DJ |
8851 | sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, |
8852 | frame_unwind_caller_pc (next_frame)); | |
14e60db5 | 8853 | sr_sal.section = find_pc_overlay (sr_sal.pc); |
6c95b8df | 8854 | sr_sal.pspace = frame_unwind_program_space (next_frame); |
14e60db5 | 8855 | |
a6d9a66e | 8856 | insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, |
c7ce8faa | 8857 | frame_unwind_caller_id (next_frame)); |
14e60db5 DJ |
8858 | } |
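/* Example: what the gdbarch_addr_bits_remove calls above accomplish, as
   a standalone sketch.  On some targets (32-bit ARM is the classic case)
   the low bit of a code address encodes an instruction-set mode and must
   be masked off before planting a breakpoint at the caller's resume
   address.  The mask below is illustrative, not any particular gdbarch's
   actual rule.  */
#include <cstdint>
#include <cstdio>

static uint32_t
addr_bits_remove_model (uint32_t pc)
{
  return pc & ~UINT32_C (1);            /* Strip the mode bit.  */
}

int main ()
{
  uint32_t caller_pc = 0x8001235;       /* Odd address: mode bit set.  */
  std::printf ("breakpoint goes at %#x\n",
	       (unsigned) addr_bits_remove_model (caller_pc));  /* 0x8001234 */
  return 0;
}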
8859 | ||
611c83ae PA |
8860 | /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a |
8861 | new breakpoint at the target of a jmp_buf. The handling of | |
8862 | longjmp-resume uses the same mechanisms used for handling | |
8863 | "step-resume" breakpoints. */ | |
8864 | ||
8865 | static void | |
a6d9a66e | 8866 | insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc) |
611c83ae | 8867 | { |
e81a37f7 TT |
8868 | /* There should never be more than one longjmp-resume breakpoint per |
8869 | thread, so we should never be setting a new | |
611c83ae | 8870 | longjmp_resume_breakpoint when one is already active. */ |
03acd4d8 | 8871 | gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == nullptr); |
611c83ae | 8872 | |
1eb8556f SM |
8873 | infrun_debug_printf ("inserting longjmp-resume breakpoint at %s", |
8874 | paddress (gdbarch, pc)); | |
611c83ae | 8875 | |
e81a37f7 | 8876 | inferior_thread ()->control.exception_resume_breakpoint = |
454dafbd | 8877 | set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume).release (); |
611c83ae PA |
8878 | } |
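/* Example: the control flow that a longjmp-resume breakpoint steps over,
   as a standalone program.  GDB extracts the resume PC from the jmp_buf
   and plants a momentary breakpoint there, so stepping over the longjmp
   call stops back at the setjmp return point.  */
#include <csetjmp>
#include <cstdio>

static jmp_buf env;

int main ()
{
  if (setjmp (env) == 0)                /* First return: fall through.  */
    {
      std::printf ("calling longjmp\n");
      std::longjmp (env, 1);            /* Never returns normally.  */
    }
  std::printf ("resumed after setjmp\n");  /* Second return.  */
  return 0;
}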
8879 | ||
186c406b TT |
8880 | /* Insert an exception resume breakpoint. TP is the thread throwing |
8881 | the exception. The block B is the block of the unwinder debug hook | |
8882 | function. FRAME is the frame corresponding to the call to this | |
8883 | function. SYM is the symbol of the function argument holding the | |
8884 | target PC of the exception. */ | |
8885 | ||
8886 | static void | |
8887 | insert_exception_resume_breakpoint (struct thread_info *tp, | |
3977b71f | 8888 | const struct block *b, |
8480a37e | 8889 | const frame_info_ptr &frame, |
186c406b TT |
8890 | struct symbol *sym) |
8891 | { | |
a70b8144 | 8892 | try |
186c406b | 8893 | { |
63e43d3a | 8894 | struct block_symbol vsym; |
186c406b TT |
8895 | struct value *value; |
8896 | CORE_ADDR handler; | |
8897 | struct breakpoint *bp; | |
8898 | ||
987012b8 | 8899 | vsym = lookup_symbol_search_name (sym->search_name (), |
ccf41c24 | 8900 | b, SEARCH_VAR_DOMAIN); |
63e43d3a | 8901 | value = read_var_value (vsym.symbol, vsym.block, frame); |
186c406b | 8902 | /* If the value was optimized out, revert to the old behavior. */ |
d00664db | 8903 | if (! value->optimized_out ()) |
186c406b TT |
8904 | { |
8905 | handler = value_as_address (value); | |
8906 | ||
1eb8556f SM |
8907 | infrun_debug_printf ("exception resume at %lx", |
8908 | (unsigned long) handler); | |
186c406b | 8909 | |
60a13bbc AB |
8910 | /* set_momentary_breakpoint_at_pc creates a thread-specific |
8911 | breakpoint for the current inferior thread. */ | |
8912 | gdb_assert (tp == inferior_thread ()); | |
186c406b | 8913 | bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame), |
454dafbd TT |
8914 | handler, |
8915 | bp_exception_resume).release (); | |
c70a6932 | 8916 | |
60a13bbc | 8917 | tp->control.exception_resume_breakpoint = bp; |
186c406b TT |
8918 | } |
8919 | } | |
230d2906 | 8920 | catch (const gdb_exception_error &e) |
492d29ea PA |
8921 | { |
8922 | /* We want to ignore errors here. */ | |
8923 | } | |
186c406b TT |
8924 | } |
8925 | ||
28106bc2 SDJ |
8926 | /* A helper for check_exception_resume that sets an |
8927 | exception-breakpoint based on a SystemTap probe. */ | |
8928 | ||
8929 | static void | |
8930 | insert_exception_resume_from_probe (struct thread_info *tp, | |
729662a5 | 8931 | const struct bound_probe *probe, |
8480a37e | 8932 | const frame_info_ptr &frame) |
28106bc2 SDJ |
8933 | { |
8934 | struct value *arg_value; | |
8935 | CORE_ADDR handler; | |
8936 | struct breakpoint *bp; | |
8937 | ||
8938 | arg_value = probe_safe_evaluate_at_pc (frame, 1); | |
8939 | if (!arg_value) | |
8940 | return; | |
8941 | ||
8942 | handler = value_as_address (arg_value); | |
8943 | ||
1eb8556f SM |
8944 | infrun_debug_printf ("exception resume at %s", |
8945 | paddress (probe->objfile->arch (), handler)); | |
28106bc2 | 8946 | |
60a13bbc AB |
8947 | /* set_momentary_breakpoint_at_pc creates a thread-specific breakpoint |
8948 | for the current inferior thread. */ | |
8949 | gdb_assert (tp == inferior_thread ()); | |
28106bc2 | 8950 | bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame), |
454dafbd | 8951 | handler, bp_exception_resume).release (); |
60a13bbc | 8952 | tp->control.exception_resume_breakpoint = bp; |
28106bc2 SDJ |
8953 | } |
8954 | ||
186c406b TT |
8955 | /* This is called when an exception has been intercepted. Check to |
8956 | see whether the exception's destination is of interest, and if so, | |
8957 | set an exception resume breakpoint there. */ | |
8958 | ||
8959 | static void | |
8960 | check_exception_resume (struct execution_control_state *ecs, | |
8480a37e | 8961 | const frame_info_ptr &frame) |
186c406b | 8962 | { |
729662a5 | 8963 | struct bound_probe probe; |
28106bc2 SDJ |
8964 | struct symbol *func; |
8965 | ||
8966 | /* First see if this exception unwinding breakpoint was set via a | |
8967 | SystemTap probe point. If so, the probe has two arguments: the | |
8968 | CFA and the HANDLER. We ignore the CFA, extract the handler, and | |
8969 | set a breakpoint there. */ | |
6bac7473 | 8970 | probe = find_probe_by_pc (get_frame_pc (frame)); |
935676c9 | 8971 | if (probe.prob) |
28106bc2 | 8972 | { |
729662a5 | 8973 | insert_exception_resume_from_probe (ecs->event_thread, &probe, frame); |
28106bc2 SDJ |
8974 | return; |
8975 | } | |
8976 | ||
8977 | func = get_frame_function (frame); | |
8978 | if (!func) | |
8979 | return; | |
186c406b | 8980 | |
a70b8144 | 8981 | try |
186c406b | 8982 | { |
3977b71f | 8983 | const struct block *b; |
186c406b TT |
8984 | int argno = 0; |
8985 | ||
8986 | /* The exception breakpoint is a thread-specific breakpoint on | |
8987 | the unwinder's debug hook, declared as: | |
8988 | ||
8989 | void _Unwind_DebugHook (void *cfa, void *handler); | |
8990 | ||
8991 | The CFA argument indicates the frame to which control is | |
8992 | about to be transferred. HANDLER is the destination PC. | |
8993 | ||
8994 | We ignore the CFA and set a temporary breakpoint at HANDLER. | |
8995 | This is not extremely efficient but it avoids issues in gdb | |
8996 | with computing the DWARF CFA, and it also works even in weird | |
8997 | cases such as throwing an exception from inside a signal | |
8998 | handler. */ | |
8999 | ||
4aeddc50 | 9000 | b = func->value_block (); |
548a89df | 9001 | for (struct symbol *sym : block_iterator_range (b)) |
186c406b | 9002 | { |
d9743061 | 9003 | if (!sym->is_argument ()) |
186c406b TT |
9004 | continue; |
9005 | ||
9006 | if (argno == 0) | |
9007 | ++argno; | |
9008 | else | |
9009 | { | |
9010 | insert_exception_resume_breakpoint (ecs->event_thread, | |
9011 | b, frame, sym); | |
9012 | break; | |
9013 | } | |
9014 | } | |
9015 | } | |
230d2906 | 9016 | catch (const gdb_exception_error &e) |
492d29ea PA |
9017 | { |
9018 | } | |
186c406b TT |
9019 | } |
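/* Example: the unwinder debug-hook contract the code above relies on, as
   a standalone sketch with a hypothetical hook name standing in for
   _Unwind_DebugHook.  The runtime calls such a hook just before
   transferring control to a landing pad; GDB breakpoints the hook,
   ignores the CFA argument, and reads the destination out of HANDLER.  */
#include <cstdio>

extern "C" void
unwind_debug_hook_model (void *cfa, void *handler)
{
  (void) cfa;   /* A debugger watching this call would now set a
		   momentary breakpoint at HANDLER.  */
  std::printf ("exception will resume at %p\n", handler);
}

static void
landing_pad_model ()
{
  std::printf ("in the handler\n");
}

int main ()
{
  unwind_debug_hook_model (nullptr,
			   reinterpret_cast<void *> (&landing_pad_model));
  landing_pad_model ();
  return 0;
}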
9020 | ||
104c1213 | 9021 | static void |
22bcd14b | 9022 | stop_waiting (struct execution_control_state *ecs) |
104c1213 | 9023 | { |
1eb8556f | 9024 | infrun_debug_printf ("stop_waiting"); |
527159b7 | 9025 | |
cd0fc7c3 SS |
9026 | /* Let callers know we don't want to wait for the inferior anymore. */ |
9027 | ecs->wait_some_more = 0; | |
9028 | } | |
9029 | ||
4d9d9d04 PA |
9030 | /* Like keep_going, but passes the signal to the inferior, even if the |
9031 | signal is set to nopass. */ | |
d4f3574e SS |
9032 | |
9033 | static void | |
4d9d9d04 | 9034 | keep_going_pass_signal (struct execution_control_state *ecs) |
d4f3574e | 9035 | { |
d7e15655 | 9036 | gdb_assert (ecs->event_thread->ptid == inferior_ptid); |
7846f3aa | 9037 | gdb_assert (!ecs->event_thread->resumed ()); |
4d9d9d04 | 9038 | |
d4f3574e | 9039 | /* Save the pc before execution, to compare with pc after stop. */ |
fb14de7b | 9040 | ecs->event_thread->prev_pc |
fc75c28b | 9041 | = regcache_read_pc_protected (get_thread_regcache (ecs->event_thread)); |
d4f3574e | 9042 | |
4d9d9d04 | 9043 | if (ecs->event_thread->control.trap_expected) |
d4f3574e | 9044 | { |
4d9d9d04 PA |
9045 | struct thread_info *tp = ecs->event_thread; |
9046 | ||
1eb8556f SM |
9047 | infrun_debug_printf ("%s has trap_expected set, " |
9048 | "resuming to collect trap", | |
0fab7955 | 9049 | tp->ptid.to_string ().c_str ()); |
4d9d9d04 | 9050 | |
a9ba6bae PA |
9051 | /* We haven't yet gotten our trap, and either: intercepted a |
9052 | non-signal event (e.g., a fork); or took a signal which we | |
9053 | are supposed to pass through to the inferior. Simply | |
9054 | continue. */ | |
1edb66d8 | 9055 | resume (ecs->event_thread->stop_signal ()); |
d4f3574e | 9056 | } |
372316f1 PA |
9057 | else if (step_over_info_valid_p ()) |
9058 | { | |
9059 | /* Another thread is stepping over a breakpoint in-line. If | |
9060 | this thread needs a step-over too, queue the request. In | |
9061 | either case, this resume must be deferred for later. */ | |
9062 | struct thread_info *tp = ecs->event_thread; | |
9063 | ||
9064 | if (ecs->hit_singlestep_breakpoint | |
9065 | || thread_still_needs_step_over (tp)) | |
9066 | { | |
1eb8556f SM |
9067 | infrun_debug_printf ("step-over already in progress: " |
9068 | "step-over for %s deferred", | |
0fab7955 | 9069 | tp->ptid.to_string ().c_str ()); |
28d5518b | 9070 | global_thread_step_over_chain_enqueue (tp); |
372316f1 PA |
9071 | } |
9072 | else | |
0fab7955 SM |
9073 | infrun_debug_printf ("step-over in progress: resume of %s deferred", |
9074 | tp->ptid.to_string ().c_str ()); | |
372316f1 | 9075 | } |
d4f3574e SS |
9076 | else |
9077 | { | |
9c742269 | 9078 | regcache *regcache = get_thread_regcache (ecs->event_thread); |
963f9c80 PA |
9079 | int remove_bp; |
9080 | int remove_wps; | |
8d297bbf | 9081 | step_over_what step_what; |
31e77af2 | 9082 | |
d4f3574e | 9083 | /* Either the trap was not expected, but we are continuing |
a9ba6bae PA |
9084 | anyway (if we got a signal, the user asked it be passed to |
9085 | the child) | |
9086 | -- or -- | |
9087 | We got our expected trap, but decided we should resume from | |
9088 | it. | |
d4f3574e | 9089 | |
a9ba6bae | 9090 | We're going to run this baby now! |
d4f3574e | 9091 | |
c36b740a VP |
9092 | Note that insert_breakpoints won't try to re-insert |
9093 | already inserted breakpoints. Therefore, we don't | |
9094 | care if breakpoints were already inserted, or not. */ | |
a9ba6bae | 9095 | |
31e77af2 PA |
9096 | /* If we need to step over a breakpoint, and we're not using |
9097 | displaced stepping to do so, insert all breakpoints | |
9098 | (watchpoints, etc.) but the one we're stepping over, step one | |
9099 | instruction, and then re-insert the breakpoint when that step | |
9100 | is finished. */ | |
963f9c80 | 9101 | |
6c4cfb24 PA |
9102 | step_what = thread_still_needs_step_over (ecs->event_thread); |
9103 | ||
963f9c80 | 9104 | remove_bp = (ecs->hit_singlestep_breakpoint |
6c4cfb24 PA |
9105 | || (step_what & STEP_OVER_BREAKPOINT)); |
9106 | remove_wps = (step_what & STEP_OVER_WATCHPOINT); | |
963f9c80 | 9107 | |
cb71640d PA |
9108 | /* We can't use displaced stepping if we need to step past a |
9109 | watchpoint. The instruction copied to the scratch pad would | |
9110 | still trigger the watchpoint. */ | |
9111 | if (remove_bp | |
3fc8eb30 | 9112 | && (remove_wps || !use_displaced_stepping (ecs->event_thread))) |
45e8c884 | 9113 | { |
f9582a22 | 9114 | set_step_over_info (ecs->event_thread->inf->aspace.get (), |
21edc42f YQ |
9115 | regcache_read_pc (regcache), remove_wps, |
9116 | ecs->event_thread->global_num); | |
45e8c884 | 9117 | } |
963f9c80 | 9118 | else if (remove_wps) |
03acd4d8 | 9119 | set_step_over_info (nullptr, 0, remove_wps, -1); |
372316f1 PA |
9120 | |
9121 | /* If we now need to do an in-line step-over, we need to stop | |
9122 | all other threads. Note this must be done before | |
9123 | insert_breakpoints below, because that removes the breakpoint | |
9124 | we're about to step over, otherwise other threads could miss | |
9125 | it. */ | |
fbea99ea | 9126 | if (step_over_info_valid_p () && target_is_non_stop_p ()) |
4f5539f0 | 9127 | stop_all_threads ("starting in-line step-over"); |
abbb1732 | 9128 | |
31e77af2 | 9129 | /* Stop stepping if inserting breakpoints fails. */ |
a70b8144 | 9130 | try |
31e77af2 PA |
9131 | { |
9132 | insert_breakpoints (); | |
9133 | } | |
230d2906 | 9134 | catch (const gdb_exception_error &e) |
31e77af2 PA |
9135 | { |
9136 | exception_print (gdb_stderr, e); | |
22bcd14b | 9137 | stop_waiting (ecs); |
bdf2a94a | 9138 | clear_step_over_info (); |
31e77af2 | 9139 | return; |
d4f3574e SS |
9140 | } |
9141 | ||
963f9c80 | 9142 | ecs->event_thread->control.trap_expected = (remove_bp || remove_wps); |
d4f3574e | 9143 | |
1edb66d8 | 9144 | resume (ecs->event_thread->stop_signal ()); |
d4f3574e SS |
9145 | } |
9146 | ||
488f131b | 9147 | prepare_to_wait (ecs); |
d4f3574e SS |
9148 | } |
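/* Example: the two flags computed above and the displaced-stepping
   restriction, restated as a standalone decision function (hypothetical
   names).  A watchpoint that must be stepped past rules out displaced
   stepping, because the instruction copied to the scratch pad would
   still trigger the watchpoint.  */
#include <cstdio>

enum { STEP_OVER_BREAKPOINT_M = 1, STEP_OVER_WATCHPOINT_M = 2 };

struct step_over_plan { bool remove_bp, remove_wps, inline_step_over; };

static step_over_plan
plan_step_over (int step_what, bool hit_singlestep_bp, bool can_displaced)
{
  step_over_plan p;
  p.remove_bp = hit_singlestep_bp || (step_what & STEP_OVER_BREAKPOINT_M);
  p.remove_wps = (step_what & STEP_OVER_WATCHPOINT_M) != 0;
  p.inline_step_over = p.remove_bp && (p.remove_wps || !can_displaced);
  return p;
}

int main ()
{
  step_over_plan p
    = plan_step_over (STEP_OVER_BREAKPOINT_M | STEP_OVER_WATCHPOINT_M,
		      false, true);
  std::printf ("remove_bp=%d remove_wps=%d inline=%d\n",
	       p.remove_bp, p.remove_wps, p.inline_step_over);
  return 0;
}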
9149 | ||
4d9d9d04 PA |
9150 | /* Called when we should continue running the inferior, because the |
9151 | current event doesn't cause a user visible stop. This does the | |
9152 | resuming part; waiting for the next event is done elsewhere. */ | |
9153 | ||
9154 | static void | |
9155 | keep_going (struct execution_control_state *ecs) | |
9156 | { | |
9157 | if (ecs->event_thread->control.trap_expected | |
1edb66d8 | 9158 | && ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP) |
4d9d9d04 PA |
9159 | ecs->event_thread->control.trap_expected = 0; |
9160 | ||
1edb66d8 SM |
9161 | if (!signal_program[ecs->event_thread->stop_signal ()]) |
9162 | ecs->event_thread->set_stop_signal (GDB_SIGNAL_0); | |
4d9d9d04 PA |
9163 | keep_going_pass_signal (ecs); |
9164 | } | |
9165 | ||
104c1213 JM |
9166 | /* This function normally comes after a resume, before |
9167 | handle_inferior_event exits. It takes care of any last bits of | |
9168 | housekeeping, and sets the all-important wait_some_more flag. */ | |
cd0fc7c3 | 9169 | |
104c1213 JM |
9170 | static void |
9171 | prepare_to_wait (struct execution_control_state *ecs) | |
cd0fc7c3 | 9172 | { |
1eb8556f | 9173 | infrun_debug_printf ("prepare_to_wait"); |
104c1213 | 9174 | |
104c1213 | 9175 | ecs->wait_some_more = 1; |
0b333c5e | 9176 | |
42bd97a6 PA |
9177 | /* If the target can't async, emulate it by marking the infrun event |
9178 | handler such that as soon as we get back to the event-loop, we | |
9179 | immediately end up in fetch_inferior_event again calling | |
9180 | target_wait. */ | |
9181 | if (!target_can_async_p ()) | |
0b333c5e | 9182 | mark_infrun_async_event_handler (); |
c906108c | 9183 | } |
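/* Example: a standalone sketch (hypothetical names) of the "emulate
   async" trick above.  On a target that cannot report events
   asynchronously, marking the infrun event handler makes the very next
   event-loop iteration call straight back into the inferior-event code,
   which then blocks in target_wait.  */
#include <cstdio>

static bool infrun_handler_marked = false;

static void
mark_infrun_handler_model ()
{
  infrun_handler_marked = true;
}

static void
event_loop_iteration_model ()
{
  if (infrun_handler_marked)
    {
      infrun_handler_marked = false;
      std::printf ("-> fetch_inferior_event -> target_wait\n");
    }
}

int main ()
{
  mark_infrun_handler_model ();   /* What prepare_to_wait does.  */
  event_loop_iteration_model ();  /* Handler runs immediately.  */
  return 0;
}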
11cf8741 | 9184 | |
fd664c91 | 9185 | /* We are done with the step range of a step/next/si/ni command. |
b57bacec | 9186 | Called once for each n of a "step n" operation. */ |
fd664c91 PA |
9187 | |
9188 | static void | |
bdc36728 | 9189 | end_stepping_range (struct execution_control_state *ecs) |
fd664c91 | 9190 | { |
bdc36728 | 9191 | ecs->event_thread->control.stop_step = 1; |
bdc36728 | 9192 | stop_waiting (ecs); |
fd664c91 PA |
9193 | } |
9194 | ||
33d62d64 JK |
9195 | /* Several print_*_reason functions to print why the inferior has stopped. |
9196 | We always print something when the inferior exits, or receives a signal. | |
9197 | The rest of the cases are dealt with later on in normal_stop and | |
9198 | print_it_typical. Ideally there should be a call to one of these | |
9199 | print_*_reason functions from handle_inferior_event each time | |
22bcd14b | 9200 | stop_waiting is called. |
33d62d64 | 9201 | |
fd664c91 PA |
9202 | Note that we don't call these directly, instead we delegate that to |
9203 | the interpreters, through observers. Interpreters then call these | |
9204 | with whatever uiout is right. */ | |
33d62d64 | 9205 | |
fd664c91 PA |
9206 | void |
9207 | print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal) | |
11cf8741 | 9208 | { |
33d62d64 | 9209 | annotate_signalled (); |
112e8700 SM |
9210 | if (uiout->is_mi_like_p ()) |
9211 | uiout->field_string | |
9212 | ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED)); | |
9213 | uiout->text ("\nProgram terminated with signal "); | |
33d62d64 | 9214 | annotate_signal_name (); |
112e8700 | 9215 | uiout->field_string ("signal-name", |
2ea28649 | 9216 | gdb_signal_to_name (siggnal)); |
33d62d64 | 9217 | annotate_signal_name_end (); |
112e8700 | 9218 | uiout->text (", "); |
33d62d64 | 9219 | annotate_signal_string (); |
112e8700 | 9220 | uiout->field_string ("signal-meaning", |
2ea28649 | 9221 | gdb_signal_to_string (siggnal)); |
33d62d64 | 9222 | annotate_signal_string_end (); |
112e8700 SM |
9223 | uiout->text (".\n"); |
9224 | uiout->text ("The program no longer exists.\n"); | |
33d62d64 JK |
9225 | } |
9226 | ||
fd664c91 PA |
9227 | void |
9228 | print_exited_reason (struct ui_out *uiout, int exitstatus) | |
33d62d64 | 9229 | { |
fda326dd | 9230 | struct inferior *inf = current_inferior (); |
a068643d | 9231 | std::string pidstr = target_pid_to_str (ptid_t (inf->pid)); |
fda326dd | 9232 | |
33d62d64 JK |
9233 | annotate_exited (exitstatus); |
9234 | if (exitstatus) | |
9235 | { | |
112e8700 SM |
9236 | if (uiout->is_mi_like_p ()) |
9237 | uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED)); | |
6a831f06 PA |
9238 | std::string exit_code_str |
9239 | = string_printf ("0%o", (unsigned int) exitstatus); | |
9240 | uiout->message ("[Inferior %s (%s) exited with code %pF]\n", | |
9241 | plongest (inf->num), pidstr.c_str (), | |
9242 | string_field ("exit-code", exit_code_str.c_str ())); | |
33d62d64 JK |
9243 | } |
9244 | else | |
11cf8741 | 9245 | { |
112e8700 SM |
9246 | if (uiout->is_mi_like_p ()) |
9247 | uiout->field_string | |
9248 | ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY)); | |
6a831f06 PA |
9249 | uiout->message ("[Inferior %s (%s) exited normally]\n", |
9250 | plongest (inf->num), pidstr.c_str ()); | |
33d62d64 | 9251 | } |
33d62d64 JK |
9252 | } |
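/* Example: the "0%o" format above prints the exit code in octal with a
   leading zero, so exit status 8 is reported as "010".  Standalone
   check:  */
#include <cstdio>

int main ()
{
  unsigned int exitstatus = 8;
  std::printf ("[Inferior 1 (process 1234) exited with code 0%o]\n",
	       exitstatus);             /* Prints "... code 010]".  */
  return 0;
}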
9253 | ||
fd664c91 PA |
9254 | void |
9255 | print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal) | |
33d62d64 | 9256 | { |
f303dbd6 PA |
9257 | struct thread_info *thr = inferior_thread (); |
9258 | ||
bb079752 AB |
9259 | infrun_debug_printf ("signal = %s", gdb_signal_to_string (siggnal)); |
9260 | ||
33d62d64 JK |
9261 | annotate_signal (); |
9262 | ||
112e8700 | 9263 | if (uiout->is_mi_like_p ()) |
f303dbd6 PA |
9264 | ; |
9265 | else if (show_thread_that_caused_stop ()) | |
33d62d64 | 9266 | { |
112e8700 | 9267 | uiout->text ("\nThread "); |
33eca680 | 9268 | uiout->field_string ("thread-id", print_thread_id (thr)); |
f303dbd6 | 9269 | |
25558938 | 9270 | const char *name = thread_name (thr); |
03acd4d8 | 9271 | if (name != nullptr) |
f303dbd6 | 9272 | { |
112e8700 | 9273 | uiout->text (" \""); |
33eca680 | 9274 | uiout->field_string ("name", name); |
112e8700 | 9275 | uiout->text ("\""); |
f303dbd6 | 9276 | } |
33d62d64 | 9277 | } |
f303dbd6 | 9278 | else |
112e8700 | 9279 | uiout->text ("\nProgram"); |
f303dbd6 | 9280 | |
112e8700 SM |
9281 | if (siggnal == GDB_SIGNAL_0 && !uiout->is_mi_like_p ()) |
9282 | uiout->text (" stopped"); | |
33d62d64 JK |
9283 | else |
9284 | { | |
112e8700 | 9285 | uiout->text (" received signal "); |
8b93c638 | 9286 | annotate_signal_name (); |
112e8700 SM |
9287 | if (uiout->is_mi_like_p ()) |
9288 | uiout->field_string | |
9289 | ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED)); | |
9290 | uiout->field_string ("signal-name", gdb_signal_to_name (siggnal)); | |
8b93c638 | 9291 | annotate_signal_name_end (); |
112e8700 | 9292 | uiout->text (", "); |
8b93c638 | 9293 | annotate_signal_string (); |
112e8700 | 9294 | uiout->field_string ("signal-meaning", gdb_signal_to_string (siggnal)); |
012b3a21 | 9295 | |
9c742269 | 9296 | regcache *regcache = get_thread_regcache (thr); |
272bb05c JB |
9297 | struct gdbarch *gdbarch = regcache->arch (); |
9298 | if (gdbarch_report_signal_info_p (gdbarch)) | |
9299 | gdbarch_report_signal_info (gdbarch, uiout, siggnal); | |
9300 | ||
8b93c638 | 9301 | annotate_signal_string_end (); |
33d62d64 | 9302 | } |
112e8700 | 9303 | uiout->text (".\n"); |
33d62d64 | 9304 | } |
252fbfc8 | 9305 | |
fd664c91 PA |
9306 | void |
9307 | print_no_history_reason (struct ui_out *uiout) | |
33d62d64 | 9308 | { |
37f54063 BL |
9309 | if (uiout->is_mi_like_p ()) |
9310 | uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_NO_HISTORY)); | |
08919701 GL |
9311 | else if (execution_direction == EXEC_FORWARD) |
9312 | uiout->text ("\nReached end of recorded history; stopping.\nFollowing " | |
9313 | "forward execution will be added to history.\n"); | |
37f54063 | 9314 | else |
08919701 GL |
9315 | { |
9316 | gdb_assert (execution_direction == EXEC_REVERSE); | |
9317 | uiout->text ("\nReached end of recorded history; stopping.\nBackward " | |
9318 | "execution from here not possible.\n"); | |
9319 | } | |
11cf8741 | 9320 | } |
43ff13b4 | 9321 | |
0c7e1a46 PA |
9322 | /* Print current location without a level number, if we have changed |
9323 | functions or hit a breakpoint. Print source line if we have one. | |
9324 | bpstat_print contains the logic deciding in detail what to print, | |
9325 | based on the event(s) that just occurred. */ | |
9326 | ||
243a9253 | 9327 | static void |
c272a98c | 9328 | print_stop_location (const target_waitstatus &ws) |
0c7e1a46 PA |
9329 | { |
9330 | int bpstat_ret; | |
f486487f | 9331 | enum print_what source_flag; |
0c7e1a46 PA |
9332 | int do_frame_printing = 1; |
9333 | struct thread_info *tp = inferior_thread (); | |
9334 | ||
c272a98c | 9335 | bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws.kind ()); |
0c7e1a46 PA |
9336 | switch (bpstat_ret) |
9337 | { | |
9338 | case PRINT_UNKNOWN: | |
9339 | /* FIXME: cagney/2002-12-01: Given that a frame ID does (or | |
9340 | should) carry around the function and does (or should) use | |
9341 | that when doing a frame comparison. */ | |
9342 | if (tp->control.stop_step | |
a0cbd650 TT |
9343 | && (tp->control.step_frame_id |
9344 | == get_frame_id (get_current_frame ())) | |
f2ffa92b | 9345 | && (tp->control.step_start_function |
1edb66d8 | 9346 | == find_pc_function (tp->stop_pc ()))) |
0c7e1a46 | 9347 | { |
b464e193 TV |
9348 | symtab_and_line sal = find_frame_sal (get_selected_frame (nullptr)); |
9349 | if (sal.symtab != tp->current_symtab) | |
9350 | { | |
9351 | /* Finished step in same frame but into different file, print | |
9352 | location and source line. */ | |
9353 | source_flag = SRC_AND_LOC; | |
9354 | } | |
9355 | else | |
9356 | { | |
9357 | /* Finished step in same frame and same file, just print source | |
9358 | line. */ | |
9359 | source_flag = SRC_LINE; | |
9360 | } | |
0c7e1a46 PA |
9361 | } |
9362 | else | |
9363 | { | |
b464e193 TV |
9364 | /* Finished step into different frame, print location and source |
9365 | line. */ | |
0c7e1a46 PA |
9366 | source_flag = SRC_AND_LOC; |
9367 | } | |
9368 | break; | |
9369 | case PRINT_SRC_AND_LOC: | |
9370 | /* Print location and source line. */ | |
9371 | source_flag = SRC_AND_LOC; | |
9372 | break; | |
9373 | case PRINT_SRC_ONLY: | |
9374 | source_flag = SRC_LINE; | |
9375 | break; | |
9376 | case PRINT_NOTHING: | |
9377 | /* Something bogus. */ | |
9378 | source_flag = SRC_LINE; | |
9379 | do_frame_printing = 0; | |
9380 | break; | |
9381 | default: | |
f34652de | 9382 | internal_error (_("Unknown value.")); |
0c7e1a46 PA |
9383 | } |
9384 | ||
9385 | /* The behavior of this routine with respect to the source | |
9386 | flag is: | |
9387 | SRC_LINE: Print only source line | |
9388 | LOCATION: Print only location | |
9389 | SRC_AND_LOC: Print location and source line. */ | |
9390 | if (do_frame_printing) | |
03acd4d8 | 9391 | print_stack_frame (get_selected_frame (nullptr), 0, source_flag, 1); |
243a9253 PA |
9392 | } |
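/* Example: the PRINT_UNKNOWN branch above condensed into a standalone
   decision table.  Only a step that finishes in the same frame, same
   function, and same source file gets the terse SRC_LINE treatment;
   every other stop prints location plus source line.  */
#include <cstdio>

enum print_what_model { SRC_LINE_M, SRC_AND_LOC_M };

static print_what_model
choose_source_flag (bool finished_step_in_same_frame, bool same_symtab)
{
  if (finished_step_in_same_frame && same_symtab)
    return SRC_LINE_M;
  return SRC_AND_LOC_M;
}

int main ()
{
  std::printf ("same frame+file -> %s\n",
	       choose_source_flag (true, true) == SRC_LINE_M
	       ? "SRC_LINE" : "SRC_AND_LOC");
  std::printf ("new frame       -> %s\n",
	       choose_source_flag (false, false) == SRC_LINE_M
	       ? "SRC_LINE" : "SRC_AND_LOC");
  return 0;
}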
9393 | ||
519d6343 | 9394 | /* See `print_stop_event` in infrun.h. */ |
243a9253 | 9395 | |
519d6343 AM |
9396 | static void |
9397 | do_print_stop_event (struct ui_out *uiout, bool displays) | |
243a9253 | 9398 | { |
243a9253 | 9399 | struct target_waitstatus last; |
243a9253 PA |
9400 | struct thread_info *tp; |
9401 | ||
5b6d1e4f | 9402 | get_last_target_status (nullptr, nullptr, &last); |
243a9253 | 9403 | |
67ad9399 TT |
9404 | { |
9405 | scoped_restore save_uiout = make_scoped_restore (¤t_uiout, uiout); | |
0c7e1a46 | 9406 | |
c272a98c | 9407 | print_stop_location (last); |
243a9253 | 9408 | |
67ad9399 | 9409 | /* Display the auto-display expressions. */ |
4c7d57e7 TT |
9410 | if (displays) |
9411 | do_displays (); | |
67ad9399 | 9412 | } |
243a9253 PA |
9413 | |
9414 | tp = inferior_thread (); | |
573269a8 LS |
9415 | if (tp->thread_fsm () != nullptr |
9416 | && tp->thread_fsm ()->finished_p ()) | |
243a9253 PA |
9417 | { |
9418 | struct return_value_info *rv; | |
9419 | ||
573269a8 LS |
9420 | rv = tp->thread_fsm ()->return_value (); |
9421 | if (rv != nullptr) | |
243a9253 PA |
9422 | print_return_value (uiout, rv); |
9423 | } | |
0c7e1a46 PA |
9424 | } |
9425 | ||
519d6343 AM |
9426 | /* See infrun.h. This function itself sets up buffered output for the |
9427 | duration of do_print_stop_event, which performs the actual event | |
9428 | printing. */ | |
9429 | ||
9430 | void | |
9431 | print_stop_event (struct ui_out *uiout, bool displays) | |
9432 | { | |
9433 | do_with_buffered_output (do_print_stop_event, uiout, displays); | |
9434 | } | |
9435 | ||
388a7084 PA |
9436 | /* See infrun.h. */ |
9437 | ||
9438 | void | |
9439 | maybe_remove_breakpoints (void) | |
9440 | { | |
55f6301a | 9441 | if (!breakpoints_should_be_inserted_now () && target_has_execution ()) |
388a7084 PA |
9442 | { |
9443 | if (remove_breakpoints ()) | |
9444 | { | |
223ffa71 | 9445 | target_terminal::ours_for_output (); |
6cb06a8c TT |
9446 | gdb_printf (_("Cannot remove breakpoints because " |
9447 | "program is no longer writable.\nFurther " | |
9448 | "execution is probably impossible.\n")); | |
388a7084 PA |
9449 | } |
9450 | } | |
9451 | } | |
9452 | ||
4c2f2a79 PA |
9453 | /* The execution context that just caused a normal stop. */ |
9454 | ||
9455 | struct stop_context | |
9456 | { | |
2d844eaf | 9457 | stop_context (); |
2d844eaf TT |
9458 | |
9459 | DISABLE_COPY_AND_ASSIGN (stop_context); | |
9460 | ||
9461 | bool changed () const; | |
9462 | ||
4c2f2a79 PA |
9463 | /* The stop ID. */ |
9464 | ULONGEST stop_id; | |
c906108c | 9465 | |
4c2f2a79 | 9466 | /* The event PTID. */ |
c906108c | 9467 | |
4c2f2a79 PA |
9468 | ptid_t ptid; |
9469 | ||
973c5759 | 9470 | /* If stopped for a thread event, this is the thread that caused the |
4c2f2a79 | 9471 | stop. */ |
d634cd0b | 9472 | thread_info_ref thread; |
4c2f2a79 PA |
9473 | |
9474 | /* The inferior that caused the stop. */ | |
9475 | int inf_num; | |
9476 | }; | |
9477 | ||
2d844eaf | 9478 | /* Initializes a new stop context. If stopped for a thread event, this |
4c2f2a79 PA |
9479 | takes a strong reference to the thread. */ |
9480 | ||
2d844eaf | 9481 | stop_context::stop_context () |
4c2f2a79 | 9482 | { |
2d844eaf TT |
9483 | stop_id = get_stop_id (); |
9484 | ptid = inferior_ptid; | |
9485 | inf_num = current_inferior ()->num; | |
4c2f2a79 | 9486 | |
d7e15655 | 9487 | if (inferior_ptid != null_ptid) |
4c2f2a79 PA |
9488 | { |
9489 | /* Take a strong reference so that the thread can't be deleted | |
9490 | yet. */ | |
d634cd0b | 9491 | thread = thread_info_ref::new_reference (inferior_thread ()); |
4c2f2a79 | 9492 | } |
4c2f2a79 PA |
9493 | } |
9494 | ||
9495 | /* Return true if the current context no longer matches the saved stop | |
9496 | context. */ | |
9497 | ||
2d844eaf TT |
9498 | bool |
9499 | stop_context::changed () const | |
9500 | { | |
9501 | if (ptid != inferior_ptid) | |
9502 | return true; | |
9503 | if (inf_num != current_inferior ()->num) | |
9504 | return true; | |
03acd4d8 | 9505 | if (thread != nullptr && thread->state != THREAD_STOPPED) |
2d844eaf TT |
9506 | return true; |
9507 | if (get_stop_id () != stop_id) | |
9508 | return true; | |
9509 | return false; | |
4c2f2a79 PA |
9510 | } |
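/* Example: the four staleness checks above as a standalone predicate
   (hypothetical types).  normal_stop saves such a context before running
   the user's hook-stop and skips the stop notification when the
   predicate fires, e.g. because the hook resumed the target and a new
   stop id was assigned.  */
#include <cstdio>

struct stop_ctx_model
{
  unsigned long stop_id;
  int ptid;
  int inf_num;
  bool thread_stopped;
};

static bool
changed_model (const stop_ctx_model &saved, const stop_ctx_model &now)
{
  return saved.ptid != now.ptid
	 || saved.inf_num != now.inf_num
	 || !now.thread_stopped
	 || saved.stop_id != now.stop_id;
}

int main ()
{
  stop_ctx_model saved = {42, 1, 1, true};
  stop_ctx_model after_hook = {43, 1, 1, true};  /* Hook re-ran the target.  */
  std::printf ("context changed? %d\n", changed_model (saved, after_hook));
  return 0;
}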
9511 | ||
9512 | /* See infrun.h. */ | |
9513 | ||
8dd08de7 AB |
9514 | bool |
9515 | normal_stop () | |
c906108c | 9516 | { |
73b65bb0 | 9517 | struct target_waitstatus last; |
73b65bb0 | 9518 | |
5b6d1e4f | 9519 | get_last_target_status (nullptr, nullptr, &last); |
73b65bb0 | 9520 | |
4c2f2a79 PA |
9521 | new_stop_id (); |
9522 | ||
29f49a6a PA |
9523 | /* If an exception is thrown from this point on, make sure to |
9524 | propagate GDB's knowledge of the executing state to the | |
9525 | frontend/user running state. A QUIT is an easy exception to see | |
9526 | here, so do this before any filtered output. */ | |
731f534f | 9527 | |
5b6d1e4f | 9528 | ptid_t finish_ptid = null_ptid; |
731f534f | 9529 | |
c35b1492 | 9530 | if (!non_stop) |
5b6d1e4f | 9531 | finish_ptid = minus_one_ptid; |
183be222 SM |
9532 | else if (last.kind () == TARGET_WAITKIND_SIGNALLED |
9533 | || last.kind () == TARGET_WAITKIND_EXITED) | |
e1316e60 PA |
9534 | { |
9535 | /* On some targets, we may still have live threads in the | |
9536 | inferior when we get a process exit event. E.g., for | |
9537 | "checkpoint", when the current checkpoint/fork exits, | |
9538 | linux-fork.c automatically switches to another fork from | |
9539 | within target_mourn_inferior. */ | |
731f534f | 9540 | if (inferior_ptid != null_ptid) |
5b6d1e4f | 9541 | finish_ptid = ptid_t (inferior_ptid.pid ()); |
e1316e60 | 9542 | } |
9488c327 PA |
9543 | else if (last.kind () != TARGET_WAITKIND_NO_RESUMED |
9544 | && last.kind () != TARGET_WAITKIND_THREAD_EXITED) | |
5b6d1e4f PA |
9545 | finish_ptid = inferior_ptid; |
9546 | ||
6b09f134 | 9547 | std::optional<scoped_finish_thread_state> maybe_finish_thread_state; |
5b6d1e4f PA |
9548 | if (finish_ptid != null_ptid) |
9549 | { | |
9550 | maybe_finish_thread_state.emplace | |
9551 | (user_visible_resume_target (finish_ptid), finish_ptid); | |
9552 | } | |
29f49a6a | 9553 | |
b57bacec PA |
9554 | /* As we're presenting a stop, and potentially removing breakpoints, |
9555 | update the thread list so we can tell whether there are threads | |
9556 | running on the target. With target remote, for example, we can | |
9557 | only learn about new threads when we explicitly update the thread | |
9558 | list. Do this before notifying the interpreters about signal | |
9559 | stops, end of stepping ranges, etc., so that the "new thread" | |
9560 | output is emitted before e.g., "Program received signal FOO", | |
9561 | instead of after. */ | |
9562 | update_thread_list (); | |
9563 | ||
183be222 | 9564 | if (last.kind () == TARGET_WAITKIND_STOPPED && stopped_by_random_signal) |
3f75a984 | 9565 | notify_signal_received (inferior_thread ()->stop_signal ()); |
b57bacec | 9566 | |
c906108c SS |
9567 | /* As with the notification of thread events, we want to delay |
9568 | notifying the user that we've switched thread context until | |
9569 | the inferior actually stops. | |
9570 | ||
73b65bb0 DJ |
9571 | There's no point in saying anything if the inferior has exited. |
9572 | Note that SIGNALLED here means "exited with a signal", not | |
b65dc60b PA |
9573 | "received a signal". |
9574 | ||
9575 | Also skip saying anything in non-stop mode. In that mode, as we | |
9576 | don't want GDB to switch threads behind the user's back, to avoid | |
9577 | races where the user is typing a command to apply to thread x, | |
9578 | but GDB switches to thread y before the user finishes entering | |
9579 | the command, fetch_inferior_event installs a cleanup to restore | |
9580 | the current thread back to the thread the user had selected right | |
9581 | after this event is handled, so we're not really switching, only | |
9582 | informing of a stop. */ | |
a81871f7 | 9583 | if (!non_stop) |
c906108c | 9584 | { |
a81871f7 PA |
9585 | if ((last.kind () != TARGET_WAITKIND_SIGNALLED |
9586 | && last.kind () != TARGET_WAITKIND_EXITED | |
9488c327 PA |
9587 | && last.kind () != TARGET_WAITKIND_NO_RESUMED |
9588 | && last.kind () != TARGET_WAITKIND_THREAD_EXITED) | |
a81871f7 PA |
9589 | && target_has_execution () |
9590 | && previous_thread != inferior_thread ()) | |
3b12939d | 9591 | { |
a81871f7 PA |
9592 | SWITCH_THRU_ALL_UIS () |
9593 | { | |
9594 | target_terminal::ours_for_output (); | |
9595 | gdb_printf (_("[Switching to %s]\n"), | |
9596 | target_pid_to_str (inferior_ptid).c_str ()); | |
9597 | annotate_thread_changed (); | |
9598 | } | |
3b12939d | 9599 | } |
a81871f7 PA |
9600 | |
9601 | update_previous_thread (); | |
c906108c | 9602 | } |
c906108c | 9603 | |
9488c327 PA |
9604 | if (last.kind () == TARGET_WAITKIND_NO_RESUMED |
9605 | || last.kind () == TARGET_WAITKIND_THREAD_EXITED) | |
0e5bf2a8 | 9606 | { |
21d48304 PA |
9607 | stop_print_frame = false; |
9608 | ||
0e454242 | 9609 | SWITCH_THRU_ALL_UIS () |
3b12939d PA |
9610 | if (current_ui->prompt_state == PROMPT_BLOCKED) |
9611 | { | |
223ffa71 | 9612 | target_terminal::ours_for_output (); |
9488c327 PA |
9613 | if (last.kind () == TARGET_WAITKIND_NO_RESUMED) |
9614 | gdb_printf (_("No unwaited-for children left.\n")); | |
9615 | else if (last.kind () == TARGET_WAITKIND_THREAD_EXITED) | |
9616 | gdb_printf (_("Command aborted, thread exited.\n")); | |
9617 | else | |
9618 | gdb_assert_not_reached ("unhandled"); | |
3b12939d | 9619 | } |
0e5bf2a8 PA |
9620 | } |
9621 | ||
b57bacec | 9622 | /* Note: this depends on the update_thread_list call above. */ |
388a7084 | 9623 | maybe_remove_breakpoints (); |
c906108c | 9624 | |
c906108c SS |
9625 | /* If an auto-display called a function and that got a signal, |
9626 | delete that auto-display to avoid an infinite recursion. */ | |
9627 | ||
9628 | if (stopped_by_random_signal) | |
9629 | disable_current_display (); | |
9630 | ||
0e454242 | 9631 | SWITCH_THRU_ALL_UIS () |
3b12939d PA |
9632 | { |
9633 | async_enable_stdin (); | |
9634 | } | |
c906108c | 9635 | |
388a7084 | 9636 | /* Let the user/frontend see the threads as stopped. */ |
731f534f | 9637 | maybe_finish_thread_state.reset (); |
388a7084 PA |
9638 | |
9639 | /* Select innermost stack frame - i.e., current frame is frame 0, | |
9640 | and current location is based on that. Handle the case where the | |
9641 | dummy call is returning after being stopped. E.g. the dummy call | |
9642 | previously hit a breakpoint. (If the dummy call returns | |
9643 | normally, we won't reach here.) Do this before the stop hook is | |
9644 | run, so that it doesn't get to see the temporary dummy frame, | |
9645 | which is not where we'll present the stop. */ | |
9646 | if (has_stack_frames ()) | |
9647 | { | |
9648 | if (stop_stack_dummy == STOP_STACK_DUMMY) | |
9649 | { | |
9650 | /* Pop the empty frame that contains the stack dummy. This | |
9651 | also restores inferior state prior to the call (struct | |
9652 | infcall_suspend_state). */ | |
bd2b40ac | 9653 | frame_info_ptr frame = get_current_frame (); |
388a7084 PA |
9654 | |
9655 | gdb_assert (get_frame_type (frame) == DUMMY_FRAME); | |
9656 | frame_pop (frame); | |
9657 | /* frame_pop calls reinit_frame_cache as the last thing it | |
9658 | does which means there's now no selected frame. */ | |
9659 | } | |
9660 | ||
9661 | select_frame (get_current_frame ()); | |
9662 | ||
9663 | /* Set the current source location. */ | |
9664 | set_current_sal_from_frame (get_current_frame ()); | |
9665 | } | |
dd7e2d2b PA |
9666 | |
9667 | /* Look up the hook_stop and run it (CLI internally handles problem | |
9668 | of stop_command's pre-hook not existing). */ | |
49a82d50 | 9669 | stop_context saved_context; |
4c2f2a79 | 9670 | |
49a82d50 TT |
9671 | try |
9672 | { | |
9673 | execute_cmd_pre_hook (stop_command); | |
4c2f2a79 | 9674 | } |
b1ffd112 | 9675 | catch (const gdb_exception_error &ex) |
49a82d50 TT |
9676 | { |
9677 | exception_fprintf (gdb_stderr, ex, | |
9678 | "Error while running hook_stop:\n"); | |
9679 | } | |
9680 | ||
9681 | /* If the stop hook resumes the target, then there's no point in | |
9682 | trying to notify about the previous stop; its context is | |
9683 | gone. Likewise if the command switches thread or inferior -- | |
9684 | the observers would print a stop for the wrong | |
9685 | thread/inferior. */ | |
9686 | if (saved_context.changed ()) | |
8dd08de7 | 9687 | return true; |
dd7e2d2b | 9688 | |
388a7084 PA |
9689 | /* Notify observers about the stop. This is where the interpreters |
9690 | print the stop event. */ | |
87829267 SM |
9691 | notify_normal_stop ((inferior_ptid != null_ptid |
9692 | ? inferior_thread ()->control.stop_bpstat | |
9693 | : nullptr), | |
9694 | stop_print_frame); | |
243a9253 PA |
9695 | annotate_stopped (); |
9696 | ||
55f6301a | 9697 | if (target_has_execution ()) |
48844aa6 | 9698 | { |
183be222 SM |
9699 | if (last.kind () != TARGET_WAITKIND_SIGNALLED |
9700 | && last.kind () != TARGET_WAITKIND_EXITED | |
9488c327 PA |
9701 | && last.kind () != TARGET_WAITKIND_NO_RESUMED |
9702 | && last.kind () != TARGET_WAITKIND_THREAD_EXITED) | |
48844aa6 PA |
9703 | /* Delete the breakpoint we stopped at, if it wants to be deleted. |
9704 | Delete any breakpoint that is to be deleted at the next stop. */ | |
16c381f0 | 9705 | breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat); |
94cc34af | 9706 | } |
6c95b8df | 9707 | |
8dd08de7 | 9708 | return false; |
c906108c | 9709 | } |
c906108c | 9710 | \f |
c5aa993b | 9711 | int |
96baa820 | 9712 | signal_stop_state (int signo) |
c906108c | 9713 | { |
d6b48e9c | 9714 | return signal_stop[signo]; |
c906108c SS |
9715 | } |
9716 | ||
c5aa993b | 9717 | int |
96baa820 | 9718 | signal_print_state (int signo) |
c906108c SS |
9719 | { |
9720 | return signal_print[signo]; | |
9721 | } | |
9722 | ||
c5aa993b | 9723 | int |
96baa820 | 9724 | signal_pass_state (int signo) |
c906108c SS |
9725 | { |
9726 | return signal_program[signo]; | |
9727 | } | |
9728 | ||
2455069d UW |
9729 | static void |
9730 | signal_cache_update (int signo) | |
9731 | { | |
9732 | if (signo == -1) | |
9733 | { | |
a493e3e2 | 9734 | for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++) |
2455069d UW |
9735 | signal_cache_update (signo); |
9736 | ||
9737 | return; | |
9738 | } | |
9739 | ||
9740 | signal_pass[signo] = (signal_stop[signo] == 0 | |
9741 | && signal_print[signo] == 0 | |
ab04a2af TT |
9742 | && signal_program[signo] == 1 |
9743 | && signal_catch[signo] == 0); | |
2455069d UW |
9744 | } |
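/* Example: the cached predicate computed above, standalone.  A signal
   can be delivered straight to the inferior (the fast "pass" path) only
   when GDB neither stops nor prints on it, the user asked for it to be
   passed, and no catchpoint wants it -- the state produced by e.g.
   "handle SIGUSR1 nostop noprint pass".  */
#include <cstdio>

static bool
signal_pass_model (bool stop, bool print, bool program, bool catch_)
{
  return !stop && !print && program && !catch_;
}

int main ()
{
  /* handle SIGUSR1 nostop noprint pass  */
  std::printf ("pass SIGUSR1 directly? %d\n",
	       signal_pass_model (false, false, true, false));
  /* handle SIGSEGV stop print nopass  */
  std::printf ("pass SIGSEGV directly? %d\n",
	       signal_pass_model (true, true, false, false));
  return 0;
}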
9745 | ||
488f131b | 9746 | int |
7bda5e4a | 9747 | signal_stop_update (int signo, int state) |
d4f3574e SS |
9748 | { |
9749 | int ret = signal_stop[signo]; | |
abbb1732 | 9750 | |
d4f3574e | 9751 | signal_stop[signo] = state; |
2455069d | 9752 | signal_cache_update (signo); |
d4f3574e SS |
9753 | return ret; |
9754 | } | |
9755 | ||
488f131b | 9756 | int |
7bda5e4a | 9757 | signal_print_update (int signo, int state) |
d4f3574e SS |
9758 | { |
9759 | int ret = signal_print[signo]; | |
abbb1732 | 9760 | |
d4f3574e | 9761 | signal_print[signo] = state; |
2455069d | 9762 | signal_cache_update (signo); |
d4f3574e SS |
9763 | return ret; |
9764 | } | |
9765 | ||
488f131b | 9766 | int |
7bda5e4a | 9767 | signal_pass_update (int signo, int state) |
d4f3574e SS |
9768 | { |
9769 | int ret = signal_program[signo]; | |
abbb1732 | 9770 | |
d4f3574e | 9771 | signal_program[signo] = state; |
2455069d | 9772 | signal_cache_update (signo); |
d4f3574e SS |
9773 | return ret; |
9774 | } | |
9775 | ||
ab04a2af TT |
9776 | /* Update the global 'signal_catch' from INFO and notify the |
9777 | target. */ | |
9778 | ||
9779 | void | |
9780 | signal_catch_update (const unsigned int *info) | |
9781 | { | |
9782 | int i; | |
9783 | ||
9784 | for (i = 0; i < GDB_SIGNAL_LAST; ++i) | |
9785 | signal_catch[i] = info[i] > 0; | |
9786 | signal_cache_update (-1); | |
adc6a863 | 9787 | target_pass_signals (signal_pass); |
ab04a2af TT |
9788 | } |
9789 | ||
c906108c | 9790 | static void |
96baa820 | 9791 | sig_print_header (void) |
c906108c | 9792 | { |
6cb06a8c TT |
9793 | gdb_printf (_("Signal Stop\tPrint\tPass " |
9794 | "to program\tDescription\n")); | |
c906108c SS |
9795 | } |
9796 | ||
9797 | static void | |
2ea28649 | 9798 | sig_print_info (enum gdb_signal oursig) |
c906108c | 9799 | { |
2ea28649 | 9800 | const char *name = gdb_signal_to_name (oursig); |
c906108c | 9801 | int name_padding = 13 - strlen (name); |
96baa820 | 9802 | |
c906108c SS |
9803 | if (name_padding <= 0) |
9804 | name_padding = 0; | |
9805 | ||
6cb06a8c TT |
9806 | gdb_printf ("%s", name); |
9807 | gdb_printf ("%*.*s ", name_padding, name_padding, " "); | |
9808 | gdb_printf ("%s\t", signal_stop[oursig] ? "Yes" : "No"); | |
9809 | gdb_printf ("%s\t", signal_print[oursig] ? "Yes" : "No"); | |
9810 | gdb_printf ("%s\t\t", signal_program[oursig] ? "Yes" : "No"); | |
9811 | gdb_printf ("%s\n", gdb_signal_to_string (oursig)); | |
c906108c SS |
9812 | } |
9813 | ||
9814 | /* Specify how various signals in the inferior should be handled. */ | |
9815 | ||
9816 | static void | |
0b39b52e | 9817 | handle_command (const char *args, int from_tty) |
c906108c | 9818 | { |
c906108c | 9819 | int digits, wordlen; |
b926417a | 9820 | int sigfirst, siglast; |
2ea28649 | 9821 | enum gdb_signal oursig; |
c906108c | 9822 | int allsigs; |
c906108c | 9823 | |
03acd4d8 | 9824 | if (args == nullptr) |
c906108c | 9825 | { |
e2e0b3e5 | 9826 | error_no_arg (_("signal to handle")); |
c906108c SS |
9827 | } |
9828 | ||
1777feb0 | 9829 | /* Allocate and zero an array of flags for which signals to handle. */ |
c906108c | 9830 | |
adc6a863 PA |
9831 | const size_t nsigs = GDB_SIGNAL_LAST; |
9832 | unsigned char sigs[nsigs] {}; | |
c906108c | 9833 | |
1777feb0 | 9834 | /* Break the command line up into args. */ |
c906108c | 9835 | |
773a1edc | 9836 | gdb_argv built_argv (args); |
c906108c SS |
9837 | |
9838 | /* Walk through the args, looking for signal oursigs, signal names, and | |
9839 | actions. Signal numbers and signal names may be interspersed with | |
9840 | actions, with the actions being performed for all signals cumulatively | |
1777feb0 | 9841 | specified. Signal ranges can be specified as <LOW>-<HIGH>. */ |
c906108c | 9842 | |
773a1edc | 9843 | for (char *arg : built_argv) |
c906108c | 9844 | { |
773a1edc TT |
9845 | wordlen = strlen (arg); |
9846 | for (digits = 0; isdigit (arg[digits]); digits++) | |
c906108c SS |
9847 | {; |
9848 | } | |
9849 | allsigs = 0; | |
9850 | sigfirst = siglast = -1; | |
9851 | ||
773a1edc | 9852 | if (wordlen >= 1 && !strncmp (arg, "all", wordlen)) |
c906108c SS |
9853 | { |
9854 | /* Apply action to all signals except those used by the | |
1777feb0 | 9855 | debugger. Silently skip those. */ |
c906108c SS |
9856 | allsigs = 1; |
9857 | sigfirst = 0; | |
9858 | siglast = nsigs - 1; | |
9859 | } | |
773a1edc | 9860 | else if (wordlen >= 1 && !strncmp (arg, "stop", wordlen)) |
c906108c SS |
9861 | { |
9862 | SET_SIGS (nsigs, sigs, signal_stop); | |
9863 | SET_SIGS (nsigs, sigs, signal_print); | |
9864 | } | |
773a1edc | 9865 | else if (wordlen >= 1 && !strncmp (arg, "ignore", wordlen)) |
c906108c SS |
9866 | { |
9867 | UNSET_SIGS (nsigs, sigs, signal_program); | |
9868 | } | |
773a1edc | 9869 | else if (wordlen >= 2 && !strncmp (arg, "print", wordlen)) |
c906108c SS |
9870 | { |
9871 | SET_SIGS (nsigs, sigs, signal_print); | |
9872 | } | |
773a1edc | 9873 | else if (wordlen >= 2 && !strncmp (arg, "pass", wordlen)) |
c906108c SS |
9874 | { |
9875 | SET_SIGS (nsigs, sigs, signal_program); | |
9876 | } | |
773a1edc | 9877 | else if (wordlen >= 3 && !strncmp (arg, "nostop", wordlen)) |
c906108c SS |
9878 | { |
9879 | UNSET_SIGS (nsigs, sigs, signal_stop); | |
9880 | } | |
773a1edc | 9881 | else if (wordlen >= 3 && !strncmp (arg, "noignore", wordlen)) |
c906108c SS |
9882 | { |
9883 | SET_SIGS (nsigs, sigs, signal_program); | |
9884 | } | |
773a1edc | 9885 | else if (wordlen >= 4 && !strncmp (arg, "noprint", wordlen)) |
c906108c SS |
9886 | { |
9887 | UNSET_SIGS (nsigs, sigs, signal_print); | |
9888 | UNSET_SIGS (nsigs, sigs, signal_stop); | |
9889 | } | |
773a1edc | 9890 | else if (wordlen >= 4 && !strncmp (arg, "nopass", wordlen)) |
c906108c SS |
9891 | { |
9892 | UNSET_SIGS (nsigs, sigs, signal_program); | |
9893 | } | |
9894 | else if (digits > 0) | |
9895 | { | |
9896 | /* It is numeric. The numeric signal refers to our own | |
9897 | internal signal numbering from target.h, not to host/target | |
9898 | signal number. This is a feature; users really should be | |
9899 | using symbolic names anyway, and the common ones like | |
9900 | SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */ | |
9901 | ||
9902 | sigfirst = siglast = (int) | |
773a1edc TT |
9903 | gdb_signal_from_command (atoi (arg)); |
9904 | if (arg[digits] == '-') | |
c906108c SS |
9905 | { |
9906 | siglast = (int) | |
773a1edc | 9907 | gdb_signal_from_command (atoi (arg + digits + 1)); |
c906108c SS |
9908 | } |
9909 | if (sigfirst > siglast) | |
9910 | { | |
1777feb0 | 9911 | /* Bet he didn't figure we'd think of this case... */ |
b926417a | 9912 | std::swap (sigfirst, siglast); |
c906108c SS |
9913 | } |
9914 | } | |
9915 | else | |
9916 | { | |
773a1edc | 9917 | oursig = gdb_signal_from_name (arg); |
a493e3e2 | 9918 | if (oursig != GDB_SIGNAL_UNKNOWN) |
c906108c SS |
9919 | { |
9920 | sigfirst = siglast = (int) oursig; | |
9921 | } | |
9922 | else | |
9923 | { | |
9924 | /* Not a number and not a recognized flag word => complain. */ | |
773a1edc | 9925 | error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg); |
c906108c SS |
9926 | } |
9927 | } | |
9928 | ||
9929 | /* If any signal numbers or symbol names were found, set flags for | |
dda83cd7 | 9930 | which signals to apply actions to. */ |
c906108c | 9931 | |
b926417a | 9932 | for (int signum = sigfirst; signum >= 0 && signum <= siglast; signum++) |
c906108c | 9933 | { |
2ea28649 | 9934 | switch ((enum gdb_signal) signum) |
c906108c | 9935 | { |
a493e3e2 PA |
9936 | case GDB_SIGNAL_TRAP: |
9937 | case GDB_SIGNAL_INT: | |
c906108c SS |
9938 | if (!allsigs && !sigs[signum]) |
9939 | { | |
9e2f0ad4 | 9940 | if (query (_("%s is used by the debugger.\n\ |
3e43a32a | 9941 | Are you sure you want to change it? "), |
2ea28649 | 9942 | gdb_signal_to_name ((enum gdb_signal) signum))) |
c906108c SS |
9943 | { |
9944 | sigs[signum] = 1; | |
9945 | } | |
9946 | else | |
6cb06a8c | 9947 | gdb_printf (_("Not confirmed, unchanged.\n")); |
c906108c SS |
9948 | } |
9949 | break; | |
a493e3e2 PA |
9950 | case GDB_SIGNAL_0: |
9951 | case GDB_SIGNAL_DEFAULT: | |
9952 | case GDB_SIGNAL_UNKNOWN: | |
c906108c SS |
9953 | /* Make sure that "all" doesn't print these. */ |
9954 | break; | |
9955 | default: | |
9956 | sigs[signum] = 1; | |
9957 | break; | |
9958 | } | |
9959 | } | |
c906108c SS |
9960 | } |
9961 | ||
b926417a | 9962 | for (int signum = 0; signum < nsigs; signum++) |
3a031f65 PA |
9963 | if (sigs[signum]) |
9964 | { | |
2455069d | 9965 | signal_cache_update (-1); |
adc6a863 PA |
9966 | target_pass_signals (signal_pass); |
9967 | target_program_signals (signal_program); | |
c906108c | 9968 | |
3a031f65 PA |
9969 | if (from_tty) |
9970 | { | |
9971 | /* Show the results. */ | |
9972 | sig_print_header (); | |
9973 | for (; signum < nsigs; signum++) | |
9974 | if (sigs[signum]) | |
aead7601 | 9975 | sig_print_info ((enum gdb_signal) signum); |
3a031f65 PA |
9976 | } |
9977 | ||
9978 | break; | |
9979 | } | |
c906108c SS |
9980 | } |
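/* For reference, typical inputs the parsing loop above accepts include
   "handle SIGUSR1 nostop noprint pass" (signal names and action keywords
   may be interspersed, with actions accumulating over the signals named),
   and numeric ranges such as "handle 5-9 nopass", where the numbers use
   GDB's own signal table rather than the host's.  */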
9981 | ||
de0bea00 MF |
9982 | /* Complete the "handle" command. */ |
9983 | ||
eb3ff9a5 | 9984 | static void |
de0bea00 | 9985 | handle_completer (struct cmd_list_element *ignore, |
eb3ff9a5 | 9986 | completion_tracker &tracker, |
6f937416 | 9987 | const char *text, const char *word) |
de0bea00 | 9988 | { |
de0bea00 MF |
9989 | static const char * const keywords[] = |
9990 | { | |
9991 | "all", | |
9992 | "stop", | |
9993 | "ignore", | |
9994 | "print", | |
9995 | "pass", | |
9996 | "nostop", | |
9997 | "noignore", | |
9998 | "noprint", | |
9999 | "nopass", | |
03acd4d8 | 10000 | nullptr, |
de0bea00 MF |
10001 | }; |
10002 | ||
eb3ff9a5 PA |
10003 | signal_completer (ignore, tracker, text, word); |
10004 | complete_on_enum (tracker, keywords, word, word); | |
de0bea00 MF |
10005 | } |
10006 | ||
2ea28649 PA |
10007 | enum gdb_signal |
10008 | gdb_signal_from_command (int num) | |
ed01b82c PA |
10009 | { |
10010 | if (num >= 1 && num <= 15) | |
2ea28649 | 10011 | return (enum gdb_signal) num; |
ed01b82c PA |
10012 | error (_("Only signals 1-15 are valid as numeric signals.\n\ |
10013 | Use \"info signals\" for a list of symbolic signals.")); | |
10014 | } | |
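/* Example: the numeric rule above, standalone.  Only 1-15 are accepted
   as numeric signals, and they name GDB's internal signal numbering (so
   "handle 2" means GDB_SIGNAL_INT regardless of the host's numbers).  */
#include <cstdio>
#include <stdexcept>

static int
signal_from_command_model (int num)
{
  if (num >= 1 && num <= 15)
    return num;
  throw std::invalid_argument
    ("Only signals 1-15 are valid as numeric signals.");
}

int main ()
{
  std::printf ("2 -> internal signal %d\n", signal_from_command_model (2));
  try
    {
      signal_from_command_model (64);
    }
  catch (const std::invalid_argument &e)
    {
      std::printf ("64 rejected: %s\n", e.what ());
    }
  return 0;
}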
10015 | ||
c906108c SS |
10016 | /* Print current contents of the tables set by the handle command. |
10017 | It is possible we should just be printing signals actually used | |
10018 | by the current target (but for things to work right when switching | |
10019 | targets, all signals should be in the signal tables). */ | |
10020 | ||
10021 | static void | |
1d12d88f | 10022 | info_signals_command (const char *signum_exp, int from_tty) |
c906108c | 10023 | { |
2ea28649 | 10024 | enum gdb_signal oursig; |
abbb1732 | 10025 | |
c906108c SS |
10026 | sig_print_header (); |
10027 | ||
10028 | if (signum_exp) | |
10029 | { | |
10030 | /* First see if this is a symbol name. */ | |
2ea28649 | 10031 | oursig = gdb_signal_from_name (signum_exp); |
a493e3e2 | 10032 | if (oursig == GDB_SIGNAL_UNKNOWN) |
c906108c SS |
10033 | { |
10034 | /* No, try numeric. */ | |
10035 | oursig = | |
2ea28649 | 10036 | gdb_signal_from_command (parse_and_eval_long (signum_exp)); |
c906108c SS |
10037 | } |
10038 | sig_print_info (oursig); | |
10039 | return; | |
10040 | } | |
10041 | ||
6cb06a8c | 10042 | gdb_printf ("\n"); |
c906108c | 10043 | /* These ugly casts brought to you by the native VAX compiler. */ |
a493e3e2 PA |
10044 | for (oursig = GDB_SIGNAL_FIRST; |
10045 | (int) oursig < (int) GDB_SIGNAL_LAST; | |
2ea28649 | 10046 | oursig = (enum gdb_signal) ((int) oursig + 1)) |
c906108c SS |
10047 | { |
10048 | QUIT; | |
10049 | ||
a493e3e2 PA |
10050 | if (oursig != GDB_SIGNAL_UNKNOWN |
10051 | && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0) | |
c906108c SS |
10052 | sig_print_info (oursig); |
10053 | } | |
10054 | ||
9e69a2e1 TT |
10055 | gdb_printf (_("\nUse the \"%ps\" command to change these tables.\n"), |
10056 | styled_string (command_style.style (), "handle")); | |
c906108c | 10057 | } |
4aa995e1 PA |
10058 | |
10059 | /* The $_siginfo convenience variable is a bit special. We don't know | |
10060 | for sure the type of the value until we actually have a chance to | |
7a9dd1b2 | 10061 | fetch the data. The type can change depending on gdbarch, so it is |
4aa995e1 PA |
10062 | also dependent on which thread you have selected. We deal with | |
10063 | this in two ways: ||
10064 | 1. making $_siginfo be an internalvar that creates a new value on | |
10065 | access. | |
10066 | ||
10067 | 2. making the value of $_siginfo be an lval_computed value. */ | |
10068 | ||
10069 | /* This function implements the lval_computed support for reading a | |
10070 | $_siginfo value. */ | |
10071 | ||
10072 | static void | |
10073 | siginfo_value_read (struct value *v) | |
10074 | { | |
10075 | LONGEST transferred; | |
10076 | ||
a911d87a PA |
10077 | /* If we can access registers, so can we access $_siginfo. Likewise |
10078 | vice versa. */ | |
10079 | validate_registers_access (); | |
c709acd1 | 10080 | |
4aa995e1 | 10081 | transferred = |
328d42d8 SM |
10082 | target_read (current_inferior ()->top_target (), |
10083 | TARGET_OBJECT_SIGNAL_INFO, | |
03acd4d8 | 10084 | nullptr, |
bbe912ba | 10085 | v->contents_all_raw ().data (), |
76675c4d | 10086 | v->offset (), |
d0c97917 | 10087 | v->type ()->length ()); |
4aa995e1 | 10088 | |
d0c97917 | 10089 | if (transferred != v->type ()->length ()) |
4aa995e1 PA |
10090 | error (_("Unable to read siginfo")); |
10091 | } | |
10092 | ||
10093 | /* This function implements the lval_computed support for writing a | |
10094 | $_siginfo value. */ | |
10095 | ||
10096 | static void | |
10097 | siginfo_value_write (struct value *v, struct value *fromval) | |
10098 | { | |
10099 | LONGEST transferred; | |
10100 | ||
a911d87a PA |
10101 | /* If we can access registers, so can we access $_siginfo. Likewise |
10102 | vice versa. */ | |
10103 | validate_registers_access (); | |
c709acd1 | 10104 | |
328d42d8 | 10105 | transferred = target_write (current_inferior ()->top_target (), |
4aa995e1 | 10106 | TARGET_OBJECT_SIGNAL_INFO, |
03acd4d8 | 10107 | nullptr, |
bbe912ba | 10108 | fromval->contents_all_raw ().data (), |
76675c4d | 10109 | v->offset (), |
d0c97917 | 10110 | fromval->type ()->length ()); |
4aa995e1 | 10111 | |
d0c97917 | 10112 | if (transferred != fromval->type ()->length ()) |
4aa995e1 PA |
10113 | error (_("Unable to write siginfo")); |
10114 | } | |
10115 | ||
c8f2448a | 10116 | static const struct lval_funcs siginfo_value_funcs = |
4aa995e1 PA |
10117 | { |
10118 | siginfo_value_read, | |
10119 | siginfo_value_write | |
10120 | }; | |
10121 | ||
10122 | /* Return a new value with the correct type for the siginfo object of | |
78267919 UW |
10123 | the current thread using architecture GDBARCH. Return a void value |
10124 | if there's no object available. */ | |
4aa995e1 | 10125 | |
2c0b251b | 10126 | static struct value * |
22d2b532 SDJ |
10127 | siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var, |
10128 | void *ignore) | |
4aa995e1 | 10129 | { |
841de120 | 10130 | if (target_has_stack () |
d7e15655 | 10131 | && inferior_ptid != null_ptid |
78267919 | 10132 | && gdbarch_get_siginfo_type_p (gdbarch)) |
4aa995e1 | 10133 | { |
78267919 | 10134 | struct type *type = gdbarch_get_siginfo_type (gdbarch); |
abbb1732 | 10135 | |
b64e2602 | 10136 | return value::allocate_computed (type, &siginfo_value_funcs, nullptr); |
4aa995e1 PA |
10137 | } |
10138 | ||
317c3ed9 | 10139 | return value::allocate (builtin_type (gdbarch)->builtin_void); |
4aa995e1 PA |
10140 | } |
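/* Example: the lazy read/write indirection that lval_computed gives
   $_siginfo, modeled standalone (not the GDB value API).  The data is
   only transferred when the value is actually read or written, which is
   what lets the variable's contents track the selected thread.  */
#include <cstdio>
#include <functional>

struct computed_value_model
{
  std::function<long ()> read;
  std::function<void (long)> write;
};

int main ()
{
  long target_side_word = 11;           /* Pretend per-thread siginfo.  */
  computed_value_model v
    = { [&] () { return target_side_word; },
	[&] (long x) { target_side_word = x; } };

  std::printf ("read -> %ld\n", v.read ());
  v.write (4);                          /* Writing goes back to the target.  */
  std::printf ("after write -> %ld\n", v.read ());
  return 0;
}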
10141 | ||
c906108c | 10142 | \f |
16c381f0 JK |
10143 | /* infcall_suspend_state contains state about the program itself like its |
10144 | registers and any signal it received when it last stopped. | |
10145 | This state must be restored regardless of how the inferior function call | |
10146 | ends (either successfully, or after it hits a breakpoint or signal) | |
10147 | if the program is to properly continue where it left off. */ | |
10148 | ||
class infcall_suspend_state
{
public:
  /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
     once the inferior function call has finished.  */
  infcall_suspend_state (struct gdbarch *gdbarch,
			 const struct thread_info *tp,
			 struct regcache *regcache)
    : m_registers (new readonly_detached_regcache (*regcache))
  {
    tp->save_suspend_to (m_thread_suspend);

    gdb::unique_xmalloc_ptr<gdb_byte> siginfo_data;

    if (gdbarch_get_siginfo_type_p (gdbarch))
      {
	struct type *type = gdbarch_get_siginfo_type (gdbarch);
	size_t len = type->length ();

	siginfo_data.reset ((gdb_byte *) xmalloc (len));

	if (target_read (current_inferior ()->top_target (),
			 TARGET_OBJECT_SIGNAL_INFO, nullptr,
			 siginfo_data.get (), 0, len) != len)
	  {
	    /* Errors ignored.  */
	    siginfo_data.reset (nullptr);
	  }
      }

    if (siginfo_data)
      {
	m_siginfo_gdbarch = gdbarch;
	m_siginfo_data = std::move (siginfo_data);
      }
  }

  /* Return a pointer to the stored register state.  */

  readonly_detached_regcache *registers () const
  {
    return m_registers.get ();
  }

  /* Restores the stored state into GDBARCH, TP, and REGCACHE.  */

  void restore (struct gdbarch *gdbarch,
		struct thread_info *tp,
		struct regcache *regcache) const
  {
    tp->restore_suspend_from (m_thread_suspend);

    if (m_siginfo_gdbarch == gdbarch)
      {
	struct type *type = gdbarch_get_siginfo_type (gdbarch);

	/* Errors ignored.  */
	target_write (current_inferior ()->top_target (),
		      TARGET_OBJECT_SIGNAL_INFO, nullptr,
		      m_siginfo_data.get (), 0, type->length ());
      }

    /* The inferior can be gone if the user types "print exit(0)"
       (and perhaps other times).  */
    if (target_has_execution ())
      /* NB: The register write goes through to the target.  */
      regcache->restore (registers ());
  }

private:
  /* How the current thread stopped before the inferior function call was
     executed.  */
  struct thread_suspend_state m_thread_suspend;

  /* The registers before the inferior function call was executed.  */
  std::unique_ptr<readonly_detached_regcache> m_registers;

  /* Format of SIGINFO_DATA, or NULL if it is not present.  */
  struct gdbarch *m_siginfo_gdbarch = nullptr;

  /* The saved siginfo data.  Its format depends on M_SIGINFO_GDBARCH and
     its length is gdbarch_get_siginfo_type ()->length (); for a
     different gdbarch the content would be invalid.  */
  gdb::unique_xmalloc_ptr<gdb_byte> m_siginfo_data;
};

infcall_suspend_state_up
save_infcall_suspend_state ()
{
  struct thread_info *tp = inferior_thread ();
  regcache *regcache = get_thread_regcache (tp);
  struct gdbarch *gdbarch = regcache->arch ();

  infcall_suspend_state_up inf_state
    (new struct infcall_suspend_state (gdbarch, tp, regcache));

  /* Having saved the current state, adjust the thread state, discarding
     any stop signal information.  The stop signal is not useful when
     starting an inferior function call, and run_inferior_call will not use
     the signal due to its `proceed' call with GDB_SIGNAL_0.  */
  tp->set_stop_signal (GDB_SIGNAL_0);

  return inf_state;
}

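/* Illustrative sketch of the intended pairing, assuming a caller shaped
   like the infcall code (not a verbatim excerpt):

     infcall_suspend_state_up sp = save_infcall_suspend_state ();
     ... set up and run the dummy-frame call ...
     restore_infcall_suspend_state (sp.release ());

   restore_infcall_suspend_state deletes the state, so ownership must be
   released from the unique pointer first; paths that should keep the
   inferior's new state call discard_infcall_suspend_state instead.  */
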
/* Restore inferior session state to INF_STATE.  */

void
restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  struct thread_info *tp = inferior_thread ();
  regcache *regcache = get_thread_regcache (inferior_thread ());
  struct gdbarch *gdbarch = regcache->arch ();

  inf_state->restore (gdbarch, tp, regcache);
  discard_infcall_suspend_state (inf_state);
}

void
discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  delete inf_state;
}

readonly_detached_regcache *
get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
{
  return inf_state->registers ();
}

/* infcall_control_state contains state regarding gdb's control of the
   inferior itself, such as stepping control.  It also contains session
   state, such as the user's currently selected frame.  */

struct infcall_control_state
{
  struct thread_control_state thread_control;
  struct inferior_control_state inferior_control;

  /* Other fields:  */
  enum stop_stack_kind stop_stack_dummy = STOP_NONE;
  int stopped_by_random_signal = 0;

  /* ID and level of the selected frame when the inferior function
     call was made.  */
  struct frame_id selected_frame_id {};
  int selected_frame_level = -1;
};

/* Save all of the information associated with the inferior<==>gdb
   connection.  */

infcall_control_state_up
save_infcall_control_state ()
{
  infcall_control_state_up inf_status (new struct infcall_control_state);
  struct thread_info *tp = inferior_thread ();
  struct inferior *inf = current_inferior ();

  inf_status->thread_control = tp->control;
  inf_status->inferior_control = inf->control;

  tp->control.step_resume_breakpoint = nullptr;
  tp->control.exception_resume_breakpoint = nullptr;

  /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
     chain.  If caller's caller is walking the chain, they'll be happier if we
     hand them back the original chain when restore_infcall_control_state is
     called.  */
  tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);

  /* Other fields:  */
  inf_status->stop_stack_dummy = stop_stack_dummy;
  inf_status->stopped_by_random_signal = stopped_by_random_signal;

  save_selected_frame (&inf_status->selected_frame_id,
		       &inf_status->selected_frame_level);

  return inf_status;
}

/* Restore inferior session state to INF_STATUS.  */

void
restore_infcall_control_state (struct infcall_control_state *inf_status)
{
  struct thread_info *tp = inferior_thread ();
  struct inferior *inf = current_inferior ();

  if (tp->control.step_resume_breakpoint)
    tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;

  if (tp->control.exception_resume_breakpoint)
    tp->control.exception_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  /* Handle the bpstat_copy of the chain.  */
  bpstat_clear (&tp->control.stop_bpstat);

  tp->control = inf_status->thread_control;
  inf->control = inf_status->inferior_control;

  /* Other fields:  */
  stop_stack_dummy = inf_status->stop_stack_dummy;
  stopped_by_random_signal = inf_status->stopped_by_random_signal;

  if (target_has_stack ())
    {
      restore_selected_frame (inf_status->selected_frame_id,
			      inf_status->selected_frame_level);
    }

  delete inf_status;
}

10363 | |
10364 | void | |
16c381f0 | 10365 | discard_infcall_control_state (struct infcall_control_state *inf_status) |
7a292a7a | 10366 | { |
8358c15c JK |
10367 | if (inf_status->thread_control.step_resume_breakpoint) |
10368 | inf_status->thread_control.step_resume_breakpoint->disposition | |
10369 | = disp_del_at_next_stop; | |
10370 | ||
5b79abe7 TT |
10371 | if (inf_status->thread_control.exception_resume_breakpoint) |
10372 | inf_status->thread_control.exception_resume_breakpoint->disposition | |
10373 | = disp_del_at_next_stop; | |
10374 | ||
1777feb0 | 10375 | /* See save_infcall_control_state for info on stop_bpstat. */ |
16c381f0 | 10376 | bpstat_clear (&inf_status->thread_control.stop_bpstat); |
8358c15c | 10377 | |
ee841dd8 | 10378 | delete inf_status; |
7a292a7a | 10379 | } |
\f
/* See infrun.h.  */

void
clear_exit_convenience_vars (void)
{
  clear_internalvar (lookup_internalvar ("_exitsignal"));
  clear_internalvar (lookup_internalvar ("_exitcode"));
}
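
/* Illustrative sketch (the pid and exit code below are made up):

     (gdb) run
     [Inferior 1 (process 1234) exited with code 01]
     (gdb) print $_exitcode
     $1 = 1

   Only one of $_exitcode/$_exitsignal is meaningful after an exit,
   which is why both are cleared together above.  */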
\f

/* User interface for reverse debugging:
   Set exec-direction / show exec-direction commands
   (returns error unless target implements to_set_exec_direction method).  */

enum exec_direction_kind execution_direction = EXEC_FORWARD;
static const char exec_forward[] = "forward";
static const char exec_reverse[] = "reverse";
static const char *exec_direction = exec_forward;
static const char *const exec_direction_names[] = {
  exec_forward,
  exec_reverse,
  nullptr
};

static void
set_exec_direction_func (const char *args, int from_tty,
			 struct cmd_list_element *cmd)
{
  if (target_can_execute_reverse ())
    {
      if (!strcmp (exec_direction, exec_forward))
	execution_direction = EXEC_FORWARD;
      else if (!strcmp (exec_direction, exec_reverse))
	execution_direction = EXEC_REVERSE;
    }
  else
    {
      exec_direction = exec_forward;
      error (_("Target does not support this operation."));
    }
}

static void
show_exec_direction_func (struct ui_file *out, int from_tty,
			  struct cmd_list_element *cmd, const char *value)
{
  switch (execution_direction)
    {
    case EXEC_FORWARD:
      gdb_printf (out, _("Forward.\n"));
      break;
    case EXEC_REVERSE:
      gdb_printf (out, _("Reverse.\n"));
      break;
    default:
      internal_error (_("bogus execution_direction value: %d"),
		      (int) execution_direction);
    }
}

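/* Illustrative sketch, assuming a target with reverse-execution support
   (e.g. after enabling the built-in recorder with "record"):

     (gdb) record
     (gdb) set exec-direction reverse
     (gdb) step
     (gdb) show exec-direction
     Reverse.

   On targets without such support, set_exec_direction_func above resets
   the variable to "forward" and raises an error.  */
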
static void
show_schedule_multiple (struct ui_file *file, int from_tty,
			struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Resuming the execution of threads "
		      "of all processes is %s.\n"), value);
}

/* Implementation of `siginfo' variable.  */

static const struct internalvar_funcs siginfo_funcs =
{
  siginfo_make_value,
  nullptr,
};

/* Callback for infrun's target events source.  This is marked when a
   thread has a pending status to process.  */

static void
infrun_async_inferior_event_handler (gdb_client_data data)
{
  clear_async_event_handler (infrun_async_inferior_event_token);
  inferior_event_handler (INF_REG_EVENT);
}

#if GDB_SELF_TEST
namespace selftests
{

/* Verify that when two threads with the same ptid exist (from two different
   targets) and one of them changes ptid, we only update inferior_ptid if
   it is appropriate.  */

static void
infrun_thread_ptid_changed ()
{
  gdbarch *arch = current_inferior ()->arch ();

  /* The thread which inferior_ptid represents changes ptid.  */
  {
    scoped_restore_current_pspace_and_thread restore;

    scoped_mock_context<test_target_ops> target1 (arch);
    scoped_mock_context<test_target_ops> target2 (arch);

    ptid_t old_ptid (111, 222);
    ptid_t new_ptid (111, 333);

    target1.mock_inferior.pid = old_ptid.pid ();
    target1.mock_thread.ptid = old_ptid;
    target1.mock_inferior.ptid_thread_map.clear ();
    target1.mock_inferior.ptid_thread_map[old_ptid] = &target1.mock_thread;

    target2.mock_inferior.pid = old_ptid.pid ();
    target2.mock_thread.ptid = old_ptid;
    target2.mock_inferior.ptid_thread_map.clear ();
    target2.mock_inferior.ptid_thread_map[old_ptid] = &target2.mock_thread;

    auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
    set_current_inferior (&target1.mock_inferior);

    thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);

    gdb_assert (inferior_ptid == new_ptid);
  }

  /* A thread with the same ptid as inferior_ptid, but from another target,
     changes ptid.  */
  {
    scoped_restore_current_pspace_and_thread restore;

    scoped_mock_context<test_target_ops> target1 (arch);
    scoped_mock_context<test_target_ops> target2 (arch);

    ptid_t old_ptid (111, 222);
    ptid_t new_ptid (111, 333);

    target1.mock_inferior.pid = old_ptid.pid ();
    target1.mock_thread.ptid = old_ptid;
    target1.mock_inferior.ptid_thread_map.clear ();
    target1.mock_inferior.ptid_thread_map[old_ptid] = &target1.mock_thread;

    target2.mock_inferior.pid = old_ptid.pid ();
    target2.mock_thread.ptid = old_ptid;
    target2.mock_inferior.ptid_thread_map.clear ();
    target2.mock_inferior.ptid_thread_map[old_ptid] = &target2.mock_thread;

    auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
    set_current_inferior (&target2.mock_inferior);

    thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);

    gdb_assert (inferior_ptid == old_ptid);
  }
}

} /* namespace selftests */

#endif /* GDB_SELF_TEST */

INIT_GDB_FILE (infrun)
{
  struct cmd_list_element *c;

  /* Register extra event sources in the event loop.  */
  infrun_async_inferior_event_token
    = create_async_event_handler (infrun_async_inferior_event_handler, nullptr,
				  "infrun");

  cmd_list_element *info_signals_cmd
    = add_info ("signals", info_signals_command, _("\
What debugger does when program gets various signals.\n\
Specify a signal as argument to print info on that signal only."));
  add_info_alias ("handle", info_signals_cmd, 0);

  c = add_com ("handle", class_run, handle_command, _("\
Specify how to handle signals.\n\
Usage: handle SIGNAL [ACTIONS]\n\
Args are signals and actions to apply to those signals.\n\
If no actions are specified, the current settings for the specified signals\n\
will be displayed instead.\n\
\n\
Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
from 1-15 are allowed for compatibility with old versions of GDB.\n\
Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
The special arg \"all\" is recognized to mean all signals except those\n\
used by the debugger, typically SIGTRAP and SIGINT.\n\
\n\
Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
Stop means reenter debugger if this signal happens (implies print).\n\
Print means print a message if this signal happens.\n\
Pass means let program see this signal; otherwise program doesn't know.\n\
Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
Pass and Stop may be combined.\n\
\n\
Multiple signals may be specified.  Signal numbers and signal names\n\
may be interspersed with actions, with the actions being performed for\n\
all signals cumulatively specified."));
  set_cmd_completer (c, handle_completer);
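
  /* Illustrative sketch of the command registered above:

       (gdb) handle SIGUSR1 nostop noprint pass
       (gdb) handle SIGSEGV stop print nopass
       (gdb) info signals SIGUSR1

     The first line lets the program consume SIGUSR1 silently; the
     second makes segmentation faults stop the debugger without
     forwarding the signal to the program.  */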

  stop_command = add_cmd ("stop", class_obscure,
			  not_just_help_class_command, _("\
There is no `stop' command, but you can set a hook on `stop'.\n\
This allows you to set a list of commands to be run each time execution\n\
of the program stops."), &cmdlist);

  add_setshow_boolean_cmd
    ("infrun", class_maintenance, &debug_infrun,
     _("Set inferior debugging."),
     _("Show inferior debugging."),
     _("When non-zero, inferior specific debugging is enabled."),
     nullptr, show_debug_infrun, &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("non-stop", no_class,
			   &non_stop_1, _("\
Set whether gdb controls the inferior in non-stop mode."), _("\
Show whether gdb controls the inferior in non-stop mode."), _("\
When debugging a multi-threaded program and this setting is\n\
off (the default, also called all-stop mode), when one thread stops\n\
(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
all other threads in the program while you interact with the thread of\n\
interest.  When you continue or step a thread, you can allow the other\n\
threads to run, or have them remain stopped, but while you inspect any\n\
thread's state, all threads stop.\n\
\n\
In non-stop mode, when one thread stops, other threads can continue\n\
to run freely.  You'll be able to step each thread independently,\n\
leave it stopped or free to run as needed."),
			   set_non_stop,
			   show_non_stop,
			   &setlist,
			   &showlist);

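  /* Illustrative sketch: non-stop must be selected before the program
     runs, e.g. (a common pairing, though pagination is independent):

       (gdb) set pagination off
       (gdb) set non-stop on
       (gdb) run &

     The background "run &" keeps the prompt usable while the program's
     threads continue executing.  */
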
  for (size_t i = 0; i < GDB_SIGNAL_LAST; i++)
    {
      signal_stop[i] = 1;
      signal_print[i] = 1;
      signal_program[i] = 1;
      signal_catch[i] = 0;
    }

  /* Signals caused by debugger's own actions should not be given to
     the program afterwards.

     Do not deliver GDB_SIGNAL_TRAP by default, except when the user
     explicitly specifies that it should be delivered to the target
     program.  Typically, that would occur when a user is debugging a
     target monitor on a simulator: the target monitor sets a
     breakpoint; the simulator encounters this breakpoint and halts
     the simulation handing control to GDB; GDB, noting that the stop
     address doesn't map to any known breakpoint, returns control back
     to the simulator; the simulator then delivers the hardware
     equivalent of a GDB_SIGNAL_TRAP to the program being
     debugged.  */
  signal_program[GDB_SIGNAL_TRAP] = 0;
  signal_program[GDB_SIGNAL_INT] = 0;

  /* Signals that are not errors should not normally enter the debugger.  */
  signal_stop[GDB_SIGNAL_ALRM] = 0;
  signal_print[GDB_SIGNAL_ALRM] = 0;
  signal_stop[GDB_SIGNAL_VTALRM] = 0;
  signal_print[GDB_SIGNAL_VTALRM] = 0;
  signal_stop[GDB_SIGNAL_PROF] = 0;
  signal_print[GDB_SIGNAL_PROF] = 0;
  signal_stop[GDB_SIGNAL_CHLD] = 0;
  signal_print[GDB_SIGNAL_CHLD] = 0;
  signal_stop[GDB_SIGNAL_IO] = 0;
  signal_print[GDB_SIGNAL_IO] = 0;
  signal_stop[GDB_SIGNAL_POLL] = 0;
  signal_print[GDB_SIGNAL_POLL] = 0;
  signal_stop[GDB_SIGNAL_URG] = 0;
  signal_print[GDB_SIGNAL_URG] = 0;
  signal_stop[GDB_SIGNAL_WINCH] = 0;
  signal_print[GDB_SIGNAL_WINCH] = 0;
  signal_stop[GDB_SIGNAL_PRIO] = 0;
  signal_print[GDB_SIGNAL_PRIO] = 0;

  /* These signals are used internally by user-level thread
     implementations.  (See signal(5) on Solaris.)  Like the above
     signals, a healthy program receives and handles them as part of
     its normal operation.  */
  signal_stop[GDB_SIGNAL_LWP] = 0;
  signal_print[GDB_SIGNAL_LWP] = 0;
  signal_stop[GDB_SIGNAL_WAITING] = 0;
  signal_print[GDB_SIGNAL_WAITING] = 0;
  signal_stop[GDB_SIGNAL_CANCEL] = 0;
  signal_print[GDB_SIGNAL_CANCEL] = 0;
  signal_stop[GDB_SIGNAL_LIBRT] = 0;
  signal_print[GDB_SIGNAL_LIBRT] = 0;

  /* Update cached state.  */
  signal_cache_update (-1);

  add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
			    &stop_on_solib_events, _("\
Set stopping for shared library events."), _("\
Show stopping for shared library events."), _("\
If nonzero, gdb will give control to the user when the dynamic linker\n\
notifies gdb of shared library events.  The most common event of interest\n\
to the user would be loading/unloading of a new library."),
			    set_stop_on_solib_events,
			    show_stop_on_solib_events,
			    &setlist, &showlist);

  add_setshow_enum_cmd ("follow-fork-mode", class_run,
			follow_fork_mode_kind_names,
			&follow_fork_mode_string, _("\
Set debugger response to a program call of fork or vfork."), _("\
Show debugger response to a program call of fork or vfork."), _("\
A fork or vfork creates a new process.  follow-fork-mode can be:\n\
  parent  - the original process is debugged after a fork\n\
  child   - the new process is debugged after a fork\n\
The unfollowed process will continue to run.\n\
By default, the debugger will follow the parent process."),
			nullptr,
			show_follow_fork_mode_string,
			&setlist, &showlist);
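
  /* Illustrative sketch combining this knob with detach-on-fork,
     registered further below (both are real settings; the scenario is
     hypothetical):

       (gdb) set follow-fork-mode child
       (gdb) set detach-on-fork off

     With detach-on-fork off, the unfollowed process is kept under GDB's
     control as another inferior instead of being detached.  */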

  add_setshow_enum_cmd ("follow-exec-mode", class_run,
			follow_exec_mode_names,
			&follow_exec_mode_string, _("\
Set debugger response to a program call of exec."), _("\
Show debugger response to a program call of exec."), _("\
An exec call replaces the program image of a process.\n\
\n\
follow-exec-mode can be:\n\
\n\
  new - the debugger creates a new inferior and rebinds the process\n\
to this new inferior.  The program the process was running before\n\
the exec call can be restarted afterwards by restarting the original\n\
inferior.\n\
\n\
  same - the debugger keeps the process bound to the same inferior.\n\
The new executable image replaces the previous executable loaded in\n\
the inferior.  Restarting the inferior after the exec call restarts\n\
the executable the process was running after the exec call.\n\
\n\
By default, the debugger will use the same inferior."),
			nullptr,
			show_follow_exec_mode_string,
			&setlist, &showlist);

  add_setshow_enum_cmd ("scheduler-locking", class_run,
			scheduler_enums, &scheduler_mode, _("\
Set mode for locking scheduler during execution."), _("\
Show mode for locking scheduler during execution."), _("\
off    == no locking (threads may preempt at any time)\n\
on     == full locking (no thread except the current thread may run)\n\
          This applies to both normal execution and replay mode.\n\
step   == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
          In this mode, other threads may run during other commands.\n\
          This applies to both normal execution and replay mode.\n\
replay == scheduler locked in replay mode and unlocked during normal execution."),
			set_schedlock_func,	/* traps on target vector */
			show_scheduler_mode,
			&setlist, &showlist);
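
  /* Illustrative sketch: to keep other threads parked while stepping
     through a critical section:

       (gdb) set scheduler-locking step
       (gdb) next

     Only the current thread runs during stepping commands; "continue"
     still resumes the others.  */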

  add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
Set mode for resuming threads of all processes."), _("\
Show mode for resuming threads of all processes."), _("\
When on, execution commands (such as 'continue' or 'next') resume all\n\
threads of all processes.  When off (which is the default), execution\n\
commands only resume the threads of the current process.  The set of\n\
threads that are resumed is further refined by the scheduler-locking\n\
mode (see help set scheduler-locking)."),
			   nullptr,
			   show_schedule_multiple,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
Set mode of the step operation."), _("\
Show mode of the step operation."), _("\
When set, doing a step over a function without debug line information\n\
will stop at the first instruction of that function.  Otherwise, the\n\
function is skipped and the step command stops at a different source line."),
			   nullptr,
			   show_step_stop_if_no_debug,
			   &setlist, &showlist);

  add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
				&can_use_displaced_stepping, _("\
Set debugger's willingness to use displaced stepping."), _("\
Show debugger's willingness to use displaced stepping."), _("\
If on, gdb will use displaced stepping to step over breakpoints if it is\n\
supported by the target architecture.  If off, gdb will not use displaced\n\
stepping to step over breakpoints, even if such is supported by the target\n\
architecture.  If auto (which is the default), gdb will use displaced stepping\n\
if the target architecture supports it and non-stop mode is active, but will not\n\
use it in all-stop mode (see help set non-stop)."),
				nullptr,
				show_can_use_displaced_stepping,
				&setlist, &showlist);

  add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
			&exec_direction, _("Set direction of execution.\n\
Options are 'forward' or 'reverse'."),
			_("Show direction of execution (forward/reverse)."),
			_("Tells gdb whether to execute forward or backward."),
			set_exec_direction_func, show_exec_direction_func,
			&setlist, &showlist);

  /* Set/show detach-on-fork: user-settable mode.  */

  add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
Set whether gdb will detach the child of a fork."), _("\
Show whether gdb will detach the child of a fork."), _("\
Tells gdb whether to detach the child of a fork."),
			   nullptr, nullptr, &setlist, &showlist);

  /* Set/show disable address space randomization mode.  */

  add_setshow_boolean_cmd ("disable-randomization", class_support,
			   &disable_randomization, _("\
Set disabling of debuggee's virtual address space randomization."), _("\
Show disabling of debuggee's virtual address space randomization."), _("\
When this mode is on (which is the default), randomization of the virtual\n\
address space is disabled.  Standalone programs run with the randomization\n\
enabled by default on some platforms."),
			   &set_disable_randomization,
			   &show_disable_randomization,
			   &setlist, &showlist);

  /* ptid initializations */
  inferior_ptid = null_ptid;
  target_last_wait_ptid = minus_one_ptid;

  gdb::observers::thread_ptid_changed.attach (infrun_thread_ptid_changed,
					      "infrun");
  gdb::observers::thread_stop_requested.attach (infrun_thread_stop_requested,
						"infrun");
  gdb::observers::inferior_exit.attach (infrun_inferior_exit, "infrun");
  gdb::observers::inferior_execd.attach (infrun_inferior_execd, "infrun");

  /* Explicitly create without lookup, since that tries to create a
     value with a void typed value, and when we get here, gdbarch
     isn't initialized yet.  At this point, we're quite sure there
     isn't another convenience variable of the same name.  */
  create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, nullptr);

  add_setshow_boolean_cmd ("observer", no_class,
			   &observer_mode_1, _("\
Set whether gdb controls the inferior in observer mode."), _("\
Show whether gdb controls the inferior in observer mode."), _("\
In observer mode, GDB can get data from the inferior, but not\n\
affect its execution.  Registers and memory may not be changed,\n\
breakpoints may not be set, and the program cannot be interrupted\n\
or signalled."),
			   set_observer_mode,
			   show_observer_mode,
			   &setlist,
			   &showlist);

#if GDB_SELF_TEST
  selftests::register_test ("infrun_thread_ptid_changed",
			    selftests::infrun_thread_ptid_changed);
#endif
}