1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2014 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "infrun.h"
23 #include <ctype.h>
24 #include "symtab.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "breakpoint.h"
28 #include "gdb_wait.h"
29 #include "gdbcore.h"
30 #include "gdbcmd.h"
31 #include "cli/cli-script.h"
32 #include "target.h"
33 #include "gdbthread.h"
34 #include "annotate.h"
35 #include "symfile.h"
36 #include "top.h"
37 #include <signal.h>
38 #include "inf-loop.h"
39 #include "regcache.h"
40 #include "value.h"
41 #include "observer.h"
42 #include "language.h"
43 #include "solib.h"
44 #include "main.h"
45 #include "dictionary.h"
46 #include "block.h"
47 #include "mi/mi-common.h"
48 #include "event-top.h"
49 #include "record.h"
50 #include "record-full.h"
51 #include "inline-frame.h"
52 #include "jit.h"
53 #include "tracepoint.h"
54 #include "continuations.h"
55 #include "interps.h"
56 #include "skip.h"
57 #include "probe.h"
58 #include "objfiles.h"
59 #include "completer.h"
60 #include "target-descriptions.h"
61 #include "target-dcache.h"
62 #include "terminal.h"
63
64 /* Prototypes for local functions */
65
66 static void signals_info (char *, int);
67
68 static void handle_command (char *, int);
69
70 static void sig_print_info (enum gdb_signal);
71
72 static void sig_print_header (void);
73
74 static void resume_cleanups (void *);
75
76 static int hook_stop_stub (void *);
77
78 static int restore_selected_frame (void *);
79
80 static int follow_fork (void);
81
82 static int follow_fork_inferior (int follow_child, int detach_fork);
83
84 static void follow_inferior_reset_breakpoints (void);
85
86 static void set_schedlock_func (char *args, int from_tty,
87 struct cmd_list_element *c);
88
89 static int currently_stepping (struct thread_info *tp);
90
91 static void xdb_handle_command (char *args, int from_tty);
92
93 void _initialize_infrun (void);
94
95 void nullify_last_target_wait_ptid (void);
96
97 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
98
99 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
100
101 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
102
103 /* When set, stop the 'step' command if we enter a function which has
104 no line number information. The normal behavior is that we step
105 over such functions. */
106 int step_stop_if_no_debug = 0;
107 static void
108 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
109 struct cmd_list_element *c, const char *value)
110 {
111 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
112 }
113
114 /* In asynchronous mode, but simulating synchronous execution. */
115
116 int sync_execution = 0;
117
118 /* proceed and normal_stop use this to notify the user when the
119 inferior stopped in a different thread than it had been running
120 in. */
121
122 static ptid_t previous_inferior_ptid;
123
124 /* If set (default for legacy reasons), when following a fork, GDB
125 will detach from one of the fork branches, child or parent.
126 Exactly which branch is detached depends on 'set follow-fork-mode'
127 setting. */
128
129 static int detach_fork = 1;
130
131 int debug_displaced = 0;
132 static void
133 show_debug_displaced (struct ui_file *file, int from_tty,
134 struct cmd_list_element *c, const char *value)
135 {
136 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
137 }
138
139 unsigned int debug_infrun = 0;
140 static void
141 show_debug_infrun (struct ui_file *file, int from_tty,
142 struct cmd_list_element *c, const char *value)
143 {
144 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
145 }
146
147
148 /* Support for disabling address space randomization. */
149
150 int disable_randomization = 1;
151
152 static void
153 show_disable_randomization (struct ui_file *file, int from_tty,
154 struct cmd_list_element *c, const char *value)
155 {
156 if (target_supports_disable_randomization ())
157 fprintf_filtered (file,
158 _("Disabling randomization of debuggee's "
159 "virtual address space is %s.\n"),
160 value);
161 else
162 fputs_filtered (_("Disabling randomization of debuggee's "
163 "virtual address space is unsupported on\n"
164 "this platform.\n"), file);
165 }
166
167 static void
168 set_disable_randomization (char *args, int from_tty,
169 struct cmd_list_element *c)
170 {
171 if (!target_supports_disable_randomization ())
172 error (_("Disabling randomization of debuggee's "
173 "virtual address space is unsupported on\n"
174 "this platform."));
175 }
176
177 /* User interface for non-stop mode. */
178
179 int non_stop = 0;
180 static int non_stop_1 = 0;
181
182 static void
183 set_non_stop (char *args, int from_tty,
184 struct cmd_list_element *c)
185 {
186 if (target_has_execution)
187 {
188 non_stop_1 = non_stop;
189 error (_("Cannot change this setting while the inferior is running."));
190 }
191
192 non_stop = non_stop_1;
193 }
194
195 static void
196 show_non_stop (struct ui_file *file, int from_tty,
197 struct cmd_list_element *c, const char *value)
198 {
199 fprintf_filtered (file,
200 _("Controlling the inferior in non-stop mode is %s.\n"),
201 value);
202 }
203
204 /* "Observer mode" is somewhat like a more extreme version of
205 non-stop, in which all GDB operations that might affect the
206 target's execution have been disabled. */
207
208 int observer_mode = 0;
209 static int observer_mode_1 = 0;
210
211 static void
212 set_observer_mode (char *args, int from_tty,
213 struct cmd_list_element *c)
214 {
215 if (target_has_execution)
216 {
217 observer_mode_1 = observer_mode;
218 error (_("Cannot change this setting while the inferior is running."));
219 }
220
221 observer_mode = observer_mode_1;
222
223 may_write_registers = !observer_mode;
224 may_write_memory = !observer_mode;
225 may_insert_breakpoints = !observer_mode;
226 may_insert_tracepoints = !observer_mode;
227 /* We can insert fast tracepoints in or out of observer mode,
228 but enable them if we're going into this mode. */
229 if (observer_mode)
230 may_insert_fast_tracepoints = 1;
231 may_stop = !observer_mode;
232 update_target_permissions ();
233
234 /* Going *into* observer mode we must force non-stop, then
235 going out we leave it that way. */
236 if (observer_mode)
237 {
238 pagination_enabled = 0;
239 non_stop = non_stop_1 = 1;
240 }
241
242 if (from_tty)
243 printf_filtered (_("Observer mode is now %s.\n"),
244 (observer_mode ? "on" : "off"));
245 }
246
247 static void
248 show_observer_mode (struct ui_file *file, int from_tty,
249 struct cmd_list_element *c, const char *value)
250 {
251 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
252 }
253
254 /* This updates the value of observer mode based on changes in
255 permissions. Note that we are deliberately ignoring the values of
256 may-write-registers and may-write-memory, since the user may have
257 reason to enable these during a session, for instance to turn on a
258 debugging-related global. */
259
260 void
261 update_observer_mode (void)
262 {
263 int newval;
264
265 newval = (!may_insert_breakpoints
266 && !may_insert_tracepoints
267 && may_insert_fast_tracepoints
268 && !may_stop
269 && non_stop);
270
271 /* Let the user know if things change. */
272 if (newval != observer_mode)
273 printf_filtered (_("Observer mode is now %s.\n"),
274 (newval ? "on" : "off"));
275
276 observer_mode = observer_mode_1 = newval;
277 }
278
279 /* Tables of how to react to signals; the user sets them. */
280
281 static unsigned char *signal_stop;
282 static unsigned char *signal_print;
283 static unsigned char *signal_program;
284
285 /* Table of signals that are registered with "catch signal". A
286 non-zero entry indicates that the signal is caught by some "catch
287 signal" command. This has size GDB_SIGNAL_LAST, to accommodate all
288 signals. */
289 static unsigned char *signal_catch;
290
291 /* Table of signals that the target may silently handle.
292 This is automatically determined from the flags above,
293 and simply cached here. */
294 static unsigned char *signal_pass;
295
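/* Helper macros for updating the signal tables above: for each of the
   NSIGS signals, set or clear FLAGS[signum] for every signal that is
   marked in SIGS. */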
296 #define SET_SIGS(nsigs,sigs,flags) \
297 do { \
298 int signum = (nsigs); \
299 while (signum-- > 0) \
300 if ((sigs)[signum]) \
301 (flags)[signum] = 1; \
302 } while (0)
303
304 #define UNSET_SIGS(nsigs,sigs,flags) \
305 do { \
306 int signum = (nsigs); \
307 while (signum-- > 0) \
308 if ((sigs)[signum]) \
309 (flags)[signum] = 0; \
310 } while (0)
311
312 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
313 this function is to avoid exporting `signal_program'. */
314
315 void
316 update_signals_program_target (void)
317 {
318 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
319 }
320
321 /* Value to pass to target_resume() to cause all threads to resume. */
322
323 #define RESUME_ALL minus_one_ptid
324
325 /* Command list pointer for the "stop" placeholder. */
326
327 static struct cmd_list_element *stop_command;
328
329 /* Function inferior was in as of last step command. */
330
331 static struct symbol *step_start_function;
332
333 /* Nonzero if we want to give control to the user when we're notified
334 of shared library events by the dynamic linker. */
335 int stop_on_solib_events;
336
337 /* Enable or disable optional shared library event breakpoints
338 as appropriate when the above flag is changed. */
339
340 static void
341 set_stop_on_solib_events (char *args, int from_tty, struct cmd_list_element *c)
342 {
343 update_solib_breakpoints ();
344 }
345
346 static void
347 show_stop_on_solib_events (struct ui_file *file, int from_tty,
348 struct cmd_list_element *c, const char *value)
349 {
350 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
351 value);
352 }
353
354 /* Nonzero means we are expecting a trace trap
355 and should stop the inferior and return silently when it happens. */
356
357 int stop_after_trap;
358
359 /* Save register contents here when executing a "finish" command or when
360 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
361 Thus this contains the return value from the called function (assuming
362 values are returned in a register). */
363
364 struct regcache *stop_registers;
365
366 /* Nonzero after stop if current stack frame should be printed. */
367
368 static int stop_print_frame;
369
370 /* This is a cached copy of the pid/waitstatus of the last event
371 returned by target_wait()/deprecated_target_wait_hook(). This
372 information is returned by get_last_target_status(). */
373 static ptid_t target_last_wait_ptid;
374 static struct target_waitstatus target_last_waitstatus;
375
376 static void context_switch (ptid_t ptid);
377
378 void init_thread_stepping_state (struct thread_info *tss);
379
380 static const char follow_fork_mode_child[] = "child";
381 static const char follow_fork_mode_parent[] = "parent";
382
383 static const char *const follow_fork_mode_kind_names[] = {
384 follow_fork_mode_child,
385 follow_fork_mode_parent,
386 NULL
387 };
388
389 static const char *follow_fork_mode_string = follow_fork_mode_parent;
390 static void
391 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
392 struct cmd_list_element *c, const char *value)
393 {
394 fprintf_filtered (file,
395 _("Debugger response to a program "
396 "call of fork or vfork is \"%s\".\n"),
397 value);
398 }
399 \f
400
401 /* Handle changes to the inferior list based on the type of fork,
402 which process is being followed, and whether the other process
403 should be detached. On entry inferior_ptid must be the ptid of
404 the fork parent. On return, inferior_ptid is the ptid of the
405 followed inferior. */
406
407 static int
408 follow_fork_inferior (int follow_child, int detach_fork)
409 {
410 int has_vforked;
411 int parent_pid, child_pid;
412
413 has_vforked = (inferior_thread ()->pending_follow.kind
414 == TARGET_WAITKIND_VFORKED);
415 parent_pid = ptid_get_lwp (inferior_ptid);
416 if (parent_pid == 0)
417 parent_pid = ptid_get_pid (inferior_ptid);
418 child_pid
419 = ptid_get_pid (inferior_thread ()->pending_follow.value.related_pid);
420
421 if (has_vforked
422 && !non_stop /* Non-stop always resumes both branches. */
423 && (!target_is_async_p () || sync_execution)
424 && !(follow_child || detach_fork || sched_multi))
425 {
426 /* The parent stays blocked inside the vfork syscall until the
427 child execs or exits. If we don't let the child run, then
428 the parent stays blocked. If we're telling the parent to run
429 in the foreground, the user will not be able to ctrl-c to get
430 back the terminal, effectively hanging the debug session. */
431 fprintf_filtered (gdb_stderr, _("\
432 Can not resume the parent process over vfork in the foreground while\n\
433 holding the child stopped. Try \"set detach-on-fork\" or \
434 \"set schedule-multiple\".\n"));
435 /* FIXME output string > 80 columns. */
436 return 1;
437 }
438
439 if (!follow_child)
440 {
441 /* Detach new forked process? */
442 if (detach_fork)
443 {
444 struct cleanup *old_chain;
445
446 /* Before detaching from the child, remove all breakpoints
447 from it. If we forked, then this has already been taken
448 care of by infrun.c. If we vforked however, any
449 breakpoint inserted in the parent is visible in the
450 child, even those added while stopped in a vfork
451 catchpoint. This will remove the breakpoints from the
452 parent also, but they'll be reinserted below. */
453 if (has_vforked)
454 {
455 /* Keep breakpoints list in sync. */
456 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
457 }
458
459 if (info_verbose || debug_infrun)
460 {
461 target_terminal_ours ();
462 fprintf_filtered (gdb_stdlog,
463 "Detaching after fork from "
464 "child process %d.\n",
465 child_pid);
466 }
467 }
468 else
469 {
470 struct inferior *parent_inf, *child_inf;
471 struct cleanup *old_chain;
472
473 /* Add process to GDB's tables. */
474 child_inf = add_inferior (child_pid);
475
476 parent_inf = current_inferior ();
477 child_inf->attach_flag = parent_inf->attach_flag;
478 copy_terminal_info (child_inf, parent_inf);
479 child_inf->gdbarch = parent_inf->gdbarch;
480 copy_inferior_target_desc_info (child_inf, parent_inf);
481
482 old_chain = save_inferior_ptid ();
483 save_current_program_space ();
484
485 inferior_ptid = ptid_build (child_pid, child_pid, 0);
486 add_thread (inferior_ptid);
487 child_inf->symfile_flags = SYMFILE_NO_READ;
488
489 /* If this is a vfork child, then the address-space is
490 shared with the parent. */
491 if (has_vforked)
492 {
493 child_inf->pspace = parent_inf->pspace;
494 child_inf->aspace = parent_inf->aspace;
495
496 /* The parent will be frozen until the child is done
497 with the shared region. Keep track of the
498 parent. */
499 child_inf->vfork_parent = parent_inf;
500 child_inf->pending_detach = 0;
501 parent_inf->vfork_child = child_inf;
502 parent_inf->pending_detach = 0;
503 }
504 else
505 {
506 child_inf->aspace = new_address_space ();
507 child_inf->pspace = add_program_space (child_inf->aspace);
508 child_inf->removable = 1;
509 set_current_program_space (child_inf->pspace);
510 clone_program_space (child_inf->pspace, parent_inf->pspace);
511
512 /* Let the shared library layer (e.g., solib-svr4) learn
513 about this new process, relocate the cloned exec, pull
514 in shared libraries, and install the solib event
515 breakpoint. If a "cloned-VM" event was propagated
516 better throughout the core, this wouldn't be
517 required. */
518 solib_create_inferior_hook (0);
519 }
520
521 do_cleanups (old_chain);
522 }
523
524 if (has_vforked)
525 {
526 struct inferior *parent_inf;
527
528 parent_inf = current_inferior ();
529
530 /* If we detached from the child, then we have to be careful
531 to not insert breakpoints in the parent until the child
532 is done with the shared memory region. However, if we're
533 staying attached to the child, then we can and should
534 insert breakpoints, so that we can debug it. A
535 subsequent child exec or exit is enough to know when
536 the child stops using the parent's address space. */
537 parent_inf->waiting_for_vfork_done = detach_fork;
538 parent_inf->pspace->breakpoints_not_allowed = detach_fork;
539 }
540 }
541 else
542 {
543 /* Follow the child. */
544 struct inferior *parent_inf, *child_inf;
545 struct program_space *parent_pspace;
546
547 if (info_verbose || debug_infrun)
548 {
549 target_terminal_ours ();
550 if (has_vforked)
551 fprintf_filtered (gdb_stdlog,
552 _("Attaching after process %d "
553 "vfork to child process %d.\n"),
554 parent_pid, child_pid);
555 else
556 fprintf_filtered (gdb_stdlog,
557 _("Attaching after process %d "
558 "fork to child process %d.\n"),
559 parent_pid, child_pid);
560 }
561
562 /* Add the new inferior first, so that the target_detach below
563 doesn't unpush the target. */
564
565 child_inf = add_inferior (child_pid);
566
567 parent_inf = current_inferior ();
568 child_inf->attach_flag = parent_inf->attach_flag;
569 copy_terminal_info (child_inf, parent_inf);
570 child_inf->gdbarch = parent_inf->gdbarch;
571 copy_inferior_target_desc_info (child_inf, parent_inf);
572
573 parent_pspace = parent_inf->pspace;
574
575 /* If we're vforking, we want to hold on to the parent until the
576 child exits or execs. At child exec or exit time we can
577 remove the old breakpoints from the parent and detach or
578 resume debugging it. Otherwise, detach the parent now; we'll
579 want to reuse its program/address spaces, but we can't set
580 them to the child before removing breakpoints from the
581 parent; otherwise, the breakpoints module could decide to
582 remove breakpoints from the wrong process (since they'd be
583 assigned to the same address space). */
584
585 if (has_vforked)
586 {
587 gdb_assert (child_inf->vfork_parent == NULL);
588 gdb_assert (parent_inf->vfork_child == NULL);
589 child_inf->vfork_parent = parent_inf;
590 child_inf->pending_detach = 0;
591 parent_inf->vfork_child = child_inf;
592 parent_inf->pending_detach = detach_fork;
593 parent_inf->waiting_for_vfork_done = 0;
594 }
595 else if (detach_fork)
596 target_detach (NULL, 0);
597
598 /* Note that the detach above makes PARENT_INF dangling. */
599
600 /* Add the child thread to the appropriate lists, and switch to
601 this new thread, before cloning the program space, and
602 informing the solib layer about this new process. */
603
604 inferior_ptid = ptid_build (child_pid, child_pid, 0);
605 add_thread (inferior_ptid);
606
607 /* If this is a vfork child, then the address-space is shared
608 with the parent. If we detached from the parent, then we can
609 reuse the parent's program/address spaces. */
610 if (has_vforked || detach_fork)
611 {
612 child_inf->pspace = parent_pspace;
613 child_inf->aspace = child_inf->pspace->aspace;
614 }
615 else
616 {
617 child_inf->aspace = new_address_space ();
618 child_inf->pspace = add_program_space (child_inf->aspace);
619 child_inf->removable = 1;
620 child_inf->symfile_flags = SYMFILE_NO_READ;
621 set_current_program_space (child_inf->pspace);
622 clone_program_space (child_inf->pspace, parent_pspace);
623
624 /* Let the shared library layer (e.g., solib-svr4) learn
625 about this new process, relocate the cloned exec, pull in
626 shared libraries, and install the solib event breakpoint.
627 If a "cloned-VM" event was propagated better throughout
628 the core, this wouldn't be required. */
629 solib_create_inferior_hook (0);
630 }
631 }
632
633 return target_follow_fork (follow_child, detach_fork);
634 }
635
636 /* Tell the target to follow the fork we're stopped at. Returns true
637 if the inferior should be resumed; false, if the target for some
638 reason decided it's best not to resume. */
639
640 static int
641 follow_fork (void)
642 {
643 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
644 int should_resume = 1;
645 struct thread_info *tp;
646
647 /* Copy user stepping state to the new inferior thread. FIXME: the
648 followed fork child thread should have a copy of most of the
649 parent thread structure's run control related fields, not just these.
650 Initialized to avoid "may be used uninitialized" warnings from gcc. */
651 struct breakpoint *step_resume_breakpoint = NULL;
652 struct breakpoint *exception_resume_breakpoint = NULL;
653 CORE_ADDR step_range_start = 0;
654 CORE_ADDR step_range_end = 0;
655 struct frame_id step_frame_id = { 0 };
656 struct interp *command_interp = NULL;
657
658 if (!non_stop)
659 {
660 ptid_t wait_ptid;
661 struct target_waitstatus wait_status;
662
663 /* Get the last target status returned by target_wait(). */
664 get_last_target_status (&wait_ptid, &wait_status);
665
666 /* If not stopped at a fork event, then there's nothing else to
667 do. */
668 if (wait_status.kind != TARGET_WAITKIND_FORKED
669 && wait_status.kind != TARGET_WAITKIND_VFORKED)
670 return 1;
671
672 /* Check if we switched over from WAIT_PTID, since the event was
673 reported. */
674 if (!ptid_equal (wait_ptid, minus_one_ptid)
675 && !ptid_equal (inferior_ptid, wait_ptid))
676 {
677 /* We did. Switch back to WAIT_PTID thread, to tell the
678 target to follow it (in either direction). We'll
679 afterwards refuse to resume, and inform the user what
680 happened. */
681 switch_to_thread (wait_ptid);
682 should_resume = 0;
683 }
684 }
685
686 tp = inferior_thread ();
687
688 /* If there were any forks/vforks that were caught and are now to be
689 followed, then do so now. */
690 switch (tp->pending_follow.kind)
691 {
692 case TARGET_WAITKIND_FORKED:
693 case TARGET_WAITKIND_VFORKED:
694 {
695 ptid_t parent, child;
696
697 /* If the user did a next/step, etc, over a fork call,
698 preserve the stepping state in the fork child. */
699 if (follow_child && should_resume)
700 {
701 step_resume_breakpoint = clone_momentary_breakpoint
702 (tp->control.step_resume_breakpoint);
703 step_range_start = tp->control.step_range_start;
704 step_range_end = tp->control.step_range_end;
705 step_frame_id = tp->control.step_frame_id;
706 exception_resume_breakpoint
707 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
708 command_interp = tp->control.command_interp;
709
710 /* For now, delete the parent's sr breakpoint, otherwise,
711 parent/child sr breakpoints are considered duplicates,
712 and the child version will not be installed. Remove
713 this when the breakpoints module becomes aware of
714 inferiors and address spaces. */
715 delete_step_resume_breakpoint (tp);
716 tp->control.step_range_start = 0;
717 tp->control.step_range_end = 0;
718 tp->control.step_frame_id = null_frame_id;
719 delete_exception_resume_breakpoint (tp);
720 tp->control.command_interp = NULL;
721 }
722
723 parent = inferior_ptid;
724 child = tp->pending_follow.value.related_pid;
725
726 /* Set up inferior(s) as specified by the caller, and tell the
727 target to do whatever is necessary to follow either parent
728 or child. */
729 if (follow_fork_inferior (follow_child, detach_fork))
730 {
731 /* Target refused to follow, or there's some other reason
732 we shouldn't resume. */
733 should_resume = 0;
734 }
735 else
736 {
737 /* This pending follow fork event is now handled, one way
738 or another. The previously selected thread may be gone
739 from the lists by now, but if it is still around, we need
740 to clear the pending follow request. */
741 tp = find_thread_ptid (parent);
742 if (tp)
743 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
744
745 /* This makes sure we don't try to apply the "Switched
746 over from WAIT_PTID" logic above. */
747 nullify_last_target_wait_ptid ();
748
749 /* If we followed the child, switch to it... */
750 if (follow_child)
751 {
752 switch_to_thread (child);
753
754 /* ... and preserve the stepping state, in case the
755 user was stepping over the fork call. */
756 if (should_resume)
757 {
758 tp = inferior_thread ();
759 tp->control.step_resume_breakpoint
760 = step_resume_breakpoint;
761 tp->control.step_range_start = step_range_start;
762 tp->control.step_range_end = step_range_end;
763 tp->control.step_frame_id = step_frame_id;
764 tp->control.exception_resume_breakpoint
765 = exception_resume_breakpoint;
766 tp->control.command_interp = command_interp;
767 }
768 else
769 {
770 /* If we get here, it was because we're trying to
771 resume from a fork catchpoint, but the user
772 has switched threads away from the thread that
773 forked. In that case, the resume command
774 issued is most likely not applicable to the
775 child, so just warn, and refuse to resume. */
776 warning (_("Not resuming: switched threads "
777 "before following fork child.\n"));
778 }
779
780 /* Reset breakpoints in the child as appropriate. */
781 follow_inferior_reset_breakpoints ();
782 }
783 else
784 switch_to_thread (parent);
785 }
786 }
787 break;
788 case TARGET_WAITKIND_SPURIOUS:
789 /* Nothing to follow. */
790 break;
791 default:
792 internal_error (__FILE__, __LINE__,
793 "Unexpected pending_follow.kind %d\n",
794 tp->pending_follow.kind);
795 break;
796 }
797
798 return should_resume;
799 }
800
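/* After following a fork to a new inferior, re-associate the cloned
   step-resume/exception-resume breakpoints with the current thread and
   re-insert all breakpoints in the followed inferior. */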
801 static void
802 follow_inferior_reset_breakpoints (void)
803 {
804 struct thread_info *tp = inferior_thread ();
805
806 /* Was there a step_resume breakpoint? (There was if the user
807 did a "next" at the fork() call.) If so, explicitly reset its
808 thread number. Cloned step_resume breakpoints are disabled on
809 creation, so enable it here now that it is associated with the
810 correct thread.
811
812 step_resumes are a form of bp that are made to be per-thread.
813 Since we created the step_resume bp when the parent process
814 was being debugged, and now are switching to the child process,
815 from the breakpoint package's viewpoint, that's a switch of
816 "threads". We must update the bp's notion of which thread
817 it is for, or it'll be ignored when it triggers. */
818
819 if (tp->control.step_resume_breakpoint)
820 {
821 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
822 tp->control.step_resume_breakpoint->loc->enabled = 1;
823 }
824
825 /* Treat exception_resume breakpoints like step_resume breakpoints. */
826 if (tp->control.exception_resume_breakpoint)
827 {
828 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
829 tp->control.exception_resume_breakpoint->loc->enabled = 1;
830 }
831
832 /* Reinsert all breakpoints in the child. The user may have set
833 breakpoints after catching the fork, in which case those
834 were never set in the child, but only in the parent. This makes
835 sure the inserted breakpoints match the breakpoint list. */
836
837 breakpoint_re_set ();
838 insert_breakpoints ();
839 }
840
841 /* The child has exited or execed: resume threads of the parent the
842 user wanted to be executing. */
843
844 static int
845 proceed_after_vfork_done (struct thread_info *thread,
846 void *arg)
847 {
848 int pid = * (int *) arg;
849
850 if (ptid_get_pid (thread->ptid) == pid
851 && is_running (thread->ptid)
852 && !is_executing (thread->ptid)
853 && !thread->stop_requested
854 && thread->suspend.stop_signal == GDB_SIGNAL_0)
855 {
856 if (debug_infrun)
857 fprintf_unfiltered (gdb_stdlog,
858 "infrun: resuming vfork parent thread %s\n",
859 target_pid_to_str (thread->ptid));
860
861 switch_to_thread (thread->ptid);
862 clear_proceed_status (0);
863 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT, 0);
864 }
865
866 return 0;
867 }
868
869 /* Called whenever we notice an exec or exit event, to handle
870 detaching or resuming a vfork parent. */
871
872 static void
873 handle_vfork_child_exec_or_exit (int exec)
874 {
875 struct inferior *inf = current_inferior ();
876
877 if (inf->vfork_parent)
878 {
879 int resume_parent = -1;
880
881 /* This exec or exit marks the end of the shared memory region
882 between the parent and the child. If the user wanted to
883 detach from the parent, now is the time. */
884
885 if (inf->vfork_parent->pending_detach)
886 {
887 struct thread_info *tp;
888 struct cleanup *old_chain;
889 struct program_space *pspace;
890 struct address_space *aspace;
891
892 /* follow-fork child, detach-on-fork on. */
893
894 inf->vfork_parent->pending_detach = 0;
895
896 if (!exec)
897 {
898 /* If we're handling a child exit, then inferior_ptid
899 points at the inferior's pid, not to a thread. */
900 old_chain = save_inferior_ptid ();
901 save_current_program_space ();
902 save_current_inferior ();
903 }
904 else
905 old_chain = save_current_space_and_thread ();
906
907 /* We're letting go of the parent. */
908 tp = any_live_thread_of_process (inf->vfork_parent->pid);
909 switch_to_thread (tp->ptid);
910
911 /* We're about to detach from the parent, which implicitly
912 removes breakpoints from its address space. There's a
913 catch here: we want to reuse the spaces for the child,
914 but, parent/child are still sharing the pspace at this
915 point, although the exec in reality makes the kernel give
916 the child a fresh set of new pages. The problem here is
917 that the breakpoints module, being unaware of this, would
918 likely choose the child process to write to the parent
919 address space. Swapping the child temporarily away from
920 the spaces has the desired effect. Yes, this is "sort
921 of" a hack. */
922
923 pspace = inf->pspace;
924 aspace = inf->aspace;
925 inf->aspace = NULL;
926 inf->pspace = NULL;
927
928 if (debug_infrun || info_verbose)
929 {
930 target_terminal_ours ();
931
932 if (exec)
933 fprintf_filtered (gdb_stdlog,
934 "Detaching vfork parent process "
935 "%d after child exec.\n",
936 inf->vfork_parent->pid);
937 else
938 fprintf_filtered (gdb_stdlog,
939 "Detaching vfork parent process "
940 "%d after child exit.\n",
941 inf->vfork_parent->pid);
942 }
943
944 target_detach (NULL, 0);
945
946 /* Put it back. */
947 inf->pspace = pspace;
948 inf->aspace = aspace;
949
950 do_cleanups (old_chain);
951 }
952 else if (exec)
953 {
954 /* We're staying attached to the parent, so, really give the
955 child a new address space. */
956 inf->pspace = add_program_space (maybe_new_address_space ());
957 inf->aspace = inf->pspace->aspace;
958 inf->removable = 1;
959 set_current_program_space (inf->pspace);
960
961 resume_parent = inf->vfork_parent->pid;
962
963 /* Break the bonds. */
964 inf->vfork_parent->vfork_child = NULL;
965 }
966 else
967 {
968 struct cleanup *old_chain;
969 struct program_space *pspace;
970
971 /* If this is a vfork child exiting, then the pspace and
972 aspaces were shared with the parent. Since we're
973 reporting the process exit, we'll be mourning all that is
974 found in the address space, and switching to null_ptid,
975 preparing to start a new inferior. But, since we don't
976 want to clobber the parent's address/program spaces, we
977 go ahead and create a new one for this exiting
978 inferior. */
979
980 /* Switch to null_ptid, so that clone_program_space doesn't want
981 to read the selected frame of a dead process. */
982 old_chain = save_inferior_ptid ();
983 inferior_ptid = null_ptid;
984
985 /* This inferior is dead, so avoid giving the breakpoints
986 module the option to write through to it (cloning a
987 program space resets breakpoints). */
988 inf->aspace = NULL;
989 inf->pspace = NULL;
990 pspace = add_program_space (maybe_new_address_space ());
991 set_current_program_space (pspace);
992 inf->removable = 1;
993 inf->symfile_flags = SYMFILE_NO_READ;
994 clone_program_space (pspace, inf->vfork_parent->pspace);
995 inf->pspace = pspace;
996 inf->aspace = pspace->aspace;
997
998 /* Put back inferior_ptid. We'll continue mourning this
999 inferior. */
1000 do_cleanups (old_chain);
1001
1002 resume_parent = inf->vfork_parent->pid;
1003 /* Break the bonds. */
1004 inf->vfork_parent->vfork_child = NULL;
1005 }
1006
1007 inf->vfork_parent = NULL;
1008
1009 gdb_assert (current_program_space == inf->pspace);
1010
1011 if (non_stop && resume_parent != -1)
1012 {
1013 /* If the user wanted the parent to be running, let it go
1014 free now. */
1015 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
1016
1017 if (debug_infrun)
1018 fprintf_unfiltered (gdb_stdlog,
1019 "infrun: resuming vfork parent process %d\n",
1020 resume_parent);
1021
1022 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
1023
1024 do_cleanups (old_chain);
1025 }
1026 }
1027 }
1028
1029 /* Enum strings for "set|show follow-exec-mode". */
1030
1031 static const char follow_exec_mode_new[] = "new";
1032 static const char follow_exec_mode_same[] = "same";
1033 static const char *const follow_exec_mode_names[] =
1034 {
1035 follow_exec_mode_new,
1036 follow_exec_mode_same,
1037 NULL,
1038 };
1039
1040 static const char *follow_exec_mode_string = follow_exec_mode_same;
1041 static void
1042 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
1043 struct cmd_list_element *c, const char *value)
1044 {
1045 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
1046 }
1047
1048 /* EXECD_PATHNAME is assumed to be non-NULL. */
1049
1050 static void
1051 follow_exec (ptid_t pid, char *execd_pathname)
1052 {
1053 struct thread_info *th = inferior_thread ();
1054 struct inferior *inf = current_inferior ();
1055
1056 /* This is an exec event that we actually wish to pay attention to.
1057 Refresh our symbol table to the newly exec'd program, remove any
1058 momentary bp's, etc.
1059
1060 If there are breakpoints, they aren't really inserted now,
1061 since the exec() transformed our inferior into a fresh set
1062 of instructions.
1063
1064 We want to preserve symbolic breakpoints on the list, since
1065 we have hopes that they can be reset after the new a.out's
1066 symbol table is read.
1067
1068 However, any "raw" breakpoints must be removed from the list
1069 (e.g., the solib bp's), since their address is probably invalid
1070 now.
1071
1072 And, we DON'T want to call delete_breakpoints() here, since
1073 that may write the bp's "shadow contents" (the instruction
1074 value that was overwritten with a TRAP instruction). Since
1075 we now have a new a.out, those shadow contents aren't valid. */
1076
1077 mark_breakpoints_out ();
1078
1079 update_breakpoints_after_exec ();
1080
1081 /* If there was one, it's gone now. We cannot truly step-to-next
1082 statement through an exec(). */
1083 th->control.step_resume_breakpoint = NULL;
1084 th->control.exception_resume_breakpoint = NULL;
1085 th->control.step_range_start = 0;
1086 th->control.step_range_end = 0;
1087
1088 /* The target reports the exec event to the main thread, even if
1089 some other thread does the exec, and even if the main thread was
1090 already stopped --- if debugging in non-stop mode, it's possible
1091 the user had the main thread held stopped in the previous image
1092 --- release it now. This is the same behavior as step-over-exec
1093 with scheduler-locking on in all-stop mode. */
1094 th->stop_requested = 0;
1095
1096 /* What is this a.out's name? */
1097 printf_unfiltered (_("%s is executing new program: %s\n"),
1098 target_pid_to_str (inferior_ptid),
1099 execd_pathname);
1100
1101 /* We've followed the inferior through an exec. Therefore, the
1102 inferior has essentially been killed & reborn. */
1103
1104 gdb_flush (gdb_stdout);
1105
1106 breakpoint_init_inferior (inf_execd);
1107
1108 if (gdb_sysroot && *gdb_sysroot)
1109 {
1110 char *name = alloca (strlen (gdb_sysroot)
1111 + strlen (execd_pathname)
1112 + 1);
1113
1114 strcpy (name, gdb_sysroot);
1115 strcat (name, execd_pathname);
1116 execd_pathname = name;
1117 }
1118
1119 /* Reset the shared library package. This ensures that we get a
1120 shlib event when the child reaches "_start", at which point the
1121 dld will have had a chance to initialize the child. */
1122 /* Also, loading a symbol file below may trigger symbol lookups, and
1123 we don't want those to be satisfied by the libraries of the
1124 previous incarnation of this process. */
1125 no_shared_libraries (NULL, 0);
1126
1127 if (follow_exec_mode_string == follow_exec_mode_new)
1128 {
1129 struct program_space *pspace;
1130
1131 /* The user wants to keep the old inferior and program spaces
1132 around. Create a new fresh one, and switch to it. */
1133
1134 inf = add_inferior (current_inferior ()->pid);
1135 pspace = add_program_space (maybe_new_address_space ());
1136 inf->pspace = pspace;
1137 inf->aspace = pspace->aspace;
1138
1139 exit_inferior_num_silent (current_inferior ()->num);
1140
1141 set_current_inferior (inf);
1142 set_current_program_space (pspace);
1143 }
1144 else
1145 {
1146 /* The old description may no longer be fit for the new image.
1147 E.g., a 64-bit process exec'ed a 32-bit process. Clear the
1148 old description; we'll read a new one below. No need to do
1149 this on "follow-exec-mode new", as the old inferior stays
1150 around (its description is later cleared/refetched on
1151 restart). */
1152 target_clear_description ();
1153 }
1154
1155 gdb_assert (current_program_space == inf->pspace);
1156
1157 /* That a.out is now the one to use. */
1158 exec_file_attach (execd_pathname, 0);
1159
1160 /* SYMFILE_DEFER_BP_RESET is used here because the proper displacement for a
1161 PIE (Position Independent Executable) main symbol file will only get applied
1162 by solib_create_inferior_hook below. breakpoint_re_set would otherwise fail
1163 to insert the breakpoints with the zero displacement. */
1164
1165 symbol_file_add (execd_pathname,
1166 (inf->symfile_flags
1167 | SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET),
1168 NULL, 0);
1169
1170 if ((inf->symfile_flags & SYMFILE_NO_READ) == 0)
1171 set_initial_language ();
1172
1173 /* If the target can specify a description, read it. Must do this
1174 after flipping to the new executable (because the target supplied
1175 description must be compatible with the executable's
1176 architecture, and the old executable may e.g., be 32-bit, while
1177 the new one 64-bit), and before anything involving memory or
1178 registers. */
1179 target_find_description ();
1180
1181 solib_create_inferior_hook (0);
1182
1183 jit_inferior_created_hook ();
1184
1185 breakpoint_re_set ();
1186
1187 /* Reinsert all breakpoints. (Those which were symbolic have
1188 been reset to the proper address in the new a.out, thanks
1189 to symbol_file_command...). */
1190 insert_breakpoints ();
1191
1192 /* The next resume of this inferior should bring it to the shlib
1193 startup breakpoints. (If the user had also set bp's on
1194 "main" from the old (parent) process, then they'll auto-
1195 matically get reset there in the new process.). */
1196 }
1197
1198 /* Non-zero if we are just simulating a single-step. This is needed
1199 because we cannot remove the breakpoints in the inferior process
1200 until after the `wait' in `wait_for_inferior'. */
1201 static int singlestep_breakpoints_inserted_p = 0;
1202
1203 /* The thread we inserted single-step breakpoints for. */
1204 static ptid_t singlestep_ptid;
1205
1206 /* PC when we started this single-step. */
1207 static CORE_ADDR singlestep_pc;
1208
1209 /* Info about an instruction that is being stepped over. */
1210
1211 struct step_over_info
1212 {
1213 /* If we're stepping past a breakpoint, this is the address space
1214 and address of the instruction the breakpoint is set at. We'll
1215 skip inserting all breakpoints here. Valid iff ASPACE is
1216 non-NULL. */
1217 struct address_space *aspace;
1218 CORE_ADDR address;
1219
1220 /* The instruction being stepped over triggers a nonsteppable
1221 watchpoint. If true, we'll skip inserting watchpoints. */
1222 int nonsteppable_watchpoint_p;
1223 };
1224
1225 /* The step-over info of the location that is being stepped over.
1226
1227 Note that with async/breakpoint always-inserted mode, a user might
1228 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
1229 being stepped over. As setting a new breakpoint inserts all
1230 breakpoints, we need to make sure the breakpoint being stepped over
1231 isn't inserted then. We do that by only clearing the step-over
1232 info when the step-over is actually finished (or aborted).
1233
1234 Presently GDB can only step over one breakpoint at any given time.
1235 Given threads that can't run code in the same address space as the
1236 breakpoint's can't really miss the breakpoint, GDB could be taught
1237 to step-over at most one breakpoint per address space (so this info
1238 could move to the address space object if/when GDB is extended).
1239 The set of breakpoints being stepped over will normally be much
1240 smaller than the set of all breakpoints, so a flag in the
1241 breakpoint location structure would be wasteful. A separate list
1242 also saves complexity and run-time, as otherwise we'd have to go
1243 through all breakpoint locations clearing their flag whenever we
1244 start a new sequence. Similar considerations weigh against storing
1245 this info in the thread object. Plus, not all step overs actually
1246 have breakpoint locations -- e.g., stepping past a single-step
1247 breakpoint, or stepping to complete a non-continuable
1248 watchpoint. */
1249 static struct step_over_info step_over_info;
1250
1251 /* Record the address of the breakpoint/instruction we're currently
1252 stepping over. */
1253
1254 static void
1255 set_step_over_info (struct address_space *aspace, CORE_ADDR address,
1256 int nonsteppable_watchpoint_p)
1257 {
1258 step_over_info.aspace = aspace;
1259 step_over_info.address = address;
1260 step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
1261 }
1262
1263 /* Called when we're no longer stepping over a breakpoint / an
1264 instruction, so all breakpoints are free to be (re)inserted. */
1265
1266 static void
1267 clear_step_over_info (void)
1268 {
1269 step_over_info.aspace = NULL;
1270 step_over_info.address = 0;
1271 step_over_info.nonsteppable_watchpoint_p = 0;
1272 }
1273
1274 /* See infrun.h. */
1275
1276 int
1277 stepping_past_instruction_at (struct address_space *aspace,
1278 CORE_ADDR address)
1279 {
1280 return (step_over_info.aspace != NULL
1281 && breakpoint_address_match (aspace, address,
1282 step_over_info.aspace,
1283 step_over_info.address));
1284 }
1285
1286 /* See infrun.h. */
1287
1288 int
1289 stepping_past_nonsteppable_watchpoint (void)
1290 {
1291 return step_over_info.nonsteppable_watchpoint_p;
1292 }
1293
1294 /* Returns true if step-over info is valid. */
1295
1296 static int
1297 step_over_info_valid_p (void)
1298 {
1299 return (step_over_info.aspace != NULL
1300 || stepping_past_nonsteppable_watchpoint ());
1301 }
1302
1303 \f
1304 /* Displaced stepping. */
1305
1306 /* In non-stop debugging mode, we must take special care to manage
1307 breakpoints properly; in particular, the traditional strategy for
1308 stepping a thread past a breakpoint it has hit is unsuitable.
1309 'Displaced stepping' is a tactic for stepping one thread past a
1310 breakpoint it has hit while ensuring that other threads running
1311 concurrently will hit the breakpoint as they should.
1312
1313 The traditional way to step a thread T off a breakpoint in a
1314 multi-threaded program in all-stop mode is as follows:
1315
1316 a0) Initially, all threads are stopped, and breakpoints are not
1317 inserted.
1318 a1) We single-step T, leaving breakpoints uninserted.
1319 a2) We insert breakpoints, and resume all threads.
1320
1321 In non-stop debugging, however, this strategy is unsuitable: we
1322 don't want to have to stop all threads in the system in order to
1323 continue or step T past a breakpoint. Instead, we use displaced
1324 stepping:
1325
1326 n0) Initially, T is stopped, other threads are running, and
1327 breakpoints are inserted.
1328 n1) We copy the instruction "under" the breakpoint to a separate
1329 location, outside the main code stream, making any adjustments
1330 to the instruction, register, and memory state as directed by
1331 T's architecture.
1332 n2) We single-step T over the instruction at its new location.
1333 n3) We adjust the resulting register and memory state as directed
1334 by T's architecture. This includes resetting T's PC to point
1335 back into the main instruction stream.
1336 n4) We resume T.
1337
1338 This approach depends on the following gdbarch methods:
1339
1340 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1341 indicate where to copy the instruction, and how much space must
1342 be reserved there. We use these in step n1.
1343
1344 - gdbarch_displaced_step_copy_insn copies an instruction to a new
1345 address, and makes any necessary adjustments to the instruction,
1346 register contents, and memory. We use this in step n1.
1347
1348 - gdbarch_displaced_step_fixup adjusts registers and memory after
1349 we have successfully single-stepped the instruction, to yield the
1350 same effect the instruction would have had if we had executed it
1351 at its original address. We use this in step n3.
1352
1353 - gdbarch_displaced_step_free_closure provides cleanup.
1354
1355 The gdbarch_displaced_step_copy_insn and
1356 gdbarch_displaced_step_fixup functions must be written so that
1357 copying an instruction with gdbarch_displaced_step_copy_insn,
1358 single-stepping across the copied instruction, and then applying
1359 gdbarch_displaced_step_fixup should have the same effects on the
1360 thread's memory and registers as stepping the instruction in place
1361 would have. Exactly which responsibilities fall to the copy and
1362 which fall to the fixup is up to the author of those functions.
1363
1364 See the comments in gdbarch.sh for details.
1365
1366 Note that displaced stepping and software single-step cannot
1367 currently be used in combination, although with some care I think
1368 they could be made to. Software single-step works by placing
1369 breakpoints on all possible subsequent instructions; if the
1370 displaced instruction is a PC-relative jump, those breakpoints
1371 could fall in very strange places --- on pages that aren't
1372 executable, or at addresses that are not proper instruction
1373 boundaries. (We do generally let other threads run while we wait
1374 to hit the software single-step breakpoint, and they might
1375 encounter such a corrupted instruction.) One way to work around
1376 this would be to have gdbarch_displaced_step_copy_insn fully
1377 simulate the effect of PC-relative instructions (and return NULL)
1378 on architectures that use software single-stepping.
1379
1380 In non-stop mode, we can have independent and simultaneous step
1381 requests, so more than one thread may need to simultaneously step
1382 over a breakpoint. The current implementation assumes there is
1383 only one scratch space per process. In this case, we have to
1384 serialize access to the scratch space. If thread A wants to step
1385 over a breakpoint, but we are currently waiting for some other
1386 thread to complete a displaced step, we leave thread A stopped and
1387 place it in the displaced_step_request_queue. Whenever a displaced
1388 step finishes, we pick the next thread in the queue and start a new
1389 displaced step operation on it. See displaced_step_prepare and
1390 displaced_step_fixup for details. */
1391
1392 struct displaced_step_request
1393 {
1394 ptid_t ptid;
1395 struct displaced_step_request *next;
1396 };
1397
1398 /* Per-inferior displaced stepping state. */
1399 struct displaced_step_inferior_state
1400 {
1401 /* Pointer to next in linked list. */
1402 struct displaced_step_inferior_state *next;
1403
1404 /* The process this displaced step state refers to. */
1405 int pid;
1406
1407 /* A queue of pending displaced stepping requests. One entry per
1408 thread that needs to do a displaced step. */
1409 struct displaced_step_request *step_request_queue;
1410
1411 /* If this is not null_ptid, this is the thread carrying out a
1412 displaced single-step in process PID. This thread's state will
1413 require fixing up once it has completed its step. */
1414 ptid_t step_ptid;
1415
1416 /* The architecture the thread had when we stepped it. */
1417 struct gdbarch *step_gdbarch;
1418
1419 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1420 for post-step cleanup. */
1421 struct displaced_step_closure *step_closure;
1422
1423 /* The address of the original instruction, and the copy we
1424 made. */
1425 CORE_ADDR step_original, step_copy;
1426
1427 /* Saved contents of copy area. */
1428 gdb_byte *step_saved_copy;
1429 };
1430
1431 /* The list of states of processes involved in displaced stepping
1432 presently. */
1433 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1434
1435 /* Get the displaced stepping state of process PID. */
1436
1437 static struct displaced_step_inferior_state *
1438 get_displaced_stepping_state (int pid)
1439 {
1440 struct displaced_step_inferior_state *state;
1441
1442 for (state = displaced_step_inferior_states;
1443 state != NULL;
1444 state = state->next)
1445 if (state->pid == pid)
1446 return state;
1447
1448 return NULL;
1449 }
1450
1451 /* Add a new displaced stepping state for process PID to the displaced
1452 stepping state list, or return a pointer to the existing entry if
1453 one already exists. Never returns NULL. */
1454
1455 static struct displaced_step_inferior_state *
1456 add_displaced_stepping_state (int pid)
1457 {
1458 struct displaced_step_inferior_state *state;
1459
1460 for (state = displaced_step_inferior_states;
1461 state != NULL;
1462 state = state->next)
1463 if (state->pid == pid)
1464 return state;
1465
1466 state = xcalloc (1, sizeof (*state));
1467 state->pid = pid;
1468 state->next = displaced_step_inferior_states;
1469 displaced_step_inferior_states = state;
1470
1471 return state;
1472 }
1473
1474 /* If the inferior is in the middle of a displaced step, and ADDR equals the
1475 starting address of the copy area, return the corresponding
1476 displaced_step_closure. Otherwise, return NULL. */
1477
1478 struct displaced_step_closure*
1479 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1480 {
1481 struct displaced_step_inferior_state *displaced
1482 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1483
1484 /* If checking the mode of displaced instruction in copy area. */
1485 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1486 && (displaced->step_copy == addr))
1487 return displaced->step_closure;
1488
1489 return NULL;
1490 }
1491
1492 /* Remove the displaced stepping state of process PID. */
1493
1494 static void
1495 remove_displaced_stepping_state (int pid)
1496 {
1497 struct displaced_step_inferior_state *it, **prev_next_p;
1498
1499 gdb_assert (pid != 0);
1500
1501 it = displaced_step_inferior_states;
1502 prev_next_p = &displaced_step_inferior_states;
1503 while (it)
1504 {
1505 if (it->pid == pid)
1506 {
1507 *prev_next_p = it->next;
1508 xfree (it);
1509 return;
1510 }
1511
1512 prev_next_p = &it->next;
1513 it = *prev_next_p;
1514 }
1515 }
1516
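/* Called when inferior INF goes away; discard any displaced-stepping
   state that was recorded for its process. */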
1517 static void
1518 infrun_inferior_exit (struct inferior *inf)
1519 {
1520 remove_displaced_stepping_state (inf->pid);
1521 }
1522
1523 /* If ON, and the architecture supports it, GDB will use displaced
1524 stepping to step over breakpoints. If OFF, or if the architecture
1525 doesn't support it, GDB will instead use the traditional
1526 hold-and-step approach. If AUTO (which is the default), GDB will
1527 decide which technique to use to step over breakpoints depending on
1528 which of all-stop or non-stop mode is active --- displaced stepping
1529 in non-stop mode; hold-and-step in all-stop mode. */
1530
1531 static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1532
1533 static void
1534 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1535 struct cmd_list_element *c,
1536 const char *value)
1537 {
1538 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
1539 fprintf_filtered (file,
1540 _("Debugger's willingness to use displaced stepping "
1541 "to step over breakpoints is %s (currently %s).\n"),
1542 value, non_stop ? "on" : "off");
1543 else
1544 fprintf_filtered (file,
1545 _("Debugger's willingness to use displaced stepping "
1546 "to step over breakpoints is %s.\n"), value);
1547 }
1548
1549 /* Return non-zero if displaced stepping can/should be used to step
1550 over breakpoints. */
1551
1552 static int
1553 use_displaced_stepping (struct gdbarch *gdbarch)
1554 {
1555 return (((can_use_displaced_stepping == AUTO_BOOLEAN_AUTO && non_stop)
1556 || can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1557 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1558 && find_record_target () == NULL);
1559 }
1560
1561 /* Clean out any stray displaced stepping state. */
1562 static void
1563 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1564 {
1565 /* Indicate that there is no cleanup pending. */
1566 displaced->step_ptid = null_ptid;
1567
1568 if (displaced->step_closure)
1569 {
1570 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1571 displaced->step_closure);
1572 displaced->step_closure = NULL;
1573 }
1574 }
1575
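/* Cleanup wrapper around displaced_step_clear, for use with
   make_cleanup. */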
1576 static void
1577 displaced_step_clear_cleanup (void *arg)
1578 {
1579 struct displaced_step_inferior_state *state = arg;
1580
1581 displaced_step_clear (state);
1582 }
1583
1584 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1585 void
1586 displaced_step_dump_bytes (struct ui_file *file,
1587 const gdb_byte *buf,
1588 size_t len)
1589 {
1590 int i;
1591
1592 for (i = 0; i < len; i++)
1593 fprintf_unfiltered (file, "%02x ", buf[i]);
1594 fputs_unfiltered ("\n", file);
1595 }
1596
1597 /* Prepare to single-step, using displaced stepping.
1598
1599 Note that we cannot use displaced stepping when we have a signal to
1600 deliver. If we have a signal to deliver and an instruction to step
1601 over, then after the step, there will be no indication from the
1602 target whether the thread entered a signal handler or ignored the
1603 signal and stepped over the instruction successfully --- both cases
1604 result in a simple SIGTRAP. In the first case we mustn't do a
1605 fixup, and in the second case we must --- but we can't tell which.
1606 Comments in the code for 'random signals' in handle_inferior_event
1607 explain how we handle this case instead.
1608
1609 Returns 1 if preparing was successful -- this thread is going to be
1610 stepped now; or 0 if displaced stepping this thread got queued. */
1611 static int
1612 displaced_step_prepare (ptid_t ptid)
1613 {
1614 struct cleanup *old_cleanups, *ignore_cleanups;
1615 struct thread_info *tp = find_thread_ptid (ptid);
1616 struct regcache *regcache = get_thread_regcache (ptid);
1617 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1618 CORE_ADDR original, copy;
1619 ULONGEST len;
1620 struct displaced_step_closure *closure;
1621 struct displaced_step_inferior_state *displaced;
1622 int status;
1623
1624 /* We should never reach this function if the architecture does not
1625 support displaced stepping. */
1626 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1627
1628 /* Disable range stepping while executing in the scratch pad. We
1629 want a single-step even if executing the displaced instruction in
1630 the scratch buffer lands within the stepping range (e.g., a
1631 jump/branch). */
1632 tp->control.may_range_step = 0;
1633
1634 /* We have to displaced step one thread at a time, as we only have
1635 access to a single scratch space per inferior. */
1636
1637 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1638
1639 if (!ptid_equal (displaced->step_ptid, null_ptid))
1640 {
1641 /* Already waiting for a displaced step to finish. Defer this
1642 request and place in queue. */
1643 struct displaced_step_request *req, *new_req;
1644
1645 if (debug_displaced)
1646 fprintf_unfiltered (gdb_stdlog,
1647 "displaced: deferring step of %s\n",
1648 target_pid_to_str (ptid));
1649
1650 new_req = xmalloc (sizeof (*new_req));
1651 new_req->ptid = ptid;
1652 new_req->next = NULL;
1653
1654 if (displaced->step_request_queue)
1655 {
1656 for (req = displaced->step_request_queue;
1657 req && req->next;
1658 req = req->next)
1659 ;
1660 req->next = new_req;
1661 }
1662 else
1663 displaced->step_request_queue = new_req;
1664
1665 return 0;
1666 }
1667 else
1668 {
1669 if (debug_displaced)
1670 fprintf_unfiltered (gdb_stdlog,
1671 "displaced: stepping %s now\n",
1672 target_pid_to_str (ptid));
1673 }
1674
1675 displaced_step_clear (displaced);
1676
1677 old_cleanups = save_inferior_ptid ();
1678 inferior_ptid = ptid;
1679
1680 original = regcache_read_pc (regcache);
1681
1682 copy = gdbarch_displaced_step_location (gdbarch);
1683 len = gdbarch_max_insn_length (gdbarch);
1684
1685 /* Save the original contents of the copy area. */
1686 displaced->step_saved_copy = xmalloc (len);
1687 ignore_cleanups = make_cleanup (free_current_contents,
1688 &displaced->step_saved_copy);
1689 status = target_read_memory (copy, displaced->step_saved_copy, len);
1690 if (status != 0)
1691 throw_error (MEMORY_ERROR,
1692 _("Error accessing memory address %s (%s) for "
1693 "displaced-stepping scratch space."),
1694 paddress (gdbarch, copy), safe_strerror (status));
1695 if (debug_displaced)
1696 {
1697 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1698 paddress (gdbarch, copy));
1699 displaced_step_dump_bytes (gdb_stdlog,
1700 displaced->step_saved_copy,
1701 len);
1702 }
1703
1704 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1705 original, copy, regcache);
1706
1707 /* We don't support the fully-simulated case at present. */
1708 gdb_assert (closure);
1709
1710 /* Save the information we need to fix things up if the step
1711 succeeds. */
1712 displaced->step_ptid = ptid;
1713 displaced->step_gdbarch = gdbarch;
1714 displaced->step_closure = closure;
1715 displaced->step_original = original;
1716 displaced->step_copy = copy;
1717
1718 make_cleanup (displaced_step_clear_cleanup, displaced);
1719
1720 /* Resume execution at the copy. */
1721 regcache_write_pc (regcache, copy);
1722
1723 discard_cleanups (ignore_cleanups);
1724
1725 do_cleanups (old_cleanups);
1726
1727 if (debug_displaced)
1728 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1729 paddress (gdbarch, copy));
1730
1731 return 1;
1732 }
1733
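/* Write LEN bytes starting at MYADDR to target memory at MEMADDR, on
   behalf of thread PTID.  Temporarily switch to PTID so the write is
   done in that thread's context.  */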
1734 static void
1735 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1736 const gdb_byte *myaddr, int len)
1737 {
1738 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1739
1740 inferior_ptid = ptid;
1741 write_memory (memaddr, myaddr, len);
1742 do_cleanups (ptid_cleanup);
1743 }
1744
1745 /* Restore the contents of the copy area for thread PTID. */
1746
1747 static void
1748 displaced_step_restore (struct displaced_step_inferior_state *displaced,
1749 ptid_t ptid)
1750 {
1751 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1752
1753 write_memory_ptid (ptid, displaced->step_copy,
1754 displaced->step_saved_copy, len);
1755 if (debug_displaced)
1756 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
1757 target_pid_to_str (ptid),
1758 paddress (displaced->step_gdbarch,
1759 displaced->step_copy));
1760 }
1761
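/* If a thread of the process EVENT_PTID belongs to was doing a
   displaced step, restore the original contents of the scratch pad,
   fix up the thread's state (or, if the instruction didn't complete,
   map the PC back from the scratch area to the original code), and
   then start the next queued displaced stepping request, if any.
   SIGNAL is the signal the stepped thread stopped with.  */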
1762 static void
1763 displaced_step_fixup (ptid_t event_ptid, enum gdb_signal signal)
1764 {
1765 struct cleanup *old_cleanups;
1766 struct displaced_step_inferior_state *displaced
1767 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1768
1769 /* Was any thread of this process doing a displaced step? */
1770 if (displaced == NULL)
1771 return;
1772
1773 /* Was this event for the pid we displaced? */
1774 if (ptid_equal (displaced->step_ptid, null_ptid)
1775 || ! ptid_equal (displaced->step_ptid, event_ptid))
1776 return;
1777
1778 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1779
1780 displaced_step_restore (displaced, displaced->step_ptid);
1781
1782 /* Did the instruction complete successfully? */
1783 if (signal == GDB_SIGNAL_TRAP)
1784 {
1785 /* Fix up the resulting state. */
1786 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1787 displaced->step_closure,
1788 displaced->step_original,
1789 displaced->step_copy,
1790 get_thread_regcache (displaced->step_ptid));
1791 }
1792 else
1793 {
1794 /* Since the instruction didn't complete, all we can do is
1795 relocate the PC. */
1796 struct regcache *regcache = get_thread_regcache (event_ptid);
1797 CORE_ADDR pc = regcache_read_pc (regcache);
1798
1799 pc = displaced->step_original + (pc - displaced->step_copy);
1800 regcache_write_pc (regcache, pc);
1801 }
1802
1803 do_cleanups (old_cleanups);
1804
1805 displaced->step_ptid = null_ptid;
1806
1807 /* Are there any pending displaced stepping requests? If so, run
1808 one now. Leave the state object around, since we're likely to
1809 need it again soon. */
1810 while (displaced->step_request_queue)
1811 {
1812 struct displaced_step_request *head;
1813 ptid_t ptid;
1814 struct regcache *regcache;
1815 struct gdbarch *gdbarch;
1816 CORE_ADDR actual_pc;
1817 struct address_space *aspace;
1818
1819 head = displaced->step_request_queue;
1820 ptid = head->ptid;
1821 displaced->step_request_queue = head->next;
1822 xfree (head);
1823
1824 context_switch (ptid);
1825
1826 regcache = get_thread_regcache (ptid);
1827 actual_pc = regcache_read_pc (regcache);
1828 aspace = get_regcache_aspace (regcache);
1829
1830 if (breakpoint_here_p (aspace, actual_pc))
1831 {
1832 if (debug_displaced)
1833 fprintf_unfiltered (gdb_stdlog,
1834 "displaced: stepping queued %s now\n",
1835 target_pid_to_str (ptid));
1836
1837 displaced_step_prepare (ptid);
1838
1839 gdbarch = get_regcache_arch (regcache);
1840
1841 if (debug_displaced)
1842 {
1843 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1844 gdb_byte buf[4];
1845
1846 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1847 paddress (gdbarch, actual_pc));
1848 read_memory (actual_pc, buf, sizeof (buf));
1849 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1850 }
1851
1852 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1853 displaced->step_closure))
1854 target_resume (ptid, 1, GDB_SIGNAL_0);
1855 else
1856 target_resume (ptid, 0, GDB_SIGNAL_0);
1857
1858 /* Done, we're stepping a thread. */
1859 break;
1860 }
1861 else
1862 {
1863 int step;
1864 struct thread_info *tp = inferior_thread ();
1865
1866 /* The breakpoint we were sitting under has since been
1867 removed. */
1868 tp->control.trap_expected = 0;
1869
1870 /* Go back to what we were trying to do. */
1871 step = currently_stepping (tp);
1872
1873 if (debug_displaced)
1874 fprintf_unfiltered (gdb_stdlog,
1875 "displaced: breakpoint is gone: %s, step(%d)\n",
1876 target_pid_to_str (tp->ptid), step);
1877
1878 target_resume (ptid, step, GDB_SIGNAL_0);
1879 tp->suspend.stop_signal = GDB_SIGNAL_0;
1880
1881 /* This request was discarded. See if there's any other
1882 thread waiting for its turn. */
1883 }
1884 }
1885 }
1886
1887 /* Update global variables holding ptids to hold NEW_PTID if they were
1888 holding OLD_PTID. */
1889 static void
1890 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1891 {
1892 struct displaced_step_request *it;
1893 struct displaced_step_inferior_state *displaced;
1894
1895 if (ptid_equal (inferior_ptid, old_ptid))
1896 inferior_ptid = new_ptid;
1897
1898 if (ptid_equal (singlestep_ptid, old_ptid))
1899 singlestep_ptid = new_ptid;
1900
1901 for (displaced = displaced_step_inferior_states;
1902 displaced;
1903 displaced = displaced->next)
1904 {
1905 if (ptid_equal (displaced->step_ptid, old_ptid))
1906 displaced->step_ptid = new_ptid;
1907
1908 for (it = displaced->step_request_queue; it; it = it->next)
1909 if (ptid_equal (it->ptid, old_ptid))
1910 it->ptid = new_ptid;
1911 }
1912 }
1913
1914 \f
1915 /* Resuming. */
1916
1917 /* Things to clean up if we QUIT out of resume (). */
1918 static void
1919 resume_cleanups (void *ignore)
1920 {
1921 if (single_step_breakpoints_inserted ())
1922 remove_single_step_breakpoints ();
1923
1924 normal_stop ();
1925 }
1926
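/* Allowed values for the "set scheduler-locking" setting.  */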
1927 static const char schedlock_off[] = "off";
1928 static const char schedlock_on[] = "on";
1929 static const char schedlock_step[] = "step";
1930 static const char *const scheduler_enums[] = {
1931 schedlock_off,
1932 schedlock_on,
1933 schedlock_step,
1934 NULL
1935 };
1936 static const char *scheduler_mode = schedlock_off;
1937 static void
1938 show_scheduler_mode (struct ui_file *file, int from_tty,
1939 struct cmd_list_element *c, const char *value)
1940 {
1941 fprintf_filtered (file,
1942 _("Mode for locking scheduler "
1943 "during execution is \"%s\".\n"),
1944 value);
1945 }
1946
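/* The "set scheduler-locking" command's set hook.  Refuse the setting
   if the target cannot lock the scheduler.  */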
1947 static void
1948 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1949 {
1950 if (!target_can_lock_scheduler)
1951 {
1952 scheduler_mode = schedlock_off;
1953 error (_("Target '%s' cannot support this command."), target_shortname);
1954 }
1955 }
1956
1957 /* True if execution commands resume all threads of all processes by
1958 default; otherwise, resume only threads of the current inferior
1959 process. */
1960 int sched_multi = 0;
1961
1962 /* Try to setup for software single stepping over the specified location.
1963 Return 1 if target_resume() should use hardware single step.
1964
1965 GDBARCH the current gdbarch.
1966 PC the location to step over. */
1967
1968 static int
1969 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1970 {
1971 int hw_step = 1;
1972
1973 if (execution_direction == EXEC_FORWARD
1974 && gdbarch_software_single_step_p (gdbarch)
1975 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1976 {
1977 hw_step = 0;
1978 /* Do not pull these breakpoints until after a `wait' in
1979 `wait_for_inferior'. */
1980 singlestep_breakpoints_inserted_p = 1;
1981 singlestep_ptid = inferior_ptid;
1982 singlestep_pc = pc;
1983 }
1984 return hw_step;
1985 }
1986
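/* Return the set of threads that an execution command resumes, as far
   as the user/frontend is concerned, taking "set schedule-multiple",
   "set scheduler-locking" and non-stop mode into account.  STEP is
   true if this is a stepping command.  */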
1987 ptid_t
1988 user_visible_resume_ptid (int step)
1989 {
1990 /* By default, resume all threads of all processes. */
1991 ptid_t resume_ptid = RESUME_ALL;
1992
1993 /* Maybe resume only all threads of the current process. */
1994 if (!sched_multi && target_supports_multi_process ())
1995 {
1996 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1997 }
1998
1999 /* Maybe resume a single thread after all. */
2000 if (non_stop)
2001 {
2002 /* With non-stop mode on, threads are always handled
2003 individually. */
2004 resume_ptid = inferior_ptid;
2005 }
2006 else if ((scheduler_mode == schedlock_on)
2007 || (scheduler_mode == schedlock_step && step))
2008 {
2009 /* User-settable 'scheduler' mode requires solo thread resume. */
2010 resume_ptid = inferior_ptid;
2011 }
2012
2013 /* We may actually resume fewer threads at first, e.g., if a thread
2014 is stopped at a breakpoint that needs stepping-off, but that
2015 should not be visible to the user/frontend, and neither should
2016 the frontend/user be allowed to proceed any of the threads that
2017 happen to be stopped for internal run control handling, if a
2018 previous command wanted them resumed. */
2019 return resume_ptid;
2020 }
2021
2022 /* Resume the inferior, but allow a QUIT. This is useful if the user
2023 wants to interrupt some lengthy single-stepping operation
2024 (for child processes, the SIGINT goes to the inferior, and so
2025 we get a SIGINT random_signal, but for remote debugging and perhaps
2026 other targets, that's not true).
2027
2028 STEP nonzero if we should step (zero to continue instead).
2029 SIG is the signal to give the inferior (zero for none). */
2030 void
2031 resume (int step, enum gdb_signal sig)
2032 {
2033 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
2034 struct regcache *regcache = get_current_regcache ();
2035 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2036 struct thread_info *tp = inferior_thread ();
2037 CORE_ADDR pc = regcache_read_pc (regcache);
2038 struct address_space *aspace = get_regcache_aspace (regcache);
2039 ptid_t resume_ptid;
2040 /* From here on, this represents the caller's step vs continue
2041 request, while STEP represents what we'll actually request the
2042 target to do. STEP can decay from a step to a continue, if e.g.,
2043 we need to implement single-stepping with breakpoints (software
2044 single-step). When deciding whether "set scheduler-locking step"
2045 applies, it's the caller's intention that counts. */
2046 const int entry_step = step;
2047
2048 QUIT;
2049
2050 if (current_inferior ()->waiting_for_vfork_done)
2051 {
2052 /* Don't try to single-step a vfork parent that is waiting for
2053 the child to get out of the shared memory region (by exec'ing
2054 or exiting). This is particularly important on software
2055 single-step archs, as the child process would trip on the
2056 software single step breakpoint inserted for the parent
2057 process. Since the parent will not actually execute any
2058 instruction until the child is out of the shared region (such
2059 are vfork's semantics), it is safe to simply continue it.
2060 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2061 the parent, and tell it to `keep_going', which automatically
2062 sets it stepping again. */
2063 if (debug_infrun)
2064 fprintf_unfiltered (gdb_stdlog,
2065 "infrun: resume : clear step\n");
2066 step = 0;
2067 }
2068
2069 if (debug_infrun)
2070 fprintf_unfiltered (gdb_stdlog,
2071 "infrun: resume (step=%d, signal=%s), "
2072 "trap_expected=%d, current thread [%s] at %s\n",
2073 step, gdb_signal_to_symbol_string (sig),
2074 tp->control.trap_expected,
2075 target_pid_to_str (inferior_ptid),
2076 paddress (gdbarch, pc));
2077
2078 /* Normally, by the time we reach `resume', the breakpoints are either
2079 removed or inserted, as appropriate. The exception is if we're sitting
2080 at a permanent breakpoint; we need to step over it, but permanent
2081 breakpoints can't be removed. So we have to test for it here. */
2082 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
2083 {
2084 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
2085 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
2086 else
2087 error (_("\
2088 The program is stopped at a permanent breakpoint, but GDB does not know\n\
2089 how to step past a permanent breakpoint on this architecture. Try using\n\
2090 a command like `return' or `jump' to continue execution."));
2091 }
2092
2093 /* If we have a breakpoint to step over, make sure to do a single
2094 step only. Same if we have software watchpoints. */
2095 if (tp->control.trap_expected || bpstat_should_step ())
2096 tp->control.may_range_step = 0;
2097
2098 /* If enabled, step over breakpoints by executing a copy of the
2099 instruction at a different address.
2100
2101 We can't use displaced stepping when we have a signal to deliver;
2102 the comments for displaced_step_prepare explain why. The
2103 comments in the handle_inferior event for dealing with 'random
2104 signals' explain what we do instead.
2105
2106 We can't use displaced stepping when we are waiting for a vfork_done
2107 event; displaced stepping would break the vfork child in the same way
2108 that a software single-step breakpoint would. */
2109 if (use_displaced_stepping (gdbarch)
2110 && (tp->control.trap_expected
2111 || (step && gdbarch_software_single_step_p (gdbarch)))
2112 && sig == GDB_SIGNAL_0
2113 && !current_inferior ()->waiting_for_vfork_done)
2114 {
2115 struct displaced_step_inferior_state *displaced;
2116
2117 if (!displaced_step_prepare (inferior_ptid))
2118 {
2119 /* Got placed in displaced stepping queue. Will be resumed
2120 later when all the currently queued displaced stepping
2121 requests finish. The thread is not executing at this
2122 point, and the call to set_executing will be made later.
2123 But we need to call set_running here, since from the
2124 user/frontend's point of view, threads were set running.
2125 Unless we're calling an inferior function, as in that
2126 case we pretend the inferior doesn't run at all. */
2127 if (!tp->control.in_infcall)
2128 set_running (user_visible_resume_ptid (entry_step), 1);
2129 discard_cleanups (old_cleanups);
2130 return;
2131 }
2132
2133 /* Update pc to reflect the new address from which we will execute
2134 instructions due to displaced stepping. */
2135 pc = regcache_read_pc (get_thread_regcache (inferior_ptid));
2136
2137 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
2138 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
2139 displaced->step_closure);
2140 }
2141
2142 /* Do we need to do it the hard way, w/temp breakpoints? */
2143 else if (step)
2144 step = maybe_software_singlestep (gdbarch, pc);
2145
2146 /* Currently, our software single-step implementation leads to different
2147 results than hardware single-stepping in one situation: when stepping
2148 into delivering a signal which has an associated signal handler,
2149 hardware single-step will stop at the first instruction of the handler,
2150 while software single-step will simply skip execution of the handler.
2151
2152 For now, this difference in behavior is accepted since there is no
2153 easy way to actually implement single-stepping into a signal handler
2154 without kernel support.
2155
2156 However, there is one scenario where this difference leads to follow-on
2157 problems: if we're stepping off a breakpoint by removing all breakpoints
2158 and then single-stepping. In this case, the software single-step
2159 behavior means that even if there is a *breakpoint* in the signal
2160 handler, GDB still would not stop.
2161
2162 Fortunately, we can at least fix this particular issue. We detect
2163 here the case where we are about to deliver a signal while software
2164 single-stepping with breakpoints removed. In this situation, we
2165 revert the decisions to remove all breakpoints and insert single-
2166 step breakpoints, and instead we install a step-resume breakpoint
2167 at the current address, deliver the signal without stepping, and
2168 once we arrive back at the step-resume breakpoint, actually step
2169 over the breakpoint we originally wanted to step over. */
2170 if (singlestep_breakpoints_inserted_p
2171 && sig != GDB_SIGNAL_0
2172 && step_over_info_valid_p ())
2173 {
2174 /* If we have nested signals or a pending signal is delivered
2175 immediately after a handler returns, we might already have
2176 a step-resume breakpoint set on the earlier handler. We cannot
2177 set another step-resume breakpoint; just continue on until the
2178 original breakpoint is hit. */
2179 if (tp->control.step_resume_breakpoint == NULL)
2180 {
2181 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2182 tp->step_after_step_resume_breakpoint = 1;
2183 }
2184
2185 remove_single_step_breakpoints ();
2186 singlestep_breakpoints_inserted_p = 0;
2187
2188 clear_step_over_info ();
2189 tp->control.trap_expected = 0;
2190
2191 insert_breakpoints ();
2192 }
2193
2194 /* If STEP is set, it's a request to use hardware stepping
2195 facilities. But in that case, we should never
2196 use a single-step breakpoint. */
2197 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
2198
2199 /* Decide the set of threads to ask the target to resume. Start
2200 by assuming everything will be resumed, then narrow the set
2201 by applying increasingly restrictive conditions. */
2202 resume_ptid = user_visible_resume_ptid (entry_step);
2203
2204 /* Even if RESUME_PTID is a wildcard, and we end up resuming less
2205 (e.g., we might need to step over a breakpoint), from the
2206 user/frontend's point of view, all threads in RESUME_PTID are now
2207 running. Unless we're calling an inferior function, as in that
2208 case we pretend the inferior doesn't run at all. */
2209 if (!tp->control.in_infcall)
2210 set_running (resume_ptid, 1);
2211
2212 /* Maybe resume a single thread after all. */
2213 if ((step || singlestep_breakpoints_inserted_p)
2214 && tp->control.trap_expected)
2215 {
2216 /* We're allowing a thread to run past a breakpoint it has
2217 hit, by single-stepping the thread with the breakpoint
2218 removed. In which case, we need to single-step only this
2219 thread, and keep others stopped, as they can miss this
2220 breakpoint if allowed to run. */
2221 resume_ptid = inferior_ptid;
2222 }
2223
2224 if (gdbarch_cannot_step_breakpoint (gdbarch))
2225 {
2226 /* Most targets can step a breakpoint instruction, thus
2227 executing it normally. But if this one cannot, just
2228 continue and we will hit it anyway. */
2229 if (step && breakpoint_inserted_here_p (aspace, pc))
2230 step = 0;
2231 }
2232
2233 if (debug_displaced
2234 && use_displaced_stepping (gdbarch)
2235 && tp->control.trap_expected)
2236 {
2237 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
2238 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
2239 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
2240 gdb_byte buf[4];
2241
2242 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
2243 paddress (resume_gdbarch, actual_pc));
2244 read_memory (actual_pc, buf, sizeof (buf));
2245 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
2246 }
2247
2248 if (tp->control.may_range_step)
2249 {
2250 /* If we're resuming a thread with the PC out of the step
2251 range, then we're doing some nested/finer run control
2252 operation, like stepping the thread out of the dynamic
2253 linker or the displaced stepping scratch pad. We
2254 shouldn't have allowed a range step then. */
2255 gdb_assert (pc_in_thread_step_range (pc, tp));
2256 }
2257
2258 /* Install inferior's terminal modes. */
2259 target_terminal_inferior ();
2260
2261 /* Avoid confusing the next resume, if the next stop/resume
2262 happens to apply to another thread. */
2263 tp->suspend.stop_signal = GDB_SIGNAL_0;
2264
2265 /* Advise target which signals may be handled silently. If we have
2266 removed breakpoints because we are stepping over one (in any
2267 thread), we need to receive all signals to avoid accidentally
2268 skipping a breakpoint during execution of a signal handler. */
2269 if (step_over_info_valid_p ())
2270 target_pass_signals (0, NULL);
2271 else
2272 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
2273
2274 target_resume (resume_ptid, step, sig);
2275
2276 discard_cleanups (old_cleanups);
2277 }
2278 \f
2279 /* Proceeding. */
2280
2281 /* Clear out all variables saying what to do when inferior is continued.
2282 First do this, then set the ones you want, then call `proceed'. */
2283
2284 static void
2285 clear_proceed_status_thread (struct thread_info *tp)
2286 {
2287 if (debug_infrun)
2288 fprintf_unfiltered (gdb_stdlog,
2289 "infrun: clear_proceed_status_thread (%s)\n",
2290 target_pid_to_str (tp->ptid));
2291
2292 /* If this signal should not be seen by the program, give it zero.
2293 Used for debugging signals. */
2294 if (!signal_pass_state (tp->suspend.stop_signal))
2295 tp->suspend.stop_signal = GDB_SIGNAL_0;
2296
2297 tp->control.trap_expected = 0;
2298 tp->control.step_range_start = 0;
2299 tp->control.step_range_end = 0;
2300 tp->control.may_range_step = 0;
2301 tp->control.step_frame_id = null_frame_id;
2302 tp->control.step_stack_frame_id = null_frame_id;
2303 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
2304 tp->stop_requested = 0;
2305
2306 tp->control.stop_step = 0;
2307
2308 tp->control.proceed_to_finish = 0;
2309
2310 tp->control.command_interp = NULL;
2311
2312 /* Discard any remaining commands or status from previous stop. */
2313 bpstat_clear (&tp->control.stop_bpstat);
2314 }
2315
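/* Clear the proceed status of every thread that is about to be
   resumed (or of just the current thread, in non-stop mode), plus
   the per-inferior state.  STEP is true if this is a stepping
   command.  */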
2316 void
2317 clear_proceed_status (int step)
2318 {
2319 if (!non_stop)
2320 {
2321 struct thread_info *tp;
2322 ptid_t resume_ptid;
2323
2324 resume_ptid = user_visible_resume_ptid (step);
2325
2326 /* In all-stop mode, delete the per-thread status of all threads
2327 we're about to resume, implicitly and explicitly. */
2328 ALL_NON_EXITED_THREADS (tp)
2329 {
2330 if (!ptid_match (tp->ptid, resume_ptid))
2331 continue;
2332 clear_proceed_status_thread (tp);
2333 }
2334 }
2335
2336 if (!ptid_equal (inferior_ptid, null_ptid))
2337 {
2338 struct inferior *inferior;
2339
2340 if (non_stop)
2341 {
2342 /* If in non-stop mode, only delete the per-thread status of
2343 the current thread. */
2344 clear_proceed_status_thread (inferior_thread ());
2345 }
2346
2347 inferior = current_inferior ();
2348 inferior->control.stop_soon = NO_STOP_QUIETLY;
2349 }
2350
2351 stop_after_trap = 0;
2352
2353 clear_step_over_info ();
2354
2355 observer_notify_about_to_proceed ();
2356
2357 if (stop_registers)
2358 {
2359 regcache_xfree (stop_registers);
2360 stop_registers = NULL;
2361 }
2362 }
2363
2364 /* Returns true if TP is still stopped at a breakpoint that needs
2365 stepping-over in order to make progress. If the breakpoint is gone
2366 meanwhile, we can skip the whole step-over dance. */
2367
2368 static int
2369 thread_still_needs_step_over (struct thread_info *tp)
2370 {
2371 if (tp->stepping_over_breakpoint)
2372 {
2373 struct regcache *regcache = get_thread_regcache (tp->ptid);
2374
2375 if (breakpoint_here_p (get_regcache_aspace (regcache),
2376 regcache_read_pc (regcache)))
2377 return 1;
2378
2379 tp->stepping_over_breakpoint = 0;
2380 }
2381
2382 return 0;
2383 }
2384
2385 /* Returns true if scheduler locking applies. STEP indicates whether
2386 we're about to do a step/next-like command to a thread. */
2387
2388 static int
2389 schedlock_applies (int step)
2390 {
2391 return (scheduler_mode == schedlock_on
2392 || (scheduler_mode == schedlock_step
2393 && step));
2394 }
2395
2396 /* Look for a thread other than EXCEPT that has previously reported a
2397 breakpoint event, and thus needs a step-over in order to make
2398 progress. Returns NULL if none is found. STEP indicates whether
2399 we're about to step the current thread, in order to decide whether
2400 "set scheduler-locking step" applies. */
2401
2402 static struct thread_info *
2403 find_thread_needs_step_over (int step, struct thread_info *except)
2404 {
2405 struct thread_info *tp, *current;
2406
2407 /* With non-stop mode on, threads are always handled individually. */
2408 gdb_assert (! non_stop);
2409
2410 current = inferior_thread ();
2411
2412 /* If scheduler locking applies, we can avoid iterating over all
2413 threads. */
2414 if (schedlock_applies (step))
2415 {
2416 if (except != current
2417 && thread_still_needs_step_over (current))
2418 return current;
2419
2420 return NULL;
2421 }
2422
2423 ALL_NON_EXITED_THREADS (tp)
2424 {
2425 /* Ignore the EXCEPT thread. */
2426 if (tp == except)
2427 continue;
2428 /* Ignore threads of processes we're not resuming. */
2429 if (!sched_multi
2430 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
2431 continue;
2432
2433 if (thread_still_needs_step_over (tp))
2434 return tp;
2435 }
2436
2437 return NULL;
2438 }
2439
2440 /* Basic routine for continuing the program in various fashions.
2441
2442 ADDR is the address to resume at, or -1 for resume where stopped.
2443 SIGGNAL is the signal to give it, or 0 for none,
2444 or -1 for act according to how it stopped.
2445 STEP is nonzero if we should trap after one instruction.
2446 -1 means return after that and print nothing.
2447 You should probably set various step_... variables
2448 before calling here, if you are stepping.
2449
2450 You should call clear_proceed_status before calling proceed. */
2451
2452 void
2453 proceed (CORE_ADDR addr, enum gdb_signal siggnal, int step)
2454 {
2455 struct regcache *regcache;
2456 struct gdbarch *gdbarch;
2457 struct thread_info *tp;
2458 CORE_ADDR pc;
2459 struct address_space *aspace;
2460
2461 /* If we're stopped at a fork/vfork, follow the branch set by the
2462 "set follow-fork-mode" command; otherwise, we'll just proceed
2463 resuming the current thread. */
2464 if (!follow_fork ())
2465 {
2466 /* The target for some reason decided not to resume. */
2467 normal_stop ();
2468 if (target_can_async_p ())
2469 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2470 return;
2471 }
2472
2473 /* We'll update this if & when we switch to a new thread. */
2474 previous_inferior_ptid = inferior_ptid;
2475
2476 regcache = get_current_regcache ();
2477 gdbarch = get_regcache_arch (regcache);
2478 aspace = get_regcache_aspace (regcache);
2479 pc = regcache_read_pc (regcache);
2480 tp = inferior_thread ();
2481
2482 if (step > 0)
2483 step_start_function = find_pc_function (pc);
2484 if (step < 0)
2485 stop_after_trap = 1;
2486
2487 /* Fill in with reasonable starting values. */
2488 init_thread_stepping_state (tp);
2489
2490 if (addr == (CORE_ADDR) -1)
2491 {
2492 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
2493 && execution_direction != EXEC_REVERSE)
2494 /* There is a breakpoint at the address we will resume at,
2495 step one instruction before inserting breakpoints so that
2496 we do not stop right away (and report a second hit at this
2497 breakpoint).
2498
2499 Note, we don't do this in reverse, because we won't
2500 actually be executing the breakpoint insn anyway.
2501 We'll be (un-)executing the previous instruction. */
2502 tp->stepping_over_breakpoint = 1;
2503 else if (gdbarch_single_step_through_delay_p (gdbarch)
2504 && gdbarch_single_step_through_delay (gdbarch,
2505 get_current_frame ()))
2506 /* We stepped onto an instruction that needs to be stepped
2507 again before re-inserting the breakpoint, do so. */
2508 tp->stepping_over_breakpoint = 1;
2509 }
2510 else
2511 {
2512 regcache_write_pc (regcache, addr);
2513 }
2514
2515 if (siggnal != GDB_SIGNAL_DEFAULT)
2516 tp->suspend.stop_signal = siggnal;
2517
2518 /* Record the interpreter that issued the execution command that
2519 caused this thread to resume. If the top level interpreter is
2520 MI/async, and the execution command was a CLI command
2521 (next/step/etc.), we'll want to print stop event output to the MI
2522 console channel (the stepped-to line, etc.), as if the user
2523 entered the execution command on a real GDB console. */
2524 inferior_thread ()->control.command_interp = command_interp ();
2525
2526 if (debug_infrun)
2527 fprintf_unfiltered (gdb_stdlog,
2528 "infrun: proceed (addr=%s, signal=%s, step=%d)\n",
2529 paddress (gdbarch, addr),
2530 gdb_signal_to_symbol_string (siggnal), step);
2531
2532 if (non_stop)
2533 /* In non-stop, each thread is handled individually. The context
2534 must already be set to the right thread here. */
2535 ;
2536 else
2537 {
2538 struct thread_info *step_over;
2539
2540 /* In a multi-threaded task we may select another thread and
2541 then continue or step.
2542
2543 But if the old thread was stopped at a breakpoint, it will
2544 immediately cause another breakpoint stop without any
2545 execution (i.e. it will report a breakpoint hit incorrectly).
2546 So we must step over it first.
2547
2548 Look for a thread other than the current (TP) that reported a
2549 breakpoint hit and hasn't been resumed yet since. */
2550 step_over = find_thread_needs_step_over (step, tp);
2551 if (step_over != NULL)
2552 {
2553 if (debug_infrun)
2554 fprintf_unfiltered (gdb_stdlog,
2555 "infrun: need to step-over [%s] first\n",
2556 target_pid_to_str (step_over->ptid));
2557
2558 /* Store the prev_pc for the stepping thread too, needed by
2559 switch_back_to_stepped_thread. */
2560 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2561 switch_to_thread (step_over->ptid);
2562 tp = step_over;
2563 }
2564 }
2565
2566 /* If we need to step over a breakpoint, and we're not using
2567 displaced stepping to do so, insert all breakpoints (watchpoints,
2568 etc.) but the one we're stepping over, step one instruction, and
2569 then re-insert the breakpoint when that step is finished. */
2570 if (tp->stepping_over_breakpoint && !use_displaced_stepping (gdbarch))
2571 {
2572 struct regcache *regcache = get_current_regcache ();
2573
2574 set_step_over_info (get_regcache_aspace (regcache),
2575 regcache_read_pc (regcache), 0);
2576 }
2577 else
2578 clear_step_over_info ();
2579
2580 insert_breakpoints ();
2581
2582 tp->control.trap_expected = tp->stepping_over_breakpoint;
2583
2584 annotate_starting ();
2585
2586 /* Make sure that output from GDB appears before output from the
2587 inferior. */
2588 gdb_flush (gdb_stdout);
2589
2590 /* Refresh prev_pc value just prior to resuming. This used to be
2591 done in stop_waiting, however, setting prev_pc there did not handle
2592 scenarios such as inferior function calls or returning from
2593 a function via the return command. In those cases, the prev_pc
2594 value was not set properly for subsequent commands. The prev_pc value
2595 is used to initialize the starting line number in the ecs. With an
2596 invalid value, the gdb next command ends up stopping at the position
2597 represented by the next line table entry past our start position.
2598 On platforms that generate one line table entry per line, this
2599 is not a problem. However, on the ia64, the compiler generates
2600 extraneous line table entries that do not increase the line number.
2601 When we issue the gdb next command on the ia64 after an inferior call
2602 or a return command, we often end up a few instructions forward, still
2603 within the original line we started.
2604
2605 An attempt was made to refresh the prev_pc at the same time the
2606 execution_control_state is initialized (for instance, just before
2607 waiting for an inferior event). But this approach did not work
2608 because of platforms that use ptrace, where the pc register cannot
2609 be read unless the inferior is stopped. At that point, we are not
2610 guaranteed the inferior is stopped and so the regcache_read_pc() call
2611 can fail. Setting the prev_pc value here ensures the value is updated
2612 correctly when the inferior is stopped. */
2613 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2614
2615 /* Resume inferior. */
2616 resume (tp->control.trap_expected || step || bpstat_should_step (),
2617 tp->suspend.stop_signal);
2618
2619 /* Wait for it to stop (if not standalone)
2620 and in any case decode why it stopped, and act accordingly. */
2621 /* Do this only if we are not using the event loop, or if the target
2622 does not support asynchronous execution. */
2623 if (!target_can_async_p ())
2624 {
2625 wait_for_inferior ();
2626 normal_stop ();
2627 }
2628 }
2629 \f
2630
2631 /* Start remote-debugging of a machine over a serial link. */
2632
2633 void
2634 start_remote (int from_tty)
2635 {
2636 struct inferior *inferior;
2637
2638 inferior = current_inferior ();
2639 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2640
2641 /* Always go on waiting for the target, regardless of the mode. */
2642 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2643 indicate to wait_for_inferior that a target should timeout if
2644 nothing is returned (instead of just blocking). Because of this,
2645 targets expecting an immediate response need to, internally, set
2646 things up so that the target_wait() is forced to eventually
2647 timeout. */
2648 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2649 differentiate to its caller what the state of the target is after
2650 the initial open has been performed. Here we're assuming that
2651 the target has stopped. It should be possible to eventually have
2652 target_open() return to the caller an indication that the target
2653 is currently running and GDB state should be set to the same as
2654 for an async run. */
2655 wait_for_inferior ();
2656
2657 /* Now that the inferior has stopped, do any bookkeeping like
2658 loading shared libraries. We want to do this before normal_stop,
2659 so that the displayed frame is up to date. */
2660 post_create_inferior (&current_target, from_tty);
2661
2662 normal_stop ();
2663 }
2664
2665 /* Initialize static vars when a new inferior begins. */
2666
2667 void
2668 init_wait_for_inferior (void)
2669 {
2670 /* These are meaningless until the first time through wait_for_inferior. */
2671
2672 breakpoint_init_inferior (inf_starting);
2673
2674 clear_proceed_status (0);
2675
2676 target_last_wait_ptid = minus_one_ptid;
2677
2678 previous_inferior_ptid = inferior_ptid;
2679
2680 /* Discard any skipped inlined frames. */
2681 clear_inline_frame_state (minus_one_ptid);
2682
2683 singlestep_ptid = null_ptid;
2684 singlestep_pc = 0;
2685 }
2686
2687 \f
2688 /* This enum encodes possible reasons for doing a target_wait, so that
2689 wfi can call target_wait in one place. (Ultimately the call will be
2690 moved out of the infinite loop entirely.) */
2691
2692 enum infwait_states
2693 {
2694 infwait_normal_state,
2695 infwait_step_watch_state,
2696 infwait_nonstep_watch_state
2697 };
2698
2699 /* Current inferior wait state. */
2700 static enum infwait_states infwait_state;
2701
2702 /* Data to be passed around while handling an event. This data is
2703 discarded between events. */
2704 struct execution_control_state
2705 {
2706 ptid_t ptid;
2707 /* The thread that got the event, if this was a thread event; NULL
2708 otherwise. */
2709 struct thread_info *event_thread;
2710
2711 struct target_waitstatus ws;
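
  /* The function the stopped PC falls in.  These fields are filled in
     lazily; STOP_FUNC_FILLED_IN is set once they are valid.  */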
2712 int stop_func_filled_in;
2713 CORE_ADDR stop_func_start;
2714 CORE_ADDR stop_func_end;
2715 const char *stop_func_name;
2716 int wait_some_more;
2717
2718 /* True if the event thread hit the single-step breakpoint of
2719 another thread. Thus the event doesn't cause a stop, the thread
2720 needs to be single-stepped past the single-step breakpoint before
2721 we can switch back to the original stepping thread. */
2722 int hit_singlestep_breakpoint;
2723 };
2724
2725 static void handle_inferior_event (struct execution_control_state *ecs);
2726
2727 static void handle_step_into_function (struct gdbarch *gdbarch,
2728 struct execution_control_state *ecs);
2729 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2730 struct execution_control_state *ecs);
2731 static void handle_signal_stop (struct execution_control_state *ecs);
2732 static void check_exception_resume (struct execution_control_state *,
2733 struct frame_info *);
2734
2735 static void end_stepping_range (struct execution_control_state *ecs);
2736 static void stop_waiting (struct execution_control_state *ecs);
2737 static void prepare_to_wait (struct execution_control_state *ecs);
2738 static void keep_going (struct execution_control_state *ecs);
2739 static void process_event_stop_test (struct execution_control_state *ecs);
2740 static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
2741
2742 /* Callback for iterate over threads. If the thread is stopped, but
2743 the user/frontend doesn't know about that yet, go through
2744 normal_stop, as if the thread had just stopped now. ARG points at
2745 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2746 ptid_is_pid(PTID) is true, applies to all threads of the process
2747 pointed at by PTID. Otherwise, apply only to the thread pointed by
2748 PTID. */
2749
2750 static int
2751 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2752 {
2753 ptid_t ptid = * (ptid_t *) arg;
2754
2755 if ((ptid_equal (info->ptid, ptid)
2756 || ptid_equal (minus_one_ptid, ptid)
2757 || (ptid_is_pid (ptid)
2758 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2759 && is_running (info->ptid)
2760 && !is_executing (info->ptid))
2761 {
2762 struct cleanup *old_chain;
2763 struct execution_control_state ecss;
2764 struct execution_control_state *ecs = &ecss;
2765
2766 memset (ecs, 0, sizeof (*ecs));
2767
2768 old_chain = make_cleanup_restore_current_thread ();
2769
2770 overlay_cache_invalid = 1;
2771 /* Flush target cache before starting to handle each event.
2772 Target was running and cache could be stale. This is just a
2773 heuristic. Running threads may modify target memory, but we
2774 don't get any event. */
2775 target_dcache_invalidate ();
2776
2777 /* Go through handle_inferior_event/normal_stop, so we always
2778 have consistent output as if the stop event had been
2779 reported. */
2780 ecs->ptid = info->ptid;
2781 ecs->event_thread = find_thread_ptid (info->ptid);
2782 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2783 ecs->ws.value.sig = GDB_SIGNAL_0;
2784
2785 handle_inferior_event (ecs);
2786
2787 if (!ecs->wait_some_more)
2788 {
2789 struct thread_info *tp;
2790
2791 normal_stop ();
2792
2793 /* Finish off the continuations. */
2794 tp = inferior_thread ();
2795 do_all_intermediate_continuations_thread (tp, 1);
2796 do_all_continuations_thread (tp, 1);
2797 }
2798
2799 do_cleanups (old_chain);
2800 }
2801
2802 return 0;
2803 }
2804
2805 /* This function is attached as a "thread_stop_requested" observer.
2806 Cleanup local state that assumed the PTID was to be resumed, and
2807 report the stop to the frontend. */
2808
2809 static void
2810 infrun_thread_stop_requested (ptid_t ptid)
2811 {
2812 struct displaced_step_inferior_state *displaced;
2813
2814 /* PTID was requested to stop. Remove it from the displaced
2815 stepping queue, so we don't try to resume it automatically. */
2816
2817 for (displaced = displaced_step_inferior_states;
2818 displaced;
2819 displaced = displaced->next)
2820 {
2821 struct displaced_step_request *it, **prev_next_p;
2822
2823 it = displaced->step_request_queue;
2824 prev_next_p = &displaced->step_request_queue;
2825 while (it)
2826 {
2827 if (ptid_match (it->ptid, ptid))
2828 {
2829 *prev_next_p = it->next;
2830 it->next = NULL;
2831 xfree (it);
2832 }
2833 else
2834 {
2835 prev_next_p = &it->next;
2836 }
2837
2838 it = *prev_next_p;
2839 }
2840 }
2841
2842 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2843 }
2844
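/* Observer for the thread_exit event.  Forget the cached last wait
   ptid if it refers to the exiting thread TP.  */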
2845 static void
2846 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2847 {
2848 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2849 nullify_last_target_wait_ptid ();
2850 }
2851
2852 /* Delete the step resume, single-step and longjmp/exception resume
2853 breakpoints of TP. */
2854
2855 static void
2856 delete_thread_infrun_breakpoints (struct thread_info *tp)
2857 {
2858 delete_step_resume_breakpoint (tp);
2859 delete_exception_resume_breakpoint (tp);
2860 }
2861
2862 /* If the target still has execution, call FUNC for each thread that
2863 just stopped. In all-stop, that's all the non-exited threads; in
2864 non-stop, that's the current thread, only. */
2865
2866 typedef void (*for_each_just_stopped_thread_callback_func)
2867 (struct thread_info *tp);
2868
2869 static void
2870 for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
2871 {
2872 if (!target_has_execution || ptid_equal (inferior_ptid, null_ptid))
2873 return;
2874
2875 if (non_stop)
2876 {
2877 /* If in non-stop mode, only the current thread stopped. */
2878 func (inferior_thread ());
2879 }
2880 else
2881 {
2882 struct thread_info *tp;
2883
2884 /* In all-stop mode, all threads have stopped. */
2885 ALL_NON_EXITED_THREADS (tp)
2886 {
2887 func (tp);
2888 }
2889 }
2890 }
2891
2892 /* Delete the step resume and longjmp/exception resume breakpoints of
2893 the threads that just stopped. */
2894
2895 static void
2896 delete_just_stopped_threads_infrun_breakpoints (void)
2897 {
2898 for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
2899
2900 if (single_step_breakpoints_inserted ())
2901 remove_single_step_breakpoints ();
2902 }
2903
2904 /* A cleanup wrapper. */
2905
2906 static void
2907 delete_just_stopped_threads_infrun_breakpoints_cleanup (void *arg)
2908 {
2909 delete_just_stopped_threads_infrun_breakpoints ();
2910 }
2911
2912 /* Pretty print the results of target_wait, for debugging purposes. */
2913
2914 static void
2915 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2916 const struct target_waitstatus *ws)
2917 {
2918 char *status_string = target_waitstatus_to_string (ws);
2919 struct ui_file *tmp_stream = mem_fileopen ();
2920 char *text;
2921
2922 /* The text is split over several lines because it was getting too long.
2923 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2924 output as a unit; we want only one timestamp printed if debug_timestamp
2925 is set. */
2926
2927 fprintf_unfiltered (tmp_stream,
2928 "infrun: target_wait (%d", ptid_get_pid (waiton_ptid));
2929 if (ptid_get_pid (waiton_ptid) != -1)
2930 fprintf_unfiltered (tmp_stream,
2931 " [%s]", target_pid_to_str (waiton_ptid));
2932 fprintf_unfiltered (tmp_stream, ", status) =\n");
2933 fprintf_unfiltered (tmp_stream,
2934 "infrun: %d [%s],\n",
2935 ptid_get_pid (result_ptid),
2936 target_pid_to_str (result_ptid));
2937 fprintf_unfiltered (tmp_stream,
2938 "infrun: %s\n",
2939 status_string);
2940
2941 text = ui_file_xstrdup (tmp_stream, NULL);
2942
2943 /* This uses %s in part to handle %'s in the text, but also to avoid
2944 a gcc error: the format attribute requires a string literal. */
2945 fprintf_unfiltered (gdb_stdlog, "%s", text);
2946
2947 xfree (status_string);
2948 xfree (text);
2949 ui_file_delete (tmp_stream);
2950 }
2951
2952 /* Prepare and stabilize the inferior for detaching it. E.g.,
2953 detaching while a thread is displaced stepping is a recipe for
2954 crashing it, as nothing would readjust the PC out of the scratch
2955 pad. */
2956
2957 void
2958 prepare_for_detach (void)
2959 {
2960 struct inferior *inf = current_inferior ();
2961 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2962 struct cleanup *old_chain_1;
2963 struct displaced_step_inferior_state *displaced;
2964
2965 displaced = get_displaced_stepping_state (inf->pid);
2966
2967 /* Is any thread of this process displaced stepping? If not,
2968 there's nothing else to do. */
2969 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2970 return;
2971
2972 if (debug_infrun)
2973 fprintf_unfiltered (gdb_stdlog,
2974 "displaced-stepping in-process while detaching");
2975
2976 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2977 inf->detaching = 1;
2978
2979 while (!ptid_equal (displaced->step_ptid, null_ptid))
2980 {
2981 struct cleanup *old_chain_2;
2982 struct execution_control_state ecss;
2983 struct execution_control_state *ecs;
2984
2985 ecs = &ecss;
2986 memset (ecs, 0, sizeof (*ecs));
2987
2988 overlay_cache_invalid = 1;
2989 /* Flush target cache before starting to handle each event.
2990 Target was running and cache could be stale. This is just a
2991 heuristic. Running threads may modify target memory, but we
2992 don't get any event. */
2993 target_dcache_invalidate ();
2994
2995 if (deprecated_target_wait_hook)
2996 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2997 else
2998 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2999
3000 if (debug_infrun)
3001 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
3002
3003 /* If an error happens while handling the event, propagate GDB's
3004 knowledge of the executing state to the frontend/user running
3005 state. */
3006 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
3007 &minus_one_ptid);
3008
3009 /* Now figure out what to do with the result. */
3010 handle_inferior_event (ecs);
3011
3012 /* No error, don't finish the state yet. */
3013 discard_cleanups (old_chain_2);
3014
3015 /* Breakpoints and watchpoints are not installed on the target
3016 at this point, and signals are passed directly to the
3017 inferior, so this must mean the process is gone. */
3018 if (!ecs->wait_some_more)
3019 {
3020 discard_cleanups (old_chain_1);
3021 error (_("Program exited while detaching"));
3022 }
3023 }
3024
3025 discard_cleanups (old_chain_1);
3026 }
3027
3028 /* Wait for control to return from inferior to debugger.
3029
3030 If inferior gets a signal, we may decide to start it up again
3031 instead of returning. That is why there is a loop in this function.
3032 When this function actually returns it means the inferior
3033 should be left stopped and GDB should read more commands. */
3034
3035 void
3036 wait_for_inferior (void)
3037 {
3038 struct cleanup *old_cleanups;
3039
3040 if (debug_infrun)
3041 fprintf_unfiltered
3042 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
3043
3044 old_cleanups
3045 = make_cleanup (delete_just_stopped_threads_infrun_breakpoints_cleanup,
3046 NULL);
3047
3048 while (1)
3049 {
3050 struct execution_control_state ecss;
3051 struct execution_control_state *ecs = &ecss;
3052 struct cleanup *old_chain;
3053 ptid_t waiton_ptid = minus_one_ptid;
3054
3055 memset (ecs, 0, sizeof (*ecs));
3056
3057 overlay_cache_invalid = 1;
3058
3059 /* Flush target cache before starting to handle each event.
3060 Target was running and cache could be stale. This is just a
3061 heuristic. Running threads may modify target memory, but we
3062 don't get any event. */
3063 target_dcache_invalidate ();
3064
3065 if (deprecated_target_wait_hook)
3066 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
3067 else
3068 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
3069
3070 if (debug_infrun)
3071 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
3072
3073 /* If an error happens while handling the event, propagate GDB's
3074 knowledge of the executing state to the frontend/user running
3075 state. */
3076 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
3077
3078 /* Now figure out what to do with the result. */
3079 handle_inferior_event (ecs);
3080
3081 /* No error, don't finish the state yet. */
3082 discard_cleanups (old_chain);
3083
3084 if (!ecs->wait_some_more)
3085 break;
3086 }
3087
3088 do_cleanups (old_cleanups);
3089 }
3090
3091 /* Asynchronous version of wait_for_inferior. It is called by the
3092 event loop whenever a change of state is detected on the file
3093 descriptor corresponding to the target. It can be called more than
3094 once to complete a single execution command. In such cases we need
3095 to keep the state in a global variable ECSS. If it is the last time
3096 that this function is called for a single execution command, then
3097 report to the user that the inferior has stopped, and do the
3098 necessary cleanups. */
3099
3100 void
3101 fetch_inferior_event (void *client_data)
3102 {
3103 struct execution_control_state ecss;
3104 struct execution_control_state *ecs = &ecss;
3105 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
3106 struct cleanup *ts_old_chain;
3107 int was_sync = sync_execution;
3108 int cmd_done = 0;
3109 ptid_t waiton_ptid = minus_one_ptid;
3110
3111 memset (ecs, 0, sizeof (*ecs));
3112
3113 /* We're handling a live event, so make sure we're doing live
3114 debugging. If we're looking at traceframes while the target is
3115 running, we're going to need to get back to that mode after
3116 handling the event. */
3117 if (non_stop)
3118 {
3119 make_cleanup_restore_current_traceframe ();
3120 set_current_traceframe (-1);
3121 }
3122
3123 if (non_stop)
3124 /* In non-stop mode, the user/frontend should not notice a thread
3125 switch due to internal events. Make sure we revert to the
3126 user selected thread and frame after handling the event and
3127 running any breakpoint commands. */
3128 make_cleanup_restore_current_thread ();
3129
3130 overlay_cache_invalid = 1;
3131 /* Flush target cache before starting to handle each event. Target
3132 was running and cache could be stale. This is just a heuristic.
3133 Running threads may modify target memory, but we don't get any
3134 event. */
3135 target_dcache_invalidate ();
3136
3137 make_cleanup_restore_integer (&execution_direction);
3138 execution_direction = target_execution_direction ();
3139
3140 if (deprecated_target_wait_hook)
3141 ecs->ptid =
3142 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
3143 else
3144 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
3145
3146 if (debug_infrun)
3147 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
3148
3149 /* If an error happens while handling the event, propagate GDB's
3150 knowledge of the executing state to the frontend/user running
3151 state. */
3152 if (!non_stop)
3153 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
3154 else
3155 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
3156
3157 /* Registered after make_cleanup_restore_current_thread above, so this
3158 runs first and still applies to the thread that threw the exception. */
3159 make_bpstat_clear_actions_cleanup ();
3160
3161 make_cleanup (delete_just_stopped_threads_infrun_breakpoints_cleanup, NULL);
3162
3163 /* Now figure out what to do with the result. */
3164 handle_inferior_event (ecs);
3165
3166 if (!ecs->wait_some_more)
3167 {
3168 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
3169
3170 delete_just_stopped_threads_infrun_breakpoints ();
3171
3172 /* We may not find an inferior if this was a process exit. */
3173 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
3174 normal_stop ();
3175
3176 if (target_has_execution
3177 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
3178 && ecs->ws.kind != TARGET_WAITKIND_EXITED
3179 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3180 && ecs->event_thread->step_multi
3181 && ecs->event_thread->control.stop_step)
3182 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
3183 else
3184 {
3185 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
3186 cmd_done = 1;
3187 }
3188 }
3189
3190 /* No error, don't finish the thread states yet. */
3191 discard_cleanups (ts_old_chain);
3192
3193 /* Revert thread and frame. */
3194 do_cleanups (old_chain);
3195
3196 /* If the inferior was in sync execution mode, and now isn't,
3197 restore the prompt (a synchronous execution command has finished,
3198 and we're ready for input). */
3199 if (interpreter_async && was_sync && !sync_execution)
3200 observer_notify_sync_execution_done ();
3201
3202 if (cmd_done
3203 && !was_sync
3204 && exec_done_display_p
3205 && (ptid_equal (inferior_ptid, null_ptid)
3206 || !is_running (inferior_ptid)))
3207 printf_unfiltered (_("completed.\n"));
3208 }
3209
3210 /* Record the frame and location we're currently stepping through. */
3211 void
3212 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
3213 {
3214 struct thread_info *tp = inferior_thread ();
3215
3216 tp->control.step_frame_id = get_frame_id (frame);
3217 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
3218
3219 tp->current_symtab = sal.symtab;
3220 tp->current_line = sal.line;
3221 }
3222
3223 /* Clear context switchable stepping state. */
3224
3225 void
3226 init_thread_stepping_state (struct thread_info *tss)
3227 {
3228 tss->stepping_over_breakpoint = 0;
3229 tss->stepping_over_watchpoint = 0;
3230 tss->step_after_step_resume_breakpoint = 0;
3231 }
3232
3233 /* Set the cached copy of the last ptid/waitstatus. */
3234
3235 static void
3236 set_last_target_status (ptid_t ptid, struct target_waitstatus status)
3237 {
3238 target_last_wait_ptid = ptid;
3239 target_last_waitstatus = status;
3240 }
3241
3242 /* Return the cached copy of the last pid/waitstatus returned by
3243 target_wait()/deprecated_target_wait_hook(). The data is actually
3244 cached by handle_inferior_event(), which gets called immediately
3245 after target_wait()/deprecated_target_wait_hook(). */
3246
3247 void
3248 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
3249 {
3250 *ptidp = target_last_wait_ptid;
3251 *status = target_last_waitstatus;
3252 }
3253
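/* Invalidate the cached copy of the last ptid returned by
   target_wait.  */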
3254 void
3255 nullify_last_target_wait_ptid (void)
3256 {
3257 target_last_wait_ptid = minus_one_ptid;
3258 }
3259
3260 /* Switch thread contexts. */
3261
3262 static void
3263 context_switch (ptid_t ptid)
3264 {
3265 if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
3266 {
3267 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
3268 target_pid_to_str (inferior_ptid));
3269 fprintf_unfiltered (gdb_stdlog, "to %s\n",
3270 target_pid_to_str (ptid));
3271 }
3272
3273 switch_to_thread (ptid);
3274 }
3275
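/* If the event in ECS is a SIGTRAP caused by hitting a software
   breakpoint, back up the reported PC by the target's
   decr_pc_after_break amount so that it points at the breakpoint
   address again.  */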
3276 static void
3277 adjust_pc_after_break (struct execution_control_state *ecs)
3278 {
3279 struct regcache *regcache;
3280 struct gdbarch *gdbarch;
3281 struct address_space *aspace;
3282 CORE_ADDR breakpoint_pc, decr_pc;
3283
3284 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
3285 we aren't, just return.
3286
3287 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
3288 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
3289 implemented by software breakpoints should be handled through the normal
3290 breakpoint layer.
3291
3292 NOTE drow/2004-01-31: On some targets, breakpoints may generate
3293 different signals (SIGILL or SIGEMT for instance), but it is less
3294 clear where the PC is pointing afterwards. It may not match
3295 gdbarch_decr_pc_after_break. I don't know any specific target that
3296 generates these signals at breakpoints (the code has been in GDB since at
3297 least 1992) so I can not guess how to handle them here.
3298
3299 In earlier versions of GDB, a target with
3300 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
3301 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
3302 target with both of these set in GDB history, and it seems unlikely to be
3303 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
3304
3305 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
3306 return;
3307
3308 if (ecs->ws.value.sig != GDB_SIGNAL_TRAP)
3309 return;
3310
3311 /* In reverse execution, when a breakpoint is hit, the instruction
3312 under it has already been de-executed. The reported PC always
3313 points at the breakpoint address, so adjusting it further would
3314 be wrong. E.g., consider this case on a decr_pc_after_break == 1
3315 architecture:
3316
3317 B1 0x08000000 : INSN1
3318 B2 0x08000001 : INSN2
3319 0x08000002 : INSN3
3320 PC -> 0x08000003 : INSN4
3321
3322 Say you're stopped at 0x08000003 as above. Reverse continuing
3323 from that point should hit B2 as below. Reading the PC when the
3324 SIGTRAP is reported should read 0x08000001 and INSN2 should have
3325 been de-executed already.
3326
3327 B1 0x08000000 : INSN1
3328 B2 PC -> 0x08000001 : INSN2
3329 0x08000002 : INSN3
3330 0x08000003 : INSN4
3331
3332 We can't apply the same logic as for forward execution, because
3333 we would wrongly adjust the PC to 0x08000000, since there's a
3334 breakpoint at PC - 1. We'd then report a hit on B1, although
3335 INSN1 hadn't been de-executed yet. Doing nothing is the correct
3336 behaviour. */
3337 if (execution_direction == EXEC_REVERSE)
3338 return;
3339
3340 /* If this target does not decrement the PC after breakpoints, then
3341 we have nothing to do. */
3342 regcache = get_thread_regcache (ecs->ptid);
3343 gdbarch = get_regcache_arch (regcache);
3344
3345 decr_pc = target_decr_pc_after_break (gdbarch);
3346 if (decr_pc == 0)
3347 return;
3348
3349 aspace = get_regcache_aspace (regcache);
3350
3351 /* Find the location where (if we've hit a breakpoint) the
3352 breakpoint would be. */
3353 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
3354
3355 /* Check whether there actually is a software breakpoint inserted at
3356 that location.
3357
3358 If in non-stop mode, a race condition is possible where we've
3359 removed a breakpoint, but stop events for that breakpoint were
3360 already queued and arrive later. To suppress those spurious
3361 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
3362 and retire them after a number of stop events are reported. */
3363 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
3364 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
3365 {
3366 struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);
3367
3368 if (record_full_is_used ())
3369 record_full_gdb_operation_disable_set ();
3370
3371 /* When using hardware single-step, a SIGTRAP is reported for both
3372 a completed single-step and a software breakpoint. Need to
3373 differentiate between the two, as the latter needs adjusting
3374 but the former does not.
3375
3376 The SIGTRAP can be due to a completed hardware single-step only if
3377 - we didn't insert software single-step breakpoints
3378 - the thread to be examined is still the current thread
3379 - this thread is currently being stepped
3380
3381 If any of these events did not occur, we must have stopped due
3382 to hitting a software breakpoint, and have to back up to the
3383 breakpoint address.
3384
3385 As a special case, we could have hardware single-stepped a
3386 software breakpoint. In this case (prev_pc == breakpoint_pc),
3387 we also need to back up to the breakpoint address. */
3388
3389 if (singlestep_breakpoints_inserted_p
3390 || !ptid_equal (ecs->ptid, inferior_ptid)
3391 || !currently_stepping (ecs->event_thread)
3392 || ecs->event_thread->prev_pc == breakpoint_pc)
3393 regcache_write_pc (regcache, breakpoint_pc);
3394
3395 do_cleanups (old_cleanups);
3396 }
3397 }
3398
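/* Return non-zero if one of FRAME's callers has frame id STEP_FRAME_ID;
   only inline callers, plus the first non-inline caller, are
   considered.  */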
3399 static int
3400 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3401 {
3402 for (frame = get_prev_frame (frame);
3403 frame != NULL;
3404 frame = get_prev_frame (frame))
3405 {
3406 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3407 return 1;
3408 if (get_frame_type (frame) != INLINE_FRAME)
3409 break;
3410 }
3411
3412 return 0;
3413 }
3414
3415 /* Auxiliary function that handles syscall entry/return events.
3416 It returns 1 if the inferior should keep going (and GDB
3417 should ignore the event), or 0 if the event deserves to be
3418 processed. */
3419
3420 static int
3421 handle_syscall_event (struct execution_control_state *ecs)
3422 {
3423 struct regcache *regcache;
3424 int syscall_number;
3425
3426 if (!ptid_equal (ecs->ptid, inferior_ptid))
3427 context_switch (ecs->ptid);
3428
3429 regcache = get_thread_regcache (ecs->ptid);
3430 syscall_number = ecs->ws.value.syscall_number;
3431 stop_pc = regcache_read_pc (regcache);
3432
3433 if (catch_syscall_enabled () > 0
3434 && catching_syscall_number (syscall_number) > 0)
3435 {
3436 if (debug_infrun)
3437 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
3438 syscall_number);
3439
3440 ecs->event_thread->control.stop_bpstat
3441 = bpstat_stop_status (get_regcache_aspace (regcache),
3442 stop_pc, ecs->ptid, &ecs->ws);
3443
3444 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3445 {
3446 /* Catchpoint hit. */
3447 return 0;
3448 }
3449 }
3450
3451 /* If no catchpoint triggered for this, then keep going. */
3452 keep_going (ecs);
3453 return 1;
3454 }
3455
3456 /* Lazily fill in the execution_control_state's stop_func_* fields. */
3457
3458 static void
3459 fill_in_stop_func (struct gdbarch *gdbarch,
3460 struct execution_control_state *ecs)
3461 {
3462 if (!ecs->stop_func_filled_in)
3463 {
3464 /* Don't care about return value; stop_func_start and stop_func_name
3465 will both be 0 if it doesn't work. */
3466 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3467 &ecs->stop_func_start, &ecs->stop_func_end);
3468 ecs->stop_func_start
3469 += gdbarch_deprecated_function_start_offset (gdbarch);
3470
3471 if (gdbarch_skip_entrypoint_p (gdbarch))
3472 ecs->stop_func_start = gdbarch_skip_entrypoint (gdbarch,
3473 ecs->stop_func_start);
3474
3475 ecs->stop_func_filled_in = 1;
3476 }
3477 }
3478
3479
3480 /* Return the STOP_SOON field of the inferior pointed at by PTID. */
3481
3482 static enum stop_kind
3483 get_inferior_stop_soon (ptid_t ptid)
3484 {
3485 struct inferior *inf = find_inferior_pid (ptid_get_pid (ptid));
3486
3487 gdb_assert (inf != NULL);
3488 return inf->control.stop_soon;
3489 }
3490
3491 /* Given an execution control state that has been freshly filled in by
3492 an event from the inferior, figure out what it means and take
3493 appropriate action.
3494
3495 The alternatives are:
3496
3497 1) stop_waiting and return; to really stop and return to the
3498 debugger.
3499
3500 2) keep_going and return; to wait for the next event (set
3501 ecs->event_thread->stepping_over_breakpoint to 1 to single step
3502 once). */
3503
3504 static void
3505 handle_inferior_event (struct execution_control_state *ecs)
3506 {
3507 enum stop_kind stop_soon;
3508
3509 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
3510 {
3511 /* We had an event in the inferior, but we are not interested in
3512 handling it at this level. The lower layers have already
3513 done what needs to be done, if anything.
3514
3515 One of the possible circumstances for this is when the
3516 inferior produces output for the console. The inferior has
3517 not stopped, and we are ignoring the event. Another possible
3518 circumstance is any event which the lower level knows will be
3519 reported multiple times without an intervening resume. */
3520 if (debug_infrun)
3521 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
3522 prepare_to_wait (ecs);
3523 return;
3524 }
3525
3526 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
3527 && target_can_async_p () && !sync_execution)
3528 {
3529 /* There were no unwaited-for children left in the target, but
3530 we're not synchronously waiting for events either. Just
3531 ignore. Otherwise, if we were running a synchronous
3532 execution command, we need to cancel it and give the user
3533 back the terminal. */
3534 if (debug_infrun)
3535 fprintf_unfiltered (gdb_stdlog,
3536 "infrun: TARGET_WAITKIND_NO_RESUMED (ignoring)\n");
3537 prepare_to_wait (ecs);
3538 return;
3539 }
3540
3541 /* Cache the last pid/waitstatus. */
3542 set_last_target_status (ecs->ptid, ecs->ws);
3543
3544 /* Always clear state belonging to the previous time we stopped. */
3545 stop_stack_dummy = STOP_NONE;
3546
3547 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
3548 {
3549 /* No unwaited-for children left. IOW, all resumed children
3550 have exited. */
3551 if (debug_infrun)
3552 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_RESUMED\n");
3553
3554 stop_print_frame = 0;
3555 stop_waiting (ecs);
3556 return;
3557 }
3558
3559 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3560 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
3561 {
3562 ecs->event_thread = find_thread_ptid (ecs->ptid);
3563 /* If it's a new thread, add it to the thread database. */
3564 if (ecs->event_thread == NULL)
3565 ecs->event_thread = add_thread (ecs->ptid);
3566
3567 /* Disable range stepping. If the next step request could use a
3568 range, this will end up re-enabled then. */
3569 ecs->event_thread->control.may_range_step = 0;
3570 }
3571
3572 /* Dependent on valid ECS->EVENT_THREAD. */
3573 adjust_pc_after_break (ecs);
3574
3575 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3576 reinit_frame_cache ();
3577
3578 breakpoint_retire_moribund ();
3579
3580 /* First, distinguish signals caused by the debugger from signals
3581 that have to do with the program's own actions. Note that
3582 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3583 on the operating system version. Here we detect when a SIGILL or
3584 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3585 something similar for SIGSEGV, since a SIGSEGV will be generated
3586 when we're trying to execute a breakpoint instruction on a
3587 non-executable stack. This happens for call dummy breakpoints
3588 for architectures like SPARC that place call dummies on the
3589 stack. */
3590 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3591 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
3592 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
3593 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
3594 {
3595 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3596
3597 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3598 regcache_read_pc (regcache)))
3599 {
3600 if (debug_infrun)
3601 fprintf_unfiltered (gdb_stdlog,
3602 "infrun: Treating signal as SIGTRAP\n");
3603 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
3604 }
3605 }
3606
3607 /* Mark the non-executing threads accordingly. In all-stop, all
3608 threads of all processes are stopped when we get any event
3609 reported. In non-stop mode, only the event thread stops. If
3610 we're handling a process exit in non-stop mode, there's nothing
3611 to do, as threads of the dead process are gone, and threads of
3612 any other process were left running. */
3613 if (!non_stop)
3614 set_executing (minus_one_ptid, 0);
3615 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3616 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3617 set_executing (ecs->ptid, 0);
3618
3619 switch (ecs->ws.kind)
3620 {
3621 case TARGET_WAITKIND_LOADED:
3622 if (debug_infrun)
3623 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3624 if (!ptid_equal (ecs->ptid, inferior_ptid))
3625 context_switch (ecs->ptid);
3626 /* Ignore gracefully during startup of the inferior, as it might
3627 be the shell which has just loaded some objects, otherwise
3628 add the symbols for the newly loaded objects. Also ignore at
3629 the beginning of an attach or remote session; we will query
3630 the full list of libraries once the connection is
3631 established. */
3632
3633 stop_soon = get_inferior_stop_soon (ecs->ptid);
3634 if (stop_soon == NO_STOP_QUIETLY)
3635 {
3636 struct regcache *regcache;
3637
3638 regcache = get_thread_regcache (ecs->ptid);
3639
3640 handle_solib_event ();
3641
3642 ecs->event_thread->control.stop_bpstat
3643 = bpstat_stop_status (get_regcache_aspace (regcache),
3644 stop_pc, ecs->ptid, &ecs->ws);
3645
3646 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3647 {
3648 /* A catchpoint triggered. */
3649 process_event_stop_test (ecs);
3650 return;
3651 }
3652
3653 /* If requested, stop when the dynamic linker notifies
3654 gdb of events. This allows the user to get control
3655 and place breakpoints in initializer routines for
3656 dynamically loaded objects (among other things). */
3657 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3658 if (stop_on_solib_events)
3659 {
3660 /* Make sure we print "Stopped due to solib-event" in
3661 normal_stop. */
3662 stop_print_frame = 1;
3663
3664 stop_waiting (ecs);
3665 return;
3666 }
3667 }
3668
3669 /* If we are skipping through a shell, or through shared library
3670 loading that we aren't interested in, resume the program. If
3671 we're running the program normally, also resume. */
3672 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3673 {
3674 /* Loading of shared libraries might have changed breakpoint
3675 addresses. Make sure new breakpoints are inserted. */
3676 if (stop_soon == NO_STOP_QUIETLY)
3677 insert_breakpoints ();
3678 resume (0, GDB_SIGNAL_0);
3679 prepare_to_wait (ecs);
3680 return;
3681 }
3682
3683 /* But stop if we're attaching or setting up a remote
3684 connection. */
3685 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3686 || stop_soon == STOP_QUIETLY_REMOTE)
3687 {
3688 if (debug_infrun)
3689 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3690 stop_waiting (ecs);
3691 return;
3692 }
3693
3694 internal_error (__FILE__, __LINE__,
3695 _("unhandled stop_soon: %d"), (int) stop_soon);
3696
3697 case TARGET_WAITKIND_SPURIOUS:
3698 if (debug_infrun)
3699 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3700 if (!ptid_equal (ecs->ptid, inferior_ptid))
3701 context_switch (ecs->ptid);
3702 resume (0, GDB_SIGNAL_0);
3703 prepare_to_wait (ecs);
3704 return;
3705
3706 case TARGET_WAITKIND_EXITED:
3707 case TARGET_WAITKIND_SIGNALLED:
3708 if (debug_infrun)
3709 {
3710 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3711 fprintf_unfiltered (gdb_stdlog,
3712 "infrun: TARGET_WAITKIND_EXITED\n");
3713 else
3714 fprintf_unfiltered (gdb_stdlog,
3715 "infrun: TARGET_WAITKIND_SIGNALLED\n");
3716 }
3717
3718 inferior_ptid = ecs->ptid;
3719 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3720 set_current_program_space (current_inferior ()->pspace);
3721 handle_vfork_child_exec_or_exit (0);
3722 target_terminal_ours (); /* Must do this before mourn anyway. */
3723
3724 /* Clear any previous state of convenience variables. */
3725 clear_exit_convenience_vars ();
3726
3727 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3728 {
3729 /* Record the exit code in the convenience variable $_exitcode, so
3730 that the user can inspect this again later. */
3731 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3732 (LONGEST) ecs->ws.value.integer);
3733
3734 /* Also record this in the inferior itself. */
3735 current_inferior ()->has_exit_code = 1;
3736 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
3737
3738 /* Support the --return-child-result option. */
3739 return_child_result_value = ecs->ws.value.integer;
3740
3741 observer_notify_exited (ecs->ws.value.integer);
3742 }
3743 else
3744 {
3745 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3746 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3747
3748 if (gdbarch_gdb_signal_to_target_p (gdbarch))
3749 {
3750 /* Set the value of the internal variable $_exitsignal,
3751 which holds the signal uncaught by the inferior. */
3752 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
3753 gdbarch_gdb_signal_to_target (gdbarch,
3754 ecs->ws.value.sig));
3755 }
3756 else
3757 {
3758 /* We don't have access to the target's method used for
3759 converting between signal numbers (GDB's internal
3760 representation <-> target's representation).
3761 Therefore, we cannot do a good job at displaying this
3762 information to the user. It's better to just warn
3763 her about it (if infrun debugging is enabled), and
3764 give up. */
3765 if (debug_infrun)
3766 fprintf_filtered (gdb_stdlog, _("\
3767 Cannot fill $_exitsignal with the correct signal number.\n"));
3768 }
3769
3770 observer_notify_signal_exited (ecs->ws.value.sig);
3771 }
3772
3773 gdb_flush (gdb_stdout);
3774 target_mourn_inferior ();
3775 singlestep_breakpoints_inserted_p = 0;
3776 cancel_single_step_breakpoints ();
3777 stop_print_frame = 0;
3778 stop_waiting (ecs);
3779 return;
3780
3781 /* The following are the only cases in which we keep going;
3782 the above cases end in a continue or goto. */
3783 case TARGET_WAITKIND_FORKED:
3784 case TARGET_WAITKIND_VFORKED:
3785 if (debug_infrun)
3786 {
3787 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3788 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3789 else
3790 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORKED\n");
3791 }
3792
3793 /* Check whether the inferior is displaced stepping. */
3794 {
3795 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3796 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3797 struct displaced_step_inferior_state *displaced
3798 = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));
3799
3800 /* If displaced stepping is supported, and thread ecs->ptid is
3801 the thread that is displaced stepping, fix up its state. */
3802 if (displaced && ptid_equal (displaced->step_ptid, ecs->ptid))
3803 {
3804 struct inferior *parent_inf
3805 = find_inferior_pid (ptid_get_pid (ecs->ptid));
3806 struct regcache *child_regcache;
3807 CORE_ADDR parent_pc;
3808
3809 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
3810 indicating that the displaced stepping of syscall instruction
3811 has been done. Perform cleanup for parent process here. Note
3812 that this operation also cleans up the child process for vfork,
3813 because their pages are shared. */
3814 displaced_step_fixup (ecs->ptid, GDB_SIGNAL_TRAP);
3815
3816 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3817 {
3818 /* Restore scratch pad for child process. */
3819 displaced_step_restore (displaced, ecs->ws.value.related_pid);
3820 }
3821
3822 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
3823 the child's PC is also within the scratchpad. Set the child's PC
3824 to the parent's PC value, which has already been fixed up.
3825 FIXME: we use the parent's aspace here, although we're touching
3826 the child, because the child hasn't been added to the inferior
3827 list yet at this point. */
3828
3829 child_regcache
3830 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
3831 gdbarch,
3832 parent_inf->aspace);
3833 /* Read PC value of parent process. */
3834 parent_pc = regcache_read_pc (regcache);
3835
3836 if (debug_displaced)
3837 fprintf_unfiltered (gdb_stdlog,
3838 "displaced: write child pc from %s to %s\n",
3839 paddress (gdbarch,
3840 regcache_read_pc (child_regcache)),
3841 paddress (gdbarch, parent_pc));
3842
3843 regcache_write_pc (child_regcache, parent_pc);
3844 }
3845 }
3846
3847 if (!ptid_equal (ecs->ptid, inferior_ptid))
3848 context_switch (ecs->ptid);
3849
3850 /* Immediately detach breakpoints from the child before there's
3851 any chance of letting the user delete breakpoints from the
3852 breakpoint lists. If we don't do this early, it's easy to
3853 leave left over traps in the child, vis: "break foo; catch
3854 fork; c; <fork>; del; c; <child calls foo>". We only follow
3855 the fork on the last `continue', and by that time the
3856 breakpoint at "foo" is long gone from the breakpoint table.
3857 If we vforked, then we don't need to unpatch here, since both
3858 parent and child are sharing the same memory pages; we'll
3859 need to unpatch at follow/detach time instead to be certain
3860 that new breakpoints added between catchpoint hit time and
3861 vfork follow are detached. */
3862 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3863 {
3864 /* This won't actually modify the breakpoint list, but will
3865 physically remove the breakpoints from the child. */
3866 detach_breakpoints (ecs->ws.value.related_pid);
3867 }
3868
3869 if (singlestep_breakpoints_inserted_p)
3870 {
3871 /* Pull the single step breakpoints out of the target. */
3872 remove_single_step_breakpoints ();
3873 singlestep_breakpoints_inserted_p = 0;
3874 }
3875
3876 /* In case the event is caught by a catchpoint, remember that
3877 the event is to be followed at the next resume of the thread,
3878 and not immediately. */
3879 ecs->event_thread->pending_follow = ecs->ws;
3880
3881 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3882
3883 ecs->event_thread->control.stop_bpstat
3884 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3885 stop_pc, ecs->ptid, &ecs->ws);
3886
3887 /* If no catchpoint triggered for this, then keep going. Note
3888 that we're interested in knowing the bpstat actually causes a
3889 stop, not just if it may explain the signal. Software
3890 watchpoints, for example, always appear in the bpstat. */
3891 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3892 {
3893 ptid_t parent;
3894 ptid_t child;
3895 int should_resume;
3896 int follow_child
3897 = (follow_fork_mode_string == follow_fork_mode_child);
3898
3899 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3900
3901 should_resume = follow_fork ();
3902
3903 parent = ecs->ptid;
3904 child = ecs->ws.value.related_pid;
3905
3906 /* In non-stop mode, also resume the other branch. */
3907 if (non_stop && !detach_fork)
3908 {
3909 if (follow_child)
3910 switch_to_thread (parent);
3911 else
3912 switch_to_thread (child);
3913
3914 ecs->event_thread = inferior_thread ();
3915 ecs->ptid = inferior_ptid;
3916 keep_going (ecs);
3917 }
3918
3919 if (follow_child)
3920 switch_to_thread (child);
3921 else
3922 switch_to_thread (parent);
3923
3924 ecs->event_thread = inferior_thread ();
3925 ecs->ptid = inferior_ptid;
3926
3927 if (should_resume)
3928 keep_going (ecs);
3929 else
3930 stop_waiting (ecs);
3931 return;
3932 }
3933 process_event_stop_test (ecs);
3934 return;
3935
3936 case TARGET_WAITKIND_VFORK_DONE:
3937 /* Done with the shared memory region. Re-insert breakpoints in
3938 the parent, and keep going. */
3939
3940 if (debug_infrun)
3941 fprintf_unfiltered (gdb_stdlog,
3942 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3943
3944 if (!ptid_equal (ecs->ptid, inferior_ptid))
3945 context_switch (ecs->ptid);
3946
3947 current_inferior ()->waiting_for_vfork_done = 0;
3948 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3949 /* This also takes care of reinserting breakpoints in the
3950 previously locked inferior. */
3951 keep_going (ecs);
3952 return;
3953
3954 case TARGET_WAITKIND_EXECD:
3955 if (debug_infrun)
3956 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3957
3958 if (!ptid_equal (ecs->ptid, inferior_ptid))
3959 context_switch (ecs->ptid);
3960
3961 singlestep_breakpoints_inserted_p = 0;
3962 cancel_single_step_breakpoints ();
3963
3964 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3965
3966 /* Do whatever is necessary to the parent branch of the vfork. */
3967 handle_vfork_child_exec_or_exit (1);
3968
3969 /* This causes the eventpoints and symbol table to be reset.
3970 Must do this now, before trying to determine whether to
3971 stop. */
3972 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3973
3974 ecs->event_thread->control.stop_bpstat
3975 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3976 stop_pc, ecs->ptid, &ecs->ws);
3977
3978 /* Note that this may be referenced from inside
3979 bpstat_stop_status above, through inferior_has_execd. */
3980 xfree (ecs->ws.value.execd_pathname);
3981 ecs->ws.value.execd_pathname = NULL;
3982
3983 /* If no catchpoint triggered for this, then keep going. */
3984 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3985 {
3986 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3987 keep_going (ecs);
3988 return;
3989 }
3990 process_event_stop_test (ecs);
3991 return;
3992
3993 /* Be careful not to try to gather much state about a thread
3994 that's in a syscall. It's frequently a losing proposition. */
3995 case TARGET_WAITKIND_SYSCALL_ENTRY:
3996 if (debug_infrun)
3997 fprintf_unfiltered (gdb_stdlog,
3998 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3999 /* Get the current syscall number. */
4000 if (handle_syscall_event (ecs) == 0)
4001 process_event_stop_test (ecs);
4002 return;
4003
4004 /* Before examining the threads further, step this thread to
4005 get it entirely out of the syscall. (We get notice of the
4006 event when the thread is just on the verge of exiting a
4007 syscall. Stepping one instruction seems to get it back
4008 into user code.) */
4009 case TARGET_WAITKIND_SYSCALL_RETURN:
4010 if (debug_infrun)
4011 fprintf_unfiltered (gdb_stdlog,
4012 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
4013 if (handle_syscall_event (ecs) == 0)
4014 process_event_stop_test (ecs);
4015 return;
4016
4017 case TARGET_WAITKIND_STOPPED:
4018 if (debug_infrun)
4019 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
4020 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
4021 handle_signal_stop (ecs);
4022 return;
4023
4024 case TARGET_WAITKIND_NO_HISTORY:
4025 if (debug_infrun)
4026 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
4027 /* Reverse execution: target ran out of history info. */
4028
4029 /* Pull the single step breakpoints out of the target. */
4030 if (singlestep_breakpoints_inserted_p)
4031 {
4032 if (!ptid_equal (ecs->ptid, inferior_ptid))
4033 context_switch (ecs->ptid);
4034 remove_single_step_breakpoints ();
4035 singlestep_breakpoints_inserted_p = 0;
4036 }
4037 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
4038 observer_notify_no_history ();
4039 stop_waiting (ecs);
4040 return;
4041 }
4042 }
4043
4044 /* Come here when the program has stopped with a signal. */
4045
4046 static void
4047 handle_signal_stop (struct execution_control_state *ecs)
4048 {
4049 struct frame_info *frame;
4050 struct gdbarch *gdbarch;
4051 int stopped_by_watchpoint;
4052 enum stop_kind stop_soon;
4053 int random_signal;
4054
4055 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
4056
4057 /* Do we need to clean up the state of a thread that has
4058 completed a displaced single-step? (Doing so usually affects
4059 the PC, so do it here, before we set stop_pc.) */
4060 displaced_step_fixup (ecs->ptid,
4061 ecs->event_thread->suspend.stop_signal);
4062
4063 /* If we either finished a single-step or hit a breakpoint, but
4064 the user wanted this thread to be stopped, pretend we got a
4065 SIG0 (generic unsignaled stop). */
4066 if (ecs->event_thread->stop_requested
4067 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
4068 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4069
4070 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
4071
4072 if (debug_infrun)
4073 {
4074 struct regcache *regcache = get_thread_regcache (ecs->ptid);
4075 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4076 struct cleanup *old_chain = save_inferior_ptid ();
4077
4078 inferior_ptid = ecs->ptid;
4079
4080 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
4081 paddress (gdbarch, stop_pc));
4082 if (target_stopped_by_watchpoint ())
4083 {
4084 CORE_ADDR addr;
4085
4086 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
4087
4088 if (target_stopped_data_address (&current_target, &addr))
4089 fprintf_unfiltered (gdb_stdlog,
4090 "infrun: stopped data address = %s\n",
4091 paddress (gdbarch, addr));
4092 else
4093 fprintf_unfiltered (gdb_stdlog,
4094 "infrun: (no data address available)\n");
4095 }
4096
4097 do_cleanups (old_chain);
4098 }
4099
4100 /* This originates from start_remote(), start_inferior() and
4101 shared library hook functions. */
4102 stop_soon = get_inferior_stop_soon (ecs->ptid);
4103 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
4104 {
4105 if (!ptid_equal (ecs->ptid, inferior_ptid))
4106 context_switch (ecs->ptid);
4107 if (debug_infrun)
4108 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
4109 stop_print_frame = 1;
4110 stop_waiting (ecs);
4111 return;
4112 }
4113
4114 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4115 && stop_after_trap)
4116 {
4117 if (!ptid_equal (ecs->ptid, inferior_ptid))
4118 context_switch (ecs->ptid);
4119 if (debug_infrun)
4120 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
4121 stop_print_frame = 0;
4122 stop_waiting (ecs);
4123 return;
4124 }
4125
4126 /* This originates from attach_command(). We need to overwrite
4127 the stop_signal here, because some kernels don't ignore a
4128 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
4129 See more comments in inferior.h. On the other hand, if we
4130 get a non-SIGSTOP, report it to the user - assume the backend
4131 will handle the SIGSTOP if it should show up later.
4132
4133 Also consider that the attach is complete when we see a
4134 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
4135 target extended-remote report it instead of a SIGSTOP
4136 (e.g. gdbserver). We already rely on SIGTRAP being our
4137 signal, so this is no exception.
4138
4139 Also consider that the attach is complete when we see a
4140 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
4141 the target to stop all threads of the inferior, in case the
4142 low level attach operation doesn't stop them implicitly. If
4143 they weren't stopped implicitly, then the stub will report a
4144 GDB_SIGNAL_0, meaning: stopped for no particular reason
4145 other than GDB's request. */
4146 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
4147 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
4148 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4149 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
4150 {
4151 stop_print_frame = 1;
4152 stop_waiting (ecs);
4153 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4154 return;
4155 }
4156
4157 /* See if something interesting happened to the non-current thread. If
4158 so, then switch to that thread. */
4159 if (!ptid_equal (ecs->ptid, inferior_ptid))
4160 {
4161 if (debug_infrun)
4162 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
4163
4164 context_switch (ecs->ptid);
4165
4166 if (deprecated_context_hook)
4167 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
4168 }
4169
4170 /* At this point, get hold of the now-current thread's frame. */
4171 frame = get_current_frame ();
4172 gdbarch = get_frame_arch (frame);
4173
4174 /* Pull the single step breakpoints out of the target. */
4175 if (singlestep_breakpoints_inserted_p)
4176 {
4177 /* However, before doing so, if this single-step breakpoint was
4178 actually for another thread, set this thread up for moving
4179 past it. */
4180 if (!ptid_equal (ecs->ptid, singlestep_ptid)
4181 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
4182 {
4183 struct regcache *regcache;
4184 struct address_space *aspace;
4185 CORE_ADDR pc;
4186
4187 regcache = get_thread_regcache (ecs->ptid);
4188 aspace = get_regcache_aspace (regcache);
4189 pc = regcache_read_pc (regcache);
4190 if (single_step_breakpoint_inserted_here_p (aspace, pc))
4191 {
4192 if (debug_infrun)
4193 {
4194 fprintf_unfiltered (gdb_stdlog,
4195 "infrun: [%s] hit step over single-step"
4196 " breakpoint of [%s]\n",
4197 target_pid_to_str (ecs->ptid),
4198 target_pid_to_str (singlestep_ptid));
4199 }
4200 ecs->hit_singlestep_breakpoint = 1;
4201 }
4202 }
4203
4204 remove_single_step_breakpoints ();
4205 singlestep_breakpoints_inserted_p = 0;
4206 }
4207
4208 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4209 && ecs->event_thread->control.trap_expected
4210 && ecs->event_thread->stepping_over_watchpoint)
4211 stopped_by_watchpoint = 0;
4212 else
4213 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
4214
4215 /* If necessary, step over this watchpoint. We'll be back to display
4216 it in a moment. */
4217 if (stopped_by_watchpoint
4218 && (target_have_steppable_watchpoint
4219 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
4220 {
4221 /* At this point, we are stopped at an instruction which has
4222 attempted to write to a piece of memory under control of
4223 a watchpoint. The instruction hasn't actually executed
4224 yet. If we were to evaluate the watchpoint expression
4225 now, we would get the old value, and therefore no change
4226 would seem to have occurred.
4227
4228 In order to make watchpoints work `right', we really need
4229 to complete the memory write, and then evaluate the
4230 watchpoint expression. We do this by single-stepping the
4231 target.
4232
4233 It may not be necessary to disable the watchpoint to step over
4234 it. For example, the PA can (with some kernel cooperation)
4235 single step over a watchpoint without disabling the watchpoint.
4236
4237 It is far more common to need to disable a watchpoint to step
4238 the inferior over it. If we have non-steppable watchpoints,
4239 we must disable the current watchpoint; it's simplest to
4240 disable all watchpoints.
4241
4242 Any breakpoint at PC must also be stepped over -- if there's
4243 one, it will have already triggered before the watchpoint
4244 triggered, and we either already reported it to the user, or
4245 it didn't cause a stop and we called keep_going. In either
4246 case, if there was a breakpoint at PC, we must be trying to
4247 step past it. */
4248 ecs->event_thread->stepping_over_watchpoint = 1;
4249 keep_going (ecs);
4250 return;
4251 }
4252
4253 ecs->event_thread->stepping_over_breakpoint = 0;
4254 ecs->event_thread->stepping_over_watchpoint = 0;
4255 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4256 ecs->event_thread->control.stop_step = 0;
4257 stop_print_frame = 1;
4258 stopped_by_random_signal = 0;
4259
4260 /* Hide inlined functions starting here, unless we just performed stepi or
4261 nexti. After stepi and nexti, always show the innermost frame (not any
4262 inline function call sites). */
4263 if (ecs->event_thread->control.step_range_end != 1)
4264 {
4265 struct address_space *aspace =
4266 get_regcache_aspace (get_thread_regcache (ecs->ptid));
4267
4268 /* skip_inline_frames is expensive, so we avoid it if we can
4269 determine that the address is one where functions cannot have
4270 been inlined. This improves performance with inferiors that
4271 load a lot of shared libraries, because the solib event
4272 breakpoint is defined as the address of a function (i.e. not
4273 inline). Note that we have to check the previous PC as well
4274 as the current one to catch cases when we have just
4275 single-stepped off a breakpoint prior to reinstating it.
4276 Note that we're assuming that the code we single-step to is
4277 not inline, but that's not definitive: there's nothing
4278 preventing the event breakpoint function from containing
4279 inlined code, and the single-step ending up there. If the
4280 user had set a breakpoint on that inlined code, the missing
4281 skip_inline_frames call would break things. Fortunately
4282 that's an extremely unlikely scenario. */
4283 if (!pc_at_non_inline_function (aspace, stop_pc, &ecs->ws)
4284 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4285 && ecs->event_thread->control.trap_expected
4286 && pc_at_non_inline_function (aspace,
4287 ecs->event_thread->prev_pc,
4288 &ecs->ws)))
4289 {
4290 skip_inline_frames (ecs->ptid);
4291
4292 /* Re-fetch current thread's frame in case that invalidated
4293 the frame cache. */
4294 frame = get_current_frame ();
4295 gdbarch = get_frame_arch (frame);
4296 }
4297 }
4298
4299 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4300 && ecs->event_thread->control.trap_expected
4301 && gdbarch_single_step_through_delay_p (gdbarch)
4302 && currently_stepping (ecs->event_thread))
4303 {
4304 /* We're trying to step off a breakpoint. Turns out that we're
4305 also on an instruction that needs to be stepped multiple
4306 times before it has fully executed. E.g., architectures
4307 with a delay slot. It needs to be stepped twice, once for
4308 the instruction and once for the delay slot. */
4309 int step_through_delay
4310 = gdbarch_single_step_through_delay (gdbarch, frame);
4311
4312 if (debug_infrun && step_through_delay)
4313 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
4314 if (ecs->event_thread->control.step_range_end == 0
4315 && step_through_delay)
4316 {
4317 /* The user issued a continue when stopped at a breakpoint.
4318 Set up for another trap and get out of here. */
4319 ecs->event_thread->stepping_over_breakpoint = 1;
4320 keep_going (ecs);
4321 return;
4322 }
4323 else if (step_through_delay)
4324 {
4325 /* The user issued a step when stopped at a breakpoint.
4326 Maybe we should stop, maybe we should not - the delay
4327 slot *might* correspond to a line of source. In any
4328 case, don't decide that here, just set
4329 ecs->stepping_over_breakpoint, making sure we
4330 single-step again before breakpoints are re-inserted. */
4331 ecs->event_thread->stepping_over_breakpoint = 1;
4332 }
4333 }
4334
4335 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
4336 handles this event. */
4337 ecs->event_thread->control.stop_bpstat
4338 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4339 stop_pc, ecs->ptid, &ecs->ws);
4340
4341 /* The following is needed in case a breakpoint condition called
4342 a function. */
4343 stop_print_frame = 1;
4344
4345 /* This is where we handle "moribund" watchpoints. Unlike
4346 software breakpoint traps, hardware watchpoint traps are
4347 always distinguishable from random traps. If no high-level
4348 watchpoint is associated with the reported stop data address
4349 anymore, then the bpstat does not explain the signal ---
4350 simply make sure to ignore it if `stopped_by_watchpoint' is
4351 set. */
4352
4353 if (debug_infrun
4354 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4355 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4356 GDB_SIGNAL_TRAP)
4357 && stopped_by_watchpoint)
4358 fprintf_unfiltered (gdb_stdlog,
4359 "infrun: no user watchpoint explains "
4360 "watchpoint SIGTRAP, ignoring\n");
4361
4362 /* NOTE: cagney/2003-03-29: These checks for a random signal
4363 at one stage in the past included checks for an inferior
4364 function call's call dummy's return breakpoint. The original
4365 comment, that went with the test, read:
4366
4367 ``End of a stack dummy. Some systems (e.g. Sony news) give
4368 another signal besides SIGTRAP, so check here as well as
4369 above.''
4370
4371 If someone ever tries to get call dummies on a
4372 non-executable stack to work (where the target would stop
4373 with something like a SIGSEGV), then those tests might need
4374 to be re-instated. Given, however, that the tests were only
4375 enabled when momentary breakpoints were not being used, I
4376 suspect that it won't be the case.
4377
4378 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4379 be necessary for call dummies on a non-executable stack on
4380 SPARC. */
4381
4382 /* See if the breakpoints module can explain the signal. */
4383 random_signal
4384 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4385 ecs->event_thread->suspend.stop_signal);
4386
4387 /* If not, perhaps stepping/nexting can. */
4388 if (random_signal)
4389 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4390 && currently_stepping (ecs->event_thread));
4391
4392 /* Perhaps the thread hit a single-step breakpoint of _another_
4393 thread. Single-step breakpoints are transparent to the
4394 breakpoints module. */
4395 if (random_signal)
4396 random_signal = !ecs->hit_singlestep_breakpoint;
4397
4398 /* No? Perhaps we got a moribund watchpoint. */
4399 if (random_signal)
4400 random_signal = !stopped_by_watchpoint;
4401
4402 /* For the program's own signals, act according to
4403 the signal handling tables. */
4404
4405 if (random_signal)
4406 {
4407 /* Signal not for debugging purposes. */
4408 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
4409 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
4410
4411 if (debug_infrun)
4412 fprintf_unfiltered (gdb_stdlog, "infrun: random signal (%s)\n",
4413 gdb_signal_to_symbol_string (stop_signal));
4414
4415 stopped_by_random_signal = 1;
4416
4417 /* Always stop on signals if we're either just gaining control
4418 of the program, or the user explicitly requested this thread
4419 to remain stopped. */
4420 if (stop_soon != NO_STOP_QUIETLY
4421 || ecs->event_thread->stop_requested
4422 || (!inf->detaching
4423 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4424 {
4425 stop_waiting (ecs);
4426 return;
4427 }
4428
4429 /* Notify observers the signal has "handle print" set. Note we
4430 returned early above if stopping; normal_stop handles the
4431 printing in that case. */
4432 if (signal_print[ecs->event_thread->suspend.stop_signal])
4433 {
4434 /* The signal table tells us to print about this signal. */
4435 target_terminal_ours_for_output ();
4436 observer_notify_signal_received (ecs->event_thread->suspend.stop_signal);
4437 target_terminal_inferior ();
4438 }
4439
4440 /* Clear the signal if it should not be passed. */
4441 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4442 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4443
4444 if (ecs->event_thread->prev_pc == stop_pc
4445 && ecs->event_thread->control.trap_expected
4446 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4447 {
4448 /* We were just starting a new sequence, attempting to
4449 single-step off of a breakpoint and expecting a SIGTRAP.
4450 Instead this signal arrives. This signal will take us out
4451 of the stepping range so GDB needs to remember to, when
4452 the signal handler returns, resume stepping off that
4453 breakpoint. */
4454 /* To simplify things, "continue" is forced to use the same
4455 code paths as single-step - set a breakpoint at the
4456 signal return address and then, once hit, step off that
4457 breakpoint. */
4458 if (debug_infrun)
4459 fprintf_unfiltered (gdb_stdlog,
4460 "infrun: signal arrived while stepping over "
4461 "breakpoint\n");
4462
4463 insert_hp_step_resume_breakpoint_at_frame (frame);
4464 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4465 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4466 ecs->event_thread->control.trap_expected = 0;
4467
4468 /* If we were nexting/stepping some other thread, switch to
4469 it, so that we don't continue it, losing control. */
4470 if (!switch_back_to_stepped_thread (ecs))
4471 keep_going (ecs);
4472 return;
4473 }
4474
4475 if (ecs->event_thread->control.step_range_end != 0
4476 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
4477 && pc_in_thread_step_range (stop_pc, ecs->event_thread)
4478 && frame_id_eq (get_stack_frame_id (frame),
4479 ecs->event_thread->control.step_stack_frame_id)
4480 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4481 {
4482 /* The inferior is about to take a signal that will take it
4483 out of the single step range. Set a breakpoint at the
4484 current PC (which is presumably where the signal handler
4485 will eventually return) and then allow the inferior to
4486 run free.
4487
4488 Note that this is only needed for a signal delivered
4489 while in the single-step range. Nested signals aren't a
4490 problem as they eventually all return. */
4491 if (debug_infrun)
4492 fprintf_unfiltered (gdb_stdlog,
4493 "infrun: signal may take us out of "
4494 "single-step range\n");
4495
4496 insert_hp_step_resume_breakpoint_at_frame (frame);
4497 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4498 ecs->event_thread->control.trap_expected = 0;
4499 keep_going (ecs);
4500 return;
4501 }
4502
4503 /* Note: step_resume_breakpoint may be non-NULL. This occurs
4504 when either there's a nested signal, or when there's a
4505 pending signal enabled just as the signal handler returns
4506 (leaving the inferior at the step-resume-breakpoint without
4507 actually executing it). Either way continue until the
4508 breakpoint is really hit. */
4509
4510 if (!switch_back_to_stepped_thread (ecs))
4511 {
4512 if (debug_infrun)
4513 fprintf_unfiltered (gdb_stdlog,
4514 "infrun: random signal, keep going\n");
4515
4516 keep_going (ecs);
4517 }
4518 return;
4519 }
4520
4521 process_event_stop_test (ecs);
4522 }
4523
4524 /* Come here when we've got some debug event / signal we can explain
4525 (IOW, not a random signal), and test whether it should cause a
4526 stop, or whether we should resume the inferior (transparently).
4527 E.g., could be a breakpoint whose condition evaluates false; we
4528 could be still stepping within the line; etc. */
4529
4530 static void
4531 process_event_stop_test (struct execution_control_state *ecs)
4532 {
4533 struct symtab_and_line stop_pc_sal;
4534 struct frame_info *frame;
4535 struct gdbarch *gdbarch;
4536 CORE_ADDR jmp_buf_pc;
4537 struct bpstat_what what;
4538
4539 /* Handle cases caused by hitting a breakpoint. */
4540
4541 frame = get_current_frame ();
4542 gdbarch = get_frame_arch (frame);
4543
4544 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4545
4546 if (what.call_dummy)
4547 {
4548 stop_stack_dummy = what.call_dummy;
4549 }
4550
4551 /* If we hit an internal event that triggers symbol changes, the
4552 current frame will be invalidated within bpstat_what (e.g., if we
4553 hit an internal solib event). Re-fetch it. */
4554 frame = get_current_frame ();
4555 gdbarch = get_frame_arch (frame);
4556
4557 switch (what.main_action)
4558 {
4559 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4560 /* If we hit the breakpoint at longjmp while stepping, we
4561 install a momentary breakpoint at the target of the
4562 jmp_buf. */
4563
4564 if (debug_infrun)
4565 fprintf_unfiltered (gdb_stdlog,
4566 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4567
4568 ecs->event_thread->stepping_over_breakpoint = 1;
4569
4570 if (what.is_longjmp)
4571 {
4572 struct value *arg_value;
4573
4574 /* If we set the longjmp breakpoint via a SystemTap probe,
4575 then use it to extract the arguments. The destination PC
4576 is the third argument to the probe. */
4577 arg_value = probe_safe_evaluate_at_pc (frame, 2);
4578 if (arg_value)
4579 {
4580 jmp_buf_pc = value_as_address (arg_value);
4581 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
4582 }
4583 else if (!gdbarch_get_longjmp_target_p (gdbarch)
4584 || !gdbarch_get_longjmp_target (gdbarch,
4585 frame, &jmp_buf_pc))
4586 {
4587 if (debug_infrun)
4588 fprintf_unfiltered (gdb_stdlog,
4589 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4590 "(!gdbarch_get_longjmp_target)\n");
4591 keep_going (ecs);
4592 return;
4593 }
4594
4595 /* Insert a breakpoint at resume address. */
4596 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4597 }
4598 else
4599 check_exception_resume (ecs, frame);
4600 keep_going (ecs);
4601 return;
4602
4603 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4604 {
4605 struct frame_info *init_frame;
4606
4607 /* There are several cases to consider.
4608
4609 1. The initiating frame no longer exists. In this case we
4610 must stop, because the exception or longjmp has gone too
4611 far.
4612
4613 2. The initiating frame exists, and is the same as the
4614 current frame. We stop, because the exception or longjmp
4615 has been caught.
4616
4617 3. The initiating frame exists and is different from the
4618 current frame. This means the exception or longjmp has
4619 been caught beneath the initiating frame, so keep going.
4620
4621 4. longjmp breakpoint has been placed just to protect
4622 against stale dummy frames and user is not interested in
4623 stopping around longjmps. */
4624
4625 if (debug_infrun)
4626 fprintf_unfiltered (gdb_stdlog,
4627 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4628
4629 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4630 != NULL);
4631 delete_exception_resume_breakpoint (ecs->event_thread);
4632
4633 if (what.is_longjmp)
4634 {
4635 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
4636
4637 if (!frame_id_p (ecs->event_thread->initiating_frame))
4638 {
4639 /* Case 4. */
4640 keep_going (ecs);
4641 return;
4642 }
4643 }
4644
4645 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
4646
4647 if (init_frame)
4648 {
4649 struct frame_id current_id
4650 = get_frame_id (get_current_frame ());
4651 if (frame_id_eq (current_id,
4652 ecs->event_thread->initiating_frame))
4653 {
4654 /* Case 2. Fall through. */
4655 }
4656 else
4657 {
4658 /* Case 3. */
4659 keep_going (ecs);
4660 return;
4661 }
4662 }
4663
4664 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
4665 exists. */
4666 delete_step_resume_breakpoint (ecs->event_thread);
4667
4668 end_stepping_range (ecs);
4669 }
4670 return;
4671
4672 case BPSTAT_WHAT_SINGLE:
4673 if (debug_infrun)
4674 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4675 ecs->event_thread->stepping_over_breakpoint = 1;
4676 /* Still need to check other stuff, at least the case where we
4677 are stepping and step out of the right range. */
4678 break;
4679
4680 case BPSTAT_WHAT_STEP_RESUME:
4681 if (debug_infrun)
4682 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4683
4684 delete_step_resume_breakpoint (ecs->event_thread);
4685 if (ecs->event_thread->control.proceed_to_finish
4686 && execution_direction == EXEC_REVERSE)
4687 {
4688 struct thread_info *tp = ecs->event_thread;
4689
4690 /* We are finishing a function in reverse, and just hit the
4691 step-resume breakpoint at the start address of the
4692 function, and we're almost there -- just need to back up
4693 by one more single-step, which should take us back to the
4694 function call. */
4695 tp->control.step_range_start = tp->control.step_range_end = 1;
4696 keep_going (ecs);
4697 return;
4698 }
4699 fill_in_stop_func (gdbarch, ecs);
4700 if (stop_pc == ecs->stop_func_start
4701 && execution_direction == EXEC_REVERSE)
4702 {
4703 /* We are stepping over a function call in reverse, and just
4704 hit the step-resume breakpoint at the start address of
4705 the function. Go back to single-stepping, which should
4706 take us back to the function call. */
4707 ecs->event_thread->stepping_over_breakpoint = 1;
4708 keep_going (ecs);
4709 return;
4710 }
4711 break;
4712
4713 case BPSTAT_WHAT_STOP_NOISY:
4714 if (debug_infrun)
4715 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4716 stop_print_frame = 1;
4717
4718 /* Assume the thread stopped for a breakpoint. We'll still check
4719 whether a/the breakpoint is there when the thread is next
4720 resumed. */
4721 ecs->event_thread->stepping_over_breakpoint = 1;
4722
4723 stop_waiting (ecs);
4724 return;
4725
4726 case BPSTAT_WHAT_STOP_SILENT:
4727 if (debug_infrun)
4728 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4729 stop_print_frame = 0;
4730
4731 /* Assume the thread stopped for a breakpoint. We'll still check
4732 whether a/the breakpoint is there when the thread is next
4733 resumed. */
4734 ecs->event_thread->stepping_over_breakpoint = 1;
4735 stop_waiting (ecs);
4736 return;
4737
4738 case BPSTAT_WHAT_HP_STEP_RESUME:
4739 if (debug_infrun)
4740 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4741
4742 delete_step_resume_breakpoint (ecs->event_thread);
4743 if (ecs->event_thread->step_after_step_resume_breakpoint)
4744 {
4745 /* Back when the step-resume breakpoint was inserted, we
4746 were trying to single-step off a breakpoint. Go back to
4747 doing that. */
4748 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4749 ecs->event_thread->stepping_over_breakpoint = 1;
4750 keep_going (ecs);
4751 return;
4752 }
4753 break;
4754
4755 case BPSTAT_WHAT_KEEP_CHECKING:
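/* Nothing to do here; continue with the stepping tests below. */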
4756 break;
4757 }
4758
4759 /* We come here if we hit a breakpoint but should not stop for it.
4760 Possibly we also were stepping and should stop for that. So fall
4761 through and test for stepping. But, if not stepping, do not
4762 stop. */
4763
4764 /* In all-stop mode, if we're currently stepping but have stopped in
4765 some other thread, we need to switch back to the stepped thread. */
4766 if (switch_back_to_stepped_thread (ecs))
4767 return;
4768
4769 if (ecs->event_thread->control.step_resume_breakpoint)
4770 {
4771 if (debug_infrun)
4772 fprintf_unfiltered (gdb_stdlog,
4773 "infrun: step-resume breakpoint is inserted\n");
4774
4775 /* Having a step-resume breakpoint overrides anything
4776 else having to do with stepping commands until
4777 that breakpoint is reached. */
4778 keep_going (ecs);
4779 return;
4780 }
4781
4782 if (ecs->event_thread->control.step_range_end == 0)
4783 {
4784 if (debug_infrun)
4785 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4786 /* Likewise if we aren't even stepping. */
4787 keep_going (ecs);
4788 return;
4789 }
4790
4791 /* Re-fetch current thread's frame in case the code above caused
4792 the frame cache to be re-initialized, making our FRAME variable
4793 a dangling pointer. */
4794 frame = get_current_frame ();
4795 gdbarch = get_frame_arch (frame);
4796 fill_in_stop_func (gdbarch, ecs);
4797
4798 /* If stepping through a line, keep going if still within it.
4799
4800 Note that step_range_end is the address of the first instruction
4801 beyond the step range, and NOT the address of the last instruction
4802 within it!
4803
4804 Note also that during reverse execution, we may be stepping
4805 through a function epilogue and therefore must detect when
4806 the current-frame changes in the middle of a line. */
4807
4808 if (pc_in_thread_step_range (stop_pc, ecs->event_thread)
4809 && (execution_direction != EXEC_REVERSE
4810 || frame_id_eq (get_frame_id (frame),
4811 ecs->event_thread->control.step_frame_id)))
4812 {
4813 if (debug_infrun)
4814 fprintf_unfiltered
4815 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4816 paddress (gdbarch, ecs->event_thread->control.step_range_start),
4817 paddress (gdbarch, ecs->event_thread->control.step_range_end));
4818
4819 /* Tentatively re-enable range stepping; `resume' disables it if
4820 necessary (e.g., if we're stepping over a breakpoint or we
4821 have software watchpoints). */
4822 ecs->event_thread->control.may_range_step = 1;
4823
4824 /* When stepping backward, stop at beginning of line range
4825 (unless it's the function entry point, in which case
4826 keep going back to the call point). */
4827 if (stop_pc == ecs->event_thread->control.step_range_start
4828 && stop_pc != ecs->stop_func_start
4829 && execution_direction == EXEC_REVERSE)
4830 end_stepping_range (ecs);
4831 else
4832 keep_going (ecs);
4833
4834 return;
4835 }
4836
4837 /* We stepped out of the stepping range. */
4838
4839 /* If we are stepping at the source level and entered the runtime
4840 loader dynamic symbol resolution code...
4841
4842 EXEC_FORWARD: we keep on single stepping until we exit the run
4843 time loader code and reach the callee's address.
4844
4845 EXEC_REVERSE: we've already executed the callee (backward), and
4846 the runtime loader code is handled just like any other
4847 undebuggable function call. Now we need only keep stepping
4848 backward through the trampoline code, and that's handled further
4849 down, so there is nothing for us to do here. */
4850
4851 if (execution_direction != EXEC_REVERSE
4852 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4853 && in_solib_dynsym_resolve_code (stop_pc))
4854 {
4855 CORE_ADDR pc_after_resolver =
4856 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4857
4858 if (debug_infrun)
4859 fprintf_unfiltered (gdb_stdlog,
4860 "infrun: stepped into dynsym resolve code\n");
4861
4862 if (pc_after_resolver)
4863 {
4864 /* Set up a step-resume breakpoint at the address
4865 indicated by SKIP_SOLIB_RESOLVER. */
4866 struct symtab_and_line sr_sal;
4867
4868 init_sal (&sr_sal);
4869 sr_sal.pc = pc_after_resolver;
4870 sr_sal.pspace = get_frame_program_space (frame);
4871
4872 insert_step_resume_breakpoint_at_sal (gdbarch,
4873 sr_sal, null_frame_id);
4874 }
4875
4876 keep_going (ecs);
4877 return;
4878 }
4879
4880 if (ecs->event_thread->control.step_range_end != 1
4881 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4882 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4883 && get_frame_type (frame) == SIGTRAMP_FRAME)
4884 {
4885 if (debug_infrun)
4886 fprintf_unfiltered (gdb_stdlog,
4887 "infrun: stepped into signal trampoline\n");
4888 /* The inferior, while doing a "step" or "next", has ended up in
4889 a signal trampoline (either by a signal being delivered or by
4890 the signal handler returning). Just single-step until the
4891 inferior leaves the trampoline (either by calling the handler
4892 or returning). */
4893 keep_going (ecs);
4894 return;
4895 }
4896
4897 /* If we're in the return path from a shared library trampoline,
4898 we want to proceed through the trampoline when stepping. */
4899 /* macro/2012-04-25: This needs to come before the subroutine
4900 call check below as on some targets return trampolines look
4901 like subroutine calls (MIPS16 return thunks). */
4902 if (gdbarch_in_solib_return_trampoline (gdbarch,
4903 stop_pc, ecs->stop_func_name)
4904 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4905 {
4906 /* Determine where this trampoline returns. */
4907 CORE_ADDR real_stop_pc;
4908
4909 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4910
4911 if (debug_infrun)
4912 fprintf_unfiltered (gdb_stdlog,
4913 "infrun: stepped into solib return tramp\n");
4914
4915 /* Only proceed through if we know where it's going. */
4916 if (real_stop_pc)
4917 {
4918 /* And put the step-breakpoint there and go until there. */
4919 struct symtab_and_line sr_sal;
4920
4921 init_sal (&sr_sal); /* initialize to zeroes */
4922 sr_sal.pc = real_stop_pc;
4923 sr_sal.section = find_pc_overlay (sr_sal.pc);
4924 sr_sal.pspace = get_frame_program_space (frame);
4925
4926 /* Do not specify what the fp should be when we stop since
4927 on some machines the prologue is where the new fp value
4928 is established. */
4929 insert_step_resume_breakpoint_at_sal (gdbarch,
4930 sr_sal, null_frame_id);
4931
4932 /* Restart without fiddling with the step ranges or
4933 other state. */
4934 keep_going (ecs);
4935 return;
4936 }
4937 }
4938
4939 /* Check for subroutine calls. The check for the current frame
4940 equalling the step ID is not necessary - the check of the
4941 previous frame's ID is sufficient - but it is a common case and
4942 cheaper than checking the previous frame's ID.
4943
4944 NOTE: frame_id_eq will never report two invalid frame IDs as
4945 being equal, so to get into this block, both the current and
4946 previous frame must have valid frame IDs. */
4947 /* The outer_frame_id check is a heuristic to detect stepping
4948 through startup code. If we step over an instruction which
4949 sets the stack pointer from an invalid value to a valid value,
4950 we may detect that as a subroutine call from the mythical
4951 "outermost" function. This could be fixed by marking
4952 outermost frames as !stack_p,code_p,special_p. Then the
4953 initial outermost frame, before sp was valid, would
4954 have code_addr == &_start. See the comment in frame_id_eq
4955 for more. */
4956 if (!frame_id_eq (get_stack_frame_id (frame),
4957 ecs->event_thread->control.step_stack_frame_id)
4958 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4959 ecs->event_thread->control.step_stack_frame_id)
4960 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
4961 outer_frame_id)
4962 || step_start_function != find_pc_function (stop_pc))))
4963 {
4964 CORE_ADDR real_stop_pc;
4965
4966 if (debug_infrun)
4967 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4968
4969 if ((ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
4970 || ((ecs->event_thread->control.step_range_end == 1)
4971 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4972 ecs->stop_func_start)))
4973 {
4974 /* I presume that step_over_calls is only 0 when we're
4975 supposed to be stepping at the assembly language level
4976 ("stepi"). Just stop. */
4977 /* Also, maybe we just did a "nexti" inside a prolog, so we
4978 thought it was a subroutine call but it was not. Stop as
4979 well. FENN */
4980 /* And this works the same backward as frontward. MVS */
4981 end_stepping_range (ecs);
4982 return;
4983 }
4984
4985 /* Reverse stepping through solib trampolines. */
4986
4987 if (execution_direction == EXEC_REVERSE
4988 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
4989 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4990 || (ecs->stop_func_start == 0
4991 && in_solib_dynsym_resolve_code (stop_pc))))
4992 {
4993 /* Any solib trampoline code can be handled in reverse
4994 by simply continuing to single-step. We have already
4995 executed the solib function (backwards), and a few
4996 steps will take us back through the trampoline to the
4997 caller. */
4998 keep_going (ecs);
4999 return;
5000 }
5001
5002 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5003 {
5004 /* We're doing a "next".
5005
5006 Normal (forward) execution: set a breakpoint at the
5007 callee's return address (the address at which the caller
5008 will resume).
5009
5010 Reverse (backward) execution: set the step-resume
5011 breakpoint at the start of the function that we just
5012 stepped into (backwards), and continue to there. When we
5013 get there, we'll need to single-step back to the caller. */
5014
5015 if (execution_direction == EXEC_REVERSE)
5016 {
5017 /* If we're already at the start of the function, we've either
5018 just stepped backward into a single instruction function,
5019 or stepped back out of a signal handler to the first instruction
5020 of the function. Just keep going, which will single-step back
5021 to the caller. */
5022 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
5023 {
5024 struct symtab_and_line sr_sal;
5025
5026 /* Normal function call return (static or dynamic). */
5027 init_sal (&sr_sal);
5028 sr_sal.pc = ecs->stop_func_start;
5029 sr_sal.pspace = get_frame_program_space (frame);
5030 insert_step_resume_breakpoint_at_sal (gdbarch,
5031 sr_sal, null_frame_id);
5032 }
5033 }
5034 else
5035 insert_step_resume_breakpoint_at_caller (frame);
5036
5037 keep_going (ecs);
5038 return;
5039 }
5040
5041 /* If we are in a function call trampoline (a stub between the
5042 calling routine and the real function), locate the real
5043 function. That's what tells us (a) whether we want to step
5044 into it at all, and (b) what prologue we want to run to the
5045 end of, if we do step into it. */
5046 real_stop_pc = skip_language_trampoline (frame, stop_pc);
5047 if (real_stop_pc == 0)
5048 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
5049 if (real_stop_pc != 0)
5050 ecs->stop_func_start = real_stop_pc;
5051
5052 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
5053 {
5054 struct symtab_and_line sr_sal;
5055
5056 init_sal (&sr_sal);
5057 sr_sal.pc = ecs->stop_func_start;
5058 sr_sal.pspace = get_frame_program_space (frame);
5059
5060 insert_step_resume_breakpoint_at_sal (gdbarch,
5061 sr_sal, null_frame_id);
5062 keep_going (ecs);
5063 return;
5064 }
5065
5066 /* If we have line number information for the function we are
5067 thinking of stepping into and the function isn't on the skip
5068 list, step into it.
5069
5070 If there are several symtabs at that PC (e.g. with include
5071 files), we just want to know whether *any* of them have line
5072 numbers. find_pc_line handles this. */
5073 {
5074 struct symtab_and_line tmp_sal;
5075
5076 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
5077 if (tmp_sal.line != 0
5078 && !function_name_is_marked_for_skip (ecs->stop_func_name,
5079 &tmp_sal))
5080 {
5081 if (execution_direction == EXEC_REVERSE)
5082 handle_step_into_function_backward (gdbarch, ecs);
5083 else
5084 handle_step_into_function (gdbarch, ecs);
5085 return;
5086 }
5087 }
5088
5089 /* If we have no line number and the step-stop-if-no-debug is
5090 set, we stop the step so that the user has a chance to switch
5091 to assembly mode. */
5092 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5093 && step_stop_if_no_debug)
5094 {
5095 end_stepping_range (ecs);
5096 return;
5097 }
5098
5099 if (execution_direction == EXEC_REVERSE)
5100 {
5101 /* If we're already at the start of the function, we've either just
5102 stepped backward into a single instruction function without line
5103 number info, or stepped back out of a signal handler to the first
5104 instruction of the function without line number info. Just keep
5105 going, which will single-step back to the caller. */
5106 if (ecs->stop_func_start != stop_pc)
5107 {
5108 /* Set a breakpoint at callee's start address.
5109 From there we can step once and be back in the caller. */
5110 struct symtab_and_line sr_sal;
5111
5112 init_sal (&sr_sal);
5113 sr_sal.pc = ecs->stop_func_start;
5114 sr_sal.pspace = get_frame_program_space (frame);
5115 insert_step_resume_breakpoint_at_sal (gdbarch,
5116 sr_sal, null_frame_id);
5117 }
5118 }
5119 else
5120 /* Set a breakpoint at callee's return address (the address
5121 at which the caller will resume). */
5122 insert_step_resume_breakpoint_at_caller (frame);
5123
5124 keep_going (ecs);
5125 return;
5126 }
5127
5128 /* Reverse stepping through solib trampolines. */
5129
5130 if (execution_direction == EXEC_REVERSE
5131 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
5132 {
5133 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
5134 || (ecs->stop_func_start == 0
5135 && in_solib_dynsym_resolve_code (stop_pc)))
5136 {
5137 /* Any solib trampoline code can be handled in reverse
5138 by simply continuing to single-step. We have already
5139 executed the solib function (backwards), and a few
5140 steps will take us back through the trampoline to the
5141 caller. */
5142 keep_going (ecs);
5143 return;
5144 }
5145 else if (in_solib_dynsym_resolve_code (stop_pc))
5146 {
5147 /* Stepped backward into the solib dynsym resolver.
5148 Set a breakpoint at its start and continue, then
5149 one more step will take us out. */
5150 struct symtab_and_line sr_sal;
5151
5152 init_sal (&sr_sal);
5153 sr_sal.pc = ecs->stop_func_start;
5154 sr_sal.pspace = get_frame_program_space (frame);
5155 insert_step_resume_breakpoint_at_sal (gdbarch,
5156 sr_sal, null_frame_id);
5157 keep_going (ecs);
5158 return;
5159 }
5160 }
5161
5162 stop_pc_sal = find_pc_line (stop_pc, 0);
5163
5164 /* NOTE: tausq/2004-05-24: This if block used to be done before all
5165 the trampoline processing logic; however, there are some trampolines
5166 that have no names, so we should do trampoline handling first. */
5167 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5168 && ecs->stop_func_name == NULL
5169 && stop_pc_sal.line == 0)
5170 {
5171 if (debug_infrun)
5172 fprintf_unfiltered (gdb_stdlog,
5173 "infrun: stepped into undebuggable function\n");
5174
5175 /* The inferior just stepped into, or returned to, an
5176 undebuggable function (where there is no debugging information
5177 and no line number corresponding to the address where the
5178 inferior stopped). Since we want to skip this kind of code,
5179 we keep going until the inferior returns from this
5180 function - unless the user has asked us not to (via
5181 set step-mode) or we no longer know how to get back
5182 to the call site. */
5183 if (step_stop_if_no_debug
5184 || !frame_id_p (frame_unwind_caller_id (frame)))
5185 {
5186 /* If we have no line number and the step-stop-if-no-debug
5187 is set, we stop the step so that the user has a chance to
5188 switch to assembly mode. */
5189 end_stepping_range (ecs);
5190 return;
5191 }
5192 else
5193 {
5194 /* Set a breakpoint at callee's return address (the address
5195 at which the caller will resume). */
5196 insert_step_resume_breakpoint_at_caller (frame);
5197 keep_going (ecs);
5198 return;
5199 }
5200 }
5201
5202 if (ecs->event_thread->control.step_range_end == 1)
5203 {
5204 /* It is stepi or nexti. We always want to stop stepping after
5205 one instruction. */
5206 if (debug_infrun)
5207 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
5208 end_stepping_range (ecs);
5209 return;
5210 }
5211
5212 if (stop_pc_sal.line == 0)
5213 {
5214 /* We have no line number information. That means to stop
5215 stepping (does this always happen right after one instruction,
5216 when we do "s" in a function with no line numbers,
5217 or can this happen as a result of a return or longjmp?). */
5218 if (debug_infrun)
5219 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
5220 end_stepping_range (ecs);
5221 return;
5222 }
5223
5224 /* Look for "calls" to inlined functions, part one. If the inline
5225 frame machinery detected some skipped call sites, we have entered
5226 a new inline function. */
5227
5228 if (frame_id_eq (get_frame_id (get_current_frame ()),
5229 ecs->event_thread->control.step_frame_id)
5230 && inline_skipped_frames (ecs->ptid))
5231 {
5232 struct symtab_and_line call_sal;
5233
5234 if (debug_infrun)
5235 fprintf_unfiltered (gdb_stdlog,
5236 "infrun: stepped into inlined function\n");
5237
5238 find_frame_sal (get_current_frame (), &call_sal);
5239
5240 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
5241 {
5242 /* For "step", we're going to stop. But if the call site
5243 for this inlined function is on the same source line as
5244 we were previously stepping, go down into the function
5245 first. Otherwise stop at the call site. */
5246
5247 if (call_sal.line == ecs->event_thread->current_line
5248 && call_sal.symtab == ecs->event_thread->current_symtab)
5249 step_into_inline_frame (ecs->ptid);
5250
5251 end_stepping_range (ecs);
5252 return;
5253 }
5254 else
5255 {
5256 /* For "next", we should stop at the call site if it is on a
5257 different source line. Otherwise continue through the
5258 inlined function. */
5259 if (call_sal.line == ecs->event_thread->current_line
5260 && call_sal.symtab == ecs->event_thread->current_symtab)
5261 keep_going (ecs);
5262 else
5263 end_stepping_range (ecs);
5264 return;
5265 }
5266 }
5267
5268 /* Look for "calls" to inlined functions, part two. If we are still
5269 in the same real function we were stepping through, but we have
5270 to go further up to find the exact frame ID, we are stepping
5271 through a more inlined call beyond its call site. */
5272
5273 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5274 && !frame_id_eq (get_frame_id (get_current_frame ()),
5275 ecs->event_thread->control.step_frame_id)
5276 && stepped_in_from (get_current_frame (),
5277 ecs->event_thread->control.step_frame_id))
5278 {
5279 if (debug_infrun)
5280 fprintf_unfiltered (gdb_stdlog,
5281 "infrun: stepping through inlined function\n");
5282
5283 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5284 keep_going (ecs);
5285 else
5286 end_stepping_range (ecs);
5287 return;
5288 }
5289
5290 if ((stop_pc == stop_pc_sal.pc)
5291 && (ecs->event_thread->current_line != stop_pc_sal.line
5292 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5293 {
5294 /* We are at the start of a different line. So stop. Note that
5295 we don't stop if we step into the middle of a different line.
5296 That is said to make things like for (;;) statements work
5297 better. */
5298 if (debug_infrun)
5299 fprintf_unfiltered (gdb_stdlog,
5300 "infrun: stepped to a different line\n");
5301 end_stepping_range (ecs);
5302 return;
5303 }
5304
5305 /* We aren't done stepping.
5306
5307 Optimize by setting the stepping range to the line.
5308 (We might not be in the original line, but if we entered a
5309 new line in mid-statement, we continue stepping. This makes
5310 things like for(;;) statements work better.) */
5311
5312 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5313 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5314 ecs->event_thread->control.may_range_step = 1;
5315 set_step_info (frame, stop_pc_sal);
5316
5317 if (debug_infrun)
5318 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5319 keep_going (ecs);
5320 }
5321
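/* As a rough illustration (assuming a remote stub that supports range
   stepping): the may_range_step flag set above lets the target run the
   per-line loop itself instead of reporting every single step back to
   GDB.  Over the remote protocol this is expressed as a vCont
   range-step action, roughly

       vCont;r<start>,<end>:<thread-id>

   which asks the stub to keep single-stepping while the PC stays within
   [<start>, <end>) and to report back only once execution leaves that
   range.  */
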
5322 /* In all-stop mode, if we're currently stepping but have stopped in
5323 some other thread, we may need to switch back to the stepped
5324 thread. Returns true if we set the inferior running, false if we left
5325 it stopped (and the event needs further processing). */
5326
5327 static int
5328 switch_back_to_stepped_thread (struct execution_control_state *ecs)
5329 {
5330 if (!non_stop)
5331 {
5332 struct thread_info *tp;
5333 struct thread_info *stepping_thread;
5334 struct thread_info *step_over;
5335
5336 /* If any thread is blocked on some internal breakpoint, and we
5337 simply need to step over that breakpoint to get it going
5338 again, do that first. */
5339
5340 /* However, if we see an event for the stepping thread, then we
5341 know all other threads have been moved past their breakpoints
5342 already. Let the caller check whether the step is finished,
5343 etc., before deciding to move it past a breakpoint. */
5344 if (ecs->event_thread->control.step_range_end != 0)
5345 return 0;
5346
5347 /* Check if the current thread is blocked on an incomplete
5348 step-over, interrupted by a random signal. */
5349 if (ecs->event_thread->control.trap_expected
5350 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5351 {
5352 if (debug_infrun)
5353 {
5354 fprintf_unfiltered (gdb_stdlog,
5355 "infrun: need to finish step-over of [%s]\n",
5356 target_pid_to_str (ecs->event_thread->ptid));
5357 }
5358 keep_going (ecs);
5359 return 1;
5360 }
5361
5362 /* Check if the current thread is blocked by a single-step
5363 breakpoint of another thread. */
5364 if (ecs->hit_singlestep_breakpoint)
5365 {
5366 if (debug_infrun)
5367 {
5368 fprintf_unfiltered (gdb_stdlog,
5369 "infrun: need to step [%s] over single-step "
5370 "breakpoint\n",
5371 target_pid_to_str (ecs->ptid));
5372 }
5373 keep_going (ecs);
5374 return 1;
5375 }
5376
5377 /* Otherwise, we no longer expect a trap in the current thread.
5378 Clear the trap_expected flag before switching back -- this is
5379 what keep_going does as well, if we call it. */
5380 ecs->event_thread->control.trap_expected = 0;
5381
5382 /* Likewise, clear the signal if it should not be passed. */
5383 if (!signal_program[ecs->event_thread->suspend.stop_signal])
5384 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5385
5386 /* If scheduler locking applies even if not stepping, there's no
5387 need to walk over threads. Above we've checked whether the
5388 current thread is stepping. If some other thread not the
5389 event thread is stepping, then it must be that scheduler
5390 locking is not in effect. */
5391 if (schedlock_applies (0))
5392 return 0;
5393
5394 /* Look for the stepping/nexting thread, and check if any thread
5395 other than the stepping thread needs to start a
5396 step-over. Do all step-overs before actually proceeding with
5397 step/next/etc. */
5398 stepping_thread = NULL;
5399 step_over = NULL;
5400 ALL_NON_EXITED_THREADS (tp)
5401 {
5402 /* Ignore threads of processes we're not resuming. */
5403 if (!sched_multi
5404 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
5405 continue;
5406
5407 /* When stepping over a breakpoint, we lock all threads
5408 except the one that needs to move past the breakpoint.
5409 If a non-event thread has this set, the "incomplete
5410 step-over" check above should have caught it earlier. */
5411 gdb_assert (!tp->control.trap_expected);
5412
5413 /* Did we find the stepping thread? */
5414 if (tp->control.step_range_end)
5415 {
5416 /* Yep. There should only be one, though. */
5417 gdb_assert (stepping_thread == NULL);
5418
5419 /* The event thread is handled at the top, before we
5420 enter this loop. */
5421 gdb_assert (tp != ecs->event_thread);
5422
5423 /* If some thread other than the event thread is
5424 stepping, then scheduler locking can't be in effect,
5425 otherwise we wouldn't have resumed the current event
5426 thread in the first place. */
5427 gdb_assert (!schedlock_applies (1));
5428
5429 stepping_thread = tp;
5430 }
5431 else if (thread_still_needs_step_over (tp))
5432 {
5433 step_over = tp;
5434
5435 /* At the top we've returned early if the event thread
5436 is stepping. If some other thread not the event
5437 thread is stepping, then scheduler locking can't be
5438 in effect, and we can resume this thread. No need to
5439 keep looking for the stepping thread then. */
5440 break;
5441 }
5442 }
5443
5444 if (step_over != NULL)
5445 {
5446 tp = step_over;
5447 if (debug_infrun)
5448 {
5449 fprintf_unfiltered (gdb_stdlog,
5450 "infrun: need to step-over [%s]\n",
5451 target_pid_to_str (tp->ptid));
5452 }
5453
5454 /* Only the stepping thread should have this set. */
5455 gdb_assert (tp->control.step_range_end == 0);
5456
5457 ecs->ptid = tp->ptid;
5458 ecs->event_thread = tp;
5459 switch_to_thread (ecs->ptid);
5460 keep_going (ecs);
5461 return 1;
5462 }
5463
5464 if (stepping_thread != NULL)
5465 {
5466 struct frame_info *frame;
5467 struct gdbarch *gdbarch;
5468
5469 tp = stepping_thread;
5470
5471 /* If the stepping thread exited, then don't try to switch
5472 back and resume it, which could fail in several different
5473 ways depending on the target. Instead, just keep going.
5474
5475 We can find a stepping dead thread in the thread list in
5476 two cases:
5477
5478 - The target supports thread exit events, and when the
5479 target tries to delete the thread from the thread list,
5480 inferior_ptid pointed at the exiting thread. In such
5481 case, calling delete_thread does not really remove the
5482 thread from the list; instead, the thread is left listed,
5483 with 'exited' state.
5484
5485 - The target's debug interface does not support thread
5486 exit events, and so we have no idea whatsoever if the
5487 previously stepping thread is still alive. For that
5488 reason, we need to synchronously query the target
5489 now. */
5490 if (is_exited (tp->ptid)
5491 || !target_thread_alive (tp->ptid))
5492 {
5493 if (debug_infrun)
5494 fprintf_unfiltered (gdb_stdlog,
5495 "infrun: not switching back to "
5496 "stepped thread, it has vanished\n");
5497
5498 delete_thread (tp->ptid);
5499 keep_going (ecs);
5500 return 1;
5501 }
5502
5503 if (debug_infrun)
5504 fprintf_unfiltered (gdb_stdlog,
5505 "infrun: switching back to stepped thread\n");
5506
5507 ecs->event_thread = tp;
5508 ecs->ptid = tp->ptid;
5509 context_switch (ecs->ptid);
5510
5511 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
5512 frame = get_current_frame ();
5513 gdbarch = get_frame_arch (frame);
5514
5515 /* If the PC of the thread we were trying to single-step has
5516 changed, then that thread has trapped or been signaled,
5517 but the event has not been reported to GDB yet. Re-poll
5518 the target looking for this particular thread's event
5519 (i.e. temporarily enable schedlock) by:
5520
5521 - setting a break at the current PC
5522 - resuming that particular thread, only (by setting
5523 trap expected)
5524
5525 This prevents us from continuously moving the single-step
5526 breakpoint forward, one instruction at a time,
5527 overstepping. */
5528
5529 if (gdbarch_software_single_step_p (gdbarch)
5530 && stop_pc != tp->prev_pc)
5531 {
5532 if (debug_infrun)
5533 fprintf_unfiltered (gdb_stdlog,
5534 "infrun: expected thread advanced also\n");
5535
5536 /* Clear the info of the previous step-over, as it's no
5537 longer valid. It's what keep_going would do too, if
5538 we called it. Must do this before trying to insert
5539 the sss breakpoint, otherwise if we were previously
5540 trying to step over this exact address in another
5541 thread, the breakpoint ends up not installed. */
5542 clear_step_over_info ();
5543
5544 insert_single_step_breakpoint (get_frame_arch (frame),
5545 get_frame_address_space (frame),
5546 stop_pc);
5547 singlestep_breakpoints_inserted_p = 1;
5548 ecs->event_thread->control.trap_expected = 1;
5549 singlestep_ptid = inferior_ptid;
5550 singlestep_pc = stop_pc;
5551
5552 resume (0, GDB_SIGNAL_0);
5553 prepare_to_wait (ecs);
5554 }
5555 else
5556 {
5557 if (debug_infrun)
5558 fprintf_unfiltered (gdb_stdlog,
5559 "infrun: expected thread still "
5560 "hasn't advanced\n");
5561 keep_going (ecs);
5562 }
5563
5564 return 1;
5565 }
5566 }
5567 return 0;
5568 }
5569
5570 /* Is thread TP in the middle of single-stepping? */
5571
5572 static int
5573 currently_stepping (struct thread_info *tp)
5574 {
5575 return ((tp->control.step_range_end
5576 && tp->control.step_resume_breakpoint == NULL)
5577 || tp->control.trap_expected
5578 || bpstat_should_step ());
5579 }
5580
5581 /* Inferior has stepped into a subroutine call with source code that
5582 we should not step over. Step to the first line of code in
5583 it. */
5584
5585 static void
5586 handle_step_into_function (struct gdbarch *gdbarch,
5587 struct execution_control_state *ecs)
5588 {
5589 struct symtab *s;
5590 struct symtab_and_line stop_func_sal, sr_sal;
5591
5592 fill_in_stop_func (gdbarch, ecs);
5593
5594 s = find_pc_symtab (stop_pc);
5595 if (s && s->language != language_asm)
5596 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5597 ecs->stop_func_start);
5598
5599 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
5600 /* Use the step_resume_break to step until the end of the prologue,
5601 even if that involves jumps (as it seems to on the vax under
5602 4.2). */
5603 /* If the prologue ends in the middle of a source line, continue to
5604 the end of that source line (if it is still within the function).
5605 Otherwise, just go to end of prologue. */
5606 if (stop_func_sal.end
5607 && stop_func_sal.pc != ecs->stop_func_start
5608 && stop_func_sal.end < ecs->stop_func_end)
5609 ecs->stop_func_start = stop_func_sal.end;
5610
5611 /* Architectures which require breakpoint adjustment might not be able
5612 to place a breakpoint at the computed address. If so, the test
5613 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
5614 ecs->stop_func_start to an address at which a breakpoint may be
5615 legitimately placed.
5616
5617 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
5618 made, GDB will enter an infinite loop when stepping through
5619 optimized code consisting of VLIW instructions which contain
5620 subinstructions corresponding to different source lines. On
5621 FR-V, it's not permitted to place a breakpoint on any but the
5622 first subinstruction of a VLIW instruction. When a breakpoint is
5623 set, GDB will adjust the breakpoint address to the beginning of
5624 the VLIW instruction. Thus, we need to make the corresponding
5625 adjustment here when computing the stop address. */
5626
5627 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
5628 {
5629 ecs->stop_func_start
5630 = gdbarch_adjust_breakpoint_address (gdbarch,
5631 ecs->stop_func_start);
5632 }
5633
5634 if (ecs->stop_func_start == stop_pc)
5635 {
5636 /* We are already there: stop now. */
5637 end_stepping_range (ecs);
5638 return;
5639 }
5640 else
5641 {
5642 /* Put the step-breakpoint there and go until there. */
5643 init_sal (&sr_sal); /* initialize to zeroes */
5644 sr_sal.pc = ecs->stop_func_start;
5645 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
5646 sr_sal.pspace = get_frame_program_space (get_current_frame ());
5647
5648 /* Do not specify what the fp should be when we stop since on
5649 some machines the prologue is where the new fp value is
5650 established. */
5651 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
5652
5653 /* And make sure stepping stops right away then. */
5654 ecs->event_thread->control.step_range_end
5655 = ecs->event_thread->control.step_range_start;
5656 }
5657 keep_going (ecs);
5658 }
5659
5660 /* Inferior has stepped backward into a subroutine call with source
5661 code that we should not step over. Step to the beginning of the
5662 last line of code in it. */
5663
5664 static void
5665 handle_step_into_function_backward (struct gdbarch *gdbarch,
5666 struct execution_control_state *ecs)
5667 {
5668 struct symtab *s;
5669 struct symtab_and_line stop_func_sal;
5670
5671 fill_in_stop_func (gdbarch, ecs);
5672
5673 s = find_pc_symtab (stop_pc);
5674 if (s && s->language != language_asm)
5675 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5676 ecs->stop_func_start);
5677
5678 stop_func_sal = find_pc_line (stop_pc, 0);
5679
5680 /* OK, we're just going to keep stepping here. */
5681 if (stop_func_sal.pc == stop_pc)
5682 {
5683 /* We're there already. Just stop stepping now. */
5684 end_stepping_range (ecs);
5685 }
5686 else
5687 {
5688 /* Else just reset the step range and keep going.
5689 No step-resume breakpoint; they don't work for
5690 epilogues, which can have multiple entry paths. */
5691 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5692 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5693 keep_going (ecs);
5694 }
5695 return;
5696 }
5697
5698 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5699 This is used both to skip over functions and to skip over other code. */
5700
5701 static void
5702 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5703 struct symtab_and_line sr_sal,
5704 struct frame_id sr_id,
5705 enum bptype sr_type)
5706 {
5707 /* There should never be more than one step-resume or longjmp-resume
5708 breakpoint per thread, so we should never be setting a new
5709 step_resume_breakpoint when one is already active. */
5710 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5711 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5712
5713 if (debug_infrun)
5714 fprintf_unfiltered (gdb_stdlog,
5715 "infrun: inserting step-resume breakpoint at %s\n",
5716 paddress (gdbarch, sr_sal.pc));
5717
5718 inferior_thread ()->control.step_resume_breakpoint
5719 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5720 }
5721
5722 void
5723 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5724 struct symtab_and_line sr_sal,
5725 struct frame_id sr_id)
5726 {
5727 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5728 sr_sal, sr_id,
5729 bp_step_resume);
5730 }
5731
5732 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5733 This is used to skip a potential signal handler.
5734
5735 This is called with the interrupted function's frame. The signal
5736 handler, when it returns, will resume the interrupted function at
5737 RETURN_FRAME.pc. */
5738
5739 static void
5740 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5741 {
5742 struct symtab_and_line sr_sal;
5743 struct gdbarch *gdbarch;
5744
5745 gdb_assert (return_frame != NULL);
5746 init_sal (&sr_sal); /* initialize to zeros */
5747
5748 gdbarch = get_frame_arch (return_frame);
5749 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5750 sr_sal.section = find_pc_overlay (sr_sal.pc);
5751 sr_sal.pspace = get_frame_program_space (return_frame);
5752
5753 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
5754 get_stack_frame_id (return_frame),
5755 bp_hp_step_resume);
5756 }
5757
5758 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
5759 is used to skip a function after stepping into it (for "next" or if
5760 the called function has no debugging information).
5761
5762 The current function has almost always been reached by single
5763 stepping a call or return instruction. NEXT_FRAME belongs to the
5764 current function, and the breakpoint will be set at the caller's
5765 resume address.
5766
5767 This is a separate function rather than reusing
5768 insert_hp_step_resume_breakpoint_at_frame in order to avoid
5769 get_prev_frame, which may stop prematurely (see the implementation
5770 of frame_unwind_caller_id for an example). */
5771
5772 static void
5773 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5774 {
5775 struct symtab_and_line sr_sal;
5776 struct gdbarch *gdbarch;
5777
5778 /* We shouldn't have gotten here if we don't know where the call site
5779 is. */
5780 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5781
5782 init_sal (&sr_sal); /* initialize to zeros */
5783
5784 gdbarch = frame_unwind_caller_arch (next_frame);
5785 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5786 frame_unwind_caller_pc (next_frame));
5787 sr_sal.section = find_pc_overlay (sr_sal.pc);
5788 sr_sal.pspace = frame_unwind_program_space (next_frame);
5789
5790 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5791 frame_unwind_caller_id (next_frame));
5792 }
5793
5794 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5795 new breakpoint at the target of a jmp_buf. The handling of
5796 longjmp-resume uses the same mechanisms used for handling
5797 "step-resume" breakpoints. */
5798
5799 static void
5800 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5801 {
5802 /* There should never be more than one longjmp-resume breakpoint per
5803 thread, so we should never be setting a new
5804 longjmp_resume_breakpoint when one is already active. */
5805 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
5806
5807 if (debug_infrun)
5808 fprintf_unfiltered (gdb_stdlog,
5809 "infrun: inserting longjmp-resume breakpoint at %s\n",
5810 paddress (gdbarch, pc));
5811
5812 inferior_thread ()->control.exception_resume_breakpoint =
5813 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5814 }
5815
5816 /* Insert an exception resume breakpoint. TP is the thread throwing
5817 the exception. The block B is the block of the unwinder debug hook
5818 function. FRAME is the frame corresponding to the call to this
5819 function. SYM is the symbol of the function argument holding the
5820 target PC of the exception. */
5821
5822 static void
5823 insert_exception_resume_breakpoint (struct thread_info *tp,
5824 const struct block *b,
5825 struct frame_info *frame,
5826 struct symbol *sym)
5827 {
5828 volatile struct gdb_exception e;
5829
5830 /* We want to ignore errors here. */
5831 TRY_CATCH (e, RETURN_MASK_ERROR)
5832 {
5833 struct symbol *vsym;
5834 struct value *value;
5835 CORE_ADDR handler;
5836 struct breakpoint *bp;
5837
5838 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
5839 value = read_var_value (vsym, frame);
5840 /* If the value was optimized out, revert to the old behavior. */
5841 if (! value_optimized_out (value))
5842 {
5843 handler = value_as_address (value);
5844
5845 if (debug_infrun)
5846 fprintf_unfiltered (gdb_stdlog,
5847 "infrun: exception resume at %lx\n",
5848 (unsigned long) handler);
5849
5850 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5851 handler, bp_exception_resume);
5852
5853 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
5854 frame = NULL;
5855
5856 bp->thread = tp->num;
5857 inferior_thread ()->control.exception_resume_breakpoint = bp;
5858 }
5859 }
5860 }
5861
5862 /* A helper for check_exception_resume that sets an
5863 exception-breakpoint based on a SystemTap probe. */
5864
5865 static void
5866 insert_exception_resume_from_probe (struct thread_info *tp,
5867 const struct bound_probe *probe,
5868 struct frame_info *frame)
5869 {
5870 struct value *arg_value;
5871 CORE_ADDR handler;
5872 struct breakpoint *bp;
5873
5874 arg_value = probe_safe_evaluate_at_pc (frame, 1);
5875 if (!arg_value)
5876 return;
5877
5878 handler = value_as_address (arg_value);
5879
5880 if (debug_infrun)
5881 fprintf_unfiltered (gdb_stdlog,
5882 "infrun: exception resume at %s\n",
5883 paddress (get_objfile_arch (probe->objfile),
5884 handler));
5885
5886 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5887 handler, bp_exception_resume);
5888 bp->thread = tp->num;
5889 inferior_thread ()->control.exception_resume_breakpoint = bp;
5890 }
5891
5892 /* This is called when an exception has been intercepted. Check to
5893 see whether the exception's destination is of interest, and if so,
5894 set an exception resume breakpoint there. */
5895
5896 static void
5897 check_exception_resume (struct execution_control_state *ecs,
5898 struct frame_info *frame)
5899 {
5900 volatile struct gdb_exception e;
5901 struct bound_probe probe;
5902 struct symbol *func;
5903
5904 /* First see if this exception unwinding breakpoint was set via a
5905 SystemTap probe point. If so, the probe has two arguments: the
5906 CFA and the HANDLER. We ignore the CFA, extract the handler, and
5907 set a breakpoint there. */
5908 probe = find_probe_by_pc (get_frame_pc (frame));
5909 if (probe.probe)
5910 {
5911 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
5912 return;
5913 }
5914
5915 func = get_frame_function (frame);
5916 if (!func)
5917 return;
5918
5919 TRY_CATCH (e, RETURN_MASK_ERROR)
5920 {
5921 const struct block *b;
5922 struct block_iterator iter;
5923 struct symbol *sym;
5924 int argno = 0;
5925
5926 /* The exception breakpoint is a thread-specific breakpoint on
5927 the unwinder's debug hook, declared as:
5928
5929 void _Unwind_DebugHook (void *cfa, void *handler);
5930
5931 The CFA argument indicates the frame to which control is
5932 about to be transferred. HANDLER is the destination PC.
5933
5934 We ignore the CFA and set a temporary breakpoint at HANDLER.
5935 This is not extremely efficient but it avoids issues in gdb
5936 with computing the DWARF CFA, and it also works even in weird
5937 cases such as throwing an exception from inside a signal
5938 handler. */
5939
5940 b = SYMBOL_BLOCK_VALUE (func);
5941 ALL_BLOCK_SYMBOLS (b, iter, sym)
5942 {
5943 if (!SYMBOL_IS_ARGUMENT (sym))
5944 continue;
5945
5946 if (argno == 0)
5947 ++argno;
5948 else
5949 {
5950 insert_exception_resume_breakpoint (ecs->event_thread,
5951 b, frame, sym);
5952 break;
5953 }
5954 }
5955 }
5956 }
5957
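/* For context, a sketch of the runtime-side hook that the logic above
   keys off of (modeled on libgcc's unwinder, simplified; not the actual
   implementation).  The body is deliberately empty -- the function
   exists only so that a debugger can plant a breakpoint on it:

       void
       _Unwind_DebugHook (void *cfa, void *handler)
       {
         asm ("");
       }

   The unwinder calls it with the destination frame's CFA and the
   handler PC just before transferring control, which is what gives the
   breakpoints set above a chance to fire.  */
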
5958 static void
5959 stop_waiting (struct execution_control_state *ecs)
5960 {
5961 if (debug_infrun)
5962 fprintf_unfiltered (gdb_stdlog, "infrun: stop_waiting\n");
5963
5964 clear_step_over_info ();
5965
5966 /* Let callers know we don't want to wait for the inferior anymore. */
5967 ecs->wait_some_more = 0;
5968 }
5969
5970 /* Called when we should continue running the inferior, because the
5971 current event doesn't cause a user visible stop. This does the
5972 resuming part; waiting for the next event is done elsewhere. */
5973
5974 static void
5975 keep_going (struct execution_control_state *ecs)
5976 {
5977 /* Make sure normal_stop is called if we get a QUIT handled before
5978 reaching resume. */
5979 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5980
5981 /* Save the pc before execution, to compare with pc after stop. */
5982 ecs->event_thread->prev_pc
5983 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5984
5985 if (ecs->event_thread->control.trap_expected
5986 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5987 {
5988 /* We haven't yet gotten our trap, and either: intercepted a
5989 non-signal event (e.g., a fork); or took a signal which we
5990 are supposed to pass through to the inferior. Simply
5991 continue. */
5992 discard_cleanups (old_cleanups);
5993 resume (currently_stepping (ecs->event_thread),
5994 ecs->event_thread->suspend.stop_signal);
5995 }
5996 else
5997 {
5998 volatile struct gdb_exception e;
5999 struct regcache *regcache = get_current_regcache ();
6000 int remove_bp;
6001 int remove_wps;
6002
6003 /* Either the trap was not expected, but we are continuing
6004 anyway (if we got a signal, the user asked it be passed to
6005 the child)
6006 -- or --
6007 We got our expected trap, but decided we should resume from
6008 it.
6009
6010 We're going to run this baby now!
6011
6012 Note that insert_breakpoints won't try to re-insert
6013 already inserted breakpoints. Therefore, we don't
6014 care if breakpoints were already inserted, or not. */
6015
6016 /* If we need to step over a breakpoint, and we're not using
6017 displaced stepping to do so, insert all breakpoints
6018 (watchpoints, etc.) but the one we're stepping over, step one
6019 instruction, and then re-insert the breakpoint when that step
6020 is finished. */
6021
6022 remove_bp = (ecs->hit_singlestep_breakpoint
6023 || thread_still_needs_step_over (ecs->event_thread));
6024 remove_wps = (ecs->event_thread->stepping_over_watchpoint
6025 && !target_have_steppable_watchpoint);
6026
6027 if (remove_bp && !use_displaced_stepping (get_regcache_arch (regcache)))
6028 {
6029 set_step_over_info (get_regcache_aspace (regcache),
6030 regcache_read_pc (regcache), remove_wps);
6031 }
6032 else if (remove_wps)
6033 set_step_over_info (NULL, 0, remove_wps);
6034 else
6035 clear_step_over_info ();
6036
6037 /* Stop stepping if inserting breakpoints fails. */
6038 TRY_CATCH (e, RETURN_MASK_ERROR)
6039 {
6040 insert_breakpoints ();
6041 }
6042 if (e.reason < 0)
6043 {
6044 exception_print (gdb_stderr, e);
6045 stop_waiting (ecs);
6046 return;
6047 }
6048
6049 ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);
6050
6051 /* Do not deliver GDB_SIGNAL_TRAP (except when the user
6052 explicitly specifies that such a signal should be delivered
6053 to the target program). Typically, that would occur when a
6054 user is debugging a target monitor on a simulator: the target
6055 monitor sets a breakpoint; the simulator encounters this
6056 breakpoint and halts the simulation handing control to GDB;
6057 GDB, noting that the stop address doesn't map to any known
6058 breakpoint, returns control back to the simulator; the
6059 simulator then delivers the hardware equivalent of a
6060 GDB_SIGNAL_TRAP to the program being debugged. */
6061 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
6062 && !signal_program[ecs->event_thread->suspend.stop_signal])
6063 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
6064
6065 discard_cleanups (old_cleanups);
6066 resume (currently_stepping (ecs->event_thread),
6067 ecs->event_thread->suspend.stop_signal);
6068 }
6069
6070 prepare_to_wait (ecs);
6071 }
6072
6073 /* This function normally comes after a resume, before
6074 handle_inferior_event exits. It takes care of any last bits of
6075 housekeeping, and sets the all-important wait_some_more flag. */
6076
6077 static void
6078 prepare_to_wait (struct execution_control_state *ecs)
6079 {
6080 if (debug_infrun)
6081 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
6082
6083 /* This is the old end of the while loop. Let everybody know we
6084 want to wait for the inferior some more and get called again
6085 soon. */
6086 ecs->wait_some_more = 1;
6087 }
6088
6089 /* We are done with the step range of a step/next/si/ni command.
6090 Called once for each n of a "step n" operation. */
6091
6092 static void
6093 end_stepping_range (struct execution_control_state *ecs)
6094 {
6095 ecs->event_thread->control.stop_step = 1;
6096 stop_waiting (ecs);
6097 }
6098
6099 /* Several print_*_reason functions to print why the inferior has stopped.
6100 We always print something when the inferior exits, or receives a signal.
6101 The rest of the cases are dealt with later on in normal_stop and
6102 print_it_typical. Ideally there should be a call to one of these
6103 print_*_reason functions from handle_inferior_event each time
6104 stop_waiting is called.
6105
6106 Note that we don't call these directly; instead we delegate that to
6107 the interpreters, through observers. Interpreters then call these
6108 with whatever uiout is right. */
6109
6110 void
6111 print_end_stepping_range_reason (struct ui_out *uiout)
6112 {
6113 /* For CLI-like interpreters, print nothing. */
6114
6115 if (ui_out_is_mi_like_p (uiout))
6116 {
6117 ui_out_field_string (uiout, "reason",
6118 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
6119 }
6120 }
6121
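/* For illustration (field layout abbreviated; the exact fields vary):
   an MI front end consuming the reason emitted above sees an async
   record along the lines of

       *stopped,reason="end-stepping-range",thread-id="1",frame={...}

   while CLI-like interpreters print nothing here and rely on the
   normal stop printout instead.  */
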
6122 void
6123 print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
6124 {
6125 annotate_signalled ();
6126 if (ui_out_is_mi_like_p (uiout))
6127 ui_out_field_string
6128 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
6129 ui_out_text (uiout, "\nProgram terminated with signal ");
6130 annotate_signal_name ();
6131 ui_out_field_string (uiout, "signal-name",
6132 gdb_signal_to_name (siggnal));
6133 annotate_signal_name_end ();
6134 ui_out_text (uiout, ", ");
6135 annotate_signal_string ();
6136 ui_out_field_string (uiout, "signal-meaning",
6137 gdb_signal_to_string (siggnal));
6138 annotate_signal_string_end ();
6139 ui_out_text (uiout, ".\n");
6140 ui_out_text (uiout, "The program no longer exists.\n");
6141 }
6142
6143 void
6144 print_exited_reason (struct ui_out *uiout, int exitstatus)
6145 {
6146 struct inferior *inf = current_inferior ();
6147 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
6148
6149 annotate_exited (exitstatus);
6150 if (exitstatus)
6151 {
6152 if (ui_out_is_mi_like_p (uiout))
6153 ui_out_field_string (uiout, "reason",
6154 async_reason_lookup (EXEC_ASYNC_EXITED));
6155 ui_out_text (uiout, "[Inferior ");
6156 ui_out_text (uiout, plongest (inf->num));
6157 ui_out_text (uiout, " (");
6158 ui_out_text (uiout, pidstr);
6159 ui_out_text (uiout, ") exited with code ");
6160 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
6161 ui_out_text (uiout, "]\n");
6162 }
6163 else
6164 {
6165 if (ui_out_is_mi_like_p (uiout))
6166 ui_out_field_string
6167 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
6168 ui_out_text (uiout, "[Inferior ");
6169 ui_out_text (uiout, plongest (inf->num));
6170 ui_out_text (uiout, " (");
6171 ui_out_text (uiout, pidstr);
6172 ui_out_text (uiout, ") exited normally]\n");
6173 }
6174 }
6175
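/* For illustration (the inferior number and pid are made up), the CLI
   text built above looks like

       [Inferior 1 (process 4242) exited with code 02]
       [Inferior 1 (process 4242) exited normally]

   Note that a non-zero exit status is printed in octal ("0%o"); MI
   consumers additionally get reason="exited" (with an "exit-code"
   field) or reason="exited-normally".  */
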
6176 void
6177 print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
6178 {
6179 annotate_signal ();
6180
6181 if (siggnal == GDB_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
6182 {
6183 struct thread_info *t = inferior_thread ();
6184
6185 ui_out_text (uiout, "\n[");
6186 ui_out_field_string (uiout, "thread-name",
6187 target_pid_to_str (t->ptid));
6188 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
6189 ui_out_text (uiout, " stopped");
6190 }
6191 else
6192 {
6193 ui_out_text (uiout, "\nProgram received signal ");
6194 annotate_signal_name ();
6195 if (ui_out_is_mi_like_p (uiout))
6196 ui_out_field_string
6197 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
6198 ui_out_field_string (uiout, "signal-name",
6199 gdb_signal_to_name (siggnal));
6200 annotate_signal_name_end ();
6201 ui_out_text (uiout, ", ");
6202 annotate_signal_string ();
6203 ui_out_field_string (uiout, "signal-meaning",
6204 gdb_signal_to_string (siggnal));
6205 annotate_signal_string_end ();
6206 }
6207 ui_out_text (uiout, ".\n");
6208 }
6209
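/* For illustration: for a real signal the code above produces, e.g.,

       Program received signal SIGINT, Interrupt.

   whereas for GDB_SIGNAL_0 on a CLI-like interpreter it instead prints
   a short "[<thread>] #<n> stopped" message naming the thread that
   stopped.  */
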
6210 void
6211 print_no_history_reason (struct ui_out *uiout)
6212 {
6213 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
6214 }
6215
6216 /* Print current location without a level number, if we have changed
6217 functions or hit a breakpoint. Print source line if we have one.
6218 bpstat_print contains the logic deciding in detail what to print,
6219 based on the event(s) that just occurred. */
6220
6221 void
6222 print_stop_event (struct target_waitstatus *ws)
6223 {
6224 int bpstat_ret;
6225 int source_flag;
6226 int do_frame_printing = 1;
6227 struct thread_info *tp = inferior_thread ();
6228
6229 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
6230 switch (bpstat_ret)
6231 {
6232 case PRINT_UNKNOWN:
6233 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
6234 should) carry around the function and does (or should) use
6235 that when doing a frame comparison. */
6236 if (tp->control.stop_step
6237 && frame_id_eq (tp->control.step_frame_id,
6238 get_frame_id (get_current_frame ()))
6239 && step_start_function == find_pc_function (stop_pc))
6240 {
6241 /* Finished step, just print source line. */
6242 source_flag = SRC_LINE;
6243 }
6244 else
6245 {
6246 /* Print location and source line. */
6247 source_flag = SRC_AND_LOC;
6248 }
6249 break;
6250 case PRINT_SRC_AND_LOC:
6251 /* Print location and source line. */
6252 source_flag = SRC_AND_LOC;
6253 break;
6254 case PRINT_SRC_ONLY:
6255 source_flag = SRC_LINE;
6256 break;
6257 case PRINT_NOTHING:
6258 /* Something bogus. */
6259 source_flag = SRC_LINE;
6260 do_frame_printing = 0;
6261 break;
6262 default:
6263 internal_error (__FILE__, __LINE__, _("Unknown value."));
6264 }
6265
6266 /* The behavior of this routine with respect to the source
6267 flag is:
6268 SRC_LINE: Print only source line
6269 LOCATION: Print only location
6270 SRC_AND_LOC: Print location and source line. */
6271 if (do_frame_printing)
6272 print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
6273
6274 /* Display the auto-display expressions. */
6275 do_displays ();
6276 }
6277
6278 /* Here to return control to GDB when the inferior stops for real.
6279 Print appropriate messages, remove breakpoints, give terminal our modes.
6280
6281 STOP_PRINT_FRAME nonzero means print the executing frame
6282 (pc, function, args, file, line number and line text).
6283 BREAKPOINTS_FAILED nonzero means stop was due to error
6284 attempting to insert breakpoints. */
6285
6286 void
6287 normal_stop (void)
6288 {
6289 struct target_waitstatus last;
6290 ptid_t last_ptid;
6291 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
6292
6293 get_last_target_status (&last_ptid, &last);
6294
6295 /* If an exception is thrown from this point on, make sure to
6296 propagate GDB's knowledge of the executing state to the
6297 frontend/user running state. A QUIT is an easy exception to see
6298 here, so do this before any filtered output. */
6299 if (!non_stop)
6300 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
6301 else if (last.kind != TARGET_WAITKIND_SIGNALLED
6302 && last.kind != TARGET_WAITKIND_EXITED
6303 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6304 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
6305
6306 /* As we're presenting a stop, and potentially removing breakpoints,
6307 update the thread list so we can tell whether there are threads
6308 running on the target. With target remote, for example, we can
6309 only learn about new threads when we explicitly update the thread
6310 list. Do this before notifying the interpreters about signal
6311 stops, end of stepping ranges, etc., so that the "new thread"
6312 output is emitted before e.g., "Program received signal FOO",
6313 instead of after. */
6314 update_thread_list ();
6315
6316 if (last.kind == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
6317 observer_notify_signal_received (inferior_thread ()->suspend.stop_signal);
6318
6319 /* As with the notification of thread events, we want to delay
6320 notifying the user that we've switched thread context until
6321 the inferior actually stops.
6322
6323 There's no point in saying anything if the inferior has exited.
6324 Note that SIGNALLED here means "exited with a signal", not
6325 "received a signal".
6326
6327 Also skip saying anything in non-stop mode. In that mode, as we
6328 don't want GDB to switch threads behind the user's back, to avoid
6329 races where the user is typing a command to apply to thread x,
6330 but GDB switches to thread y before the user finishes entering
6331 the command, fetch_inferior_event installs a cleanup to restore
6332 the current thread back to the thread the user had selected right
6333 after this event is handled, so we're not really switching, only
6334 informing of a stop. */
6335 if (!non_stop
6336 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
6337 && target_has_execution
6338 && last.kind != TARGET_WAITKIND_SIGNALLED
6339 && last.kind != TARGET_WAITKIND_EXITED
6340 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6341 {
6342 target_terminal_ours_for_output ();
6343 printf_filtered (_("[Switching to %s]\n"),
6344 target_pid_to_str (inferior_ptid));
6345 annotate_thread_changed ();
6346 previous_inferior_ptid = inferior_ptid;
6347 }
6348
6349 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
6350 {
6351 gdb_assert (sync_execution || !target_can_async_p ());
6352
6353 target_terminal_ours_for_output ();
6354 printf_filtered (_("No unwaited-for children left.\n"));
6355 }
6356
6357 /* Note: this depends on the update_thread_list call above. */
6358 if (!breakpoints_should_be_inserted_now () && target_has_execution)
6359 {
6360 if (remove_breakpoints ())
6361 {
6362 target_terminal_ours_for_output ();
6363 printf_filtered (_("Cannot remove breakpoints because "
6364 "program is no longer writable.\nFurther "
6365 "execution is probably impossible.\n"));
6366 }
6367 }
6368
6369 /* If an auto-display called a function and that got a signal,
6370 delete that auto-display to avoid an infinite recursion. */
6371
6372 if (stopped_by_random_signal)
6373 disable_current_display ();
6374
6375 /* Notify observers if we finished a "step"-like command, etc. */
6376 if (target_has_execution
6377 && last.kind != TARGET_WAITKIND_SIGNALLED
6378 && last.kind != TARGET_WAITKIND_EXITED
6379 && inferior_thread ()->control.stop_step)
6380 {
6381 /* But not if in the middle of doing a "step n" operation for
6382 n > 1 */
6383 if (inferior_thread ()->step_multi)
6384 goto done;
6385
6386 observer_notify_end_stepping_range ();
6387 }
6388
6389 target_terminal_ours ();
6390 async_enable_stdin ();
6391
6392 /* Set the current source location. This will also happen if we
6393 display the frame below, but the current SAL will be incorrect
6394 during a user hook-stop function. */
6395 if (has_stack_frames () && !stop_stack_dummy)
6396 set_current_sal_from_frame (get_current_frame ());
6397
6398 /* Let the user/frontend see the threads as stopped, but do nothing
6399 if the thread was running an infcall. We may be e.g., evaluating
6400 a breakpoint condition. In that case, the thread had state
6401 THREAD_RUNNING before the infcall, and shall remain set to
6402 running, all without informing the user/frontend about state
6403 transition changes. If this is actually a call command, then the
6404 thread was originally already stopped, so there's no state to
6405 finish either. */
6406 if (target_has_execution && inferior_thread ()->control.in_infcall)
6407 discard_cleanups (old_chain);
6408 else
6409 do_cleanups (old_chain);
6410
6411 /* Look up the hook_stop and run it (CLI internally handles problem
6412 of stop_command's pre-hook not existing). */
6413 if (stop_command)
6414 catch_errors (hook_stop_stub, stop_command,
6415 "Error while running hook_stop:\n", RETURN_MASK_ALL);
6416
6417 if (!has_stack_frames ())
6418 goto done;
6419
6420 if (last.kind == TARGET_WAITKIND_SIGNALLED
6421 || last.kind == TARGET_WAITKIND_EXITED)
6422 goto done;
6423
6424 /* Select innermost stack frame - i.e., current frame is frame 0,
6425 and current location is based on that.
6426 Don't do this on return from a stack dummy routine,
6427 or if the program has exited. */
6428
6429 if (!stop_stack_dummy)
6430 {
6431 select_frame (get_current_frame ());
6432
6433 /* If --batch-silent is enabled then there's no need to print the current
6434 source location, and trying to do so risks causing an error message about
6435 missing source files. */
6436 if (stop_print_frame && !batch_silent)
6437 print_stop_event (&last);
6438 }
6439
6440 /* Save the function value return registers, if we care.
6441 We might be about to restore their previous contents. */
6442 if (inferior_thread ()->control.proceed_to_finish
6443 && execution_direction != EXEC_REVERSE)
6444 {
6445 /* This should not be necessary. */
6446 if (stop_registers)
6447 regcache_xfree (stop_registers);
6448
6449 /* NB: The copy goes through to the target picking up the value of
6450 all the registers. */
6451 stop_registers = regcache_dup (get_current_regcache ());
6452 }
6453
6454 if (stop_stack_dummy == STOP_STACK_DUMMY)
6455 {
6456 /* Pop the empty frame that contains the stack dummy.
6457 This also restores inferior state prior to the call
6458 (struct infcall_suspend_state). */
6459 struct frame_info *frame = get_current_frame ();
6460
6461 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
6462 frame_pop (frame);
6463 /* frame_pop() calls reinit_frame_cache as the last thing it
6464 does which means there's currently no selected frame. We
6465 don't need to re-establish a selected frame if the dummy call
6466 returns normally, that will be done by
6467 restore_infcall_control_state. However, we do have to handle
6468 the case where the dummy call is returning after being
6469 stopped (e.g. the dummy call previously hit a breakpoint).
6470 We can't know which case we have so just always re-establish
6471 a selected frame here. */
6472 select_frame (get_current_frame ());
6473 }
6474
6475 done:
6476 annotate_stopped ();
6477
6478 /* Suppress the stop observer if we're in the middle of:
6479
6480 - a step n (n > 1), as there are still more steps to be done.
6481
6482 - a "finish" command, as the observer will be called in
6483 finish_command_continuation, so it can include the inferior
6484 function's return value.
6485
6486 - calling an inferior function, as we pretend the inferior didn't
6487 run at all. The return value of the call is handled by the
6488 expression evaluator, through call_function_by_hand. */
6489
6490 if (!target_has_execution
6491 || last.kind == TARGET_WAITKIND_SIGNALLED
6492 || last.kind == TARGET_WAITKIND_EXITED
6493 || last.kind == TARGET_WAITKIND_NO_RESUMED
6494 || (!(inferior_thread ()->step_multi
6495 && inferior_thread ()->control.stop_step)
6496 && !(inferior_thread ()->control.stop_bpstat
6497 && inferior_thread ()->control.proceed_to_finish)
6498 && !inferior_thread ()->control.in_infcall))
6499 {
6500 if (!ptid_equal (inferior_ptid, null_ptid))
6501 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
6502 stop_print_frame);
6503 else
6504 observer_notify_normal_stop (NULL, stop_print_frame);
6505 }
6506
6507 if (target_has_execution)
6508 {
6509 if (last.kind != TARGET_WAITKIND_SIGNALLED
6510 && last.kind != TARGET_WAITKIND_EXITED)
6511 /* Delete the breakpoint we stopped at, if it wants to be deleted.
6512 Delete any breakpoint that is to be deleted at the next stop. */
6513 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
6514 }
6515
6516 /* Try to get rid of automatically added inferiors that are no
6517 longer needed. Keeping those around slows things down linearly.
6518 Note that this never removes the current inferior. */
6519 prune_inferiors ();
6520 }
6521
6522 static int
6523 hook_stop_stub (void *cmd)
6524 {
6525 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
6526 return (0);
6527 }
6528 \f
6529 int
6530 signal_stop_state (int signo)
6531 {
6532 return signal_stop[signo];
6533 }
6534
6535 int
6536 signal_print_state (int signo)
6537 {
6538 return signal_print[signo];
6539 }
6540
6541 int
6542 signal_pass_state (int signo)
6543 {
6544 return signal_program[signo];
6545 }
6546
6547 static void
6548 signal_cache_update (int signo)
6549 {
6550 if (signo == -1)
6551 {
6552 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
6553 signal_cache_update (signo);
6554
6555 return;
6556 }
6557
6558 signal_pass[signo] = (signal_stop[signo] == 0
6559 && signal_print[signo] == 0
6560 && signal_program[signo] == 1
6561 && signal_catch[signo] == 0);
6562 }
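/* Worked example (illustrative only, grounded in the logic above): after
   the user runs "handle SIGUSR1 nostop noprint pass", the per-signal
   entries are signal_stop == 0, signal_print == 0, signal_program == 1
   and, absent a "catch signal" catchpoint, signal_catch == 0.  The
   cached signal_pass[SIGUSR1] therefore becomes 1, meaning the target
   backend may deliver SIGUSR1 straight to the inferior without
   reporting the event to the core.  */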
6563
6564 int
6565 signal_stop_update (int signo, int state)
6566 {
6567 int ret = signal_stop[signo];
6568
6569 signal_stop[signo] = state;
6570 signal_cache_update (signo);
6571 return ret;
6572 }
6573
6574 int
6575 signal_print_update (int signo, int state)
6576 {
6577 int ret = signal_print[signo];
6578
6579 signal_print[signo] = state;
6580 signal_cache_update (signo);
6581 return ret;
6582 }
6583
6584 int
6585 signal_pass_update (int signo, int state)
6586 {
6587 int ret = signal_program[signo];
6588
6589 signal_program[signo] = state;
6590 signal_cache_update (signo);
6591 return ret;
6592 }
6593
6594 /* Update the global 'signal_catch' from INFO and notify the
6595 target. */
6596
6597 void
6598 signal_catch_update (const unsigned int *info)
6599 {
6600 int i;
6601
6602 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
6603 signal_catch[i] = info[i] > 0;
6604 signal_cache_update (-1);
6605 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6606 }
6607
6608 static void
6609 sig_print_header (void)
6610 {
6611 printf_filtered (_("Signal Stop\tPrint\tPass "
6612 "to program\tDescription\n"));
6613 }
6614
6615 static void
6616 sig_print_info (enum gdb_signal oursig)
6617 {
6618 const char *name = gdb_signal_to_name (oursig);
6619 int name_padding = 13 - strlen (name);
6620
6621 if (name_padding <= 0)
6622 name_padding = 0;
6623
6624 printf_filtered ("%s", name);
6625 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6626 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6627 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6628 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6629 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
6630 }
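/* With the default tables set up in _initialize_infrun, a row printed
   by the function above looks roughly like (tab-separated columns):

     SIGHUP        Yes	Yes	Yes		Hangup

   i.e. the signal name padded to 13 columns, then the Stop, Print and
   "Pass to program" flags, then the signal's description.  */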
6631
6632 /* Specify how various signals in the inferior should be handled. */
6633
6634 static void
6635 handle_command (char *args, int from_tty)
6636 {
6637 char **argv;
6638 int digits, wordlen;
6639 int sigfirst, signum, siglast;
6640 enum gdb_signal oursig;
6641 int allsigs;
6642 int nsigs;
6643 unsigned char *sigs;
6644 struct cleanup *old_chain;
6645
6646 if (args == NULL)
6647 {
6648 error_no_arg (_("signal to handle"));
6649 }
6650
6651 /* Allocate and zero an array of flags for which signals to handle. */
6652
6653 nsigs = (int) GDB_SIGNAL_LAST;
6654 sigs = (unsigned char *) alloca (nsigs);
6655 memset (sigs, 0, nsigs);
6656
6657 /* Break the command line up into args. */
6658
6659 argv = gdb_buildargv (args);
6660 old_chain = make_cleanup_freeargv (argv);
6661
6662 /* Walk through the args, looking for signal numbers, signal names, and
6663 actions. Signal numbers and signal names may be interspersed with
6664 actions, with the actions being performed for all signals cumulatively
6665 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
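/* For instance (illustrative only): "handle SIGUSR1 nostop noprint pass"
   updates a single signal, "handle SIGHUP SIGQUIT stop print" updates
   two signals with the same cumulative actions, and "handle 5-10 print"
   uses the numeric-range form parsed below.  */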
6666
6667 while (*argv != NULL)
6668 {
6669 wordlen = strlen (*argv);
6670 for (digits = 0; isdigit ((*argv)[digits]); digits++)
6671 {;
6672 }
6673 allsigs = 0;
6674 sigfirst = siglast = -1;
6675
6676 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
6677 {
6678 /* Apply action to all signals except those used by the
6679 debugger. Silently skip those. */
6680 allsigs = 1;
6681 sigfirst = 0;
6682 siglast = nsigs - 1;
6683 }
6684 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
6685 {
6686 SET_SIGS (nsigs, sigs, signal_stop);
6687 SET_SIGS (nsigs, sigs, signal_print);
6688 }
6689 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
6690 {
6691 UNSET_SIGS (nsigs, sigs, signal_program);
6692 }
6693 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
6694 {
6695 SET_SIGS (nsigs, sigs, signal_print);
6696 }
6697 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
6698 {
6699 SET_SIGS (nsigs, sigs, signal_program);
6700 }
6701 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
6702 {
6703 UNSET_SIGS (nsigs, sigs, signal_stop);
6704 }
6705 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
6706 {
6707 SET_SIGS (nsigs, sigs, signal_program);
6708 }
6709 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
6710 {
6711 UNSET_SIGS (nsigs, sigs, signal_print);
6712 UNSET_SIGS (nsigs, sigs, signal_stop);
6713 }
6714 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
6715 {
6716 UNSET_SIGS (nsigs, sigs, signal_program);
6717 }
6718 else if (digits > 0)
6719 {
6720 /* It is numeric. The numeric signal refers to our own
6721 internal signal numbering from target.h, not to host/target
6722 signal number. This is a feature; users really should be
6723 using symbolic names anyway, and the common ones like
6724 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
6725
6726 sigfirst = siglast = (int)
6727 gdb_signal_from_command (atoi (*argv));
6728 if ((*argv)[digits] == '-')
6729 {
6730 siglast = (int)
6731 gdb_signal_from_command (atoi ((*argv) + digits + 1));
6732 }
6733 if (sigfirst > siglast)
6734 {
6735 /* The range was given high-to-low; swap the bounds. */
6736 signum = sigfirst;
6737 sigfirst = siglast;
6738 siglast = signum;
6739 }
6740 }
6741 else
6742 {
6743 oursig = gdb_signal_from_name (*argv);
6744 if (oursig != GDB_SIGNAL_UNKNOWN)
6745 {
6746 sigfirst = siglast = (int) oursig;
6747 }
6748 else
6749 {
6750 /* Not a number and not a recognized flag word => complain. */
6751 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
6752 }
6753 }
6754
6755 /* If any signal numbers or symbol names were found, set flags for
6756 which signals to apply actions to. */
6757
6758 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
6759 {
6760 switch ((enum gdb_signal) signum)
6761 {
6762 case GDB_SIGNAL_TRAP:
6763 case GDB_SIGNAL_INT:
6764 if (!allsigs && !sigs[signum])
6765 {
6766 if (query (_("%s is used by the debugger.\n\
6767 Are you sure you want to change it? "),
6768 gdb_signal_to_name ((enum gdb_signal) signum)))
6769 {
6770 sigs[signum] = 1;
6771 }
6772 else
6773 {
6774 printf_unfiltered (_("Not confirmed, unchanged.\n"));
6775 gdb_flush (gdb_stdout);
6776 }
6777 }
6778 break;
6779 case GDB_SIGNAL_0:
6780 case GDB_SIGNAL_DEFAULT:
6781 case GDB_SIGNAL_UNKNOWN:
6782 /* Make sure that "all" doesn't print these. */
6783 break;
6784 default:
6785 sigs[signum] = 1;
6786 break;
6787 }
6788 }
6789
6790 argv++;
6791 }
6792
6793 for (signum = 0; signum < nsigs; signum++)
6794 if (sigs[signum])
6795 {
6796 signal_cache_update (-1);
6797 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6798 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
6799
6800 if (from_tty)
6801 {
6802 /* Show the results. */
6803 sig_print_header ();
6804 for (; signum < nsigs; signum++)
6805 if (sigs[signum])
6806 sig_print_info (signum);
6807 }
6808
6809 break;
6810 }
6811
6812 do_cleanups (old_chain);
6813 }
6814
6815 /* Complete the "handle" command. */
6816
6817 static VEC (char_ptr) *
6818 handle_completer (struct cmd_list_element *ignore,
6819 const char *text, const char *word)
6820 {
6821 VEC (char_ptr) *vec_signals, *vec_keywords, *return_val;
6822 static const char * const keywords[] =
6823 {
6824 "all",
6825 "stop",
6826 "ignore",
6827 "print",
6828 "pass",
6829 "nostop",
6830 "noignore",
6831 "noprint",
6832 "nopass",
6833 NULL,
6834 };
6835
6836 vec_signals = signal_completer (ignore, text, word);
6837 vec_keywords = complete_on_enum (keywords, word, word);
6838
6839 return_val = VEC_merge (char_ptr, vec_signals, vec_keywords);
6840 VEC_free (char_ptr, vec_signals);
6841 VEC_free (char_ptr, vec_keywords);
6842 return return_val;
6843 }
6844
6845 static void
6846 xdb_handle_command (char *args, int from_tty)
6847 {
6848 char **argv;
6849 struct cleanup *old_chain;
6850
6851 if (args == NULL)
6852 error_no_arg (_("xdb command"));
6853
6854 /* Break the command line up into args. */
6855
6856 argv = gdb_buildargv (args);
6857 old_chain = make_cleanup_freeargv (argv);
6858 if (argv[1] != (char *) NULL)
6859 {
6860 char *argBuf;
6861 int bufLen;
6862
6863 bufLen = strlen (argv[0]) + 20;
6864 argBuf = (char *) xmalloc (bufLen);
6865 if (argBuf)
6866 {
6867 int validFlag = 1;
6868 enum gdb_signal oursig;
6869
6870 oursig = gdb_signal_from_name (argv[0]);
6871 memset (argBuf, 0, bufLen);
6872 if (strcmp (argv[1], "Q") == 0)
6873 sprintf (argBuf, "%s %s", argv[0], "noprint");
6874 else
6875 {
6876 if (strcmp (argv[1], "s") == 0)
6877 {
6878 if (!signal_stop[oursig])
6879 sprintf (argBuf, "%s %s", argv[0], "stop");
6880 else
6881 sprintf (argBuf, "%s %s", argv[0], "nostop");
6882 }
6883 else if (strcmp (argv[1], "i") == 0)
6884 {
6885 if (!signal_program[oursig])
6886 sprintf (argBuf, "%s %s", argv[0], "pass");
6887 else
6888 sprintf (argBuf, "%s %s", argv[0], "nopass");
6889 }
6890 else if (strcmp (argv[1], "r") == 0)
6891 {
6892 if (!signal_print[oursig])
6893 sprintf (argBuf, "%s %s", argv[0], "print");
6894 else
6895 sprintf (argBuf, "%s %s", argv[0], "noprint");
6896 }
6897 else
6898 validFlag = 0;
6899 }
6900 if (validFlag)
6901 handle_command (argBuf, from_tty);
6902 else
6903 printf_filtered (_("Invalid signal handling flag.\n"));
6904 if (argBuf)
6905 xfree (argBuf);
6906 }
6907 }
6908 do_cleanups (old_chain);
6909 }
6910
6911 enum gdb_signal
6912 gdb_signal_from_command (int num)
6913 {
6914 if (num >= 1 && num <= 15)
6915 return (enum gdb_signal) num;
6916 error (_("Only signals 1-15 are valid as numeric signals.\n\
6917 Use \"info signals\" for a list of symbolic signals."));
6918 }
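/* Note (added for illustration): the numbers accepted above are GDB's
   internal gdb_signal values, which match the traditional POSIX
   numbering for the low signals.  So e.g. "handle 2 nostop" acts on
   GDB_SIGNAL_INT, not on whatever signal number 2 happens to mean on
   the current target.  */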
6919
6920 /* Print current contents of the tables set by the handle command.
6921 It is possible we should just be printing signals actually used
6922 by the current target (but for things to work right when switching
6923 targets, all signals should be in the signal tables). */
6924
6925 static void
6926 signals_info (char *signum_exp, int from_tty)
6927 {
6928 enum gdb_signal oursig;
6929
6930 sig_print_header ();
6931
6932 if (signum_exp)
6933 {
6934 /* First see if this is a symbol name. */
6935 oursig = gdb_signal_from_name (signum_exp);
6936 if (oursig == GDB_SIGNAL_UNKNOWN)
6937 {
6938 /* No, try numeric. */
6939 oursig =
6940 gdb_signal_from_command (parse_and_eval_long (signum_exp));
6941 }
6942 sig_print_info (oursig);
6943 return;
6944 }
6945
6946 printf_filtered ("\n");
6947 /* These ugly casts brought to you by the native VAX compiler. */
6948 for (oursig = GDB_SIGNAL_FIRST;
6949 (int) oursig < (int) GDB_SIGNAL_LAST;
6950 oursig = (enum gdb_signal) ((int) oursig + 1))
6951 {
6952 QUIT;
6953
6954 if (oursig != GDB_SIGNAL_UNKNOWN
6955 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
6956 sig_print_info (oursig);
6957 }
6958
6959 printf_filtered (_("\nUse the \"handle\" command "
6960 "to change these tables.\n"));
6961 }
6962
6963 /* Check if it makes sense to read $_siginfo from the current thread
6964 at this point. If not, throw an error. */
6965
6966 static void
6967 validate_siginfo_access (void)
6968 {
6969 /* No current inferior, no siginfo. */
6970 if (ptid_equal (inferior_ptid, null_ptid))
6971 error (_("No thread selected."));
6972
6973 /* Don't try to read from a dead thread. */
6974 if (is_exited (inferior_ptid))
6975 error (_("The current thread has terminated."));
6976
6977 /* ... or from a spinning thread. */
6978 if (is_running (inferior_ptid))
6979 error (_("Selected thread is running."));
6980 }
6981
6982 /* The $_siginfo convenience variable is a bit special. We don't know
6983 for sure the type of the value until we actually have a chance to
6984 fetch the data. The type can change depending on gdbarch, so it is
6985 also dependent on which thread you have selected. This is handled by:
6986
6987 1. making $_siginfo be an internalvar that creates a new value on
6988 access.
6989
6990 2. making the value of $_siginfo be an lval_computed value. */
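/* Usage example (a sketch, not specific to any one target): on a target
   whose gdbarch provides a siginfo type, e.g. GNU/Linux, the selected
   thread's pending signal information can be inspected or patched:

     (gdb) print $_siginfo.si_signo
     (gdb) print $_siginfo._sifields._sigfault.si_addr

   The exact member names depend on the gdbarch-provided type, so the
   paths above are only an illustration.  */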
6991
6992 /* This function implements the lval_computed support for reading a
6993 $_siginfo value. */
6994
6995 static void
6996 siginfo_value_read (struct value *v)
6997 {
6998 LONGEST transferred;
6999
7000 validate_siginfo_access ();
7001
7002 transferred =
7003 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
7004 NULL,
7005 value_contents_all_raw (v),
7006 value_offset (v),
7007 TYPE_LENGTH (value_type (v)));
7008
7009 if (transferred != TYPE_LENGTH (value_type (v)))
7010 error (_("Unable to read siginfo"));
7011 }
7012
7013 /* This function implements the lval_computed support for writing a
7014 $_siginfo value. */
7015
7016 static void
7017 siginfo_value_write (struct value *v, struct value *fromval)
7018 {
7019 LONGEST transferred;
7020
7021 validate_siginfo_access ();
7022
7023 transferred = target_write (&current_target,
7024 TARGET_OBJECT_SIGNAL_INFO,
7025 NULL,
7026 value_contents_all_raw (fromval),
7027 value_offset (v),
7028 TYPE_LENGTH (value_type (fromval)));
7029
7030 if (transferred != TYPE_LENGTH (value_type (fromval)))
7031 error (_("Unable to write siginfo"));
7032 }
7033
7034 static const struct lval_funcs siginfo_value_funcs =
7035 {
7036 siginfo_value_read,
7037 siginfo_value_write
7038 };
7039
7040 /* Return a new value with the correct type for the siginfo object of
7041 the current thread using architecture GDBARCH. Return a void value
7042 if there's no object available. */
7043
7044 static struct value *
7045 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
7046 void *ignore)
7047 {
7048 if (target_has_stack
7049 && !ptid_equal (inferior_ptid, null_ptid)
7050 && gdbarch_get_siginfo_type_p (gdbarch))
7051 {
7052 struct type *type = gdbarch_get_siginfo_type (gdbarch);
7053
7054 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
7055 }
7056
7057 return allocate_value (builtin_type (gdbarch)->builtin_void);
7058 }
7059
7060 \f
7061 /* infcall_suspend_state contains state about the program itself like its
7062 registers and any signal it received when it last stopped.
7063 This state must be restored regardless of how the inferior function call
7064 ends (either successfully, or after it hits a breakpoint or signal)
7065 if the program is to properly continue where it left off. */
7066
7067 struct infcall_suspend_state
7068 {
7069 struct thread_suspend_state thread_suspend;
7070 #if 0 /* Currently unused, and empty structures are not valid C. */
7071 struct inferior_suspend_state inferior_suspend;
7072 #endif
7073
7074 /* Other fields: */
7075 CORE_ADDR stop_pc;
7076 struct regcache *registers;
7077
7078 /* Format of SIGINFO_DATA or NULL if it is not present. */
7079 struct gdbarch *siginfo_gdbarch;
7080
7081 /* The inferior format depends on SIGINFO_GDBARCH and its length is
7082 TYPE_LENGTH (gdbarch_get_siginfo_type ()). For a different gdbarch the
7083 content would be invalid. */
7084 gdb_byte *siginfo_data;
7085 };
7086
7087 struct infcall_suspend_state *
7088 save_infcall_suspend_state (void)
7089 {
7090 struct infcall_suspend_state *inf_state;
7091 struct thread_info *tp = inferior_thread ();
7092 #if 0
7093 struct inferior *inf = current_inferior ();
7094 #endif
7095 struct regcache *regcache = get_current_regcache ();
7096 struct gdbarch *gdbarch = get_regcache_arch (regcache);
7097 gdb_byte *siginfo_data = NULL;
7098
7099 if (gdbarch_get_siginfo_type_p (gdbarch))
7100 {
7101 struct type *type = gdbarch_get_siginfo_type (gdbarch);
7102 size_t len = TYPE_LENGTH (type);
7103 struct cleanup *back_to;
7104
7105 siginfo_data = xmalloc (len);
7106 back_to = make_cleanup (xfree, siginfo_data);
7107
7108 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
7109 siginfo_data, 0, len) == len)
7110 discard_cleanups (back_to);
7111 else
7112 {
7113 /* Errors ignored. */
7114 do_cleanups (back_to);
7115 siginfo_data = NULL;
7116 }
7117 }
7118
7119 inf_state = XCNEW (struct infcall_suspend_state);
7120
7121 if (siginfo_data)
7122 {
7123 inf_state->siginfo_gdbarch = gdbarch;
7124 inf_state->siginfo_data = siginfo_data;
7125 }
7126
7127 inf_state->thread_suspend = tp->suspend;
7128 #if 0 /* Currently unused, and empty structures are not valid C. */
7129 inf_state->inferior_suspend = inf->suspend;
7130 #endif
7131
7132 /* run_inferior_call will not use the signal due to its `proceed' call with
7133 GDB_SIGNAL_0 anyway. */
7134 tp->suspend.stop_signal = GDB_SIGNAL_0;
7135
7136 inf_state->stop_pc = stop_pc;
7137
7138 inf_state->registers = regcache_dup (regcache);
7139
7140 return inf_state;
7141 }
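/* Minimal usage sketch (not a quote of any real caller): the save and
   restore functions are normally paired through the cleanup wrapper so
   the inferior is rolled back even on error:

     struct infcall_suspend_state *state = save_infcall_suspend_state ();
     struct cleanup *back_to
       = make_cleanup_restore_infcall_suspend_state (state);
     ... run the inferior function call ...
     do_cleanups (back_to);    -- restores registers, stop_pc, siginfo

   Callers that want to keep the post-call state instead would use
   discard_cleanups followed by discard_infcall_suspend_state.  */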
7142
7143 /* Restore inferior session state to INF_STATE. */
7144
7145 void
7146 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
7147 {
7148 struct thread_info *tp = inferior_thread ();
7149 #if 0
7150 struct inferior *inf = current_inferior ();
7151 #endif
7152 struct regcache *regcache = get_current_regcache ();
7153 struct gdbarch *gdbarch = get_regcache_arch (regcache);
7154
7155 tp->suspend = inf_state->thread_suspend;
7156 #if 0 /* Currently unused, and empty structures are not valid C. */
7157 inf->suspend = inf_state->inferior_suspend;
7158 #endif
7159
7160 stop_pc = inf_state->stop_pc;
7161
7162 if (inf_state->siginfo_gdbarch == gdbarch)
7163 {
7164 struct type *type = gdbarch_get_siginfo_type (gdbarch);
7165
7166 /* Errors ignored. */
7167 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
7168 inf_state->siginfo_data, 0, TYPE_LENGTH (type));
7169 }
7170
7171 /* The inferior can be gone if the user types "print exit(0)"
7172 (and perhaps other times). */
7173 if (target_has_execution)
7174 /* NB: The register write goes through to the target. */
7175 regcache_cpy (regcache, inf_state->registers);
7176
7177 discard_infcall_suspend_state (inf_state);
7178 }
7179
7180 static void
7181 do_restore_infcall_suspend_state_cleanup (void *state)
7182 {
7183 restore_infcall_suspend_state (state);
7184 }
7185
7186 struct cleanup *
7187 make_cleanup_restore_infcall_suspend_state
7188 (struct infcall_suspend_state *inf_state)
7189 {
7190 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
7191 }
7192
7193 void
7194 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
7195 {
7196 regcache_xfree (inf_state->registers);
7197 xfree (inf_state->siginfo_data);
7198 xfree (inf_state);
7199 }
7200
7201 struct regcache *
7202 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
7203 {
7204 return inf_state->registers;
7205 }
7206
7207 /* infcall_control_state contains state regarding gdb's control of the
7208 inferior itself like stepping control. It also contains session state like
7209 the user's currently selected frame. */
7210
7211 struct infcall_control_state
7212 {
7213 struct thread_control_state thread_control;
7214 struct inferior_control_state inferior_control;
7215
7216 /* Other fields: */
7217 enum stop_stack_kind stop_stack_dummy;
7218 int stopped_by_random_signal;
7219 int stop_after_trap;
7220
7221 /* ID of the selected frame when the inferior function call was made. */
7222 struct frame_id selected_frame_id;
7223 };
7224
7225 /* Save all of the information associated with the inferior<==>gdb
7226 connection. */
7227
7228 struct infcall_control_state *
7229 save_infcall_control_state (void)
7230 {
7231 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
7232 struct thread_info *tp = inferior_thread ();
7233 struct inferior *inf = current_inferior ();
7234
7235 inf_status->thread_control = tp->control;
7236 inf_status->inferior_control = inf->control;
7237
7238 tp->control.step_resume_breakpoint = NULL;
7239 tp->control.exception_resume_breakpoint = NULL;
7240
7241 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
7242 chain. If caller's caller is walking the chain, they'll be happier if we
7243 hand them back the original chain when restore_infcall_control_state is
7244 called. */
7245 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
7246
7247 /* Other fields: */
7248 inf_status->stop_stack_dummy = stop_stack_dummy;
7249 inf_status->stopped_by_random_signal = stopped_by_random_signal;
7250 inf_status->stop_after_trap = stop_after_trap;
7251
7252 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
7253
7254 return inf_status;
7255 }
7256
7257 static int
7258 restore_selected_frame (void *args)
7259 {
7260 struct frame_id *fid = (struct frame_id *) args;
7261 struct frame_info *frame;
7262
7263 frame = frame_find_by_id (*fid);
7264
7265 /* frame_find_by_id returns NULL if no frame matching *FID can be
7266 found, e.g. because the previously selected frame has been popped. */
7267 if (frame == NULL)
7268 {
7269 warning (_("Unable to restore previously selected frame."));
7270 return 0;
7271 }
7272
7273 select_frame (frame);
7274
7275 return (1);
7276 }
7277
7278 /* Restore inferior session state to INF_STATUS. */
7279
7280 void
7281 restore_infcall_control_state (struct infcall_control_state *inf_status)
7282 {
7283 struct thread_info *tp = inferior_thread ();
7284 struct inferior *inf = current_inferior ();
7285
7286 if (tp->control.step_resume_breakpoint)
7287 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
7288
7289 if (tp->control.exception_resume_breakpoint)
7290 tp->control.exception_resume_breakpoint->disposition
7291 = disp_del_at_next_stop;
7292
7293 /* Handle the bpstat_copy of the chain. */
7294 bpstat_clear (&tp->control.stop_bpstat);
7295
7296 tp->control = inf_status->thread_control;
7297 inf->control = inf_status->inferior_control;
7298
7299 /* Other fields: */
7300 stop_stack_dummy = inf_status->stop_stack_dummy;
7301 stopped_by_random_signal = inf_status->stopped_by_random_signal;
7302 stop_after_trap = inf_status->stop_after_trap;
7303
7304 if (target_has_stack)
7305 {
7306 /* The point of catch_errors is that if the stack is clobbered,
7307 walking the stack might encounter a garbage pointer and
7308 error() trying to dereference it. */
7309 if (catch_errors
7310 (restore_selected_frame, &inf_status->selected_frame_id,
7311 "Unable to restore previously selected frame:\n",
7312 RETURN_MASK_ERROR) == 0)
7313 /* Error in restoring the selected frame. Select the innermost
7314 frame. */
7315 select_frame (get_current_frame ());
7316 }
7317
7318 xfree (inf_status);
7319 }
7320
7321 static void
7322 do_restore_infcall_control_state_cleanup (void *sts)
7323 {
7324 restore_infcall_control_state (sts);
7325 }
7326
7327 struct cleanup *
7328 make_cleanup_restore_infcall_control_state
7329 (struct infcall_control_state *inf_status)
7330 {
7331 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
7332 }
7333
7334 void
7335 discard_infcall_control_state (struct infcall_control_state *inf_status)
7336 {
7337 if (inf_status->thread_control.step_resume_breakpoint)
7338 inf_status->thread_control.step_resume_breakpoint->disposition
7339 = disp_del_at_next_stop;
7340
7341 if (inf_status->thread_control.exception_resume_breakpoint)
7342 inf_status->thread_control.exception_resume_breakpoint->disposition
7343 = disp_del_at_next_stop;
7344
7345 /* See save_infcall_control_state for info on stop_bpstat. */
7346 bpstat_clear (&inf_status->thread_control.stop_bpstat);
7347
7348 xfree (inf_status);
7349 }
7350 \f
7351 /* restore_inferior_ptid() will be used by the cleanup machinery
7352 to restore the inferior_ptid value saved in a call to
7353 save_inferior_ptid(). */
7354
7355 static void
7356 restore_inferior_ptid (void *arg)
7357 {
7358 ptid_t *saved_ptid_ptr = arg;
7359
7360 inferior_ptid = *saved_ptid_ptr;
7361 xfree (arg);
7362 }
7363
7364 /* Save the value of inferior_ptid so that it may be restored by a
7365 later call to do_cleanups(). Returns the struct cleanup pointer
7366 needed for later doing the cleanup. */
7367
7368 struct cleanup *
7369 save_inferior_ptid (void)
7370 {
7371 ptid_t *saved_ptid_ptr;
7372
7373 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
7374 *saved_ptid_ptr = inferior_ptid;
7375 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
7376 }
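/* Usage sketch (illustrative; "some_other_ptid" is hypothetical):
   temporarily switch inferior_ptid and let the cleanup put it back,
   even if an error is thrown in between:

     struct cleanup *old_chain = save_inferior_ptid ();

     inferior_ptid = some_other_ptid;
     ... do per-thread work ...
     do_cleanups (old_chain);    -- restores the saved inferior_ptid  */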
7377
7378 /* See infrun.h. */
7379
7380 void
7381 clear_exit_convenience_vars (void)
7382 {
7383 clear_internalvar (lookup_internalvar ("_exitsignal"));
7384 clear_internalvar (lookup_internalvar ("_exitcode"));
7385 }
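/* For reference: $_exitcode is set when the inferior exits normally and
   $_exitsignal when it is terminated by a signal; after clearing both
   here, e.g. "print $_exitcode" yields void until the next exit event
   sets one of them again.  */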
7386 \f
7387
7388 /* User interface for reverse debugging:
7389 Set exec-direction / show exec-direction commands
7390 (an error is returned unless the target can execute in reverse). */
7391
7392 int execution_direction = EXEC_FORWARD;
7393 static const char exec_forward[] = "forward";
7394 static const char exec_reverse[] = "reverse";
7395 static const char *exec_direction = exec_forward;
7396 static const char *const exec_direction_names[] = {
7397 exec_forward,
7398 exec_reverse,
7399 NULL
7400 };
7401
7402 static void
7403 set_exec_direction_func (char *args, int from_tty,
7404 struct cmd_list_element *cmd)
7405 {
7406 if (target_can_execute_reverse)
7407 {
7408 if (!strcmp (exec_direction, exec_forward))
7409 execution_direction = EXEC_FORWARD;
7410 else if (!strcmp (exec_direction, exec_reverse))
7411 execution_direction = EXEC_REVERSE;
7412 }
7413 else
7414 {
7415 exec_direction = exec_forward;
7416 error (_("Target does not support this operation."));
7417 }
7418 }
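/* E.g. "set exec-direction reverse" only succeeds on targets that can
   execute in reverse, such as a "record full" session; otherwise the
   error above is raised and the mode snaps back to "forward".  */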
7419
7420 static void
7421 show_exec_direction_func (struct ui_file *out, int from_tty,
7422 struct cmd_list_element *cmd, const char *value)
7423 {
7424 switch (execution_direction) {
7425 case EXEC_FORWARD:
7426 fprintf_filtered (out, _("Forward.\n"));
7427 break;
7428 case EXEC_REVERSE:
7429 fprintf_filtered (out, _("Reverse.\n"));
7430 break;
7431 default:
7432 internal_error (__FILE__, __LINE__,
7433 _("bogus execution_direction value: %d"),
7434 (int) execution_direction);
7435 }
7436 }
7437
7438 static void
7439 show_schedule_multiple (struct ui_file *file, int from_tty,
7440 struct cmd_list_element *c, const char *value)
7441 {
7442 fprintf_filtered (file, _("Resuming the execution of threads "
7443 "of all processes is %s.\n"), value);
7444 }
7445
7446 /* Implementation of `siginfo' variable. */
7447
7448 static const struct internalvar_funcs siginfo_funcs =
7449 {
7450 siginfo_make_value,
7451 NULL,
7452 NULL
7453 };
7454
7455 void
7456 _initialize_infrun (void)
7457 {
7458 int i;
7459 int numsigs;
7460 struct cmd_list_element *c;
7461
7462 add_info ("signals", signals_info, _("\
7463 What debugger does when program gets various signals.\n\
7464 Specify a signal as argument to print info on that signal only."));
7465 add_info_alias ("handle", "signals", 0);
7466
7467 c = add_com ("handle", class_run, handle_command, _("\
7468 Specify how to handle signals.\n\
7469 Usage: handle SIGNAL [ACTIONS]\n\
7470 Args are signals and actions to apply to those signals.\n\
7471 If no actions are specified, the current settings for the specified signals\n\
7472 will be displayed instead.\n\
7473 \n\
7474 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7475 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7476 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7477 The special arg \"all\" is recognized to mean all signals except those\n\
7478 used by the debugger, typically SIGTRAP and SIGINT.\n\
7479 \n\
7480 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
7481 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
7482 Stop means reenter debugger if this signal happens (implies print).\n\
7483 Print means print a message if this signal happens.\n\
7484 Pass means let program see this signal; otherwise program doesn't know.\n\
7485 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7486 Pass and Stop may be combined.\n\
7487 \n\
7488 Multiple signals may be specified. Signal numbers and signal names\n\
7489 may be interspersed with actions, with the actions being performed for\n\
7490 all signals cumulatively specified."));
7491 set_cmd_completer (c, handle_completer);
7492
7493 if (xdb_commands)
7494 {
7495 add_com ("lz", class_info, signals_info, _("\
7496 What debugger does when program gets various signals.\n\
7497 Specify a signal as argument to print info on that signal only."));
7498 add_com ("z", class_run, xdb_handle_command, _("\
7499 Specify how to handle a signal.\n\
7500 Args are signals and actions to apply to those signals.\n\
7501 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7502 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7503 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7504 The special arg \"all\" is recognized to mean all signals except those\n\
7505 used by the debugger, typically SIGTRAP and SIGINT.\n\
7506 Recognized actions include \"s\" (toggles between stop and nostop),\n\
7507 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
7508 nopass), \"Q\" (noprint)\n\
7509 Stop means reenter debugger if this signal happens (implies print).\n\
7510 Print means print a message if this signal happens.\n\
7511 Pass means let program see this signal; otherwise program doesn't know.\n\
7512 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7513 Pass and Stop may be combined."));
7514 }
7515
7516 if (!dbx_commands)
7517 stop_command = add_cmd ("stop", class_obscure,
7518 not_just_help_class_command, _("\
7519 There is no `stop' command, but you can set a hook on `stop'.\n\
7520 This allows you to set a list of commands to be run each time execution\n\
7521 of the program stops."), &cmdlist);
7522
7523 add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
7524 Set inferior debugging."), _("\
7525 Show inferior debugging."), _("\
7526 When non-zero, inferior specific debugging is enabled."),
7527 NULL,
7528 show_debug_infrun,
7529 &setdebuglist, &showdebuglist);
7530
7531 add_setshow_boolean_cmd ("displaced", class_maintenance,
7532 &debug_displaced, _("\
7533 Set displaced stepping debugging."), _("\
7534 Show displaced stepping debugging."), _("\
7535 When non-zero, displaced stepping specific debugging is enabled."),
7536 NULL,
7537 show_debug_displaced,
7538 &setdebuglist, &showdebuglist);
7539
7540 add_setshow_boolean_cmd ("non-stop", no_class,
7541 &non_stop_1, _("\
7542 Set whether gdb controls the inferior in non-stop mode."), _("\
7543 Show whether gdb controls the inferior in non-stop mode."), _("\
7544 When debugging a multi-threaded program and this setting is\n\
7545 off (the default, also called all-stop mode), when one thread stops\n\
7546 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
7547 all other threads in the program while you interact with the thread of\n\
7548 interest. When you continue or step a thread, you can allow the other\n\
7549 threads to run, or have them remain stopped, but while you inspect any\n\
7550 thread's state, all threads stop.\n\
7551 \n\
7552 In non-stop mode, when one thread stops, other threads can continue\n\
7553 to run freely. You'll be able to step each thread independently,\n\
7554 leave it stopped or free to run as needed."),
7555 set_non_stop,
7556 show_non_stop,
7557 &setlist,
7558 &showlist);
7559
7560 numsigs = (int) GDB_SIGNAL_LAST;
7561 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
7562 signal_print = (unsigned char *)
7563 xmalloc (sizeof (signal_print[0]) * numsigs);
7564 signal_program = (unsigned char *)
7565 xmalloc (sizeof (signal_program[0]) * numsigs);
7566 signal_catch = (unsigned char *)
7567 xmalloc (sizeof (signal_catch[0]) * numsigs);
7568 signal_pass = (unsigned char *)
7569 xmalloc (sizeof (signal_pass[0]) * numsigs);
7570 for (i = 0; i < numsigs; i++)
7571 {
7572 signal_stop[i] = 1;
7573 signal_print[i] = 1;
7574 signal_program[i] = 1;
7575 signal_catch[i] = 0;
7576 }
7577
7578 /* Signals caused by debugger's own actions
7579 should not be given to the program afterwards. */
7580 signal_program[GDB_SIGNAL_TRAP] = 0;
7581 signal_program[GDB_SIGNAL_INT] = 0;
7582
7583 /* Signals that are not errors should not normally enter the debugger. */
7584 signal_stop[GDB_SIGNAL_ALRM] = 0;
7585 signal_print[GDB_SIGNAL_ALRM] = 0;
7586 signal_stop[GDB_SIGNAL_VTALRM] = 0;
7587 signal_print[GDB_SIGNAL_VTALRM] = 0;
7588 signal_stop[GDB_SIGNAL_PROF] = 0;
7589 signal_print[GDB_SIGNAL_PROF] = 0;
7590 signal_stop[GDB_SIGNAL_CHLD] = 0;
7591 signal_print[GDB_SIGNAL_CHLD] = 0;
7592 signal_stop[GDB_SIGNAL_IO] = 0;
7593 signal_print[GDB_SIGNAL_IO] = 0;
7594 signal_stop[GDB_SIGNAL_POLL] = 0;
7595 signal_print[GDB_SIGNAL_POLL] = 0;
7596 signal_stop[GDB_SIGNAL_URG] = 0;
7597 signal_print[GDB_SIGNAL_URG] = 0;
7598 signal_stop[GDB_SIGNAL_WINCH] = 0;
7599 signal_print[GDB_SIGNAL_WINCH] = 0;
7600 signal_stop[GDB_SIGNAL_PRIO] = 0;
7601 signal_print[GDB_SIGNAL_PRIO] = 0;
7602
7603 /* These signals are used internally by user-level thread
7604 implementations. (See signal(5) on Solaris.) Like the above
7605 signals, a healthy program receives and handles them as part of
7606 its normal operation. */
7607 signal_stop[GDB_SIGNAL_LWP] = 0;
7608 signal_print[GDB_SIGNAL_LWP] = 0;
7609 signal_stop[GDB_SIGNAL_WAITING] = 0;
7610 signal_print[GDB_SIGNAL_WAITING] = 0;
7611 signal_stop[GDB_SIGNAL_CANCEL] = 0;
7612 signal_print[GDB_SIGNAL_CANCEL] = 0;
7613
7614 /* Update cached state. */
7615 signal_cache_update (-1);
7616
7617 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
7618 &stop_on_solib_events, _("\
7619 Set stopping for shared library events."), _("\
7620 Show stopping for shared library events."), _("\
7621 If nonzero, gdb will give control to the user when the dynamic linker\n\
7622 notifies gdb of shared library events. The most common event of interest\n\
7623 to the user would be loading/unloading of a new library."),
7624 set_stop_on_solib_events,
7625 show_stop_on_solib_events,
7626 &setlist, &showlist);
7627
7628 add_setshow_enum_cmd ("follow-fork-mode", class_run,
7629 follow_fork_mode_kind_names,
7630 &follow_fork_mode_string, _("\
7631 Set debugger response to a program call of fork or vfork."), _("\
7632 Show debugger response to a program call of fork or vfork."), _("\
7633 A fork or vfork creates a new process. follow-fork-mode can be:\n\
7634 parent - the original process is debugged after a fork\n\
7635 child - the new process is debugged after a fork\n\
7636 The unfollowed process will continue to run.\n\
7637 By default, the debugger will follow the parent process."),
7638 NULL,
7639 show_follow_fork_mode_string,
7640 &setlist, &showlist);
7641
7642 add_setshow_enum_cmd ("follow-exec-mode", class_run,
7643 follow_exec_mode_names,
7644 &follow_exec_mode_string, _("\
7645 Set debugger response to a program call of exec."), _("\
7646 Show debugger response to a program call of exec."), _("\
7647 An exec call replaces the program image of a process.\n\
7648 \n\
7649 follow-exec-mode can be:\n\
7650 \n\
7651 new - the debugger creates a new inferior and rebinds the process\n\
7652 to this new inferior. The program the process was running before\n\
7653 the exec call can be restarted afterwards by restarting the original\n\
7654 inferior.\n\
7655 \n\
7656 same - the debugger keeps the process bound to the same inferior.\n\
7657 The new executable image replaces the previous executable loaded in\n\
7658 the inferior. Restarting the inferior after the exec call restarts\n\
7659 the executable the process was running after the exec call.\n\
7660 \n\
7661 By default, the debugger will use the same inferior."),
7662 NULL,
7663 show_follow_exec_mode_string,
7664 &setlist, &showlist);
7665
7666 add_setshow_enum_cmd ("scheduler-locking", class_run,
7667 scheduler_enums, &scheduler_mode, _("\
7668 Set mode for locking scheduler during execution."), _("\
7669 Show mode for locking scheduler during execution."), _("\
7670 off == no locking (threads may preempt at any time)\n\
7671 on == full locking (no thread except the current thread may run)\n\
7672 step == scheduler locked during every single-step operation.\n\
7673 In this mode, no other thread may run during a step command.\n\
7674 Other threads may run while stepping over a function call ('next')."),
7675 set_schedlock_func, /* traps on target vector */
7676 show_scheduler_mode,
7677 &setlist, &showlist);
7678
7679 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
7680 Set mode for resuming threads of all processes."), _("\
7681 Show mode for resuming threads of all processes."), _("\
7682 When on, execution commands (such as 'continue' or 'next') resume all\n\
7683 threads of all processes. When off (which is the default), execution\n\
7684 commands only resume the threads of the current process. The set of\n\
7685 threads that are resumed is further refined by the scheduler-locking\n\
7686 mode (see help set scheduler-locking)."),
7687 NULL,
7688 show_schedule_multiple,
7689 &setlist, &showlist);
7690
7691 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
7692 Set mode of the step operation."), _("\
7693 Show mode of the step operation."), _("\
7694 When set, doing a step over a function without debug line information\n\
7695 will stop at the first instruction of that function. Otherwise, the\n\
7696 function is skipped and the step command stops at a different source line."),
7697 NULL,
7698 show_step_stop_if_no_debug,
7699 &setlist, &showlist);
7700
7701 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
7702 &can_use_displaced_stepping, _("\
7703 Set debugger's willingness to use displaced stepping."), _("\
7704 Show debugger's willingness to use displaced stepping."), _("\
7705 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
7706 supported by the target architecture. If off, gdb will not use displaced\n\
7707 stepping to step over breakpoints, even if such is supported by the target\n\
7708 architecture. If auto (which is the default), gdb will use displaced stepping\n\
7709 if the target architecture supports it and non-stop mode is active, but will not\n\
7710 use it in all-stop mode (see help set non-stop)."),
7711 NULL,
7712 show_can_use_displaced_stepping,
7713 &setlist, &showlist);
7714
7715 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
7716 &exec_direction, _("Set direction of execution.\n\
7717 Options are 'forward' or 'reverse'."),
7718 _("Show direction of execution (forward/reverse)."),
7719 _("Tells gdb whether to execute forward or backward."),
7720 set_exec_direction_func, show_exec_direction_func,
7721 &setlist, &showlist);
7722
7723 /* Set/show detach-on-fork: user-settable mode. */
7724
7725 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
7726 Set whether gdb will detach the child of a fork."), _("\
7727 Show whether gdb will detach the child of a fork."), _("\
7728 Tells gdb whether to detach the child of a fork."),
7729 NULL, NULL, &setlist, &showlist);
7730
7731 /* Set/show disable address space randomization mode. */
7732
7733 add_setshow_boolean_cmd ("disable-randomization", class_support,
7734 &disable_randomization, _("\
7735 Set disabling of debuggee's virtual address space randomization."), _("\
7736 Show disabling of debuggee's virtual address space randomization."), _("\
7737 When this mode is on (which is the default), randomization of the virtual\n\
7738 address space is disabled. Standalone programs run with the randomization\n\
7739 enabled by default on some platforms."),
7740 &set_disable_randomization,
7741 &show_disable_randomization,
7742 &setlist, &showlist);
7743
7744 /* ptid initializations */
7745 inferior_ptid = null_ptid;
7746 target_last_wait_ptid = minus_one_ptid;
7747
7748 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
7749 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
7750 observer_attach_thread_exit (infrun_thread_thread_exit);
7751 observer_attach_inferior_exit (infrun_inferior_exit);
7752
7753 /* Explicitly create without lookup, since that tries to create a
7754 value with a void typed value, and when we get here, gdbarch
7755 isn't initialized yet. At this point, we're quite sure there
7756 isn't another convenience variable of the same name. */
7757 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);
7758
7759 add_setshow_boolean_cmd ("observer", no_class,
7760 &observer_mode_1, _("\
7761 Set whether gdb controls the inferior in observer mode."), _("\
7762 Show whether gdb controls the inferior in observer mode."), _("\
7763 In observer mode, GDB can get data from the inferior, but not\n\
7764 affect its execution. Registers and memory may not be changed,\n\
7765 breakpoints may not be set, and the program cannot be interrupted\n\
7766 or signalled."),
7767 set_observer_mode,
7768 show_observer_mode,
7769 &setlist,
7770 &showlist);
7771 }