1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2014 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "infrun.h"
23 #include <string.h>
24 #include <ctype.h>
25 #include "symtab.h"
26 #include "frame.h"
27 #include "inferior.h"
28 #include "exceptions.h"
29 #include "breakpoint.h"
30 #include "gdb_wait.h"
31 #include "gdbcore.h"
32 #include "gdbcmd.h"
33 #include "cli/cli-script.h"
34 #include "target.h"
35 #include "gdbthread.h"
36 #include "annotate.h"
37 #include "symfile.h"
38 #include "top.h"
39 #include <signal.h>
40 #include "inf-loop.h"
41 #include "regcache.h"
42 #include "value.h"
43 #include "observer.h"
44 #include "language.h"
45 #include "solib.h"
46 #include "main.h"
47 #include "dictionary.h"
48 #include "block.h"
49 #include "gdb_assert.h"
50 #include "mi/mi-common.h"
51 #include "event-top.h"
52 #include "record.h"
53 #include "record-full.h"
54 #include "inline-frame.h"
55 #include "jit.h"
56 #include "tracepoint.h"
57 #include "continuations.h"
58 #include "interps.h"
59 #include "skip.h"
60 #include "probe.h"
61 #include "objfiles.h"
62 #include "completer.h"
63 #include "target-descriptions.h"
64 #include "target-dcache.h"
65
66 /* Prototypes for local functions */
67
68 static void signals_info (char *, int);
69
70 static void handle_command (char *, int);
71
72 static void sig_print_info (enum gdb_signal);
73
74 static void sig_print_header (void);
75
76 static void resume_cleanups (void *);
77
78 static int hook_stop_stub (void *);
79
80 static int restore_selected_frame (void *);
81
82 static int follow_fork (void);
83
84 static void set_schedlock_func (char *args, int from_tty,
85 struct cmd_list_element *c);
86
87 static int currently_stepping (struct thread_info *tp);
88
89 static void xdb_handle_command (char *args, int from_tty);
90
91 void _initialize_infrun (void);
92
93 void nullify_last_target_wait_ptid (void);
94
95 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
96
97 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
98
99 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
100
101 /* When set, stop the 'step' command if we enter a function which has
102 no line number information. The normal behavior is that we step
103 over such functions. */
104 int step_stop_if_no_debug = 0;
105 static void
106 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
107 struct cmd_list_element *c, const char *value)
108 {
109 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
110 }
111
112 /* In asynchronous mode, but simulating synchronous execution. */
113
114 int sync_execution = 0;
115
116 /* proceed and normal_stop use this to notify the user when the
117 inferior stopped in a different thread than it had been running
118 in. */
119
120 static ptid_t previous_inferior_ptid;
121
122 /* If set (default for legacy reasons), when following a fork, GDB
123 will detach from one of the fork branches, child or parent.
124 Exactly which branch is detached depends on 'set follow-fork-mode'
125 setting. */
126
127 static int detach_fork = 1;
128
129 int debug_displaced = 0;
130 static void
131 show_debug_displaced (struct ui_file *file, int from_tty,
132 struct cmd_list_element *c, const char *value)
133 {
134 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
135 }
136
137 unsigned int debug_infrun = 0;
138 static void
139 show_debug_infrun (struct ui_file *file, int from_tty,
140 struct cmd_list_element *c, const char *value)
141 {
142 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
143 }
144
145
146 /* Support for disabling address space randomization. */
147
148 int disable_randomization = 1;
149
150 static void
151 show_disable_randomization (struct ui_file *file, int from_tty,
152 struct cmd_list_element *c, const char *value)
153 {
154 if (target_supports_disable_randomization ())
155 fprintf_filtered (file,
156 _("Disabling randomization of debuggee's "
157 "virtual address space is %s.\n"),
158 value);
159 else
160 fputs_filtered (_("Disabling randomization of debuggee's "
161 "virtual address space is unsupported on\n"
162 "this platform.\n"), file);
163 }
164
165 static void
166 set_disable_randomization (char *args, int from_tty,
167 struct cmd_list_element *c)
168 {
169 if (!target_supports_disable_randomization ())
170 error (_("Disabling randomization of debuggee's "
171 "virtual address space is unsupported on\n"
172 "this platform."));
173 }
174
175 /* User interface for non-stop mode. */
176
177 int non_stop = 0;
178 static int non_stop_1 = 0;
179
180 static void
181 set_non_stop (char *args, int from_tty,
182 struct cmd_list_element *c)
183 {
184 if (target_has_execution)
185 {
186 non_stop_1 = non_stop;
187 error (_("Cannot change this setting while the inferior is running."));
188 }
189
190 non_stop = non_stop_1;
191 }
192
193 static void
194 show_non_stop (struct ui_file *file, int from_tty,
195 struct cmd_list_element *c, const char *value)
196 {
197 fprintf_filtered (file,
198 _("Controlling the inferior in non-stop mode is %s.\n"),
199 value);
200 }
201
202 /* "Observer mode" is somewhat like a more extreme version of
203 non-stop, in which all GDB operations that might affect the
204 target's execution have been disabled. */
205
206 int observer_mode = 0;
207 static int observer_mode_1 = 0;
208
209 static void
210 set_observer_mode (char *args, int from_tty,
211 struct cmd_list_element *c)
212 {
213 if (target_has_execution)
214 {
215 observer_mode_1 = observer_mode;
216 error (_("Cannot change this setting while the inferior is running."));
217 }
218
219 observer_mode = observer_mode_1;
220
221 may_write_registers = !observer_mode;
222 may_write_memory = !observer_mode;
223 may_insert_breakpoints = !observer_mode;
224 may_insert_tracepoints = !observer_mode;
225 /* We can insert fast tracepoints in or out of observer mode,
226 but enable them if we're going into this mode. */
227 if (observer_mode)
228 may_insert_fast_tracepoints = 1;
229 may_stop = !observer_mode;
230 update_target_permissions ();
231
232 /* Going *into* observer mode we must force non-stop, then
233 going out we leave it that way. */
234 if (observer_mode)
235 {
236 pagination_enabled = 0;
237 non_stop = non_stop_1 = 1;
238 }
239
240 if (from_tty)
241 printf_filtered (_("Observer mode is now %s.\n"),
242 (observer_mode ? "on" : "off"));
243 }
244
245 static void
246 show_observer_mode (struct ui_file *file, int from_tty,
247 struct cmd_list_element *c, const char *value)
248 {
249 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
250 }
251
252 /* This updates the value of observer mode based on changes in
253 permissions. Note that we are deliberately ignoring the values of
254 may-write-registers and may-write-memory, since the user may have
255 reason to enable these during a session, for instance to turn on a
256 debugging-related global. */
257
258 void
259 update_observer_mode (void)
260 {
261 int newval;
262
263 newval = (!may_insert_breakpoints
264 && !may_insert_tracepoints
265 && may_insert_fast_tracepoints
266 && !may_stop
267 && non_stop);
268
269 /* Let the user know if things change. */
270 if (newval != observer_mode)
271 printf_filtered (_("Observer mode is now %s.\n"),
272 (newval ? "on" : "off"));
273
274 observer_mode = observer_mode_1 = newval;
275 }
276
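/* Illustrative sketch (not part of GDB): how the permission flags are
   expected to be driven so that update_observer_mode above flips
   observer mode on.  The function name below is hypothetical; the real
   flags are toggled by the individual "set may-..." commands.  */
#if 0
static void
example_enter_observer_like_mode (void)
{
  /* Forbid everything that could perturb the target...  */
  may_insert_breakpoints = 0;
  may_insert_tracepoints = 0;
  may_stop = 0;
  /* ...but keep fast tracepoints allowed, and require non-stop.  */
  may_insert_fast_tracepoints = 1;
  non_stop = 1;

  /* With exactly this combination, update_observer_mode computes a new
     value of 1 and reports the change to the user.  */
  update_observer_mode ();
}
#endif
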
277 /* Tables of how to react to signals; the user sets them. */
278
279 static unsigned char *signal_stop;
280 static unsigned char *signal_print;
281 static unsigned char *signal_program;
282
283 /* Table of signals that are registered with "catch signal". A
284 non-zero entry indicates that the signal is caught by some "catch
285 signal" command. This has size GDB_SIGNAL_LAST, to accommodate all
286 signals. */
287 static unsigned char *signal_catch;
288
289 /* Table of signals that the target may silently handle.
290 This is automatically determined from the flags above,
291 and simply cached here. */
292 static unsigned char *signal_pass;
293
294 #define SET_SIGS(nsigs,sigs,flags) \
295 do { \
296 int signum = (nsigs); \
297 while (signum-- > 0) \
298 if ((sigs)[signum]) \
299 (flags)[signum] = 1; \
300 } while (0)
301
302 #define UNSET_SIGS(nsigs,sigs,flags) \
303 do { \
304 int signum = (nsigs); \
305 while (signum-- > 0) \
306 if ((sigs)[signum]) \
307 (flags)[signum] = 0; \
308 } while (0)
309
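/* Illustrative sketch (not part of GDB): how the SET_SIGS/UNSET_SIGS
   macros below operate on the per-signal tables above.  SIGS marks
   which signals are affected; FLAGS is one of signal_stop,
   signal_print, signal_program, etc.  The function and the chosen
   signals are made up for illustration; the cached signal_pass table
   is recomputed from these tables elsewhere in this file.  */
#if 0
static void
example_mark_signals (void)
{
  unsigned char sigs[(int) GDB_SIGNAL_LAST] = { 0 };

  /* Pretend the user named SIGINT and SIGALRM in a "handle" command.  */
  sigs[GDB_SIGNAL_INT] = 1;
  sigs[GDB_SIGNAL_ALRM] = 1;

  /* "stop print": set the corresponding entries in signal_stop and
     signal_print, leaving all other signals untouched.  */
  SET_SIGS ((int) GDB_SIGNAL_LAST, sigs, signal_stop);
  SET_SIGS ((int) GDB_SIGNAL_LAST, sigs, signal_print);

  /* "nopass" for the same signals would instead clear signal_program.  */
  UNSET_SIGS ((int) GDB_SIGNAL_LAST, sigs, signal_program);
}
#endif
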
310 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
311 this function is to avoid exporting `signal_program'. */
312
313 void
314 update_signals_program_target (void)
315 {
316 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
317 }
318
319 /* Value to pass to target_resume() to cause all threads to resume. */
320
321 #define RESUME_ALL minus_one_ptid
322
323 /* Command list pointer for the "stop" placeholder. */
324
325 static struct cmd_list_element *stop_command;
326
327 /* Function inferior was in as of last step command. */
328
329 static struct symbol *step_start_function;
330
331 /* Nonzero if we want to give control to the user when we're notified
332 of shared library events by the dynamic linker. */
333 int stop_on_solib_events;
334
335 /* Enable or disable optional shared library event breakpoints
336 as appropriate when the above flag is changed. */
337
338 static void
339 set_stop_on_solib_events (char *args, int from_tty, struct cmd_list_element *c)
340 {
341 update_solib_breakpoints ();
342 }
343
344 static void
345 show_stop_on_solib_events (struct ui_file *file, int from_tty,
346 struct cmd_list_element *c, const char *value)
347 {
348 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
349 value);
350 }
351
352 /* Nonzero means we are expecting a trace trap
353 and should stop the inferior and return silently when it happens. */
354
355 int stop_after_trap;
356
357 /* Save register contents here when executing a "finish" command, or when we
358 are about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
359 Thus this contains the return value from the called function (assuming
360 values are returned in a register). */
361
362 struct regcache *stop_registers;
363
364 /* Nonzero after stop if current stack frame should be printed. */
365
366 static int stop_print_frame;
367
368 /* This is a cached copy of the pid/waitstatus of the last event
369 returned by target_wait()/deprecated_target_wait_hook(). This
370 information is returned by get_last_target_status(). */
371 static ptid_t target_last_wait_ptid;
372 static struct target_waitstatus target_last_waitstatus;
373
374 static void context_switch (ptid_t ptid);
375
376 void init_thread_stepping_state (struct thread_info *tss);
377
378 static void init_infwait_state (void);
379
380 static const char follow_fork_mode_child[] = "child";
381 static const char follow_fork_mode_parent[] = "parent";
382
383 static const char *const follow_fork_mode_kind_names[] = {
384 follow_fork_mode_child,
385 follow_fork_mode_parent,
386 NULL
387 };
388
389 static const char *follow_fork_mode_string = follow_fork_mode_parent;
390 static void
391 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
392 struct cmd_list_element *c, const char *value)
393 {
394 fprintf_filtered (file,
395 _("Debugger response to a program "
396 "call of fork or vfork is \"%s\".\n"),
397 value);
398 }
399 \f
400
401 /* Tell the target to follow the fork we're stopped at. Returns true
402 if the inferior should be resumed; false if the target for some
403 reason decided it's best not to resume. */
404
405 static int
406 follow_fork (void)
407 {
408 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
409 int should_resume = 1;
410 struct thread_info *tp;
411
412 /* Copy user stepping state to the new inferior thread. FIXME: the
413 followed fork child thread should have a copy of most of the
414 parent thread structure's run control related fields, not just these.
415 Initialized to avoid "may be used uninitialized" warnings from gcc. */
416 struct breakpoint *step_resume_breakpoint = NULL;
417 struct breakpoint *exception_resume_breakpoint = NULL;
418 CORE_ADDR step_range_start = 0;
419 CORE_ADDR step_range_end = 0;
420 struct frame_id step_frame_id = { 0 };
421 struct interp *command_interp = NULL;
422
423 if (!non_stop)
424 {
425 ptid_t wait_ptid;
426 struct target_waitstatus wait_status;
427
428 /* Get the last target status returned by target_wait(). */
429 get_last_target_status (&wait_ptid, &wait_status);
430
431 /* If not stopped at a fork event, then there's nothing else to
432 do. */
433 if (wait_status.kind != TARGET_WAITKIND_FORKED
434 && wait_status.kind != TARGET_WAITKIND_VFORKED)
435 return 1;
436
437 /* Check if we have switched away from WAIT_PTID since the event
438 was reported. */
439 if (!ptid_equal (wait_ptid, minus_one_ptid)
440 && !ptid_equal (inferior_ptid, wait_ptid))
441 {
442 /* We did. Switch back to WAIT_PTID thread, to tell the
443 target to follow it (in either direction). We'll
444 afterwards refuse to resume, and inform the user what
445 happened. */
446 switch_to_thread (wait_ptid);
447 should_resume = 0;
448 }
449 }
450
451 tp = inferior_thread ();
452
453 /* If there were any forks/vforks that were caught and are now to be
454 followed, then do so now. */
455 switch (tp->pending_follow.kind)
456 {
457 case TARGET_WAITKIND_FORKED:
458 case TARGET_WAITKIND_VFORKED:
459 {
460 ptid_t parent, child;
461
462 /* If the user did a next/step, etc, over a fork call,
463 preserve the stepping state in the fork child. */
464 if (follow_child && should_resume)
465 {
466 step_resume_breakpoint = clone_momentary_breakpoint
467 (tp->control.step_resume_breakpoint);
468 step_range_start = tp->control.step_range_start;
469 step_range_end = tp->control.step_range_end;
470 step_frame_id = tp->control.step_frame_id;
471 exception_resume_breakpoint
472 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
473 command_interp = tp->control.command_interp;
474
475 /* For now, delete the parent's sr breakpoint, otherwise,
476 parent/child sr breakpoints are considered duplicates,
477 and the child version will not be installed. Remove
478 this when the breakpoints module becomes aware of
479 inferiors and address spaces. */
480 delete_step_resume_breakpoint (tp);
481 tp->control.step_range_start = 0;
482 tp->control.step_range_end = 0;
483 tp->control.step_frame_id = null_frame_id;
484 delete_exception_resume_breakpoint (tp);
485 tp->control.command_interp = NULL;
486 }
487
488 parent = inferior_ptid;
489 child = tp->pending_follow.value.related_pid;
490
491 /* Tell the target to do whatever is necessary to follow
492 either parent or child. */
493 if (target_follow_fork (follow_child, detach_fork))
494 {
495 /* Target refused to follow, or there's some other reason
496 we shouldn't resume. */
497 should_resume = 0;
498 }
499 else
500 {
501 /* This pending follow fork event is now handled, one way
502 or another. The previously selected thread may be gone
503 from the lists by now, but if it is still around, we need
504 to clear the pending follow request. */
505 tp = find_thread_ptid (parent);
506 if (tp)
507 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
508
509 /* This makes sure we don't try to apply the "Switched
510 over from WAIT_PID" logic above. */
511 nullify_last_target_wait_ptid ();
512
513 /* If we followed the child, switch to it... */
514 if (follow_child)
515 {
516 switch_to_thread (child);
517
518 /* ... and preserve the stepping state, in case the
519 user was stepping over the fork call. */
520 if (should_resume)
521 {
522 tp = inferior_thread ();
523 tp->control.step_resume_breakpoint
524 = step_resume_breakpoint;
525 tp->control.step_range_start = step_range_start;
526 tp->control.step_range_end = step_range_end;
527 tp->control.step_frame_id = step_frame_id;
528 tp->control.exception_resume_breakpoint
529 = exception_resume_breakpoint;
530 tp->control.command_interp = command_interp;
531 }
532 else
533 {
534 /* If we get here, it was because we're trying to
535 resume from a fork catchpoint, but the user
536 has switched threads away from the thread that
537 forked. In that case, the resume command
538 issued is most likely not applicable to the
539 child, so just warn, and refuse to resume. */
540 warning (_("Not resuming: switched threads "
541 "before following fork child.\n"));
542 }
543
544 /* Reset breakpoints in the child as appropriate. */
545 follow_inferior_reset_breakpoints ();
546 }
547 else
548 switch_to_thread (parent);
549 }
550 }
551 break;
552 case TARGET_WAITKIND_SPURIOUS:
553 /* Nothing to follow. */
554 break;
555 default:
556 internal_error (__FILE__, __LINE__,
557 "Unexpected pending_follow.kind %d\n",
558 tp->pending_follow.kind);
559 break;
560 }
561
562 return should_resume;
563 }
564
565 void
566 follow_inferior_reset_breakpoints (void)
567 {
568 struct thread_info *tp = inferior_thread ();
569
570 /* Was there a step_resume breakpoint? (There was if the user
571 did a "next" at the fork() call.) If so, explicitly reset its
572 thread number. Cloned step_resume breakpoints are disabled on
573 creation, so enable it here now that it is associated with the
574 correct thread.
575
576 step_resumes are a form of bp that are made to be per-thread.
577 Since we created the step_resume bp when the parent process
578 was being debugged, and now are switching to the child process,
579 from the breakpoint package's viewpoint, that's a switch of
580 "threads". We must update the bp's notion of which thread
581 it is for, or it'll be ignored when it triggers. */
582
583 if (tp->control.step_resume_breakpoint)
584 {
585 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
586 tp->control.step_resume_breakpoint->loc->enabled = 1;
587 }
588
589 /* Treat exception_resume breakpoints like step_resume breakpoints. */
590 if (tp->control.exception_resume_breakpoint)
591 {
592 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
593 tp->control.exception_resume_breakpoint->loc->enabled = 1;
594 }
595
596 /* Reinsert all breakpoints in the child. The user may have set
597 breakpoints after catching the fork, in which case those
598 were never set in the child, but only in the parent. This makes
599 sure the inserted breakpoints match the breakpoint list. */
600
601 breakpoint_re_set ();
602 insert_breakpoints ();
603 }
604
605 /* The child has exited or execed: resume the threads of the parent that
606 the user wanted to be executing. */
607
608 static int
609 proceed_after_vfork_done (struct thread_info *thread,
610 void *arg)
611 {
612 int pid = * (int *) arg;
613
614 if (ptid_get_pid (thread->ptid) == pid
615 && is_running (thread->ptid)
616 && !is_executing (thread->ptid)
617 && !thread->stop_requested
618 && thread->suspend.stop_signal == GDB_SIGNAL_0)
619 {
620 if (debug_infrun)
621 fprintf_unfiltered (gdb_stdlog,
622 "infrun: resuming vfork parent thread %s\n",
623 target_pid_to_str (thread->ptid));
624
625 switch_to_thread (thread->ptid);
626 clear_proceed_status ();
627 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT, 0);
628 }
629
630 return 0;
631 }
632
633 /* Called whenever we notice an exec or exit event, to handle
634 detaching or resuming a vfork parent. */
635
636 static void
637 handle_vfork_child_exec_or_exit (int exec)
638 {
639 struct inferior *inf = current_inferior ();
640
641 if (inf->vfork_parent)
642 {
643 int resume_parent = -1;
644
645 /* This exec or exit marks the end of the shared memory region
646 between the parent and the child. If the user wanted to
647 detach from the parent, now is the time. */
648
649 if (inf->vfork_parent->pending_detach)
650 {
651 struct thread_info *tp;
652 struct cleanup *old_chain;
653 struct program_space *pspace;
654 struct address_space *aspace;
655
656 /* follow-fork child, detach-on-fork on. */
657
658 inf->vfork_parent->pending_detach = 0;
659
660 if (!exec)
661 {
662 /* If we're handling a child exit, then inferior_ptid
663 points at the inferior's pid, not at a thread. */
664 old_chain = save_inferior_ptid ();
665 save_current_program_space ();
666 save_current_inferior ();
667 }
668 else
669 old_chain = save_current_space_and_thread ();
670
671 /* We're letting go of the parent. */
672 tp = any_live_thread_of_process (inf->vfork_parent->pid);
673 switch_to_thread (tp->ptid);
674
675 /* We're about to detach from the parent, which implicitly
676 removes breakpoints from its address space. There's a
677 catch here: we want to reuse the spaces for the child,
678 but, parent/child are still sharing the pspace at this
679 point, although the exec in reality makes the kernel give
680 the child a fresh set of new pages. The problem here is
681 that the breakpoints module, being unaware of this, would
682 likely choose the child process to write to the parent
683 address space. Swapping the child temporarily away from
684 the spaces has the desired effect. Yes, this is "sort
685 of" a hack. */
686
687 pspace = inf->pspace;
688 aspace = inf->aspace;
689 inf->aspace = NULL;
690 inf->pspace = NULL;
691
692 if (debug_infrun || info_verbose)
693 {
694 target_terminal_ours ();
695
696 if (exec)
697 fprintf_filtered (gdb_stdlog,
698 "Detaching vfork parent process "
699 "%d after child exec.\n",
700 inf->vfork_parent->pid);
701 else
702 fprintf_filtered (gdb_stdlog,
703 "Detaching vfork parent process "
704 "%d after child exit.\n",
705 inf->vfork_parent->pid);
706 }
707
708 target_detach (NULL, 0);
709
710 /* Put it back. */
711 inf->pspace = pspace;
712 inf->aspace = aspace;
713
714 do_cleanups (old_chain);
715 }
716 else if (exec)
717 {
718 /* We're staying attached to the parent, so, really give the
719 child a new address space. */
720 inf->pspace = add_program_space (maybe_new_address_space ());
721 inf->aspace = inf->pspace->aspace;
722 inf->removable = 1;
723 set_current_program_space (inf->pspace);
724
725 resume_parent = inf->vfork_parent->pid;
726
727 /* Break the bonds. */
728 inf->vfork_parent->vfork_child = NULL;
729 }
730 else
731 {
732 struct cleanup *old_chain;
733 struct program_space *pspace;
734
735 /* If this is a vfork child exiting, then the pspace and
736 aspaces were shared with the parent. Since we're
737 reporting the process exit, we'll be mourning all that is
738 found in the address space, and switching to null_ptid,
739 preparing to start a new inferior. But, since we don't
740 want to clobber the parent's address/program spaces, we
741 go ahead and create a new one for this exiting
742 inferior. */
743
744 /* Switch to null_ptid, so that clone_program_space doesn't want
745 to read the selected frame of a dead process. */
746 old_chain = save_inferior_ptid ();
747 inferior_ptid = null_ptid;
748
749 /* This inferior is dead, so avoid giving the breakpoints
750 module the option to write through to it (cloning a
751 program space resets breakpoints). */
752 inf->aspace = NULL;
753 inf->pspace = NULL;
754 pspace = add_program_space (maybe_new_address_space ());
755 set_current_program_space (pspace);
756 inf->removable = 1;
757 inf->symfile_flags = SYMFILE_NO_READ;
758 clone_program_space (pspace, inf->vfork_parent->pspace);
759 inf->pspace = pspace;
760 inf->aspace = pspace->aspace;
761
762 /* Put back inferior_ptid. We'll continue mourning this
763 inferior. */
764 do_cleanups (old_chain);
765
766 resume_parent = inf->vfork_parent->pid;
767 /* Break the bonds. */
768 inf->vfork_parent->vfork_child = NULL;
769 }
770
771 inf->vfork_parent = NULL;
772
773 gdb_assert (current_program_space == inf->pspace);
774
775 if (non_stop && resume_parent != -1)
776 {
777 /* If the user wanted the parent to be running, let it go
778 free now. */
779 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
780
781 if (debug_infrun)
782 fprintf_unfiltered (gdb_stdlog,
783 "infrun: resuming vfork parent process %d\n",
784 resume_parent);
785
786 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
787
788 do_cleanups (old_chain);
789 }
790 }
791 }
792
793 /* Enum strings for "set|show follow-exec-mode". */
794
795 static const char follow_exec_mode_new[] = "new";
796 static const char follow_exec_mode_same[] = "same";
797 static const char *const follow_exec_mode_names[] =
798 {
799 follow_exec_mode_new,
800 follow_exec_mode_same,
801 NULL,
802 };
803
804 static const char *follow_exec_mode_string = follow_exec_mode_same;
805 static void
806 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
807 struct cmd_list_element *c, const char *value)
808 {
809 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
810 }
811
812 /* EXECD_PATHNAME is assumed to be non-NULL. */
813
814 static void
815 follow_exec (ptid_t pid, char *execd_pathname)
816 {
817 struct thread_info *th = inferior_thread ();
818 struct inferior *inf = current_inferior ();
819
820 /* This is an exec event that we actually wish to pay attention to.
821 Refresh our symbol table to the newly exec'd program, remove any
822 momentary bp's, etc.
823
824 If there are breakpoints, they aren't really inserted now,
825 since the exec() transformed our inferior into a fresh set
826 of instructions.
827
828 We want to preserve symbolic breakpoints on the list, since
829 we have hopes that they can be reset after the new a.out's
830 symbol table is read.
831
832 However, any "raw" breakpoints must be removed from the list
833 (e.g., the solib bp's), since their address is probably invalid
834 now.
835
836 And, we DON'T want to call delete_breakpoints() here, since
837 that may write the bp's "shadow contents" (the instruction
838 value that was overwritten with a TRAP instruction). Since
839 we now have a new a.out, those shadow contents aren't valid. */
840
841 mark_breakpoints_out ();
842
843 update_breakpoints_after_exec ();
844
845 /* If there was one, it's gone now. We cannot truly step-to-next
846 statement through an exec(). */
847 th->control.step_resume_breakpoint = NULL;
848 th->control.exception_resume_breakpoint = NULL;
849 th->control.step_range_start = 0;
850 th->control.step_range_end = 0;
851
852 /* The target reports the exec event to the main thread, even if
853 some other thread does the exec, and even if the main thread was
854 already stopped --- if debugging in non-stop mode, it's possible
855 the user had the main thread held stopped in the previous image
856 --- release it now. This is the same behavior as step-over-exec
857 with scheduler-locking on in all-stop mode. */
858 th->stop_requested = 0;
859
860 /* What is this a.out's name? */
861 printf_unfiltered (_("%s is executing new program: %s\n"),
862 target_pid_to_str (inferior_ptid),
863 execd_pathname);
864
865 /* We've followed the inferior through an exec. Therefore, the
866 inferior has essentially been killed & reborn. */
867
868 gdb_flush (gdb_stdout);
869
870 breakpoint_init_inferior (inf_execd);
871
872 if (gdb_sysroot && *gdb_sysroot)
873 {
874 char *name = alloca (strlen (gdb_sysroot)
875 + strlen (execd_pathname)
876 + 1);
877
878 strcpy (name, gdb_sysroot);
879 strcat (name, execd_pathname);
880 execd_pathname = name;
881 }
882
883 /* Reset the shared library package. This ensures that we get a
884 shlib event when the child reaches "_start", at which point the
885 dld will have had a chance to initialize the child. */
886 /* Also, loading a symbol file below may trigger symbol lookups, and
887 we don't want those to be satisfied by the libraries of the
888 previous incarnation of this process. */
889 no_shared_libraries (NULL, 0);
890
891 if (follow_exec_mode_string == follow_exec_mode_new)
892 {
893 struct program_space *pspace;
894
895 /* The user wants to keep the old inferior and program spaces
896 around. Create a new fresh one, and switch to it. */
897
898 inf = add_inferior (current_inferior ()->pid);
899 pspace = add_program_space (maybe_new_address_space ());
900 inf->pspace = pspace;
901 inf->aspace = pspace->aspace;
902
903 exit_inferior_num_silent (current_inferior ()->num);
904
905 set_current_inferior (inf);
906 set_current_program_space (pspace);
907 }
908 else
909 {
910 /* The old description may no longer be fit for the new image.
911 E.g., a 64-bit process exec'ed a 32-bit process. Clear the
912 old description; we'll read a new one below. No need to do
913 this on "follow-exec-mode new", as the old inferior stays
914 around (its description is later cleared/refetched on
915 restart). */
916 target_clear_description ();
917 }
918
919 gdb_assert (current_program_space == inf->pspace);
920
921 /* That a.out is now the one to use. */
922 exec_file_attach (execd_pathname, 0);
923
924 /* SYMFILE_DEFER_BP_RESET is used so that the proper displacement for a PIE
925 (Position Independent Executable) main symbol file will get applied by
926 solib_create_inferior_hook below. breakpoint_re_set would otherwise fail to
927 insert the breakpoints with the zero displacement. */
928
929 symbol_file_add (execd_pathname,
930 (inf->symfile_flags
931 | SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET),
932 NULL, 0);
933
934 if ((inf->symfile_flags & SYMFILE_NO_READ) == 0)
935 set_initial_language ();
936
937 /* If the target can specify a description, read it. Must do this
938 after flipping to the new executable (because the target supplied
939 description must be compatible with the executable's
940 architecture, and the old executable may e.g., be 32-bit, while
941 the new one 64-bit), and before anything involving memory or
942 registers. */
943 target_find_description ();
944
945 solib_create_inferior_hook (0);
946
947 jit_inferior_created_hook ();
948
949 breakpoint_re_set ();
950
951 /* Reinsert all breakpoints. (Those which were symbolic have
952 been reset to the proper address in the new a.out, thanks
953 to symbol_file_command...). */
954 insert_breakpoints ();
955
956 /* The next resume of this inferior should bring it to the shlib
957 startup breakpoints. (If the user had also set bp's on
958 "main" from the old (parent) process, then they'll auto-
959 matically get reset there in the new process.). */
960 }
961
962 /* Non-zero if we are just simulating a single-step. This is needed
963 because we cannot remove the breakpoints in the inferior process
964 until after the `wait' in `wait_for_inferior'. */
965 static int singlestep_breakpoints_inserted_p = 0;
966
967 /* The thread we inserted single-step breakpoints for. */
968 static ptid_t singlestep_ptid;
969
970 /* PC when we started this single-step. */
971 static CORE_ADDR singlestep_pc;
972
973 /* Info about an instruction that is being stepped over. Invalid if
974 ASPACE is NULL. */
975
976 struct step_over_info
977 {
978 /* The instruction's address space. */
979 struct address_space *aspace;
980
981 /* The instruction's address. */
982 CORE_ADDR address;
983 };
984
985 /* The step-over info of the location that is being stepped over.
986
987 Note that with async/breakpoint always-inserted mode, a user might
988 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
989 being stepped over. As setting a new breakpoint inserts all
990 breakpoints, we need to make sure the breakpoint being stepped over
991 isn't inserted then. We do that by only clearing the step-over
992 info when the step-over is actually finished (or aborted).
993
994 Presently GDB can only step over one breakpoint at any given time.
995 Given that threads which can't run code in the same address space as the
996 breakpoint's can't really miss the breakpoint, GDB could be taught
997 to step-over at most one breakpoint per address space (so this info
998 could move to the address space object if/when GDB is extended).
999 The set of breakpoints being stepped over will normally be much
1000 smaller than the set of all breakpoints, so a flag in the
1001 breakpoint location structure would be wasteful. A separate list
1002 also saves complexity and run-time, as otherwise we'd have to go
1003 through all breakpoint locations clearing their flag whenever we
1004 start a new sequence. Similar considerations weigh against storing
1005 this info in the thread object. Plus, not all step overs actually
1006 have breakpoint locations -- e.g., stepping past a single-step
1007 breakpoint, or stepping to complete a non-continuable
1008 watchpoint. */
1009 static struct step_over_info step_over_info;
1010
1011 /* Record the address of the breakpoint/instruction we're currently
1012 stepping over. */
1013
1014 static void
1015 set_step_over_info (struct address_space *aspace, CORE_ADDR address)
1016 {
1017 step_over_info.aspace = aspace;
1018 step_over_info.address = address;
1019 }
1020
1021 /* Called when we're no longer stepping over a breakpoint / an
1022 instruction, so all breakpoints are free to be (re)inserted. */
1023
1024 static void
1025 clear_step_over_info (void)
1026 {
1027 step_over_info.aspace = NULL;
1028 step_over_info.address = 0;
1029 }
1030
1031 /* See inferior.h. */
1032
1033 int
1034 stepping_past_instruction_at (struct address_space *aspace,
1035 CORE_ADDR address)
1036 {
1037 return (step_over_info.aspace != NULL
1038 && breakpoint_address_match (aspace, address,
1039 step_over_info.aspace,
1040 step_over_info.address));
1041 }
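
/* Illustrative sketch (not part of GDB's breakpoint code): per the
   step-over comment above, breakpoint insertion must skip the location
   currently being stepped over.  A hypothetical insertion path could
   consult the predicate above like this (the helper name is an
   assumption; the real check lives with the breakpoint machinery).  */
#if 0
static int
example_should_insert_location (struct address_space *aspace,
                                CORE_ADDR address)
{
  /* Don't (re)insert the very location we are stepping past, or the
     step-over would immediately re-trap on it.  */
  if (stepping_past_instruction_at (aspace, address))
    return 0;

  return 1;
}
#endif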
1042
1043 \f
1044 /* Displaced stepping. */
1045
1046 /* In non-stop debugging mode, we must take special care to manage
1047 breakpoints properly; in particular, the traditional strategy for
1048 stepping a thread past a breakpoint it has hit is unsuitable.
1049 'Displaced stepping' is a tactic for stepping one thread past a
1050 breakpoint it has hit while ensuring that other threads running
1051 concurrently will hit the breakpoint as they should.
1052
1053 The traditional way to step a thread T off a breakpoint in a
1054 multi-threaded program in all-stop mode is as follows:
1055
1056 a0) Initially, all threads are stopped, and breakpoints are not
1057 inserted.
1058 a1) We single-step T, leaving breakpoints uninserted.
1059 a2) We insert breakpoints, and resume all threads.
1060
1061 In non-stop debugging, however, this strategy is unsuitable: we
1062 don't want to have to stop all threads in the system in order to
1063 continue or step T past a breakpoint. Instead, we use displaced
1064 stepping:
1065
1066 n0) Initially, T is stopped, other threads are running, and
1067 breakpoints are inserted.
1068 n1) We copy the instruction "under" the breakpoint to a separate
1069 location, outside the main code stream, making any adjustments
1070 to the instruction, register, and memory state as directed by
1071 T's architecture.
1072 n2) We single-step T over the instruction at its new location.
1073 n3) We adjust the resulting register and memory state as directed
1074 by T's architecture. This includes resetting T's PC to point
1075 back into the main instruction stream.
1076 n4) We resume T.
1077
1078 This approach depends on the following gdbarch methods:
1079
1080 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1081 indicate where to copy the instruction, and how much space must
1082 be reserved there. We use these in step n1.
1083
1084 - gdbarch_displaced_step_copy_insn copies an instruction to a new
1085 address, and makes any necessary adjustments to the instruction,
1086 register contents, and memory. We use this in step n1.
1087
1088 - gdbarch_displaced_step_fixup adjusts registers and memory after
1089 we have successfully single-stepped the instruction, to yield the
1090 same effect the instruction would have had if we had executed it
1091 at its original address. We use this in step n3.
1092
1093 - gdbarch_displaced_step_free_closure provides cleanup.
1094
1095 The gdbarch_displaced_step_copy_insn and
1096 gdbarch_displaced_step_fixup functions must be written so that
1097 copying an instruction with gdbarch_displaced_step_copy_insn,
1098 single-stepping across the copied instruction, and then applying
1099 gdbarch_displaced_step_fixup should have the same effects on the
1100 thread's memory and registers as stepping the instruction in place
1101 would have. Exactly which responsibilities fall to the copy and
1102 which fall to the fixup is up to the author of those functions.
1103
1104 See the comments in gdbarch.sh for details.
1105
1106 Note that displaced stepping and software single-step cannot
1107 currently be used in combination, although with some care I think
1108 they could be made to. Software single-step works by placing
1109 breakpoints on all possible subsequent instructions; if the
1110 displaced instruction is a PC-relative jump, those breakpoints
1111 could fall in very strange places --- on pages that aren't
1112 executable, or at addresses that are not proper instruction
1113 boundaries. (We do generally let other threads run while we wait
1114 to hit the software single-step breakpoint, and they might
1115 encounter such a corrupted instruction.) One way to work around
1116 this would be to have gdbarch_displaced_step_copy_insn fully
1117 simulate the effect of PC-relative instructions (and return NULL)
1118 on architectures that use software single-stepping.
1119
1120 In non-stop mode, we can have independent and simultaneous step
1121 requests, so more than one thread may need to simultaneously step
1122 over a breakpoint. The current implementation assumes there is
1123 only one scratch space per process. In this case, we have to
1124 serialize access to the scratch space. If thread A wants to step
1125 over a breakpoint, but we are currently waiting for some other
1126 thread to complete a displaced step, we leave thread A stopped and
1127 place it in the displaced_step_request_queue. Whenever a displaced
1128 step finishes, we pick the next thread in the queue and start a new
1129 displaced step operation on it. See displaced_step_prepare and
1130 displaced_step_fixup for details. */
1131
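/* Illustrative sketch (not part of GDB): the n1-n4 sequence above,
   expressed with the gdbarch hooks it names.  This is a deliberately
   simplified, hypothetical outline -- the real logic, including error
   handling, the request queue, and waiting for the step to complete,
   lives in displaced_step_prepare and displaced_step_fixup below.  */
#if 0
static void
example_displaced_step_once (ptid_t ptid)
{
  struct regcache *regcache = get_thread_regcache (ptid);
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  CORE_ADDR original = regcache_read_pc (regcache);
  CORE_ADDR copy = gdbarch_displaced_step_location (gdbarch);
  ULONGEST len = gdbarch_max_insn_length (gdbarch);
  gdb_byte *saved = xmalloc (len);
  struct displaced_step_closure *closure;

  /* n1: copy the instruction out of line, saving whatever the scratch
     area held before.  */
  read_memory (copy, saved, len);
  closure = gdbarch_displaced_step_copy_insn (gdbarch, original, copy,
                                              regcache);

  /* n2: single-step the copy.  */
  regcache_write_pc (regcache, copy);
  target_resume (ptid, 1, GDB_SIGNAL_0);
  /* ... wait for the single-step to report back ...  */

  /* n3: restore the scratch area and fix up registers/memory so the
     thread looks as if it had executed the instruction in place.  */
  write_memory (copy, saved, len);
  gdbarch_displaced_step_fixup (gdbarch, closure, original, copy, regcache);
  gdbarch_displaced_step_free_closure (gdbarch, closure);
  xfree (saved);

  /* n4: the thread can now be resumed normally.  */
}
#endif
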
1132 struct displaced_step_request
1133 {
1134 ptid_t ptid;
1135 struct displaced_step_request *next;
1136 };
1137
1138 /* Per-inferior displaced stepping state. */
1139 struct displaced_step_inferior_state
1140 {
1141 /* Pointer to next in linked list. */
1142 struct displaced_step_inferior_state *next;
1143
1144 /* The process this displaced step state refers to. */
1145 int pid;
1146
1147 /* A queue of pending displaced stepping requests. One entry per
1148 thread that needs to do a displaced step. */
1149 struct displaced_step_request *step_request_queue;
1150
1151 /* If this is not null_ptid, this is the thread carrying out a
1152 displaced single-step in process PID. This thread's state will
1153 require fixing up once it has completed its step. */
1154 ptid_t step_ptid;
1155
1156 /* The architecture the thread had when we stepped it. */
1157 struct gdbarch *step_gdbarch;
1158
1159 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1160 for post-step cleanup. */
1161 struct displaced_step_closure *step_closure;
1162
1163 /* The address of the original instruction, and the copy we
1164 made. */
1165 CORE_ADDR step_original, step_copy;
1166
1167 /* Saved contents of copy area. */
1168 gdb_byte *step_saved_copy;
1169 };
1170
1171 /* The list of states of processes involved in displaced stepping
1172 presently. */
1173 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1174
1175 /* Get the displaced stepping state of process PID. */
1176
1177 static struct displaced_step_inferior_state *
1178 get_displaced_stepping_state (int pid)
1179 {
1180 struct displaced_step_inferior_state *state;
1181
1182 for (state = displaced_step_inferior_states;
1183 state != NULL;
1184 state = state->next)
1185 if (state->pid == pid)
1186 return state;
1187
1188 return NULL;
1189 }
1190
1191 /* Add a new displaced stepping state for process PID to the displaced
1192 stepping state list, or return a pointer to an already existing
1193 entry, if it already exists. Never returns NULL. */
1194
1195 static struct displaced_step_inferior_state *
1196 add_displaced_stepping_state (int pid)
1197 {
1198 struct displaced_step_inferior_state *state;
1199
1200 for (state = displaced_step_inferior_states;
1201 state != NULL;
1202 state = state->next)
1203 if (state->pid == pid)
1204 return state;
1205
1206 state = xcalloc (1, sizeof (*state));
1207 state->pid = pid;
1208 state->next = displaced_step_inferior_states;
1209 displaced_step_inferior_states = state;
1210
1211 return state;
1212 }
1213
1214 /* If the inferior is displaced stepping, and ADDR equals the starting address
1215 of the copy area, return the corresponding displaced_step_closure. Otherwise,
1216 return NULL. */
1217
1218 struct displaced_step_closure*
1219 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1220 {
1221 struct displaced_step_inferior_state *displaced
1222 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1223
1224 /* E.g., this is used when checking the mode of the displaced instruction in the copy area. */
1225 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1226 && (displaced->step_copy == addr))
1227 return displaced->step_closure;
1228
1229 return NULL;
1230 }
1231
1232 /* Remove the displaced stepping state of process PID. */
1233
1234 static void
1235 remove_displaced_stepping_state (int pid)
1236 {
1237 struct displaced_step_inferior_state *it, **prev_next_p;
1238
1239 gdb_assert (pid != 0);
1240
1241 it = displaced_step_inferior_states;
1242 prev_next_p = &displaced_step_inferior_states;
1243 while (it)
1244 {
1245 if (it->pid == pid)
1246 {
1247 *prev_next_p = it->next;
1248 xfree (it);
1249 return;
1250 }
1251
1252 prev_next_p = &it->next;
1253 it = *prev_next_p;
1254 }
1255 }
1256
1257 static void
1258 infrun_inferior_exit (struct inferior *inf)
1259 {
1260 remove_displaced_stepping_state (inf->pid);
1261 }
1262
1263 /* If ON, and the architecture supports it, GDB will use displaced
1264 stepping to step over breakpoints. If OFF, or if the architecture
1265 doesn't support it, GDB will instead use the traditional
1266 hold-and-step approach. If AUTO (which is the default), GDB will
1267 decide which technique to use to step over breakpoints depending on
1268 which of all-stop or non-stop mode is active --- displaced stepping
1269 in non-stop mode; hold-and-step in all-stop mode. */
1270
1271 static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1272
1273 static void
1274 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1275 struct cmd_list_element *c,
1276 const char *value)
1277 {
1278 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
1279 fprintf_filtered (file,
1280 _("Debugger's willingness to use displaced stepping "
1281 "to step over breakpoints is %s (currently %s).\n"),
1282 value, non_stop ? "on" : "off");
1283 else
1284 fprintf_filtered (file,
1285 _("Debugger's willingness to use displaced stepping "
1286 "to step over breakpoints is %s.\n"), value);
1287 }
1288
1289 /* Return non-zero if displaced stepping can/should be used to step
1290 over breakpoints. */
1291
1292 static int
1293 use_displaced_stepping (struct gdbarch *gdbarch)
1294 {
1295 return (((can_use_displaced_stepping == AUTO_BOOLEAN_AUTO && non_stop)
1296 || can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1297 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1298 && find_record_target () == NULL);
1299 }
1300
1301 /* Clean out any stray displaced stepping state. */
1302 static void
1303 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1304 {
1305 /* Indicate that there is no cleanup pending. */
1306 displaced->step_ptid = null_ptid;
1307
1308 if (displaced->step_closure)
1309 {
1310 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1311 displaced->step_closure);
1312 displaced->step_closure = NULL;
1313 }
1314 }
1315
1316 static void
1317 displaced_step_clear_cleanup (void *arg)
1318 {
1319 struct displaced_step_inferior_state *state = arg;
1320
1321 displaced_step_clear (state);
1322 }
1323
1324 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1325 void
1326 displaced_step_dump_bytes (struct ui_file *file,
1327 const gdb_byte *buf,
1328 size_t len)
1329 {
1330 int i;
1331
1332 for (i = 0; i < len; i++)
1333 fprintf_unfiltered (file, "%02x ", buf[i]);
1334 fputs_unfiltered ("\n", file);
1335 }
1336
1337 /* Prepare to single-step, using displaced stepping.
1338
1339 Note that we cannot use displaced stepping when we have a signal to
1340 deliver. If we have a signal to deliver and an instruction to step
1341 over, then after the step, there will be no indication from the
1342 target whether the thread entered a signal handler or ignored the
1343 signal and stepped over the instruction successfully --- both cases
1344 result in a simple SIGTRAP. In the first case we mustn't do a
1345 fixup, and in the second case we must --- but we can't tell which.
1346 Comments in the code for 'random signals' in handle_inferior_event
1347 explain how we handle this case instead.
1348
1349 Returns 1 if preparing was successful -- this thread is going to be
1350 stepped now; or 0 if displaced stepping this thread got queued. */
1351 static int
1352 displaced_step_prepare (ptid_t ptid)
1353 {
1354 struct cleanup *old_cleanups, *ignore_cleanups;
1355 struct thread_info *tp = find_thread_ptid (ptid);
1356 struct regcache *regcache = get_thread_regcache (ptid);
1357 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1358 CORE_ADDR original, copy;
1359 ULONGEST len;
1360 struct displaced_step_closure *closure;
1361 struct displaced_step_inferior_state *displaced;
1362 int status;
1363
1364 /* We should never reach this function if the architecture does not
1365 support displaced stepping. */
1366 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1367
1368 /* Disable range stepping while executing in the scratch pad. We
1369 want a single-step even if executing the displaced instruction in
1370 the scratch buffer lands within the stepping range (e.g., a
1371 jump/branch). */
1372 tp->control.may_range_step = 0;
1373
1374 /* We have to displaced step one thread at a time, as we only have
1375 access to a single scratch space per inferior. */
1376
1377 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1378
1379 if (!ptid_equal (displaced->step_ptid, null_ptid))
1380 {
1381 /* Already waiting for a displaced step to finish. Defer this
1382 request and place in queue. */
1383 struct displaced_step_request *req, *new_req;
1384
1385 if (debug_displaced)
1386 fprintf_unfiltered (gdb_stdlog,
1387 "displaced: defering step of %s\n",
1388 target_pid_to_str (ptid));
1389
1390 new_req = xmalloc (sizeof (*new_req));
1391 new_req->ptid = ptid;
1392 new_req->next = NULL;
1393
1394 if (displaced->step_request_queue)
1395 {
1396 for (req = displaced->step_request_queue;
1397 req && req->next;
1398 req = req->next)
1399 ;
1400 req->next = new_req;
1401 }
1402 else
1403 displaced->step_request_queue = new_req;
1404
1405 return 0;
1406 }
1407 else
1408 {
1409 if (debug_displaced)
1410 fprintf_unfiltered (gdb_stdlog,
1411 "displaced: stepping %s now\n",
1412 target_pid_to_str (ptid));
1413 }
1414
1415 displaced_step_clear (displaced);
1416
1417 old_cleanups = save_inferior_ptid ();
1418 inferior_ptid = ptid;
1419
1420 original = regcache_read_pc (regcache);
1421
1422 copy = gdbarch_displaced_step_location (gdbarch);
1423 len = gdbarch_max_insn_length (gdbarch);
1424
1425 /* Save the original contents of the copy area. */
1426 displaced->step_saved_copy = xmalloc (len);
1427 ignore_cleanups = make_cleanup (free_current_contents,
1428 &displaced->step_saved_copy);
1429 status = target_read_memory (copy, displaced->step_saved_copy, len);
1430 if (status != 0)
1431 throw_error (MEMORY_ERROR,
1432 _("Error accessing memory address %s (%s) for "
1433 "displaced-stepping scratch space."),
1434 paddress (gdbarch, copy), safe_strerror (status));
1435 if (debug_displaced)
1436 {
1437 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1438 paddress (gdbarch, copy));
1439 displaced_step_dump_bytes (gdb_stdlog,
1440 displaced->step_saved_copy,
1441 len);
1442 };
1443
1444 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1445 original, copy, regcache);
1446
1447 /* We don't support the fully-simulated case at present. */
1448 gdb_assert (closure);
1449
1450 /* Save the information we need to fix things up if the step
1451 succeeds. */
1452 displaced->step_ptid = ptid;
1453 displaced->step_gdbarch = gdbarch;
1454 displaced->step_closure = closure;
1455 displaced->step_original = original;
1456 displaced->step_copy = copy;
1457
1458 make_cleanup (displaced_step_clear_cleanup, displaced);
1459
1460 /* Resume execution at the copy. */
1461 regcache_write_pc (regcache, copy);
1462
1463 discard_cleanups (ignore_cleanups);
1464
1465 do_cleanups (old_cleanups);
1466
1467 if (debug_displaced)
1468 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1469 paddress (gdbarch, copy));
1470
1471 return 1;
1472 }
1473
1474 static void
1475 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1476 const gdb_byte *myaddr, int len)
1477 {
1478 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1479
1480 inferior_ptid = ptid;
1481 write_memory (memaddr, myaddr, len);
1482 do_cleanups (ptid_cleanup);
1483 }
1484
1485 /* Restore the contents of the copy area for thread PTID. */
1486
1487 static void
1488 displaced_step_restore (struct displaced_step_inferior_state *displaced,
1489 ptid_t ptid)
1490 {
1491 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1492
1493 write_memory_ptid (ptid, displaced->step_copy,
1494 displaced->step_saved_copy, len);
1495 if (debug_displaced)
1496 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
1497 target_pid_to_str (ptid),
1498 paddress (displaced->step_gdbarch,
1499 displaced->step_copy));
1500 }
1501
1502 static void
1503 displaced_step_fixup (ptid_t event_ptid, enum gdb_signal signal)
1504 {
1505 struct cleanup *old_cleanups;
1506 struct displaced_step_inferior_state *displaced
1507 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1508
1509 /* Was any thread of this process doing a displaced step? */
1510 if (displaced == NULL)
1511 return;
1512
1513 /* Was this event for the pid we displaced? */
1514 if (ptid_equal (displaced->step_ptid, null_ptid)
1515 || ! ptid_equal (displaced->step_ptid, event_ptid))
1516 return;
1517
1518 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1519
1520 displaced_step_restore (displaced, displaced->step_ptid);
1521
1522 /* Did the instruction complete successfully? */
1523 if (signal == GDB_SIGNAL_TRAP)
1524 {
1525 /* Fix up the resulting state. */
1526 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1527 displaced->step_closure,
1528 displaced->step_original,
1529 displaced->step_copy,
1530 get_thread_regcache (displaced->step_ptid));
1531 }
1532 else
1533 {
1534 /* Since the instruction didn't complete, all we can do is
1535 relocate the PC. */
1536 struct regcache *regcache = get_thread_regcache (event_ptid);
1537 CORE_ADDR pc = regcache_read_pc (regcache);
1538
1539 pc = displaced->step_original + (pc - displaced->step_copy);
1540 regcache_write_pc (regcache, pc);
1541 }
1542
1543 do_cleanups (old_cleanups);
1544
1545 displaced->step_ptid = null_ptid;
1546
1547 /* Are there any pending displaced stepping requests? If so, run
1548 one now. Leave the state object around, since we're likely to
1549 need it again soon. */
1550 while (displaced->step_request_queue)
1551 {
1552 struct displaced_step_request *head;
1553 ptid_t ptid;
1554 struct regcache *regcache;
1555 struct gdbarch *gdbarch;
1556 CORE_ADDR actual_pc;
1557 struct address_space *aspace;
1558
1559 head = displaced->step_request_queue;
1560 ptid = head->ptid;
1561 displaced->step_request_queue = head->next;
1562 xfree (head);
1563
1564 context_switch (ptid);
1565
1566 regcache = get_thread_regcache (ptid);
1567 actual_pc = regcache_read_pc (regcache);
1568 aspace = get_regcache_aspace (regcache);
1569
1570 if (breakpoint_here_p (aspace, actual_pc))
1571 {
1572 if (debug_displaced)
1573 fprintf_unfiltered (gdb_stdlog,
1574 "displaced: stepping queued %s now\n",
1575 target_pid_to_str (ptid));
1576
1577 displaced_step_prepare (ptid);
1578
1579 gdbarch = get_regcache_arch (regcache);
1580
1581 if (debug_displaced)
1582 {
1583 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1584 gdb_byte buf[4];
1585
1586 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1587 paddress (gdbarch, actual_pc));
1588 read_memory (actual_pc, buf, sizeof (buf));
1589 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1590 }
1591
1592 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1593 displaced->step_closure))
1594 target_resume (ptid, 1, GDB_SIGNAL_0);
1595 else
1596 target_resume (ptid, 0, GDB_SIGNAL_0);
1597
1598 /* Done, we're stepping a thread. */
1599 break;
1600 }
1601 else
1602 {
1603 int step;
1604 struct thread_info *tp = inferior_thread ();
1605
1606 /* The breakpoint we were sitting under has since been
1607 removed. */
1608 tp->control.trap_expected = 0;
1609
1610 /* Go back to what we were trying to do. */
1611 step = currently_stepping (tp);
1612
1613 if (debug_displaced)
1614 fprintf_unfiltered (gdb_stdlog,
1615 "displaced: breakpoint is gone: %s, step(%d)\n",
1616 target_pid_to_str (tp->ptid), step);
1617
1618 target_resume (ptid, step, GDB_SIGNAL_0);
1619 tp->suspend.stop_signal = GDB_SIGNAL_0;
1620
1621 /* This request was discarded. See if there's any other
1622 thread waiting for its turn. */
1623 }
1624 }
1625 }
1626
1627 /* Update global variables holding ptids to hold NEW_PTID if they were
1628 holding OLD_PTID. */
1629 static void
1630 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1631 {
1632 struct displaced_step_request *it;
1633 struct displaced_step_inferior_state *displaced;
1634
1635 if (ptid_equal (inferior_ptid, old_ptid))
1636 inferior_ptid = new_ptid;
1637
1638 if (ptid_equal (singlestep_ptid, old_ptid))
1639 singlestep_ptid = new_ptid;
1640
1641 for (displaced = displaced_step_inferior_states;
1642 displaced;
1643 displaced = displaced->next)
1644 {
1645 if (ptid_equal (displaced->step_ptid, old_ptid))
1646 displaced->step_ptid = new_ptid;
1647
1648 for (it = displaced->step_request_queue; it; it = it->next)
1649 if (ptid_equal (it->ptid, old_ptid))
1650 it->ptid = new_ptid;
1651 }
1652 }
1653
1654 \f
1655 /* Resuming. */
1656
1657 /* Things to clean up if we QUIT out of resume (). */
1658 static void
1659 resume_cleanups (void *ignore)
1660 {
1661 normal_stop ();
1662 }
1663
1664 static const char schedlock_off[] = "off";
1665 static const char schedlock_on[] = "on";
1666 static const char schedlock_step[] = "step";
1667 static const char *const scheduler_enums[] = {
1668 schedlock_off,
1669 schedlock_on,
1670 schedlock_step,
1671 NULL
1672 };
1673 static const char *scheduler_mode = schedlock_off;
1674 static void
1675 show_scheduler_mode (struct ui_file *file, int from_tty,
1676 struct cmd_list_element *c, const char *value)
1677 {
1678 fprintf_filtered (file,
1679 _("Mode for locking scheduler "
1680 "during execution is \"%s\".\n"),
1681 value);
1682 }
1683
1684 static void
1685 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1686 {
1687 if (!target_can_lock_scheduler)
1688 {
1689 scheduler_mode = schedlock_off;
1690 error (_("Target '%s' cannot support this command."), target_shortname);
1691 }
1692 }
1693
1694 /* True if execution commands resume all threads of all processes by
1695 default; otherwise, resume only threads of the current inferior
1696 process. */
1697 int sched_multi = 0;
1698
1699 /* Try to set up for software single stepping over the specified location.
1700 Return 1 if target_resume() should use hardware single step.
1701
1702 GDBARCH the current gdbarch.
1703 PC the location to step over. */
1704
1705 static int
1706 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1707 {
1708 int hw_step = 1;
1709
1710 if (execution_direction == EXEC_FORWARD
1711 && gdbarch_software_single_step_p (gdbarch)
1712 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1713 {
1714 hw_step = 0;
1715 /* Do not pull these breakpoints until after a `wait' in
1716 `wait_for_inferior'. */
1717 singlestep_breakpoints_inserted_p = 1;
1718 singlestep_ptid = inferior_ptid;
1719 singlestep_pc = pc;
1720 }
1721 return hw_step;
1722 }
1723
1724 /* Return a ptid representing the set of threads that we will proceed,
1725 from the perspective of the user/frontend. We may actually resume
1726 fewer threads at first, e.g., if a thread is stopped at a
1727 breakpoint that needs stepping-off, but that should not be visible
1728 to the user/frontend, and neither should the frontend/user be
1729 allowed to proceed any of the threads that happen to be stopped for
1730 internal run control handling, if a previous command wanted them
1731 resumed. */
1732
1733 ptid_t
1734 user_visible_resume_ptid (int step)
1735 {
1736 /* By default, resume all threads of all processes. */
1737 ptid_t resume_ptid = RESUME_ALL;
1738
1739 /* Maybe resume only all threads of the current process. */
1740 if (!sched_multi && target_supports_multi_process ())
1741 {
1742 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1743 }
1744
1745 /* Maybe resume a single thread after all. */
1746 if (non_stop)
1747 {
1748 /* With non-stop mode on, threads are always handled
1749 individually. */
1750 resume_ptid = inferior_ptid;
1751 }
1752 else if ((scheduler_mode == schedlock_on)
1753 || (scheduler_mode == schedlock_step
1754 && (step || singlestep_breakpoints_inserted_p)))
1755 {
1756 /* User-settable 'scheduler' mode requires solo thread resume. */
1757 resume_ptid = inferior_ptid;
1758 }
1759
1760 return resume_ptid;
1761 }
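
/* For example: with the default settings on a target that supports
   multi-process debugging, an execution command resumes every thread
   of the current inferior's process; with "set scheduler-locking on",
   or "set scheduler-locking step" during a stepping command, only the
   current thread is resumed; and in non-stop mode, the command always
   applies to the current thread only.  */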
1762
1763 /* Resume the inferior, but allow a QUIT. This is useful if the user
1764 wants to interrupt some lengthy single-stepping operation
1765 (for child processes, the SIGINT goes to the inferior, and so
1766 we get a SIGINT random_signal, but for remote debugging and perhaps
1767 other targets, that's not true).
1768
1769 STEP nonzero if we should step (zero to continue instead).
1770 SIG is the signal to give the inferior (zero for none). */
1771 void
1772 resume (int step, enum gdb_signal sig)
1773 {
1774 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1775 struct regcache *regcache = get_current_regcache ();
1776 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1777 struct thread_info *tp = inferior_thread ();
1778 CORE_ADDR pc = regcache_read_pc (regcache);
1779 struct address_space *aspace = get_regcache_aspace (regcache);
1780 ptid_t resume_ptid;
1781 /* From here on, this represents the caller's step vs continue
1782 request, while STEP represents what we'll actually request the
1783 target to do. STEP can decay from a step to a continue, if e.g.,
1784 we need to implement single-stepping with breakpoints (software
1785 single-step). When deciding whether "set scheduler-locking step"
1786 applies, it's the caller's intention that counts. */
1787 const int entry_step = step;
1788
1789 QUIT;
1790
1791 if (current_inferior ()->waiting_for_vfork_done)
1792 {
1793 /* Don't try to single-step a vfork parent that is waiting for
1794 the child to get out of the shared memory region (by exec'ing
1795 or exiting). This is particularly important on software
1796 single-step archs, as the child process would trip on the
1797 software single step breakpoint inserted for the parent
1798 process. Since the parent will not actually execute any
1799 instruction until the child is out of the shared region (such
1800 are vfork's semantics), it is safe to simply continue it.
1801 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
1802 the parent, and tell it to `keep_going', which automatically
1803 re-sets it stepping. */
1804 if (debug_infrun)
1805 fprintf_unfiltered (gdb_stdlog,
1806 "infrun: resume : clear step\n");
1807 step = 0;
1808 }
1809
1810 if (debug_infrun)
1811 fprintf_unfiltered (gdb_stdlog,
1812 "infrun: resume (step=%d, signal=%s), "
1813 "trap_expected=%d, current thread [%s] at %s\n",
1814 step, gdb_signal_to_symbol_string (sig),
1815 tp->control.trap_expected,
1816 target_pid_to_str (inferior_ptid),
1817 paddress (gdbarch, pc));
1818
1819 /* Normally, by the time we reach `resume', the breakpoints are either
1820 removed or inserted, as appropriate. The exception is if we're sitting
1821 at a permanent breakpoint; we need to step over it, but permanent
1822 breakpoints can't be removed. So we have to test for it here. */
1823 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1824 {
1825 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1826 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1827 else
1828 error (_("\
1829 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1830 how to step past a permanent breakpoint on this architecture. Try using\n\
1831 a command like `return' or `jump' to continue execution."));
1832 }
1833
1834 /* If we have a breakpoint to step over, make sure to do a single
1835 step only. Same if we have software watchpoints. */
1836 if (tp->control.trap_expected || bpstat_should_step ())
1837 tp->control.may_range_step = 0;
1838
1839 /* If enabled, step over breakpoints by executing a copy of the
1840 instruction at a different address.
1841
1842 We can't use displaced stepping when we have a signal to deliver;
1843 the comments for displaced_step_prepare explain why. The
1844 comments in the handle_inferior event for dealing with 'random
1845 signals' explain what we do instead.
1846
1847 We can't use displaced stepping when we are waiting for a vfork_done
1848 event; displaced stepping breaks the vfork child in the same way a
1849 software single-step breakpoint would. */
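  /* (Displaced stepping means executing a copy of the instruction out
     of line, in a scratch area, and then fixing up the PC and any other
     affected registers afterwards, so the breakpoint itself never has
     to be removed from the original address.)  */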
1850 if (use_displaced_stepping (gdbarch)
1851 && (tp->control.trap_expected
1852 || (step && gdbarch_software_single_step_p (gdbarch)))
1853 && sig == GDB_SIGNAL_0
1854 && !current_inferior ()->waiting_for_vfork_done)
1855 {
1856 struct displaced_step_inferior_state *displaced;
1857
1858 if (!displaced_step_prepare (inferior_ptid))
1859 {
1860 /* Got placed in displaced stepping queue. Will be resumed
1861 later when all the currently queued displaced stepping
1862 requests finish. The thread is not executing at this
1863 point, and the call to set_executing will be made later.
1864 But we need to call set_running here, since from the
1865 user/frontend's point of view, threads were set running.
1866 Unless we're calling an inferior function, as in that
1867 case we pretend the inferior doesn't run at all. */
1868 if (!tp->control.in_infcall)
1869 set_running (user_visible_resume_ptid (entry_step), 1);
1870 discard_cleanups (old_cleanups);
1871 return;
1872 }
1873
1874 /* Update pc to reflect the new address from which we will execute
1875 instructions due to displaced stepping. */
1876 pc = regcache_read_pc (get_thread_regcache (inferior_ptid));
1877
1878 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1879 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1880 displaced->step_closure);
1881 }
1882
1883 /* Do we need to do it the hard way, w/temp breakpoints? */
1884 else if (step)
1885 step = maybe_software_singlestep (gdbarch, pc);
1886
1887 /* Currently, our software single-step implementation leads to different
1888 results than hardware single-stepping in one situation: when stepping
1889 into delivering a signal which has an associated signal handler,
1890 hardware single-step will stop at the first instruction of the handler,
1891 while software single-step will simply skip execution of the handler.
1892
1893 For now, this difference in behavior is accepted since there is no
1894 easy way to actually implement single-stepping into a signal handler
1895 without kernel support.
1896
1897 However, there is one scenario where this difference leads to follow-on
1898 problems: if we're stepping off a breakpoint by removing all breakpoints
1899 and then single-stepping. In this case, the software single-step
1900 behavior means that even if there is a *breakpoint* in the signal
1901 handler, GDB still would not stop.
1902
1903 Fortunately, we can at least fix this particular issue. We detect
1904 here the case where we are about to deliver a signal while software
1905 single-stepping with breakpoints removed. In this situation, we
1906 revert the decisions to remove all breakpoints and insert single-
1907 step breakpoints, and instead we install a step-resume breakpoint
1908 at the current address, deliver the signal without stepping, and
1909 once we arrive back at the step-resume breakpoint, actually step
1910 over the breakpoint we originally wanted to step over. */
1911 if (singlestep_breakpoints_inserted_p
1912 && tp->control.trap_expected && sig != GDB_SIGNAL_0)
1913 {
1914 /* If we have nested signals or a pending signal is delivered
1915 immediately after a handler returns, we might already have
1916 a step-resume breakpoint set on the earlier handler. We cannot
1917 set another step-resume breakpoint; just continue on until the
1918 original breakpoint is hit. */
1919 if (tp->control.step_resume_breakpoint == NULL)
1920 {
1921 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
1922 tp->step_after_step_resume_breakpoint = 1;
1923 }
1924
1925 remove_single_step_breakpoints ();
1926 singlestep_breakpoints_inserted_p = 0;
1927
1928 clear_step_over_info ();
1929 tp->control.trap_expected = 0;
1930
1931 insert_breakpoints ();
1932 }
1933
1934 /* If STEP is set, it's a request to use hardware stepping
1935 facilities. But in that case, we should never
1936 use a singlestep breakpoint. */
1937 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1938
1939 /* Decide the set of threads to ask the target to resume. Start
1940 by assuming everything will be resumed, then narrow the set
1941 by applying increasingly restrictive conditions. */
1942 resume_ptid = user_visible_resume_ptid (entry_step);
1943
1944 /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
1945 threads (e.g., we might need to step over a breakpoint), from the
1946 user/frontend's point of view, all threads in RESUME_PTID are now
1947 running. Unless we're calling an inferior function, as in that
1948 case we pretend the inferior doesn't run at all. */
1949 if (!tp->control.in_infcall)
1950 set_running (resume_ptid, 1);
1951
1952 /* Maybe resume a single thread after all. */
1953 if ((step || singlestep_breakpoints_inserted_p)
1954 && tp->control.trap_expected)
1955 {
1956 /* We're allowing a thread to run past a breakpoint it has
1957 hit, by single-stepping the thread with the breakpoint
1958 removed. In which case, we need to single-step only this
1959 thread, and keep others stopped, as they can miss this
1960 breakpoint if allowed to run. */
1961 resume_ptid = inferior_ptid;
1962 }
1963
1964 if (gdbarch_cannot_step_breakpoint (gdbarch))
1965 {
1966 /* Most targets can step a breakpoint instruction, thus
1967 executing it normally. But if this one cannot, just
1968 continue and we will hit it anyway. */
1969 if (step && breakpoint_inserted_here_p (aspace, pc))
1970 step = 0;
1971 }
1972
1973 if (debug_displaced
1974 && use_displaced_stepping (gdbarch)
1975 && tp->control.trap_expected)
1976 {
1977 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1978 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1979 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1980 gdb_byte buf[4];
1981
1982 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1983 paddress (resume_gdbarch, actual_pc));
1984 read_memory (actual_pc, buf, sizeof (buf));
1985 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1986 }
1987
1988 if (tp->control.may_range_step)
1989 {
1990 /* If we're resuming a thread with the PC out of the step
1991 range, then we're doing some nested/finer run control
1992 operation, like stepping the thread out of the dynamic
1993 linker or the displaced stepping scratch pad. We
1994 shouldn't have allowed a range step then. */
1995 gdb_assert (pc_in_thread_step_range (pc, tp));
1996 }
1997
1998 /* Install inferior's terminal modes. */
1999 target_terminal_inferior ();
2000
2001 /* Avoid confusing the next resume, if the next stop/resume
2002 happens to apply to another thread. */
2003 tp->suspend.stop_signal = GDB_SIGNAL_0;
2004
2005 /* Advise target which signals may be handled silently. If we have
2006 removed breakpoints because we are stepping over one (which can
2007 happen only if we are not using displaced stepping), we need to
2008 receive all signals to avoid accidentally skipping a breakpoint
2009 during execution of a signal handler. */
2010 if ((step || singlestep_breakpoints_inserted_p)
2011 && tp->control.trap_expected
2012 && !use_displaced_stepping (gdbarch))
2013 target_pass_signals (0, NULL);
2014 else
2015 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
2016
2017 target_resume (resume_ptid, step, sig);
2018
2019 discard_cleanups (old_cleanups);
2020 }
2021 \f
2022 /* Proceeding. */
2023
2024 /* Clear out all variables saying what to do when the inferior is continued.
2025 First do this, then set the ones you want, then call `proceed'. */
2026
2027 static void
2028 clear_proceed_status_thread (struct thread_info *tp)
2029 {
2030 if (debug_infrun)
2031 fprintf_unfiltered (gdb_stdlog,
2032 "infrun: clear_proceed_status_thread (%s)\n",
2033 target_pid_to_str (tp->ptid));
2034
2035 tp->control.trap_expected = 0;
2036 tp->control.step_range_start = 0;
2037 tp->control.step_range_end = 0;
2038 tp->control.may_range_step = 0;
2039 tp->control.step_frame_id = null_frame_id;
2040 tp->control.step_stack_frame_id = null_frame_id;
2041 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
2042 tp->stop_requested = 0;
2043
2044 tp->control.stop_step = 0;
2045
2046 tp->control.proceed_to_finish = 0;
2047
2048 tp->control.command_interp = NULL;
2049
2050 /* Discard any remaining commands or status from previous stop. */
2051 bpstat_clear (&tp->control.stop_bpstat);
2052 }
2053
2054 static int
2055 clear_proceed_status_callback (struct thread_info *tp, void *data)
2056 {
2057 if (is_exited (tp->ptid))
2058 return 0;
2059
2060 clear_proceed_status_thread (tp);
2061 return 0;
2062 }
2063
2064 void
2065 clear_proceed_status (void)
2066 {
2067 if (!non_stop)
2068 {
2069 /* In all-stop mode, delete the per-thread status of all
2070 threads; even if inferior_ptid is null_ptid, there may be
2071 threads on the list. E.g., we may be launching a new
2072 process, while selecting the executable. */
2073 iterate_over_threads (clear_proceed_status_callback, NULL);
2074 }
2075
2076 if (!ptid_equal (inferior_ptid, null_ptid))
2077 {
2078 struct inferior *inferior;
2079
2080 if (non_stop)
2081 {
2082 /* If in non-stop mode, only delete the per-thread status of
2083 the current thread. */
2084 clear_proceed_status_thread (inferior_thread ());
2085 }
2086
2087 inferior = current_inferior ();
2088 inferior->control.stop_soon = NO_STOP_QUIETLY;
2089 }
2090
2091 stop_after_trap = 0;
2092
2093 clear_step_over_info ();
2094
2095 observer_notify_about_to_proceed ();
2096
2097 if (stop_registers)
2098 {
2099 regcache_xfree (stop_registers);
2100 stop_registers = NULL;
2101 }
2102 }
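
/* A typical execution command therefore does, roughly:

     clear_proceed_status ();
     ... set any step ranges or other control state on the thread ...
     proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT, 0);

   This is only an illustrative sketch; the real callers live in
   infcmd.c and friends.  */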
2103
2104 /* Returns true if TP is still stopped at a breakpoint that needs
2105 stepping-over in order to make progress. If the breakpoint is gone
2106 meanwhile, we can skip the whole step-over dance. */
2107
2108 static int
2109 thread_still_needs_step_over (struct thread_info *tp)
2110 {
2111 if (tp->stepping_over_breakpoint)
2112 {
2113 struct regcache *regcache = get_thread_regcache (tp->ptid);
2114
2115 if (breakpoint_here_p (get_regcache_aspace (regcache),
2116 regcache_read_pc (regcache)))
2117 return 1;
2118
2119 tp->stepping_over_breakpoint = 0;
2120 }
2121
2122 return 0;
2123 }
2124
2125 /* Returns true if scheduler locking applies. STEP indicates whether
2126 we're about to do a step/next-like command to a thread. */
2127
2128 static int
2129 schedlock_applies (int step)
2130 {
2131 return (scheduler_mode == schedlock_on
2132 || (scheduler_mode == schedlock_step
2133 && step));
2134 }
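
/* For example, with "set scheduler-locking step" this returns true
   only when STEP is set, so a plain "continue" still lets the other
   threads run, while "step"/"next" keep them stopped.  */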
2135
2136 /* Look for a thread other than EXCEPT that has previously reported a
2137 breakpoint event, and thus needs a step-over in order to make
2138 progress. Returns NULL if none is found. STEP indicates whether
2139 we're about to step the current thread, in order to decide whether
2140 "set scheduler-locking step" applies. */
2141
2142 static struct thread_info *
2143 find_thread_needs_step_over (int step, struct thread_info *except)
2144 {
2145 struct thread_info *tp, *current;
2146
2147 /* With non-stop mode on, threads are always handled individually. */
2148 gdb_assert (! non_stop);
2149
2150 current = inferior_thread ();
2151
2152 /* If scheduler locking applies, we can avoid iterating over all
2153 threads. */
2154 if (schedlock_applies (step))
2155 {
2156 if (except != current
2157 && thread_still_needs_step_over (current))
2158 return current;
2159
2160 return NULL;
2161 }
2162
2163 ALL_NON_EXITED_THREADS (tp)
2164 {
2165 /* Ignore the EXCEPT thread. */
2166 if (tp == except)
2167 continue;
2168 /* Ignore threads of processes we're not resuming. */
2169 if (!sched_multi
2170 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
2171 continue;
2172
2173 if (thread_still_needs_step_over (tp))
2174 return tp;
2175 }
2176
2177 return NULL;
2178 }
2179
2180 /* Basic routine for continuing the program in various fashions.
2181
2182 ADDR is the address to resume at, or -1 for resume where stopped.
2183 SIGGNAL is the signal to give it, or 0 for none,
2184 or -1 to act according to how it stopped.
2185 STEP is nonzero if we should trap after one instruction.
2186 -1 means return after that and print nothing.
2187 You should probably set various step_... variables
2188 before calling here, if you are stepping.
2189
2190 You should call clear_proceed_status before calling proceed. */
2191
2192 void
2193 proceed (CORE_ADDR addr, enum gdb_signal siggnal, int step)
2194 {
2195 struct regcache *regcache;
2196 struct gdbarch *gdbarch;
2197 struct thread_info *tp;
2198 CORE_ADDR pc;
2199 struct address_space *aspace;
2200
2201 /* If we're stopped at a fork/vfork, follow the branch set by the
2202 "set follow-fork-mode" command; otherwise, we'll just proceed
2203 resuming the current thread. */
2204 if (!follow_fork ())
2205 {
2206 /* The target for some reason decided not to resume. */
2207 normal_stop ();
2208 if (target_can_async_p ())
2209 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2210 return;
2211 }
2212
2213 /* We'll update this if & when we switch to a new thread. */
2214 previous_inferior_ptid = inferior_ptid;
2215
2216 regcache = get_current_regcache ();
2217 gdbarch = get_regcache_arch (regcache);
2218 aspace = get_regcache_aspace (regcache);
2219 pc = regcache_read_pc (regcache);
2220 tp = inferior_thread ();
2221
2222 if (step > 0)
2223 step_start_function = find_pc_function (pc);
2224 if (step < 0)
2225 stop_after_trap = 1;
2226
2227 /* Fill in with reasonable starting values. */
2228 init_thread_stepping_state (tp);
2229
2230 if (addr == (CORE_ADDR) -1)
2231 {
2232 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
2233 && execution_direction != EXEC_REVERSE)
2234 /* There is a breakpoint at the address we will resume at,
2235 step one instruction before inserting breakpoints so that
2236 we do not stop right away (and report a second hit at this
2237 breakpoint).
2238
2239 Note, we don't do this in reverse, because we won't
2240 actually be executing the breakpoint insn anyway.
2241 We'll be (un-)executing the previous instruction. */
2242 tp->stepping_over_breakpoint = 1;
2243 else if (gdbarch_single_step_through_delay_p (gdbarch)
2244 && gdbarch_single_step_through_delay (gdbarch,
2245 get_current_frame ()))
2246 /* We stepped onto an instruction that needs to be stepped
2247 again before re-inserting the breakpoint, do so. */
2248 tp->stepping_over_breakpoint = 1;
2249 }
2250 else
2251 {
2252 regcache_write_pc (regcache, addr);
2253 }
2254
2255 /* Record the interpreter that issued the execution command that
2256 caused this thread to resume. If the top level interpreter is
2257 MI/async, and the execution command was a CLI command
2258 (next/step/etc.), we'll want to print stop event output to the MI
2259 console channel (the stepped-to line, etc.), as if the user
2260 entered the execution command on a real GDB console. */
2261 inferior_thread ()->control.command_interp = command_interp ();
2262
2263 if (debug_infrun)
2264 fprintf_unfiltered (gdb_stdlog,
2265 "infrun: proceed (addr=%s, signal=%s, step=%d)\n",
2266 paddress (gdbarch, addr),
2267 gdb_signal_to_symbol_string (siggnal), step);
2268
2269 if (non_stop)
2270 /* In non-stop, each thread is handled individually. The context
2271 must already be set to the right thread here. */
2272 ;
2273 else
2274 {
2275 struct thread_info *step_over;
2276
2277 /* In a multi-threaded task we may select another thread and
2278 then continue or step.
2279
2280 But if the old thread was stopped at a breakpoint, it will
2281 immediately cause another breakpoint stop without any
2282 execution (i.e. it will report a breakpoint hit incorrectly).
2283 So we must step over it first.
2284
2285 Look for a thread other than the current (TP) that reported a
2286 breakpoint hit and hasn't been resumed yet. */
2287 step_over = find_thread_needs_step_over (step, tp);
2288 if (step_over != NULL)
2289 {
2290 if (debug_infrun)
2291 fprintf_unfiltered (gdb_stdlog,
2292 "infrun: need to step-over [%s] first\n",
2293 target_pid_to_str (step_over->ptid));
2294
2295 /* Store the prev_pc for the stepping thread too, needed by
2296 switch_back_to_stepped_thread. */
2297 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2298 switch_to_thread (step_over->ptid);
2299 tp = step_over;
2300 }
2301 }
2302
2303 /* If we need to step over a breakpoint, and we're not using
2304 displaced stepping to do so, insert all breakpoints (watchpoints,
2305 etc.) but the one we're stepping over, step one instruction, and
2306 then re-insert the breakpoint when that step is finished. */
2307 if (tp->stepping_over_breakpoint && !use_displaced_stepping (gdbarch))
2308 {
2309 struct regcache *regcache = get_current_regcache ();
2310
2311 set_step_over_info (get_regcache_aspace (regcache),
2312 regcache_read_pc (regcache));
2313 }
2314 else
2315 clear_step_over_info ();
2316
2317 insert_breakpoints ();
2318
2319 tp->control.trap_expected = tp->stepping_over_breakpoint;
2320
2321 if (!non_stop)
2322 {
2323 /* Pass the last stop signal to the thread we're resuming,
2324 irrespective of whether the current thread is the thread that
2325 got the last event or not. This was historically GDB's
2326 behaviour before keeping a stop_signal per thread. */
2327
2328 struct thread_info *last_thread;
2329 ptid_t last_ptid;
2330 struct target_waitstatus last_status;
2331
2332 get_last_target_status (&last_ptid, &last_status);
2333 if (!ptid_equal (inferior_ptid, last_ptid)
2334 && !ptid_equal (last_ptid, null_ptid)
2335 && !ptid_equal (last_ptid, minus_one_ptid))
2336 {
2337 last_thread = find_thread_ptid (last_ptid);
2338 if (last_thread)
2339 {
2340 tp->suspend.stop_signal = last_thread->suspend.stop_signal;
2341 last_thread->suspend.stop_signal = GDB_SIGNAL_0;
2342 }
2343 }
2344 }
2345
2346 if (siggnal != GDB_SIGNAL_DEFAULT)
2347 tp->suspend.stop_signal = siggnal;
2348 /* If this signal should not be seen by the program,
2349 give it zero. Used for debugging signals. */
2350 else if (!signal_program[tp->suspend.stop_signal])
2351 tp->suspend.stop_signal = GDB_SIGNAL_0;
2352
2353 annotate_starting ();
2354
2355 /* Make sure that output from GDB appears before output from the
2356 inferior. */
2357 gdb_flush (gdb_stdout);
2358
2359 /* Refresh prev_pc value just prior to resuming. This used to be
2360 done in stop_waiting; however, setting prev_pc there did not handle
2361 scenarios such as inferior function calls or returning from
2362 a function via the return command. In those cases, the prev_pc
2363 value was not set properly for subsequent commands. The prev_pc value
2364 is used to initialize the starting line number in the ecs. With an
2365 invalid value, the gdb next command ends up stopping at the position
2366 represented by the next line table entry past our start position.
2367 On platforms that generate one line table entry per line, this
2368 is not a problem. However, on the ia64, the compiler generates
2369 extraneous line table entries that do not increase the line number.
2370 When we issue the gdb next command on the ia64 after an inferior call
2371 or a return command, we often end up a few instructions forward, still
2372 within the original line we started in.
2373
2374 An attempt was made to refresh the prev_pc at the same time the
2375 execution_control_state is initialized (for instance, just before
2376 waiting for an inferior event). But this approach did not work
2377 because of platforms that use ptrace, where the pc register cannot
2378 be read unless the inferior is stopped. At that point, we are not
2379 guaranteed the inferior is stopped and so the regcache_read_pc() call
2380 can fail. Setting the prev_pc value here ensures the value is updated
2381 correctly when the inferior is stopped. */
2382 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2383
2384 /* Reset to normal state. */
2385 init_infwait_state ();
2386
2387 /* Resume inferior. */
2388 resume (tp->control.trap_expected || step || bpstat_should_step (),
2389 tp->suspend.stop_signal);
2390
2391 /* Wait for it to stop (if not standalone)
2392 and in any case decode why it stopped, and act accordingly. */
2393 /* Do this only if we are not using the event loop, or if the target
2394 does not support asynchronous execution. */
2395 if (!target_can_async_p ())
2396 {
2397 wait_for_inferior ();
2398 normal_stop ();
2399 }
2400 }
2401 \f
2402
2403 /* Start remote-debugging of a machine over a serial link. */
2404
2405 void
2406 start_remote (int from_tty)
2407 {
2408 struct inferior *inferior;
2409
2410 inferior = current_inferior ();
2411 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2412
2413 /* Always go on waiting for the target, regardless of the mode. */
2414 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2415 indicate to wait_for_inferior that a target should timeout if
2416 nothing is returned (instead of just blocking). Because of this,
2417 targets expecting an immediate response need to, internally, set
2418 things up so that the target_wait() is forced to eventually
2419 timeout. */
2420 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2421 differentiate to its caller what the state of the target is after
2422 the initial open has been performed. Here we're assuming that
2423 the target has stopped. It should be possible to eventually have
2424 target_open() return to the caller an indication that the target
2425 is currently running and GDB state should be set to the same as
2426 for an async run. */
2427 wait_for_inferior ();
2428
2429 /* Now that the inferior has stopped, do any bookkeeping like
2430 loading shared libraries. We want to do this before normal_stop,
2431 so that the displayed frame is up to date. */
2432 post_create_inferior (&current_target, from_tty);
2433
2434 normal_stop ();
2435 }
2436
2437 /* Initialize static vars when a new inferior begins. */
2438
2439 void
2440 init_wait_for_inferior (void)
2441 {
2442 /* These are meaningless until the first time through wait_for_inferior. */
2443
2444 breakpoint_init_inferior (inf_starting);
2445
2446 clear_proceed_status ();
2447
2448 target_last_wait_ptid = minus_one_ptid;
2449
2450 previous_inferior_ptid = inferior_ptid;
2451 init_infwait_state ();
2452
2453 /* Discard any skipped inlined frames. */
2454 clear_inline_frame_state (minus_one_ptid);
2455
2456 singlestep_ptid = null_ptid;
2457 singlestep_pc = 0;
2458 }
2459
2460 \f
2461 /* This enum encodes possible reasons for doing a target_wait, so that
2462 wait_for_inferior (wfi) can call target_wait in one place. (Ultimately the call will be
2463 moved out of the infinite loop entirely.) */
2464
2465 enum infwait_states
2466 {
2467 infwait_normal_state,
2468 infwait_step_watch_state,
2469 infwait_nonstep_watch_state
2470 };
2471
2472 /* The PTID we'll do a target_wait on. */
2473 ptid_t waiton_ptid;
2474
2475 /* Current inferior wait state. */
2476 static enum infwait_states infwait_state;
2477
2478 /* Data to be passed around while handling an event. This data is
2479 discarded between events. */
2480 struct execution_control_state
2481 {
2482 ptid_t ptid;
2483 /* The thread that got the event, if this was a thread event; NULL
2484 otherwise. */
2485 struct thread_info *event_thread;
2486
2487 struct target_waitstatus ws;
2488 int stop_func_filled_in;
2489 CORE_ADDR stop_func_start;
2490 CORE_ADDR stop_func_end;
2491 const char *stop_func_name;
2492 int wait_some_more;
2493
2494 /* We were in infwait_step_watch_state or
2495 infwait_nonstep_watch_state state, and the thread reported an
2496 event. */
2497 int stepped_after_stopped_by_watchpoint;
2498
2499 /* True if the event thread hit the single-step breakpoint of
2500 another thread. Thus the event doesn't cause a stop, the thread
2501 needs to be single-stepped past the single-step breakpoint before
2502 we can switch back to the original stepping thread. */
2503 int hit_singlestep_breakpoint;
2504 };
2505
2506 static void handle_inferior_event (struct execution_control_state *ecs);
2507
2508 static void handle_step_into_function (struct gdbarch *gdbarch,
2509 struct execution_control_state *ecs);
2510 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2511 struct execution_control_state *ecs);
2512 static void handle_signal_stop (struct execution_control_state *ecs);
2513 static void check_exception_resume (struct execution_control_state *,
2514 struct frame_info *);
2515
2516 static void end_stepping_range (struct execution_control_state *ecs);
2517 static void stop_waiting (struct execution_control_state *ecs);
2518 static void prepare_to_wait (struct execution_control_state *ecs);
2519 static void keep_going (struct execution_control_state *ecs);
2520 static void process_event_stop_test (struct execution_control_state *ecs);
2521 static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
2522
2523 /* Callback for iterate over threads. If the thread is stopped, but
2524 the user/frontend doesn't know about that yet, go through
2525 normal_stop, as if the thread had just stopped now. ARG points at
2526 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2527 ptid_is_pid(PTID) is true, applies to all threads of the process
2528 pointed at by PTID. Otherwise, applies only to the thread pointed at by
2529 PTID. */
2530
2531 static int
2532 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2533 {
2534 ptid_t ptid = * (ptid_t *) arg;
2535
2536 if ((ptid_equal (info->ptid, ptid)
2537 || ptid_equal (minus_one_ptid, ptid)
2538 || (ptid_is_pid (ptid)
2539 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2540 && is_running (info->ptid)
2541 && !is_executing (info->ptid))
2542 {
2543 struct cleanup *old_chain;
2544 struct execution_control_state ecss;
2545 struct execution_control_state *ecs = &ecss;
2546
2547 memset (ecs, 0, sizeof (*ecs));
2548
2549 old_chain = make_cleanup_restore_current_thread ();
2550
2551 overlay_cache_invalid = 1;
2552 /* Flush target cache before starting to handle each event.
2553 Target was running and cache could be stale. This is just a
2554 heuristic. Running threads may modify target memory, but we
2555 don't get any event. */
2556 target_dcache_invalidate ();
2557
2558 /* Go through handle_inferior_event/normal_stop, so we always
2559 have consistent output as if the stop event had been
2560 reported. */
2561 ecs->ptid = info->ptid;
2562 ecs->event_thread = find_thread_ptid (info->ptid);
2563 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2564 ecs->ws.value.sig = GDB_SIGNAL_0;
2565
2566 handle_inferior_event (ecs);
2567
2568 if (!ecs->wait_some_more)
2569 {
2570 struct thread_info *tp;
2571
2572 normal_stop ();
2573
2574 /* Finish off the continuations. */
2575 tp = inferior_thread ();
2576 do_all_intermediate_continuations_thread (tp, 1);
2577 do_all_continuations_thread (tp, 1);
2578 }
2579
2580 do_cleanups (old_chain);
2581 }
2582
2583 return 0;
2584 }
2585
2586 /* This function is attached as a "thread_stop_requested" observer.
2587 Cleanup local state that assumed the PTID was to be resumed, and
2588 report the stop to the frontend. */
2589
2590 static void
2591 infrun_thread_stop_requested (ptid_t ptid)
2592 {
2593 struct displaced_step_inferior_state *displaced;
2594
2595 /* PTID was requested to stop. Remove it from the displaced
2596 stepping queue, so we don't try to resume it automatically. */
2597
2598 for (displaced = displaced_step_inferior_states;
2599 displaced;
2600 displaced = displaced->next)
2601 {
2602 struct displaced_step_request *it, **prev_next_p;
2603
2604 it = displaced->step_request_queue;
2605 prev_next_p = &displaced->step_request_queue;
2606 while (it)
2607 {
2608 if (ptid_match (it->ptid, ptid))
2609 {
2610 *prev_next_p = it->next;
2611 it->next = NULL;
2612 xfree (it);
2613 }
2614 else
2615 {
2616 prev_next_p = &it->next;
2617 }
2618
2619 it = *prev_next_p;
2620 }
2621 }
2622
2623 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2624 }
2625
2626 static void
2627 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2628 {
2629 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2630 nullify_last_target_wait_ptid ();
2631 }
2632
2633 /* Callback for iterate_over_threads. */
2634
2635 static int
2636 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2637 {
2638 if (is_exited (info->ptid))
2639 return 0;
2640
2641 delete_step_resume_breakpoint (info);
2642 delete_exception_resume_breakpoint (info);
2643 return 0;
2644 }
2645
2646 /* In all-stop, delete the step resume breakpoint of any thread that
2647 had one. In non-stop, delete the step resume breakpoint of the
2648 thread that just stopped. */
2649
2650 static void
2651 delete_step_thread_step_resume_breakpoint (void)
2652 {
2653 if (!target_has_execution
2654 || ptid_equal (inferior_ptid, null_ptid))
2655 /* If the inferior has exited, we have already deleted the step
2656 resume breakpoints out of GDB's lists. */
2657 return;
2658
2659 if (non_stop)
2660 {
2661 /* If in non-stop mode, only delete the step-resume or
2662 longjmp-resume breakpoint of the thread that just stopped
2663 stepping. */
2664 struct thread_info *tp = inferior_thread ();
2665
2666 delete_step_resume_breakpoint (tp);
2667 delete_exception_resume_breakpoint (tp);
2668 }
2669 else
2670 /* In all-stop mode, delete all step-resume and longjmp-resume
2671 breakpoints of any thread that had them. */
2672 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2673 }
2674
2675 /* A cleanup wrapper. */
2676
2677 static void
2678 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2679 {
2680 delete_step_thread_step_resume_breakpoint ();
2681 }
2682
2683 /* Pretty print the results of target_wait, for debugging purposes. */
2684
2685 static void
2686 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2687 const struct target_waitstatus *ws)
2688 {
2689 char *status_string = target_waitstatus_to_string (ws);
2690 struct ui_file *tmp_stream = mem_fileopen ();
2691 char *text;
2692
2693 /* The text is split over several lines because it was getting too long.
2694 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2695 output as a unit; we want only one timestamp printed if debug_timestamp
2696 is set. */
2697
2698 fprintf_unfiltered (tmp_stream,
2699 "infrun: target_wait (%d", ptid_get_pid (waiton_ptid));
2700 if (ptid_get_pid (waiton_ptid) != -1)
2701 fprintf_unfiltered (tmp_stream,
2702 " [%s]", target_pid_to_str (waiton_ptid));
2703 fprintf_unfiltered (tmp_stream, ", status) =\n");
2704 fprintf_unfiltered (tmp_stream,
2705 "infrun: %d [%s],\n",
2706 ptid_get_pid (result_ptid),
2707 target_pid_to_str (result_ptid));
2708 fprintf_unfiltered (tmp_stream,
2709 "infrun: %s\n",
2710 status_string);
2711
2712 text = ui_file_xstrdup (tmp_stream, NULL);
2713
2714 /* This uses %s in part to handle %'s in the text, but also to avoid
2715 a gcc error: the format attribute requires a string literal. */
2716 fprintf_unfiltered (gdb_stdlog, "%s", text);
2717
2718 xfree (status_string);
2719 xfree (text);
2720 ui_file_delete (tmp_stream);
2721 }
2722
2723 /* Prepare and stabilize the inferior for detaching it. E.g.,
2724 detaching while a thread is displaced stepping is a recipe for
2725 crashing it, as nothing would readjust the PC out of the scratch
2726 pad. */
2727
2728 void
2729 prepare_for_detach (void)
2730 {
2731 struct inferior *inf = current_inferior ();
2732 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2733 struct cleanup *old_chain_1;
2734 struct displaced_step_inferior_state *displaced;
2735
2736 displaced = get_displaced_stepping_state (inf->pid);
2737
2738 /* Is any thread of this process displaced stepping? If not,
2739 there's nothing else to do. */
2740 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2741 return;
2742
2743 if (debug_infrun)
2744 fprintf_unfiltered (gdb_stdlog,
2745 "displaced-stepping in-process while detaching");
2746
2747 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2748 inf->detaching = 1;
2749
2750 while (!ptid_equal (displaced->step_ptid, null_ptid))
2751 {
2752 struct cleanup *old_chain_2;
2753 struct execution_control_state ecss;
2754 struct execution_control_state *ecs;
2755
2756 ecs = &ecss;
2757 memset (ecs, 0, sizeof (*ecs));
2758
2759 overlay_cache_invalid = 1;
2760 /* Flush target cache before starting to handle each event.
2761 Target was running and cache could be stale. This is just a
2762 heuristic. Running threads may modify target memory, but we
2763 don't get any event. */
2764 target_dcache_invalidate ();
2765
2766 if (deprecated_target_wait_hook)
2767 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2768 else
2769 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2770
2771 if (debug_infrun)
2772 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2773
2774 /* If an error happens while handling the event, propagate GDB's
2775 knowledge of the executing state to the frontend/user running
2776 state. */
2777 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
2778 &minus_one_ptid);
2779
2780 /* Now figure out what to do with the result of the wait. */
2781 handle_inferior_event (ecs);
2782
2783 /* No error, don't finish the state yet. */
2784 discard_cleanups (old_chain_2);
2785
2786 /* Breakpoints and watchpoints are not installed on the target
2787 at this point, and signals are passed directly to the
2788 inferior, so this must mean the process is gone. */
2789 if (!ecs->wait_some_more)
2790 {
2791 discard_cleanups (old_chain_1);
2792 error (_("Program exited while detaching"));
2793 }
2794 }
2795
2796 discard_cleanups (old_chain_1);
2797 }
2798
2799 /* Wait for control to return from inferior to debugger.
2800
2801 If inferior gets a signal, we may decide to start it up again
2802 instead of returning. That is why there is a loop in this function.
2803 When this function actually returns it means the inferior
2804 should be left stopped and GDB should read more commands. */
2805
2806 void
2807 wait_for_inferior (void)
2808 {
2809 struct cleanup *old_cleanups;
2810
2811 if (debug_infrun)
2812 fprintf_unfiltered
2813 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
2814
2815 old_cleanups =
2816 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2817
2818 while (1)
2819 {
2820 struct execution_control_state ecss;
2821 struct execution_control_state *ecs = &ecss;
2822 struct cleanup *old_chain;
2823
2824 memset (ecs, 0, sizeof (*ecs));
2825
2826 overlay_cache_invalid = 1;
2827
2828 /* Flush target cache before starting to handle each event.
2829 Target was running and cache could be stale. This is just a
2830 heuristic. Running threads may modify target memory, but we
2831 don't get any event. */
2832 target_dcache_invalidate ();
2833
2834 if (deprecated_target_wait_hook)
2835 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2836 else
2837 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2838
2839 if (debug_infrun)
2840 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2841
2842 /* If an error happens while handling the event, propagate GDB's
2843 knowledge of the executing state to the frontend/user running
2844 state. */
2845 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2846
2847 /* Now figure out what to do with the result of the wait. */
2848 handle_inferior_event (ecs);
2849
2850 /* No error, don't finish the state yet. */
2851 discard_cleanups (old_chain);
2852
2853 if (!ecs->wait_some_more)
2854 break;
2855 }
2856
2857 do_cleanups (old_cleanups);
2858 }
2859
2860 /* Asynchronous version of wait_for_inferior. It is called by the
2861 event loop whenever a change of state is detected on the file
2862 descriptor corresponding to the target. It can be called more than
2863 once to complete a single execution command. In such cases we need
2864 to keep the state in a global variable ECSS. If it is the last time
2865 that this function is called for a single execution command, then
2866 report to the user that the inferior has stopped, and do the
2867 necessary cleanups. */
2868
2869 void
2870 fetch_inferior_event (void *client_data)
2871 {
2872 struct execution_control_state ecss;
2873 struct execution_control_state *ecs = &ecss;
2874 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2875 struct cleanup *ts_old_chain;
2876 int was_sync = sync_execution;
2877 int cmd_done = 0;
2878
2879 memset (ecs, 0, sizeof (*ecs));
2880
2881 /* We're handling a live event, so make sure we're doing live
2882 debugging. If we're looking at traceframes while the target is
2883 running, we're going to need to get back to that mode after
2884 handling the event. */
2885 if (non_stop)
2886 {
2887 make_cleanup_restore_current_traceframe ();
2888 set_current_traceframe (-1);
2889 }
2890
2891 if (non_stop)
2892 /* In non-stop mode, the user/frontend should not notice a thread
2893 switch due to internal events. Make sure we revert to the
2894 user-selected thread and frame after handling the event and
2895 running any breakpoint commands. */
2896 make_cleanup_restore_current_thread ();
2897
2898 overlay_cache_invalid = 1;
2899 /* Flush target cache before starting to handle each event. Target
2900 was running and cache could be stale. This is just a heuristic.
2901 Running threads may modify target memory, but we don't get any
2902 event. */
2903 target_dcache_invalidate ();
2904
2905 make_cleanup_restore_integer (&execution_direction);
2906 execution_direction = target_execution_direction ();
2907
2908 if (deprecated_target_wait_hook)
2909 ecs->ptid =
2910 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2911 else
2912 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2913
2914 if (debug_infrun)
2915 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2916
2917 /* If an error happens while handling the event, propagate GDB's
2918 knowledge of the executing state to the frontend/user running
2919 state. */
2920 if (!non_stop)
2921 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2922 else
2923 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2924
2925 /* This gets executed before the make_cleanup_restore_current_thread above,
2926 so that it still applies to the thread which has thrown the exception. */
2927 make_bpstat_clear_actions_cleanup ();
2928
2929 /* Now figure out what to do with the result of the wait. */
2930 handle_inferior_event (ecs);
2931
2932 if (!ecs->wait_some_more)
2933 {
2934 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2935
2936 delete_step_thread_step_resume_breakpoint ();
2937
2938 /* We may not find an inferior if this was a process exit. */
2939 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
2940 normal_stop ();
2941
2942 if (target_has_execution
2943 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
2944 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2945 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2946 && ecs->event_thread->step_multi
2947 && ecs->event_thread->control.stop_step)
2948 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2949 else
2950 {
2951 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2952 cmd_done = 1;
2953 }
2954 }
2955
2956 /* No error, don't finish the thread states yet. */
2957 discard_cleanups (ts_old_chain);
2958
2959 /* Revert thread and frame. */
2960 do_cleanups (old_chain);
2961
2962 /* If the inferior was in sync execution mode, and now isn't,
2963 restore the prompt (a synchronous execution command has finished,
2964 and we're ready for input). */
2965 if (interpreter_async && was_sync && !sync_execution)
2966 observer_notify_sync_execution_done ();
2967
2968 if (cmd_done
2969 && !was_sync
2970 && exec_done_display_p
2971 && (ptid_equal (inferior_ptid, null_ptid)
2972 || !is_running (inferior_ptid)))
2973 printf_unfiltered (_("completed.\n"));
2974 }
2975
2976 /* Record the frame and location we're currently stepping through. */
2977 void
2978 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2979 {
2980 struct thread_info *tp = inferior_thread ();
2981
2982 tp->control.step_frame_id = get_frame_id (frame);
2983 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
2984
2985 tp->current_symtab = sal.symtab;
2986 tp->current_line = sal.line;
2987 }
2988
2989 /* Clear context switchable stepping state. */
2990
2991 void
2992 init_thread_stepping_state (struct thread_info *tss)
2993 {
2994 tss->stepping_over_breakpoint = 0;
2995 tss->step_after_step_resume_breakpoint = 0;
2996 }
2997
2998 /* Set the cached copy of the last ptid/waitstatus. */
2999
3000 static void
3001 set_last_target_status (ptid_t ptid, struct target_waitstatus status)
3002 {
3003 target_last_wait_ptid = ptid;
3004 target_last_waitstatus = status;
3005 }
3006
3007 /* Return the cached copy of the last pid/waitstatus returned by
3008 target_wait()/deprecated_target_wait_hook(). The data is actually
3009 cached by handle_inferior_event(), which gets called immediately
3010 after target_wait()/deprecated_target_wait_hook(). */
3011
3012 void
3013 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
3014 {
3015 *ptidp = target_last_wait_ptid;
3016 *status = target_last_waitstatus;
3017 }
3018
3019 void
3020 nullify_last_target_wait_ptid (void)
3021 {
3022 target_last_wait_ptid = minus_one_ptid;
3023 }
3024
3025 /* Switch thread contexts. */
3026
3027 static void
3028 context_switch (ptid_t ptid)
3029 {
3030 if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
3031 {
3032 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
3033 target_pid_to_str (inferior_ptid));
3034 fprintf_unfiltered (gdb_stdlog, "to %s\n",
3035 target_pid_to_str (ptid));
3036 }
3037
3038 switch_to_thread (ptid);
3039 }
3040
3041 static void
3042 adjust_pc_after_break (struct execution_control_state *ecs)
3043 {
3044 struct regcache *regcache;
3045 struct gdbarch *gdbarch;
3046 struct address_space *aspace;
3047 CORE_ADDR breakpoint_pc, decr_pc;
3048
3049 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
3050 we aren't, just return.
3051
3052 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
3053 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
3054 implemented by software breakpoints should be handled through the normal
3055 breakpoint layer.
3056
3057 NOTE drow/2004-01-31: On some targets, breakpoints may generate
3058 different signals (SIGILL or SIGEMT for instance), but it is less
3059 clear where the PC is pointing afterwards. It may not match
3060 gdbarch_decr_pc_after_break. I don't know any specific target that
3061 generates these signals at breakpoints (the code has been in GDB since at
3062 least 1992) so I can not guess how to handle them here.
3063
3064 In earlier versions of GDB, a target with
3065 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
3066 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
3067 target with both of these set in GDB history, and it seems unlikely to be
3068 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
3069
3070 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
3071 return;
3072
3073 if (ecs->ws.value.sig != GDB_SIGNAL_TRAP)
3074 return;
3075
3076 /* In reverse execution, when a breakpoint is hit, the instruction
3077 under it has already been de-executed. The reported PC always
3078 points at the breakpoint address, so adjusting it further would
3079 be wrong. E.g., consider this case on a decr_pc_after_break == 1
3080 architecture:
3081
3082 B1 0x08000000 : INSN1
3083 B2 0x08000001 : INSN2
3084 0x08000002 : INSN3
3085 PC -> 0x08000003 : INSN4
3086
3087 Say you're stopped at 0x08000003 as above. Reverse continuing
3088 from that point should hit B2 as below. Reading the PC when the
3089 SIGTRAP is reported should read 0x08000001 and INSN2 should have
3090 been de-executed already.
3091
3092 B1 0x08000000 : INSN1
3093 B2 PC -> 0x08000001 : INSN2
3094 0x08000002 : INSN3
3095 0x08000003 : INSN4
3096
3097 We can't apply the same logic as for forward execution, because
3098 we would wrongly adjust the PC to 0x08000000, since there's a
3099 breakpoint at PC - 1. We'd then report a hit on B1, although
3100 INSN1 hadn't been de-executed yet. Doing nothing is the correct
3101 behaviour. */
3102 if (execution_direction == EXEC_REVERSE)
3103 return;
3104
3105 /* If this target does not decrement the PC after breakpoints, then
3106 we have nothing to do. */
3107 regcache = get_thread_regcache (ecs->ptid);
3108 gdbarch = get_regcache_arch (regcache);
3109
3110 decr_pc = target_decr_pc_after_break (gdbarch);
3111 if (decr_pc == 0)
3112 return;
3113
3114 aspace = get_regcache_aspace (regcache);
3115
3116 /* Find the location where (if we've hit a breakpoint) the
3117 breakpoint would be. */
3118 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
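  /* For instance, on x86 the breakpoint instruction is the one-byte
     "int3" and the reported PC points just past it, so decr_pc is 1:
     a SIGTRAP reported at PC 0x08048501 for a breakpoint inserted at
     0x08048500 yields breakpoint_pc == 0x08048500.  */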
3119
3120 /* Check whether there actually is a software breakpoint inserted at
3121 that location.
3122
3123 If in non-stop mode, a race condition is possible where we've
3124 removed a breakpoint, but stop events for that breakpoint were
3125 already queued and arrive later. To suppress those spurious
3126 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
3127 and retire them after a number of stop events are reported. */
3128 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
3129 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
3130 {
3131 struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);
3132
3133 if (record_full_is_used ())
3134 record_full_gdb_operation_disable_set ();
3135
3136 /* When using hardware single-step, a SIGTRAP is reported for both
3137 a completed single-step and a software breakpoint. Need to
3138 differentiate between the two, as the latter needs adjusting
3139 but the former does not.
3140
3141 The SIGTRAP can be due to a completed hardware single-step only if
3142 - we didn't insert software single-step breakpoints
3143 - the thread to be examined is still the current thread
3144 - this thread is currently being stepped
3145
3146 If any of these events did not occur, we must have stopped due
3147 to hitting a software breakpoint, and have to back up to the
3148 breakpoint address.
3149
3150 As a special case, we could have hardware single-stepped a
3151 software breakpoint. In this case (prev_pc == breakpoint_pc),
3152 we also need to back up to the breakpoint address. */
3153
3154 if (singlestep_breakpoints_inserted_p
3155 || !ptid_equal (ecs->ptid, inferior_ptid)
3156 || !currently_stepping (ecs->event_thread)
3157 || ecs->event_thread->prev_pc == breakpoint_pc)
3158 regcache_write_pc (regcache, breakpoint_pc);
3159
3160 do_cleanups (old_cleanups);
3161 }
3162 }
3163
3164 static void
3165 init_infwait_state (void)
3166 {
3167 waiton_ptid = pid_to_ptid (-1);
3168 infwait_state = infwait_normal_state;
3169 }
3170
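/* Return 1 if, starting from FRAME's caller and walking outward
   across any inlined frames, we reach the frame identified by
   STEP_FRAME_ID; i.e., FRAME was stepped into from that frame.  */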
3171 static int
3172 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3173 {
3174 for (frame = get_prev_frame (frame);
3175 frame != NULL;
3176 frame = get_prev_frame (frame))
3177 {
3178 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3179 return 1;
3180 if (get_frame_type (frame) != INLINE_FRAME)
3181 break;
3182 }
3183
3184 return 0;
3185 }
3186
3187 /* Auxiliary function that handles syscall entry/return events.
3188 It returns 1 if the inferior should keep going (and GDB
3189 should ignore the event), or 0 if the event deserves to be
3190 processed. */
3191
3192 static int
3193 handle_syscall_event (struct execution_control_state *ecs)
3194 {
3195 struct regcache *regcache;
3196 int syscall_number;
3197
3198 if (!ptid_equal (ecs->ptid, inferior_ptid))
3199 context_switch (ecs->ptid);
3200
3201 regcache = get_thread_regcache (ecs->ptid);
3202 syscall_number = ecs->ws.value.syscall_number;
3203 stop_pc = regcache_read_pc (regcache);
3204
3205 if (catch_syscall_enabled () > 0
3206 && catching_syscall_number (syscall_number) > 0)
3207 {
3208 if (debug_infrun)
3209 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
3210 syscall_number);
3211
3212 ecs->event_thread->control.stop_bpstat
3213 = bpstat_stop_status (get_regcache_aspace (regcache),
3214 stop_pc, ecs->ptid, &ecs->ws);
3215
3216 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3217 {
3218 /* Catchpoint hit. */
3219 return 0;
3220 }
3221 }
3222
3223 /* If no catchpoint triggered for this, then keep going. */
3224 keep_going (ecs);
3225 return 1;
3226 }
3227
3228 /* Lazily fill in the execution_control_state's stop_func_* fields. */
3229
3230 static void
3231 fill_in_stop_func (struct gdbarch *gdbarch,
3232 struct execution_control_state *ecs)
3233 {
3234 if (!ecs->stop_func_filled_in)
3235 {
3236 /* Don't care about return value; stop_func_start and stop_func_name
3237 will both be 0 if it doesn't work. */
3238 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3239 &ecs->stop_func_start, &ecs->stop_func_end);
3240 ecs->stop_func_start
3241 += gdbarch_deprecated_function_start_offset (gdbarch);
3242
3243 if (gdbarch_skip_entrypoint_p (gdbarch))
3244 ecs->stop_func_start = gdbarch_skip_entrypoint (gdbarch,
3245 ecs->stop_func_start);
3246
3247 ecs->stop_func_filled_in = 1;
3248 }
3249 }
3250
3251
3252 /* Return the STOP_SOON field of the inferior pointed at by PTID. */
3253
3254 static enum stop_kind
3255 get_inferior_stop_soon (ptid_t ptid)
3256 {
3257 struct inferior *inf = find_inferior_pid (ptid_get_pid (ptid));
3258
3259 gdb_assert (inf != NULL);
3260 return inf->control.stop_soon;
3261 }
3262
3263 /* Given an execution control state that has been freshly filled in by
3264 an event from the inferior, figure out what it means and take
3265 appropriate action.
3266
3267 The alternatives are:
3268
3269 1) stop_waiting and return; to really stop and return to the
3270 debugger.
3271
3272 2) keep_going and return; to wait for the next event (set
3273 ecs->event_thread->stepping_over_breakpoint to 1 to single step
3274 once). */
3275
3276 static void
3277 handle_inferior_event (struct execution_control_state *ecs)
3278 {
3279 enum stop_kind stop_soon;
3280
3281 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
3282 {
3283 /* We had an event in the inferior, but we are not interested in
3284 handling it at this level. The lower layers have already
3285 done what needs to be done, if anything.
3286
3287 One of the possible circumstances for this is when the
3288 inferior produces output for the console. The inferior has
3289 not stopped, and we are ignoring the event. Another possible
3290 circumstance is any event which the lower level knows will be
3291 reported multiple times without an intervening resume. */
3292 if (debug_infrun)
3293 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
3294 prepare_to_wait (ecs);
3295 return;
3296 }
3297
3298 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
3299 && target_can_async_p () && !sync_execution)
3300 {
3301 /* There were no unwaited-for children left in the target, but
3302 we're not synchronously waiting for events either. Just
3303 ignore. Otherwise, if we were running a synchronous
3304 execution command, we need to cancel it and give the user
3305 back the terminal. */
3306 if (debug_infrun)
3307 fprintf_unfiltered (gdb_stdlog,
3308 "infrun: TARGET_WAITKIND_NO_RESUMED (ignoring)\n");
3309 prepare_to_wait (ecs);
3310 return;
3311 }
3312
3313 /* Cache the last pid/waitstatus. */
3314 set_last_target_status (ecs->ptid, ecs->ws);
3315
3316 /* Always clear state belonging to the previous time we stopped. */
3317 stop_stack_dummy = STOP_NONE;
3318
3319 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
3320 {
3321 /* No unwaited-for children left. IOW, all resumed children
3322 have exited. */
3323 if (debug_infrun)
3324 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_RESUMED\n");
3325
3326 stop_print_frame = 0;
3327 stop_waiting (ecs);
3328 return;
3329 }
3330
3331 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3332 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
3333 {
3334 ecs->event_thread = find_thread_ptid (ecs->ptid);
3335 /* If it's a new thread, add it to the thread database. */
3336 if (ecs->event_thread == NULL)
3337 ecs->event_thread = add_thread (ecs->ptid);
3338
3339 /* Disable range stepping. If the next step request could use a
3340 range, this will end up re-enabled then. */
3341 ecs->event_thread->control.may_range_step = 0;
3342 }
3343
3344 /* Dependent on valid ECS->EVENT_THREAD. */
3345 adjust_pc_after_break (ecs);
3346
3347 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3348 reinit_frame_cache ();
3349
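 /* Let the breakpoints module retire "moribund" breakpoint locations,
    i.e., locations of breakpoints removed long enough ago that they can
    no longer explain a trap (see breakpoint.c for the aging policy). */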
3350 breakpoint_retire_moribund ();
3351
3352 /* First, distinguish signals caused by the debugger from signals
3353 that have to do with the program's own actions. Note that
3354 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3355 on the operating system version. Here we detect when a SIGILL or
3356 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3357 something similar for SIGSEGV, since a SIGSEGV will be generated
3358 when we're trying to execute a breakpoint instruction on a
3359 non-executable stack. This happens for call dummy breakpoints
3360 for architectures like SPARC that place call dummies on the
3361 stack. */
3362 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3363 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
3364 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
3365 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
3366 {
3367 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3368
3369 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3370 regcache_read_pc (regcache)))
3371 {
3372 if (debug_infrun)
3373 fprintf_unfiltered (gdb_stdlog,
3374 "infrun: Treating signal as SIGTRAP\n");
3375 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
3376 }
3377 }
3378
3379 /* Mark the non-executing threads accordingly. In all-stop, all
3380 threads of all processes are stopped when we get any event
3381 reported. In non-stop mode, only the event thread stops. If
3382 we're handling a process exit in non-stop mode, there's nothing
3383 to do, as threads of the dead process are gone, and threads of
3384 any other process were left running. */
3385 if (!non_stop)
3386 set_executing (minus_one_ptid, 0);
3387 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3388 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3389 set_executing (ecs->ptid, 0);
3390
3391 switch (infwait_state)
3392 {
3393 case infwait_normal_state:
3394 if (debug_infrun)
3395 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
3396 break;
3397
3398 case infwait_step_watch_state:
3399 if (debug_infrun)
3400 fprintf_unfiltered (gdb_stdlog,
3401 "infrun: infwait_step_watch_state\n");
3402
3403 ecs->stepped_after_stopped_by_watchpoint = 1;
3404 break;
3405
3406 case infwait_nonstep_watch_state:
3407 if (debug_infrun)
3408 fprintf_unfiltered (gdb_stdlog,
3409 "infrun: infwait_nonstep_watch_state\n");
3410 insert_breakpoints ();
3411
3412 /* FIXME-maybe: is this cleaner than setting a flag? Does it
3413 handle things like signals arriving and other things happening
3414 in combination correctly? */
3415 ecs->stepped_after_stopped_by_watchpoint = 1;
3416 break;
3417
3418 default:
3419 internal_error (__FILE__, __LINE__, _("bad switch"));
3420 }
3421
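 /* Whatever special wait state was in effect only applied to the event
    just fetched; revert to normal waiting for the next one. */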
3422 infwait_state = infwait_normal_state;
3423 waiton_ptid = pid_to_ptid (-1);
3424
3425 switch (ecs->ws.kind)
3426 {
3427 case TARGET_WAITKIND_LOADED:
3428 if (debug_infrun)
3429 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3430 if (!ptid_equal (ecs->ptid, inferior_ptid))
3431 context_switch (ecs->ptid);
3432 /* Ignore gracefully during startup of the inferior, as it might
3433 be the shell which has just loaded some objects; otherwise
3434 add the symbols for the newly loaded objects. Also ignore at
3435 the beginning of an attach or remote session; we will query
3436 the full list of libraries once the connection is
3437 established. */
3438
3439 stop_soon = get_inferior_stop_soon (ecs->ptid);
3440 if (stop_soon == NO_STOP_QUIETLY)
3441 {
3442 struct regcache *regcache;
3443
3444 regcache = get_thread_regcache (ecs->ptid);
3445
3446 handle_solib_event ();
3447
3448 ecs->event_thread->control.stop_bpstat
3449 = bpstat_stop_status (get_regcache_aspace (regcache),
3450 stop_pc, ecs->ptid, &ecs->ws);
3451
3452 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3453 {
3454 /* A catchpoint triggered. */
3455 process_event_stop_test (ecs);
3456 return;
3457 }
3458
3459 /* If requested, stop when the dynamic linker notifies
3460 gdb of events. This allows the user to get control
3461 and place breakpoints in initializer routines for
3462 dynamically loaded objects (among other things). */
3463 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3464 if (stop_on_solib_events)
3465 {
3466 /* Make sure we print "Stopped due to solib-event" in
3467 normal_stop. */
3468 stop_print_frame = 1;
3469
3470 stop_waiting (ecs);
3471 return;
3472 }
3473 }
3474
3475 /* If we are skipping through a shell, or through shared library
3476 loading that we aren't interested in, resume the program. If
3477 we're running the program normally, also resume. */
3478 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3479 {
3480 /* Loading of shared libraries might have changed breakpoint
3481 addresses. Make sure new breakpoints are inserted. */
3482 if (stop_soon == NO_STOP_QUIETLY
3483 && !breakpoints_always_inserted_mode ())
3484 insert_breakpoints ();
3485 resume (0, GDB_SIGNAL_0);
3486 prepare_to_wait (ecs);
3487 return;
3488 }
3489
3490 /* But stop if we're attaching or setting up a remote
3491 connection. */
3492 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3493 || stop_soon == STOP_QUIETLY_REMOTE)
3494 {
3495 if (debug_infrun)
3496 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3497 stop_waiting (ecs);
3498 return;
3499 }
3500
3501 internal_error (__FILE__, __LINE__,
3502 _("unhandled stop_soon: %d"), (int) stop_soon);
3503
3504 case TARGET_WAITKIND_SPURIOUS:
3505 if (debug_infrun)
3506 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3507 if (!ptid_equal (ecs->ptid, inferior_ptid))
3508 context_switch (ecs->ptid);
3509 resume (0, GDB_SIGNAL_0);
3510 prepare_to_wait (ecs);
3511 return;
3512
3513 case TARGET_WAITKIND_EXITED:
3514 case TARGET_WAITKIND_SIGNALLED:
3515 if (debug_infrun)
3516 {
3517 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3518 fprintf_unfiltered (gdb_stdlog,
3519 "infrun: TARGET_WAITKIND_EXITED\n");
3520 else
3521 fprintf_unfiltered (gdb_stdlog,
3522 "infrun: TARGET_WAITKIND_SIGNALLED\n");
3523 }
3524
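 /* Make the exiting inferior and its program space current before
    mourning it below. */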
3525 inferior_ptid = ecs->ptid;
3526 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3527 set_current_program_space (current_inferior ()->pspace);
3528 handle_vfork_child_exec_or_exit (0);
3529 target_terminal_ours (); /* Must do this before mourn anyway. */
3530
3531 /* Clear any previous state of convenience variables. */
3532 clear_exit_convenience_vars ();
3533
3534 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3535 {
3536 /* Record the exit code in the convenience variable $_exitcode, so
3537 that the user can inspect this again later. */
3538 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3539 (LONGEST) ecs->ws.value.integer);
3540
3541 /* Also record this in the inferior itself. */
3542 current_inferior ()->has_exit_code = 1;
3543 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
3544
3545 /* Support the --return-child-result option. */
3546 return_child_result_value = ecs->ws.value.integer;
3547
3548 observer_notify_exited (ecs->ws.value.integer);
3549 }
3550 else
3551 {
3552 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3553 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3554
3555 if (gdbarch_gdb_signal_to_target_p (gdbarch))
3556 {
3557 /* Set the value of the internal variable $_exitsignal,
3558 which holds the signal uncaught by the inferior. */
3559 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
3560 gdbarch_gdb_signal_to_target (gdbarch,
3561 ecs->ws.value.sig));
3562 }
3563 else
3564 {
3565 /* We don't have access to the target's method used for
3566 converting between signal numbers (GDB's internal
3567 representation <-> target's representation).
3568 Therefore, we cannot do a good job at displaying this
3569 information to the user. It's better to just warn
3570 her about it (if infrun debugging is enabled), and
3571 give up. */
3572 if (debug_infrun)
3573 fprintf_filtered (gdb_stdlog, _("\
3574 Cannot fill $_exitsignal with the correct signal number.\n"));
3575 }
3576
3577 observer_notify_signal_exited (ecs->ws.value.sig);
3578 }
3579
3580 gdb_flush (gdb_stdout);
3581 target_mourn_inferior ();
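 /* The inferior is gone, so any recorded single-step breakpoints are
    meaningless now. */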
3582 singlestep_breakpoints_inserted_p = 0;
3583 cancel_single_step_breakpoints ();
3584 stop_print_frame = 0;
3585 stop_waiting (ecs);
3586 return;
3587
3588 /* The following are the only cases in which we keep going;
3589 the above cases end in a continue or goto. */
3590 case TARGET_WAITKIND_FORKED:
3591 case TARGET_WAITKIND_VFORKED:
3592 if (debug_infrun)
3593 {
3594 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3595 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3596 else
3597 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORKED\n");
3598 }
3599
3600 /* Check whether the inferior is displaced stepping. */
3601 {
3602 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3603 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3604 struct displaced_step_inferior_state *displaced
3605 = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));
3606
3607 /* If displaced stepping is in use for this process, and thread
3608 ecs->ptid is the thread that was displaced stepping. */
3609 if (displaced && ptid_equal (displaced->step_ptid, ecs->ptid))
3610 {
3611 struct inferior *parent_inf
3612 = find_inferior_pid (ptid_get_pid (ecs->ptid));
3613 struct regcache *child_regcache;
3614 CORE_ADDR parent_pc;
3615
3616 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
3617 indicating that the displaced stepping of the syscall instruction
3618 has been done. Perform cleanup for parent process here. Note
3619 that this operation also cleans up the child process for vfork,
3620 because their pages are shared. */
3621 displaced_step_fixup (ecs->ptid, GDB_SIGNAL_TRAP);
3622
3623 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3624 {
3625 /* Restore scratch pad for child process. */
3626 displaced_step_restore (displaced, ecs->ws.value.related_pid);
3627 }
3628
3629 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
3630 the child's PC is also within the scratchpad. Set the child's PC
3631 to the parent's PC value, which has already been fixed up.
3632 FIXME: we use the parent's aspace here, although we're touching
3633 the child, because the child hasn't been added to the inferior
3634 list yet at this point. */
3635
3636 child_regcache
3637 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
3638 gdbarch,
3639 parent_inf->aspace);
3640 /* Read PC value of parent process. */
3641 parent_pc = regcache_read_pc (regcache);
3642
3643 if (debug_displaced)
3644 fprintf_unfiltered (gdb_stdlog,
3645 "displaced: write child pc from %s to %s\n",
3646 paddress (gdbarch,
3647 regcache_read_pc (child_regcache)),
3648 paddress (gdbarch, parent_pc));
3649
3650 regcache_write_pc (child_regcache, parent_pc);
3651 }
3652 }
3653
3654 if (!ptid_equal (ecs->ptid, inferior_ptid))
3655 context_switch (ecs->ptid);
3656
3657 /* Immediately detach breakpoints from the child before there's
3658 any chance of letting the user delete breakpoints from the
3659 breakpoint lists. If we don't do this early, it's easy to
3660 leave left-over traps in the child, viz: "break foo; catch
3661 fork; c; <fork>; del; c; <child calls foo>". We only follow
3662 the fork on the last `continue', and by that time the
3663 breakpoint at "foo" is long gone from the breakpoint table.
3664 If we vforked, then we don't need to unpatch here, since both
3665 parent and child are sharing the same memory pages; we'll
3666 need to unpatch at follow/detach time instead to be certain
3667 that new breakpoints added between catchpoint hit time and
3668 vfork follow are detached. */
3669 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3670 {
3671 /* This won't actually modify the breakpoint list, but will
3672 physically remove the breakpoints from the child. */
3673 detach_breakpoints (ecs->ws.value.related_pid);
3674 }
3675
3676 if (singlestep_breakpoints_inserted_p)
3677 {
3678 /* Pull the single step breakpoints out of the target. */
3679 remove_single_step_breakpoints ();
3680 singlestep_breakpoints_inserted_p = 0;
3681 }
3682
3683 /* In case the event is caught by a catchpoint, remember that
3684 the event is to be followed at the next resume of the thread,
3685 and not immediately. */
3686 ecs->event_thread->pending_follow = ecs->ws;
3687
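 /* Note the stop PC so the bpstat check below can match fork/vfork
    catchpoints against this event. */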
3688 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3689
3690 ecs->event_thread->control.stop_bpstat
3691 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3692 stop_pc, ecs->ptid, &ecs->ws);
3693
3694 /* If no catchpoint triggered for this, then keep going. Note
3695 that we're interested in knowing whether the bpstat actually causes a
3696 stop, not just if it may explain the signal. Software
3697 watchpoints, for example, always appear in the bpstat. */
3698 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3699 {
3700 ptid_t parent;
3701 ptid_t child;
3702 int should_resume;
3703 int follow_child
3704 = (follow_fork_mode_string == follow_fork_mode_child);
3705
3706 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3707
3708 should_resume = follow_fork ();
3709
3710 parent = ecs->ptid;
3711 child = ecs->ws.value.related_pid;
3712
3713 /* In non-stop mode, also resume the other branch. */
3714 if (non_stop && !detach_fork)
3715 {
3716 if (follow_child)
3717 switch_to_thread (parent);
3718 else
3719 switch_to_thread (child);
3720
3721 ecs->event_thread = inferior_thread ();
3722 ecs->ptid = inferior_ptid;
3723 keep_going (ecs);
3724 }
3725
3726 if (follow_child)
3727 switch_to_thread (child);
3728 else
3729 switch_to_thread (parent);
3730
3731 ecs->event_thread = inferior_thread ();
3732 ecs->ptid = inferior_ptid;
3733
3734 if (should_resume)
3735 keep_going (ecs);
3736 else
3737 stop_waiting (ecs);
3738 return;
3739 }
3740 process_event_stop_test (ecs);
3741 return;
3742
3743 case TARGET_WAITKIND_VFORK_DONE:
3744 /* Done with the shared memory region. Re-insert breakpoints in
3745 the parent, and keep going. */
3746
3747 if (debug_infrun)
3748 fprintf_unfiltered (gdb_stdlog,
3749 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3750
3751 if (!ptid_equal (ecs->ptid, inferior_ptid))
3752 context_switch (ecs->ptid);
3753
3754 current_inferior ()->waiting_for_vfork_done = 0;
3755 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3756 /* This also takes care of reinserting breakpoints in the
3757 previously locked inferior. */
3758 keep_going (ecs);
3759 return;
3760
3761 case TARGET_WAITKIND_EXECD:
3762 if (debug_infrun)
3763 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3764
3765 if (!ptid_equal (ecs->ptid, inferior_ptid))
3766 context_switch (ecs->ptid);
3767
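 /* The exec replaced the program image, so any single-step breakpoints
    set in the old image are stale. */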
3768 singlestep_breakpoints_inserted_p = 0;
3769 cancel_single_step_breakpoints ();
3770
3771 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3772
3773 /* Do whatever is necessary to the parent branch of the vfork. */
3774 handle_vfork_child_exec_or_exit (1);
3775
3776 /* This causes the eventpoints and symbol table to be reset.
3777 Must do this now, before trying to determine whether to
3778 stop. */
3779 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3780
3781 ecs->event_thread->control.stop_bpstat
3782 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3783 stop_pc, ecs->ptid, &ecs->ws);
3784
3785 /* Note that this may be referenced from inside
3786 bpstat_stop_status above, through inferior_has_execd. */
3787 xfree (ecs->ws.value.execd_pathname);
3788 ecs->ws.value.execd_pathname = NULL;
3789
3790 /* If no catchpoint triggered for this, then keep going. */
3791 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3792 {
3793 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3794 keep_going (ecs);
3795 return;
3796 }
3797 process_event_stop_test (ecs);
3798 return;
3799
3800 /* Be careful not to try to gather much state about a thread
3801 that's in a syscall. It's frequently a losing proposition. */
3802 case TARGET_WAITKIND_SYSCALL_ENTRY:
3803 if (debug_infrun)
3804 fprintf_unfiltered (gdb_stdlog,
3805 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3806 /* Get the current syscall number. */
3807 if (handle_syscall_event (ecs) == 0)
3808 process_event_stop_test (ecs);
3809 return;
3810
3811 /* Before examining the threads further, step this thread to
3812 get it entirely out of the syscall. (We get notice of the
3813 event when the thread is just on the verge of exiting a
3814 syscall. Stepping one instruction seems to get it back
3815 into user code.) */
3816 case TARGET_WAITKIND_SYSCALL_RETURN:
3817 if (debug_infrun)
3818 fprintf_unfiltered (gdb_stdlog,
3819 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3820 if (handle_syscall_event (ecs) == 0)
3821 process_event_stop_test (ecs);
3822 return;
3823
3824 case TARGET_WAITKIND_STOPPED:
3825 if (debug_infrun)
3826 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3827 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
3828 handle_signal_stop (ecs);
3829 return;
3830
3831 case TARGET_WAITKIND_NO_HISTORY:
3832 if (debug_infrun)
3833 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
3834 /* Reverse execution: target ran out of history info. */
3835
3836 /* Pull the single step breakpoints out of the target. */
3837 if (singlestep_breakpoints_inserted_p)
3838 {
3839 if (!ptid_equal (ecs->ptid, inferior_ptid))
3840 context_switch (ecs->ptid);
3841 remove_single_step_breakpoints ();
3842 singlestep_breakpoints_inserted_p = 0;
3843 }
3844 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3845 observer_notify_no_history ();
3846 stop_waiting (ecs);
3847 return;
3848 }
3849 }
3850
3851 /* Come here when the program has stopped with a signal. */
3852
3853 static void
3854 handle_signal_stop (struct execution_control_state *ecs)
3855 {
3856 struct frame_info *frame;
3857 struct gdbarch *gdbarch;
3858 int stopped_by_watchpoint;
3859 enum stop_kind stop_soon;
3860 int random_signal;
3861
3862 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
3863
3864 /* Do we need to clean up the state of a thread that has
3865 completed a displaced single-step? (Doing so usually affects
3866 the PC, so do it here, before we set stop_pc.) */
3867 displaced_step_fixup (ecs->ptid,
3868 ecs->event_thread->suspend.stop_signal);
3869
3870 /* If we either finished a single-step or hit a breakpoint, but
3871 the user wanted this thread to be stopped, pretend we got a
3872 SIG0 (generic unsignaled stop). */
3873 if (ecs->event_thread->stop_requested
3874 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
3875 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3876
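 /* Record where the event thread stopped; most of the checks below key
    off this PC. */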
3877 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3878
3879 if (debug_infrun)
3880 {
3881 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3882 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3883 struct cleanup *old_chain = save_inferior_ptid ();
3884
3885 inferior_ptid = ecs->ptid;
3886
3887 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3888 paddress (gdbarch, stop_pc));
3889 if (target_stopped_by_watchpoint ())
3890 {
3891 CORE_ADDR addr;
3892
3893 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3894
3895 if (target_stopped_data_address (&current_target, &addr))
3896 fprintf_unfiltered (gdb_stdlog,
3897 "infrun: stopped data address = %s\n",
3898 paddress (gdbarch, addr));
3899 else
3900 fprintf_unfiltered (gdb_stdlog,
3901 "infrun: (no data address available)\n");
3902 }
3903
3904 do_cleanups (old_chain);
3905 }
3906
3907 /* This originates from start_remote(), start_inferior() and
3908 shared library hook functions. */
3909 stop_soon = get_inferior_stop_soon (ecs->ptid);
3910 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
3911 {
3912 if (!ptid_equal (ecs->ptid, inferior_ptid))
3913 context_switch (ecs->ptid);
3914 if (debug_infrun)
3915 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3916 stop_print_frame = 1;
3917 stop_waiting (ecs);
3918 return;
3919 }
3920
3921 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
3922 && stop_after_trap)
3923 {
3924 if (!ptid_equal (ecs->ptid, inferior_ptid))
3925 context_switch (ecs->ptid);
3926 if (debug_infrun)
3927 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
3928 stop_print_frame = 0;
3929 stop_waiting (ecs);
3930 return;
3931 }
3932
3933 /* This originates from attach_command(). We need to overwrite
3934 the stop_signal here, because some kernels don't ignore a
3935 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
3936 See more comments in inferior.h. On the other hand, if we
3937 get a non-SIGSTOP, report it to the user - assume the backend
3938 will handle the SIGSTOP if it should show up later.
3939
3940 Also consider that the attach is complete when we see a
3941 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
3942 target extended-remote report it instead of a SIGSTOP
3943 (e.g. gdbserver). We already rely on SIGTRAP being our
3944 signal, so this is no exception.
3945
3946 Also consider that the attach is complete when we see a
3947 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
3948 the target to stop all threads of the inferior, in case the
3949 low level attach operation doesn't stop them implicitly. If
3950 they weren't stopped implicitly, then the stub will report a
3951 GDB_SIGNAL_0, meaning: stopped for no particular reason
3952 other than GDB's request. */
3953 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3954 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
3955 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
3956 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
3957 {
3958 stop_print_frame = 1;
3959 stop_waiting (ecs);
3960 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3961 return;
3962 }
3963
3964 /* See if something interesting happened to the non-current thread. If
3965 so, then switch to that thread. */
3966 if (!ptid_equal (ecs->ptid, inferior_ptid))
3967 {
3968 if (debug_infrun)
3969 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3970
3971 context_switch (ecs->ptid);
3972
3973 if (deprecated_context_hook)
3974 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3975 }
3976
3977 /* At this point, get hold of the now-current thread's frame. */
3978 frame = get_current_frame ();
3979 gdbarch = get_frame_arch (frame);
3980
3981 /* Pull the single step breakpoints out of the target. */
3982 if (singlestep_breakpoints_inserted_p)
3983 {
3984 /* However, before doing so, if this single-step breakpoint was
3985 actually for another thread, set this thread up for moving
3986 past it. */
3987 if (!ptid_equal (ecs->ptid, singlestep_ptid)
3988 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
3989 {
3990 struct regcache *regcache;
3991 struct address_space *aspace;
3992 CORE_ADDR pc;
3993
3994 regcache = get_thread_regcache (ecs->ptid);
3995 aspace = get_regcache_aspace (regcache);
3996 pc = regcache_read_pc (regcache);
3997 if (single_step_breakpoint_inserted_here_p (aspace, pc))
3998 {
3999 if (debug_infrun)
4000 {
4001 fprintf_unfiltered (gdb_stdlog,
4002 "infrun: [%s] hit step over single-step"
4003 " breakpoint of [%s]\n",
4004 target_pid_to_str (ecs->ptid),
4005 target_pid_to_str (singlestep_ptid));
4006 }
4007 ecs->hit_singlestep_breakpoint = 1;
4008 }
4009 }
4010
4011 remove_single_step_breakpoints ();
4012 singlestep_breakpoints_inserted_p = 0;
4013 }
4014
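 /* Decide whether a watchpoint triggered for this stop. If the
    previous event already stepped the thread past the instruction that
    triggered the watchpoint, this stop isn't a new watchpoint hit that
    needs another step-over. */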
4015 if (ecs->stepped_after_stopped_by_watchpoint)
4016 stopped_by_watchpoint = 0;
4017 else
4018 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
4019
4020 /* If necessary, step over this watchpoint. We'll be back to display
4021 it in a moment. */
4022 if (stopped_by_watchpoint
4023 && (target_have_steppable_watchpoint
4024 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
4025 {
4026 /* At this point, we are stopped at an instruction which has
4027 attempted to write to a piece of memory under control of
4028 a watchpoint. The instruction hasn't actually executed
4029 yet. If we were to evaluate the watchpoint expression
4030 now, we would get the old value, and therefore no change
4031 would seem to have occurred.
4032
4033 In order to make watchpoints work `right', we really need
4034 to complete the memory write, and then evaluate the
4035 watchpoint expression. We do this by single-stepping the
4036 target.
4037
4038 It may not be necessary to disable the watchpoint to step over
4039 it. For example, the PA can (with some kernel cooperation)
4040 single step over a watchpoint without disabling the watchpoint.
4041
4042 It is far more common to need to disable a watchpoint to step
4043 the inferior over it. If we have non-steppable watchpoints,
4044 we must disable the current watchpoint; it's simplest to
4045 disable all watchpoints and breakpoints. */
4046 int hw_step = 1;
4047
4048 if (!target_have_steppable_watchpoint)
4049 {
4050 remove_breakpoints ();
4051 /* See the comment in resume for why we need to stop bypassing signals
4052 while breakpoints have been removed. */
4053 target_pass_signals (0, NULL);
4054 }
4055 /* Single step */
4056 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
4057 target_resume (ecs->ptid, hw_step, GDB_SIGNAL_0);
4058 waiton_ptid = ecs->ptid;
4059 if (target_have_steppable_watchpoint)
4060 infwait_state = infwait_step_watch_state;
4061 else
4062 infwait_state = infwait_nonstep_watch_state;
4063 prepare_to_wait (ecs);
4064 return;
4065 }
4066
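 /* Reset the per-stop state before working out what this event
    means. */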
4067 ecs->event_thread->stepping_over_breakpoint = 0;
4068 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4069 ecs->event_thread->control.stop_step = 0;
4070 stop_print_frame = 1;
4071 stopped_by_random_signal = 0;
4072
4073 /* Hide inlined functions starting here, unless we just performed stepi or
4074 nexti. After stepi and nexti, always show the innermost frame (not any
4075 inline function call sites). */
4076 if (ecs->event_thread->control.step_range_end != 1)
4077 {
4078 struct address_space *aspace =
4079 get_regcache_aspace (get_thread_regcache (ecs->ptid));
4080
4081 /* skip_inline_frames is expensive, so we avoid it if we can
4082 determine that the address is one where functions cannot have
4083 been inlined. This improves performance with inferiors that
4084 load a lot of shared libraries, because the solib event
4085 breakpoint is defined as the address of a function (i.e. not
4086 inline). Note that we have to check the previous PC as well
4087 as the current one to catch cases when we have just
4088 single-stepped off a breakpoint prior to reinstating it.
4089 Note that we're assuming that the code we single-step to is
4090 not inline, but that's not definitive: there's nothing
4091 preventing the event breakpoint function from containing
4092 inlined code, and the single-step ending up there. If the
4093 user had set a breakpoint on that inlined code, the missing
4094 skip_inline_frames call would break things. Fortunately
4095 that's an extremely unlikely scenario. */
4096 if (!pc_at_non_inline_function (aspace, stop_pc, &ecs->ws)
4097 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4098 && ecs->event_thread->control.trap_expected
4099 && pc_at_non_inline_function (aspace,
4100 ecs->event_thread->prev_pc,
4101 &ecs->ws)))
4102 {
4103 skip_inline_frames (ecs->ptid);
4104
4105 /* Re-fetch current thread's frame in case that invalidated
4106 the frame cache. */
4107 frame = get_current_frame ();
4108 gdbarch = get_frame_arch (frame);
4109 }
4110 }
4111
4112 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4113 && ecs->event_thread->control.trap_expected
4114 && gdbarch_single_step_through_delay_p (gdbarch)
4115 && currently_stepping (ecs->event_thread))
4116 {
4117 /* We're trying to step off a breakpoint. Turns out that we're
4118 also on an instruction that needs to be stepped multiple
4119 times before it has been fully executed. E.g., architectures
4120 with a delay slot. It needs to be stepped twice, once for
4121 the instruction and once for the delay slot. */
4122 int step_through_delay
4123 = gdbarch_single_step_through_delay (gdbarch, frame);
4124
4125 if (debug_infrun && step_through_delay)
4126 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
4127 if (ecs->event_thread->control.step_range_end == 0
4128 && step_through_delay)
4129 {
4130 /* The user issued a continue when stopped at a breakpoint.
4131 Set up for another trap and get out of here. */
4132 ecs->event_thread->stepping_over_breakpoint = 1;
4133 keep_going (ecs);
4134 return;
4135 }
4136 else if (step_through_delay)
4137 {
4138 /* The user issued a step when stopped at a breakpoint.
4139 Maybe we should stop, maybe we should not - the delay
4140 slot *might* correspond to a line of source. In any
4141 case, don't decide that here, just set
4142 ecs->stepping_over_breakpoint, making sure we
4143 single-step again before breakpoints are re-inserted. */
4144 ecs->event_thread->stepping_over_breakpoint = 1;
4145 }
4146 }
4147
4148 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
4149 handles this event. */
4150 ecs->event_thread->control.stop_bpstat
4151 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4152 stop_pc, ecs->ptid, &ecs->ws);
4153
4154 /* The following is needed in case a breakpoint condition called a
4155 function. */
4156 stop_print_frame = 1;
4157
4158 /* This is where we handle "moribund" watchpoints. Unlike
4159 software breakpoint traps, hardware watchpoint traps are
4160 always distinguishable from random traps. If no high-level
4161 watchpoint is associated with the reported stop data address
4162 anymore, then the bpstat does not explain the signal ---
4163 simply make sure to ignore it if `stopped_by_watchpoint' is
4164 set. */
4165
4166 if (debug_infrun
4167 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4168 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4169 GDB_SIGNAL_TRAP)
4170 && stopped_by_watchpoint)
4171 fprintf_unfiltered (gdb_stdlog,
4172 "infrun: no user watchpoint explains "
4173 "watchpoint SIGTRAP, ignoring\n");
4174
4175 /* NOTE: cagney/2003-03-29: These checks for a random signal
4176 at one stage in the past included checks for an inferior
4177 function call's call dummy's return breakpoint. The original
4178 comment, that went with the test, read:
4179
4180 ``End of a stack dummy. Some systems (e.g. Sony news) give
4181 another signal besides SIGTRAP, so check here as well as
4182 above.''
4183
4184 If someone ever tries to get call dummies on a
4185 non-executable stack to work (where the target would stop
4186 with something like a SIGSEGV), then those tests might need
4187 to be re-instated. Given, however, that the tests were only
4188 enabled when momentary breakpoints were not being used, I
4189 suspect that it won't be the case.
4190
4191 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4192 be necessary for call dummies on a non-executable stack on
4193 SPARC. */
4194
4195 /* See if the breakpoints module can explain the signal. */
4196 random_signal
4197 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4198 ecs->event_thread->suspend.stop_signal);
4199
4200 /* If not, perhaps stepping/nexting can. */
4201 if (random_signal)
4202 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4203 && currently_stepping (ecs->event_thread));
4204
4205 /* Perhaps the thread hit a single-step breakpoint of _another_
4206 thread. Single-step breakpoints are transparent to the
4207 breakpoints module. */
4208 if (random_signal)
4209 random_signal = !ecs->hit_singlestep_breakpoint;
4210
4211 /* No? Perhaps we got a moribund watchpoint. */
4212 if (random_signal)
4213 random_signal = !stopped_by_watchpoint;
4214
4215 /* For the program's own signals, act according to
4216 the signal handling tables. */
4217
4218 if (random_signal)
4219 {
4220 /* Signal not for debugging purposes. */
4221 int printed = 0;
4222 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
4223 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
4224
4225 if (debug_infrun)
4226 fprintf_unfiltered (gdb_stdlog, "infrun: random signal (%s)\n",
4227 gdb_signal_to_symbol_string (stop_signal));
4228
4229 stopped_by_random_signal = 1;
4230
4231 if (signal_print[ecs->event_thread->suspend.stop_signal])
4232 {
4233 /* The signal table tells us to print about this signal. */
4234 printed = 1;
4235 target_terminal_ours_for_output ();
4236 observer_notify_signal_received (ecs->event_thread->suspend.stop_signal);
4237 }
4238 /* Always stop on signals if we're either just gaining control
4239 of the program, or the user explicitly requested this thread
4240 to remain stopped. */
4241 if (stop_soon != NO_STOP_QUIETLY
4242 || ecs->event_thread->stop_requested
4243 || (!inf->detaching
4244 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4245 {
4246 stop_waiting (ecs);
4247 return;
4248 }
4249 /* If not going to stop, give terminal back
4250 if we took it away. */
4251 else if (printed)
4252 target_terminal_inferior ();
4253
4254 /* Clear the signal if it should not be passed. */
4255 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4256 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4257
4258 if (ecs->event_thread->prev_pc == stop_pc
4259 && ecs->event_thread->control.trap_expected
4260 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4261 {
4262 /* We were just starting a new sequence, attempting to
4263 single-step off of a breakpoint and expecting a SIGTRAP.
4264 Instead this signal arrives. This signal will take us out
4265 of the stepping range so GDB needs to remember to, when
4266 the signal handler returns, resume stepping off that
4267 breakpoint. */
4268 /* To simplify things, "continue" is forced to use the same
4269 code paths as single-step - set a breakpoint at the
4270 signal return address and then, once hit, step off that
4271 breakpoint. */
4272 if (debug_infrun)
4273 fprintf_unfiltered (gdb_stdlog,
4274 "infrun: signal arrived while stepping over "
4275 "breakpoint\n");
4276
4277 insert_hp_step_resume_breakpoint_at_frame (frame);
4278 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4279 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4280 ecs->event_thread->control.trap_expected = 0;
4281
4282 /* If we were nexting/stepping some other thread, switch to
4283 it, so that we don't continue it, losing control. */
4284 if (!switch_back_to_stepped_thread (ecs))
4285 keep_going (ecs);
4286 return;
4287 }
4288
4289 if (ecs->event_thread->control.step_range_end != 0
4290 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
4291 && pc_in_thread_step_range (stop_pc, ecs->event_thread)
4292 && frame_id_eq (get_stack_frame_id (frame),
4293 ecs->event_thread->control.step_stack_frame_id)
4294 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4295 {
4296 /* The inferior is about to take a signal that will take it
4297 out of the single step range. Set a breakpoint at the
4298 current PC (which is presumably where the signal handler
4299 will eventually return) and then allow the inferior to
4300 run free.
4301
4302 Note that this is only needed for a signal delivered
4303 while in the single-step range. Nested signals aren't a
4304 problem as they eventually all return. */
4305 if (debug_infrun)
4306 fprintf_unfiltered (gdb_stdlog,
4307 "infrun: signal may take us out of "
4308 "single-step range\n");
4309
4310 insert_hp_step_resume_breakpoint_at_frame (frame);
4311 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4312 ecs->event_thread->control.trap_expected = 0;
4313 keep_going (ecs);
4314 return;
4315 }
4316
4317 /* Note: step_resume_breakpoint may be non-NULL. This occurs
4318 when either there's a nested signal, or when there's a
4319 pending signal enabled just as the signal handler returns
4320 (leaving the inferior at the step-resume-breakpoint without
4321 actually executing it). Either way continue until the
4322 breakpoint is really hit. */
4323
4324 if (!switch_back_to_stepped_thread (ecs))
4325 {
4326 if (debug_infrun)
4327 fprintf_unfiltered (gdb_stdlog,
4328 "infrun: random signal, keep going\n");
4329
4330 keep_going (ecs);
4331 }
4332 return;
4333 }
4334
4335 process_event_stop_test (ecs);
4336 }
4337
4338 /* Come here when we've got some debug event / signal we can explain
4339 (IOW, not a random signal), and test whether it should cause a
4340 stop, or whether we should resume the inferior (transparently).
4341 E.g., could be a breakpoint whose condition evaluates false; we
4342 could be still stepping within the line; etc. */
4343
4344 static void
4345 process_event_stop_test (struct execution_control_state *ecs)
4346 {
4347 struct symtab_and_line stop_pc_sal;
4348 struct frame_info *frame;
4349 struct gdbarch *gdbarch;
4350 CORE_ADDR jmp_buf_pc;
4351 struct bpstat_what what;
4352
4353 /* Handle cases caused by hitting a breakpoint. */
4354
4355 frame = get_current_frame ();
4356 gdbarch = get_frame_arch (frame);
4357
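 /* Ask the breakpoints module what, overall, this bpstat says should
    happen for this stop. */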
4358 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4359
4360 if (what.call_dummy)
4361 {
4362 stop_stack_dummy = what.call_dummy;
4363 }
4364
4365 /* If we hit an internal event that triggers symbol changes, the
4366 current frame will be invalidated within bpstat_what (e.g., if we
4367 hit an internal solib event). Re-fetch it. */
4368 frame = get_current_frame ();
4369 gdbarch = get_frame_arch (frame);
4370
4371 switch (what.main_action)
4372 {
4373 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4374 /* If we hit the breakpoint at longjmp while stepping, we
4375 install a momentary breakpoint at the target of the
4376 jmp_buf. */
4377
4378 if (debug_infrun)
4379 fprintf_unfiltered (gdb_stdlog,
4380 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4381
4382 ecs->event_thread->stepping_over_breakpoint = 1;
4383
4384 if (what.is_longjmp)
4385 {
4386 struct value *arg_value;
4387
4388 /* If we set the longjmp breakpoint via a SystemTap probe,
4389 then use it to extract the arguments. The destination PC
4390 is the third argument to the probe. */
4391 arg_value = probe_safe_evaluate_at_pc (frame, 2);
4392 if (arg_value)
4393 jmp_buf_pc = value_as_address (arg_value);
4394 else if (!gdbarch_get_longjmp_target_p (gdbarch)
4395 || !gdbarch_get_longjmp_target (gdbarch,
4396 frame, &jmp_buf_pc))
4397 {
4398 if (debug_infrun)
4399 fprintf_unfiltered (gdb_stdlog,
4400 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4401 "(!gdbarch_get_longjmp_target)\n");
4402 keep_going (ecs);
4403 return;
4404 }
4405
4406 /* Insert a breakpoint at resume address. */
4407 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4408 }
4409 else
4410 check_exception_resume (ecs, frame);
4411 keep_going (ecs);
4412 return;
4413
4414 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4415 {
4416 struct frame_info *init_frame;
4417
4418 /* There are several cases to consider.
4419
4420 1. The initiating frame no longer exists. In this case we
4421 must stop, because the exception or longjmp has gone too
4422 far.
4423
4424 2. The initiating frame exists, and is the same as the
4425 current frame. We stop, because the exception or longjmp
4426 has been caught.
4427
4428 3. The initiating frame exists and is different from the
4429 current frame. This means the exception or longjmp has
4430 been caught beneath the initiating frame, so keep going.
4431
4432 4. longjmp breakpoint has been placed just to protect
4433 against stale dummy frames and user is not interested in
4434 stopping around longjmps. */
4435
4436 if (debug_infrun)
4437 fprintf_unfiltered (gdb_stdlog,
4438 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4439
4440 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4441 != NULL);
4442 delete_exception_resume_breakpoint (ecs->event_thread);
4443
4444 if (what.is_longjmp)
4445 {
4446 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
4447
4448 if (!frame_id_p (ecs->event_thread->initiating_frame))
4449 {
4450 /* Case 4. */
4451 keep_going (ecs);
4452 return;
4453 }
4454 }
4455
4456 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
4457
4458 if (init_frame)
4459 {
4460 struct frame_id current_id
4461 = get_frame_id (get_current_frame ());
4462 if (frame_id_eq (current_id,
4463 ecs->event_thread->initiating_frame))
4464 {
4465 /* Case 2. Fall through. */
4466 }
4467 else
4468 {
4469 /* Case 3. */
4470 keep_going (ecs);
4471 return;
4472 }
4473 }
4474
4475 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
4476 exists. */
4477 delete_step_resume_breakpoint (ecs->event_thread);
4478
4479 end_stepping_range (ecs);
4480 }
4481 return;
4482
4483 case BPSTAT_WHAT_SINGLE:
4484 if (debug_infrun)
4485 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4486 ecs->event_thread->stepping_over_breakpoint = 1;
4487 /* Still need to check other stuff, at least the case where we
4488 are stepping and step out of the right range. */
4489 break;
4490
4491 case BPSTAT_WHAT_STEP_RESUME:
4492 if (debug_infrun)
4493 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4494
4495 delete_step_resume_breakpoint (ecs->event_thread);
4496 if (ecs->event_thread->control.proceed_to_finish
4497 && execution_direction == EXEC_REVERSE)
4498 {
4499 struct thread_info *tp = ecs->event_thread;
4500
4501 /* We are finishing a function in reverse, and just hit the
4502 step-resume breakpoint at the start address of the
4503 function, and we're almost there -- just need to back up
4504 by one more single-step, which should take us back to the
4505 function call. */
4506 tp->control.step_range_start = tp->control.step_range_end = 1;
4507 keep_going (ecs);
4508 return;
4509 }
4510 fill_in_stop_func (gdbarch, ecs);
4511 if (stop_pc == ecs->stop_func_start
4512 && execution_direction == EXEC_REVERSE)
4513 {
4514 /* We are stepping over a function call in reverse, and just
4515 hit the step-resume breakpoint at the start address of
4516 the function. Go back to single-stepping, which should
4517 take us back to the function call. */
4518 ecs->event_thread->stepping_over_breakpoint = 1;
4519 keep_going (ecs);
4520 return;
4521 }
4522 break;
4523
4524 case BPSTAT_WHAT_STOP_NOISY:
4525 if (debug_infrun)
4526 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4527 stop_print_frame = 1;
4528
4529 /* Assume the thread stopped for a breakpoint. We'll still check
4530 whether a/the breakpoint is there when the thread is next
4531 resumed. */
4532 ecs->event_thread->stepping_over_breakpoint = 1;
4533
4534 stop_waiting (ecs);
4535 return;
4536
4537 case BPSTAT_WHAT_STOP_SILENT:
4538 if (debug_infrun)
4539 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4540 stop_print_frame = 0;
4541
4542 /* Assume the thread stopped for a breakpoint. We'll still check
4543 whether a/the breakpoint is there when the thread is next
4544 resumed. */
4545 ecs->event_thread->stepping_over_breakpoint = 1;
4546 stop_waiting (ecs);
4547 return;
4548
4549 case BPSTAT_WHAT_HP_STEP_RESUME:
4550 if (debug_infrun)
4551 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4552
4553 delete_step_resume_breakpoint (ecs->event_thread);
4554 if (ecs->event_thread->step_after_step_resume_breakpoint)
4555 {
4556 /* Back when the step-resume breakpoint was inserted, we
4557 were trying to single-step off a breakpoint. Go back to
4558 doing that. */
4559 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4560 ecs->event_thread->stepping_over_breakpoint = 1;
4561 keep_going (ecs);
4562 return;
4563 }
4564 break;
4565
4566 case BPSTAT_WHAT_KEEP_CHECKING:
4567 break;
4568 }
4569
4570 /* We come here if we hit a breakpoint but should not stop for it.
4571 Possibly we also were stepping and should stop for that. So fall
4572 through and test for stepping. But, if not stepping, do not
4573 stop. */
4574
4575 /* In all-stop mode, if we're currently stepping but have stopped in
4576 some other thread, we need to switch back to the stepped thread. */
4577 if (switch_back_to_stepped_thread (ecs))
4578 return;
4579
4580 if (ecs->event_thread->control.step_resume_breakpoint)
4581 {
4582 if (debug_infrun)
4583 fprintf_unfiltered (gdb_stdlog,
4584 "infrun: step-resume breakpoint is inserted\n");
4585
4586 /* Having a step-resume breakpoint overrides anything
4587 else having to do with stepping commands until
4588 that breakpoint is reached. */
4589 keep_going (ecs);
4590 return;
4591 }
4592
4593 if (ecs->event_thread->control.step_range_end == 0)
4594 {
4595 if (debug_infrun)
4596 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4597 /* Likewise if we aren't even stepping. */
4598 keep_going (ecs);
4599 return;
4600 }
4601
4602 /* Re-fetch current thread's frame in case the code above caused
4603 the frame cache to be re-initialized, making our FRAME variable
4604 a dangling pointer. */
4605 frame = get_current_frame ();
4606 gdbarch = get_frame_arch (frame);
4607 fill_in_stop_func (gdbarch, ecs);
4608
4609 /* If stepping through a line, keep going if still within it.
4610
4611 Note that step_range_end is the address of the first instruction
4612 beyond the step range, and NOT the address of the last instruction
4613 within it!
4614
4615 Note also that during reverse execution, we may be stepping
4616 through a function epilogue and therefore must detect when
4617 the current-frame changes in the middle of a line. */
4618
4619 if (pc_in_thread_step_range (stop_pc, ecs->event_thread)
4620 && (execution_direction != EXEC_REVERSE
4621 || frame_id_eq (get_frame_id (frame),
4622 ecs->event_thread->control.step_frame_id)))
4623 {
4624 if (debug_infrun)
4625 fprintf_unfiltered
4626 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4627 paddress (gdbarch, ecs->event_thread->control.step_range_start),
4628 paddress (gdbarch, ecs->event_thread->control.step_range_end));
4629
4630 /* Tentatively re-enable range stepping; `resume' disables it if
4631 necessary (e.g., if we're stepping over a breakpoint or we
4632 have software watchpoints). */
4633 ecs->event_thread->control.may_range_step = 1;
4634
4635 /* When stepping backward, stop at beginning of line range
4636 (unless it's the function entry point, in which case
4637 keep going back to the call point). */
4638 if (stop_pc == ecs->event_thread->control.step_range_start
4639 && stop_pc != ecs->stop_func_start
4640 && execution_direction == EXEC_REVERSE)
4641 end_stepping_range (ecs);
4642 else
4643 keep_going (ecs);
4644
4645 return;
4646 }
4647
4648 /* We stepped out of the stepping range. */
4649
4650 /* If we are stepping at the source level and entered the runtime
4651 loader dynamic symbol resolution code...
4652
4653 EXEC_FORWARD: we keep on single stepping until we exit the run
4654 time loader code and reach the callee's address.
4655
4656 EXEC_REVERSE: we've already executed the callee (backward), and
4657 the runtime loader code is handled just like any other
4658 undebuggable function call. Now we need only keep stepping
4659 backward through the trampoline code, and that's handled further
4660 down, so there is nothing for us to do here. */
4661
4662 if (execution_direction != EXEC_REVERSE
4663 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4664 && in_solib_dynsym_resolve_code (stop_pc))
4665 {
4666 CORE_ADDR pc_after_resolver =
4667 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4668
4669 if (debug_infrun)
4670 fprintf_unfiltered (gdb_stdlog,
4671 "infrun: stepped into dynsym resolve code\n");
4672
4673 if (pc_after_resolver)
4674 {
4675 /* Set up a step-resume breakpoint at the address
4676 indicated by SKIP_SOLIB_RESOLVER. */
4677 struct symtab_and_line sr_sal;
4678
4679 init_sal (&sr_sal);
4680 sr_sal.pc = pc_after_resolver;
4681 sr_sal.pspace = get_frame_program_space (frame);
4682
4683 insert_step_resume_breakpoint_at_sal (gdbarch,
4684 sr_sal, null_frame_id);
4685 }
4686
4687 keep_going (ecs);
4688 return;
4689 }
4690
4691 if (ecs->event_thread->control.step_range_end != 1
4692 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4693 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4694 && get_frame_type (frame) == SIGTRAMP_FRAME)
4695 {
4696 if (debug_infrun)
4697 fprintf_unfiltered (gdb_stdlog,
4698 "infrun: stepped into signal trampoline\n");
4699 /* The inferior, while doing a "step" or "next", has ended up in
4700 a signal trampoline (either by a signal being delivered or by
4701 the signal handler returning). Just single-step until the
4702 inferior leaves the trampoline (either by calling the handler
4703 or returning). */
4704 keep_going (ecs);
4705 return;
4706 }
4707
4708 /* If we're in the return path from a shared library trampoline,
4709 we want to proceed through the trampoline when stepping. */
4710 /* macro/2012-04-25: This needs to come before the subroutine
4711 call check below as on some targets return trampolines look
4712 like subroutine calls (MIPS16 return thunks). */
4713 if (gdbarch_in_solib_return_trampoline (gdbarch,
4714 stop_pc, ecs->stop_func_name)
4715 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4716 {
4717 /* Determine where this trampoline returns. */
4718 CORE_ADDR real_stop_pc;
4719
4720 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4721
4722 if (debug_infrun)
4723 fprintf_unfiltered (gdb_stdlog,
4724 "infrun: stepped into solib return tramp\n");
4725
4726 /* Only proceed through if we know where it's going. */
4727 if (real_stop_pc)
4728 {
4729 /* And put the step-breakpoint there and go until there. */
4730 struct symtab_and_line sr_sal;
4731
4732 init_sal (&sr_sal); /* initialize to zeroes */
4733 sr_sal.pc = real_stop_pc;
4734 sr_sal.section = find_pc_overlay (sr_sal.pc);
4735 sr_sal.pspace = get_frame_program_space (frame);
4736
4737 /* Do not specify what the fp should be when we stop since
4738 on some machines the prologue is where the new fp value
4739 is established. */
4740 insert_step_resume_breakpoint_at_sal (gdbarch,
4741 sr_sal, null_frame_id);
4742
4743 /* Restart without fiddling with the step ranges or
4744 other state. */
4745 keep_going (ecs);
4746 return;
4747 }
4748 }
4749
4750 /* Check for subroutine calls. The check for the current frame
4751 equalling the step ID is not necessary - the check of the
4752 previous frame's ID is sufficient - but it is a common case and
4753 cheaper than checking the previous frame's ID.
4754
4755 NOTE: frame_id_eq will never report two invalid frame IDs as
4756 being equal, so to get into this block, both the current and
4757 previous frame must have valid frame IDs. */
4758 /* The outer_frame_id check is a heuristic to detect stepping
4759 through startup code. If we step over an instruction which
4760 sets the stack pointer from an invalid value to a valid value,
4761 we may detect that as a subroutine call from the mythical
4762 "outermost" function. This could be fixed by marking
4763 outermost frames as !stack_p,code_p,special_p. Then the
4764 initial outermost frame, before sp was valid, would
4765 have code_addr == &_start. See the comment in frame_id_eq
4766 for more. */
4767 if (!frame_id_eq (get_stack_frame_id (frame),
4768 ecs->event_thread->control.step_stack_frame_id)
4769 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4770 ecs->event_thread->control.step_stack_frame_id)
4771 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
4772 outer_frame_id)
4773 || step_start_function != find_pc_function (stop_pc))))
4774 {
4775 CORE_ADDR real_stop_pc;
4776
4777 if (debug_infrun)
4778 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4779
4780 if ((ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
4781 || ((ecs->event_thread->control.step_range_end == 1)
4782 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4783 ecs->stop_func_start)))
4784 {
4785 /* I presume that step_over_calls is only 0 when we're
4786 supposed to be stepping at the assembly language level
4787 ("stepi"). Just stop. */
4788 /* Also, maybe we just did a "nexti" inside a prolog, so we
4789 thought it was a subroutine call but it was not. Stop as
4790 well. FENN */
4791 /* And this works the same backward as frontward. MVS */
4792 end_stepping_range (ecs);
4793 return;
4794 }
4795
4796 /* Reverse stepping through solib trampolines. */
4797
4798 if (execution_direction == EXEC_REVERSE
4799 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
4800 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4801 || (ecs->stop_func_start == 0
4802 && in_solib_dynsym_resolve_code (stop_pc))))
4803 {
4804 /* Any solib trampoline code can be handled in reverse
4805 by simply continuing to single-step. We have already
4806 executed the solib function (backwards), and a few
4807 steps will take us back through the trampoline to the
4808 caller. */
4809 keep_going (ecs);
4810 return;
4811 }
4812
4813 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4814 {
4815 /* We're doing a "next".
4816
4817 Normal (forward) execution: set a breakpoint at the
4818 callee's return address (the address at which the caller
4819 will resume).
4820
4821 Reverse (backward) execution: set the step-resume
4822 breakpoint at the start of the function that we just
4823 stepped into (backwards), and continue to there. When we
4824 get there, we'll need to single-step back to the caller. */
4825
4826 if (execution_direction == EXEC_REVERSE)
4827 {
4828 /* If we're already at the start of the function, we've either
4829 just stepped backward into a single instruction function,
4830 or stepped back out of a signal handler to the first instruction
4831 of the function. Just keep going, which will single-step back
4832 to the caller. */
4833 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
4834 {
4835 struct symtab_and_line sr_sal;
4836
4837 /* Normal function call return (static or dynamic). */
4838 init_sal (&sr_sal);
4839 sr_sal.pc = ecs->stop_func_start;
4840 sr_sal.pspace = get_frame_program_space (frame);
4841 insert_step_resume_breakpoint_at_sal (gdbarch,
4842 sr_sal, null_frame_id);
4843 }
4844 }
4845 else
4846 insert_step_resume_breakpoint_at_caller (frame);
4847
4848 keep_going (ecs);
4849 return;
4850 }
4851
4852 /* If we are in a function call trampoline (a stub between the
4853 calling routine and the real function), locate the real
4854 function. That's what tells us (a) whether we want to step
4855 into it at all, and (b) what prologue we want to run to the
4856 end of, if we do step into it. */
4857 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4858 if (real_stop_pc == 0)
4859 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4860 if (real_stop_pc != 0)
4861 ecs->stop_func_start = real_stop_pc;
4862
4863 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4864 {
4865 struct symtab_and_line sr_sal;
4866
4867 init_sal (&sr_sal);
4868 sr_sal.pc = ecs->stop_func_start;
4869 sr_sal.pspace = get_frame_program_space (frame);
4870
4871 insert_step_resume_breakpoint_at_sal (gdbarch,
4872 sr_sal, null_frame_id);
4873 keep_going (ecs);
4874 return;
4875 }
4876
4877 /* If we have line number information for the function we are
4878 thinking of stepping into and the function isn't on the skip
4879 list, step into it.
4880
4881 If there are several symtabs at that PC (e.g. with include
4882 files), we just want to know whether *any* of them have line
4883 numbers. find_pc_line handles this. */
4884 {
4885 struct symtab_and_line tmp_sal;
4886
4887 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4888 if (tmp_sal.line != 0
4889 && !function_name_is_marked_for_skip (ecs->stop_func_name,
4890 &tmp_sal))
4891 {
4892 if (execution_direction == EXEC_REVERSE)
4893 handle_step_into_function_backward (gdbarch, ecs);
4894 else
4895 handle_step_into_function (gdbarch, ecs);
4896 return;
4897 }
4898 }
4899
4900 /* If we have no line number and the step-stop-if-no-debug is
4901 set, we stop the step so that the user has a chance to switch
4902 to assembly mode. */
4903 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4904 && step_stop_if_no_debug)
4905 {
4906 end_stepping_range (ecs);
4907 return;
4908 }
4909
4910 if (execution_direction == EXEC_REVERSE)
4911 {
4912 /* If we're already at the start of the function, we've either just
4913 stepped backward into a single instruction function without line
4914 number info, or stepped back out of a signal handler to the first
4915 instruction of the function without line number info. Just keep
4916 going, which will single-step back to the caller. */
4917 if (ecs->stop_func_start != stop_pc)
4918 {
4919 /* Set a breakpoint at callee's start address.
4920 From there we can step once and be back in the caller. */
4921 struct symtab_and_line sr_sal;
4922
4923 init_sal (&sr_sal);
4924 sr_sal.pc = ecs->stop_func_start;
4925 sr_sal.pspace = get_frame_program_space (frame);
4926 insert_step_resume_breakpoint_at_sal (gdbarch,
4927 sr_sal, null_frame_id);
4928 }
4929 }
4930 else
4931 /* Set a breakpoint at callee's return address (the address
4932 at which the caller will resume). */
4933 insert_step_resume_breakpoint_at_caller (frame);
4934
4935 keep_going (ecs);
4936 return;
4937 }
4938
4939 /* Reverse stepping through solib trampolines. */
4940
4941 if (execution_direction == EXEC_REVERSE
4942 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4943 {
4944 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4945 || (ecs->stop_func_start == 0
4946 && in_solib_dynsym_resolve_code (stop_pc)))
4947 {
4948 /* Any solib trampoline code can be handled in reverse
4949 by simply continuing to single-step. We have already
4950 executed the solib function (backwards), and a few
4951 steps will take us back through the trampoline to the
4952 caller. */
4953 keep_going (ecs);
4954 return;
4955 }
4956 else if (in_solib_dynsym_resolve_code (stop_pc))
4957 {
4958 /* Stepped backward into the solib dynsym resolver.
4959 Set a breakpoint at its start and continue, then
4960 one more step will take us out. */
4961 struct symtab_and_line sr_sal;
4962
4963 init_sal (&sr_sal);
4964 sr_sal.pc = ecs->stop_func_start;
4965 sr_sal.pspace = get_frame_program_space (frame);
4966 insert_step_resume_breakpoint_at_sal (gdbarch,
4967 sr_sal, null_frame_id);
4968 keep_going (ecs);
4969 return;
4970 }
4971 }
4972
4973 stop_pc_sal = find_pc_line (stop_pc, 0);
4974
4975 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4976 the trampoline processing logic; however, there are some trampolines
4977 that have no names, so we should do trampoline handling first. */
4978 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4979 && ecs->stop_func_name == NULL
4980 && stop_pc_sal.line == 0)
4981 {
4982 if (debug_infrun)
4983 fprintf_unfiltered (gdb_stdlog,
4984 "infrun: stepped into undebuggable function\n");
4985
4986 /* The inferior just stepped into, or returned to, an
4987 undebuggable function (where there is no debugging information
4988 and no line number corresponding to the address where the
4989 inferior stopped). Since we want to skip this kind of code,
4990 we keep going until the inferior returns from this
4991 function - unless the user has asked us not to (via
4992 set step-mode) or we no longer know how to get back
4993 to the call site. */
4994 if (step_stop_if_no_debug
4995 || !frame_id_p (frame_unwind_caller_id (frame)))
4996 {
4997 /* If we have no line number and the step-stop-if-no-debug
4998 is set, we stop the step so that the user has a chance to
4999 switch to assembly mode. */
5000 end_stepping_range (ecs);
5001 return;
5002 }
5003 else
5004 {
5005 /* Set a breakpoint at callee's return address (the address
5006 at which the caller will resume). */
5007 insert_step_resume_breakpoint_at_caller (frame);
5008 keep_going (ecs);
5009 return;
5010 }
5011 }
5012
5013 if (ecs->event_thread->control.step_range_end == 1)
5014 {
5015 /* It is stepi or nexti. We always want to stop stepping after
5016 one instruction. */
5017 if (debug_infrun)
5018 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
5019 end_stepping_range (ecs);
5020 return;
5021 }
5022
5023 if (stop_pc_sal.line == 0)
5024 {
5025 /* We have no line number information. That means to stop
5026 stepping (does this always happen right after one instruction,
5027 when we do "s" in a function with no line numbers,
5028 or can this happen as a result of a return or longjmp?). */
5029 if (debug_infrun)
5030 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
5031 end_stepping_range (ecs);
5032 return;
5033 }
5034
5035 /* Look for "calls" to inlined functions, part one. If the inline
5036 frame machinery detected some skipped call sites, we have entered
5037 a new inline function. */
5038
5039 if (frame_id_eq (get_frame_id (get_current_frame ()),
5040 ecs->event_thread->control.step_frame_id)
5041 && inline_skipped_frames (ecs->ptid))
5042 {
5043 struct symtab_and_line call_sal;
5044
5045 if (debug_infrun)
5046 fprintf_unfiltered (gdb_stdlog,
5047 "infrun: stepped into inlined function\n");
5048
5049 find_frame_sal (get_current_frame (), &call_sal);
5050
5051 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
5052 {
5053 /* For "step", we're going to stop. But if the call site
5054 for this inlined function is on the same source line as
5055 we were previously stepping, go down into the function
5056 first. Otherwise stop at the call site. */
5057
5058 if (call_sal.line == ecs->event_thread->current_line
5059 && call_sal.symtab == ecs->event_thread->current_symtab)
5060 step_into_inline_frame (ecs->ptid);
5061
5062 end_stepping_range (ecs);
5063 return;
5064 }
5065 else
5066 {
5067 /* For "next", we should stop at the call site if it is on a
5068 different source line. Otherwise continue through the
5069 inlined function. */
5070 if (call_sal.line == ecs->event_thread->current_line
5071 && call_sal.symtab == ecs->event_thread->current_symtab)
5072 keep_going (ecs);
5073 else
5074 end_stepping_range (ecs);
5075 return;
5076 }
5077 }
5078
5079 /* Look for "calls" to inlined functions, part two. If we are still
5080 in the same real function we were stepping through, but we have
5081 to go further up to find the exact frame ID, we are stepping
5082 through a more inlined call beyond its call site. */
5083
5084 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5085 && !frame_id_eq (get_frame_id (get_current_frame ()),
5086 ecs->event_thread->control.step_frame_id)
5087 && stepped_in_from (get_current_frame (),
5088 ecs->event_thread->control.step_frame_id))
5089 {
5090 if (debug_infrun)
5091 fprintf_unfiltered (gdb_stdlog,
5092 "infrun: stepping through inlined function\n");
5093
5094 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5095 keep_going (ecs);
5096 else
5097 end_stepping_range (ecs);
5098 return;
5099 }
5100
5101 if ((stop_pc == stop_pc_sal.pc)
5102 && (ecs->event_thread->current_line != stop_pc_sal.line
5103 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5104 {
5105 /* We are at the start of a different line. So stop. Note that
5106 we don't stop if we step into the middle of a different line.
5107 That is said to make things like for (;;) statements work
5108 better. */
5109 if (debug_infrun)
5110 fprintf_unfiltered (gdb_stdlog,
5111 "infrun: stepped to a different line\n");
5112 end_stepping_range (ecs);
5113 return;
5114 }
5115
5116 /* We aren't done stepping.
5117
5118 Optimize by setting the stepping range to the line.
5119 (We might not be in the original line, but if we entered a
5120 new line in mid-statement, we continue stepping. This makes
5121 things like for(;;) statements work better.) */
5122
5123 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5124 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5125 ecs->event_thread->control.may_range_step = 1;
5126 set_step_info (frame, stop_pc_sal);
5127
5128 if (debug_infrun)
5129 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5130 keep_going (ecs);
5131 }
5132
5133 /* In all-stop mode, if we're currently stepping but have stopped in
5134 some other thread, we may need to switch back to the stepped
5135 thread. Returns true if we set the inferior running, false if we left
5136 it stopped (and the event needs further processing). */
5137
5138 static int
5139 switch_back_to_stepped_thread (struct execution_control_state *ecs)
5140 {
5141 if (!non_stop)
5142 {
5143 struct thread_info *tp;
5144 struct thread_info *stepping_thread;
5145 struct thread_info *step_over;
5146
5147 /* If any thread is blocked on some internal breakpoint, and we
5148 simply need to step over that breakpoint to get it going
5149 again, do that first. */
5150
5151 /* However, if we see an event for the stepping thread, then we
5152 know all other threads have been moved past their breakpoints
5153 already. Let the caller check whether the step is finished,
5154 etc., before deciding to move it past a breakpoint. */
5155 if (ecs->event_thread->control.step_range_end != 0)
5156 return 0;
5157
5158 /* Check if the current thread is blocked on an incomplete
5159 step-over, interrupted by a random signal. */
5160 if (ecs->event_thread->control.trap_expected
5161 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5162 {
5163 if (debug_infrun)
5164 {
5165 fprintf_unfiltered (gdb_stdlog,
5166 "infrun: need to finish step-over of [%s]\n",
5167 target_pid_to_str (ecs->event_thread->ptid));
5168 }
5169 keep_going (ecs);
5170 return 1;
5171 }
5172
5173 /* Check if the current thread is blocked by a single-step
5174 breakpoint of another thread. */
5175 if (ecs->hit_singlestep_breakpoint)
5176 {
5177 if (debug_infrun)
5178 {
5179 fprintf_unfiltered (gdb_stdlog,
5180 "infrun: need to step [%s] over single-step "
5181 "breakpoint\n",
5182 target_pid_to_str (ecs->ptid));
5183 }
5184 keep_going (ecs);
5185 return 1;
5186 }
5187
5188 /* Otherwise, we no longer expect a trap in the current thread.
5189 Clear the trap_expected flag before switching back -- this is
5190 what keep_going does as well, if we call it. */
5191 ecs->event_thread->control.trap_expected = 0;
5192
5193 /* If scheduler locking applies even if not stepping, there's no
5194 need to walk over threads. Above we've checked whether the
5195 current thread is stepping. If some other thread not the
5196 event thread is stepping, then it must be that scheduler
5197 locking is not in effect. */
5198 if (schedlock_applies (0))
5199 return 0;
5200
5201 /* Look for the stepping/nexting thread, and check if any
5202 thread other than the stepping thread needs to start a
5203 step-over. Do all step-overs before actually proceeding with
5204 step/next/etc. */
5205 stepping_thread = NULL;
5206 step_over = NULL;
5207 ALL_NON_EXITED_THREADS (tp)
5208 {
5209 /* Ignore threads of processes we're not resuming. */
5210 if (!sched_multi
5211 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
5212 continue;
5213
5214 /* When stepping over a breakpoint, we lock all threads
5215 except the one that needs to move past the breakpoint.
5216 If a non-event thread has this set, the "incomplete
5217 step-over" check above should have caught it earlier. */
5218 gdb_assert (!tp->control.trap_expected);
5219
5220 /* Did we find the stepping thread? */
5221 if (tp->control.step_range_end)
5222 {
5223 /* Yep. There should only be one though. */
5224 gdb_assert (stepping_thread == NULL);
5225
5226 /* The event thread is handled at the top, before we
5227 enter this loop. */
5228 gdb_assert (tp != ecs->event_thread);
5229
5230 /* If some thread other than the event thread is
5231 stepping, then scheduler locking can't be in effect,
5232 otherwise we wouldn't have resumed the current event
5233 thread in the first place. */
5234 gdb_assert (!schedlock_applies (1));
5235
5236 stepping_thread = tp;
5237 }
5238 else if (thread_still_needs_step_over (tp))
5239 {
5240 step_over = tp;
5241
5242 /* At the top we've returned early if the event thread
5243 is stepping. If some other thread not the event
5244 thread is stepping, then scheduler locking can't be
5245 in effect, and we can resume this thread. No need to
5246 keep looking for the stepping thread then. */
5247 break;
5248 }
5249 }
5250
5251 if (step_over != NULL)
5252 {
5253 tp = step_over;
5254 if (debug_infrun)
5255 {
5256 fprintf_unfiltered (gdb_stdlog,
5257 "infrun: need to step-over [%s]\n",
5258 target_pid_to_str (tp->ptid));
5259 }
5260
5261 /* Only the stepping thread should have this set. */
5262 gdb_assert (tp->control.step_range_end == 0);
5263
5264 ecs->ptid = tp->ptid;
5265 ecs->event_thread = tp;
5266 switch_to_thread (ecs->ptid);
5267 keep_going (ecs);
5268 return 1;
5269 }
5270
5271 if (stepping_thread != NULL)
5272 {
5273 struct frame_info *frame;
5274 struct gdbarch *gdbarch;
5275
5276 tp = stepping_thread;
5277
5278 /* If the stepping thread exited, then don't try to switch
5279 back and resume it, which could fail in several different
5280 ways depending on the target. Instead, just keep going.
5281
5282 We can find a stepping dead thread in the thread list in
5283 two cases:
5284
5285 - The target supports thread exit events, and when the
5286 target tries to delete the thread from the thread list,
5287 inferior_ptid pointed at the exiting thread. In such
5288 case, calling delete_thread does not really remove the
5289 thread from the list; instead, the thread is left listed,
5290 with 'exited' state.
5291
5292 - The target's debug interface does not support thread
5293 exit events, and so we have no idea whatsoever if the
5294 previously stepping thread is still alive. For that
5295 reason, we need to synchronously query the target
5296 now. */
5297 if (is_exited (tp->ptid)
5298 || !target_thread_alive (tp->ptid))
5299 {
5300 if (debug_infrun)
5301 fprintf_unfiltered (gdb_stdlog,
5302 "infrun: not switching back to "
5303 "stepped thread, it has vanished\n");
5304
5305 delete_thread (tp->ptid);
5306 keep_going (ecs);
5307 return 1;
5308 }
5309
5310 if (debug_infrun)
5311 fprintf_unfiltered (gdb_stdlog,
5312 "infrun: switching back to stepped thread\n");
5313
5314 ecs->event_thread = tp;
5315 ecs->ptid = tp->ptid;
5316 context_switch (ecs->ptid);
5317
5318 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
5319 frame = get_current_frame ();
5320 gdbarch = get_frame_arch (frame);
5321
5322 /* If the PC of the thread we were trying to single-step has
5323 changed, then that thread has trapped or been signaled,
5324 but the event has not been reported to GDB yet. Re-poll
5325 the target looking for this particular thread's event
5326 (i.e. temporarily enable schedlock) by:
5327
5328 - setting a break at the current PC
5329 - resuming that particular thread, only (by setting
5330 trap expected)
5331
5332 This prevents us continuously moving the single-step
5333 breakpoint forward, one instruction at a time,
5334 overstepping. */
5335
5336 if (gdbarch_software_single_step_p (gdbarch)
5337 && stop_pc != tp->prev_pc)
5338 {
5339 if (debug_infrun)
5340 fprintf_unfiltered (gdb_stdlog,
5341 "infrun: expected thread advanced also\n");
5342
5343 insert_single_step_breakpoint (get_frame_arch (frame),
5344 get_frame_address_space (frame),
5345 stop_pc);
5346 singlestep_breakpoints_inserted_p = 1;
5347 ecs->event_thread->control.trap_expected = 1;
5348 singlestep_ptid = inferior_ptid;
5349 singlestep_pc = stop_pc;
5350
5351 resume (0, GDB_SIGNAL_0);
5352 prepare_to_wait (ecs);
5353 }
5354 else
5355 {
5356 if (debug_infrun)
5357 fprintf_unfiltered (gdb_stdlog,
5358 "infrun: expected thread still "
5359 "hasn't advanced\n");
5360 keep_going (ecs);
5361 }
5362
5363 return 1;
5364 }
5365 }
5366 return 0;
5367 }
5368
5369 /* Is thread TP in the middle of single-stepping? */
5370
5371 static int
5372 currently_stepping (struct thread_info *tp)
5373 {
5374 return ((tp->control.step_range_end
5375 && tp->control.step_resume_breakpoint == NULL)
5376 || tp->control.trap_expected
5377 || bpstat_should_step ());
5378 }
5379
5380 /* Inferior has stepped into a subroutine call with source code that
5381 we should not step over. Step to the first line of code in
5382 it. */
5383
5384 static void
5385 handle_step_into_function (struct gdbarch *gdbarch,
5386 struct execution_control_state *ecs)
5387 {
5388 struct symtab *s;
5389 struct symtab_and_line stop_func_sal, sr_sal;
5390
5391 fill_in_stop_func (gdbarch, ecs);
5392
5393 s = find_pc_symtab (stop_pc);
5394 if (s && s->language != language_asm)
5395 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5396 ecs->stop_func_start);
5397
5398 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
5399 /* Use the step_resume_break to step until the end of the prologue,
5400 even if that involves jumps (as it seems to on the vax under
5401 4.2). */
5402 /* If the prologue ends in the middle of a source line, continue to
5403 the end of that source line (if it is still within the function).
5404 Otherwise, just go to end of prologue. */
5405 if (stop_func_sal.end
5406 && stop_func_sal.pc != ecs->stop_func_start
5407 && stop_func_sal.end < ecs->stop_func_end)
5408 ecs->stop_func_start = stop_func_sal.end;
5409
5410 /* Architectures which require breakpoint adjustment might not be able
5411 to place a breakpoint at the computed address. If so, the test
5412 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
5413 ecs->stop_func_start to an address at which a breakpoint may be
5414 legitimately placed.
5415
5416 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
5417 made, GDB will enter an infinite loop when stepping through
5418 optimized code consisting of VLIW instructions which contain
5419 subinstructions corresponding to different source lines. On
5420 FR-V, it's not permitted to place a breakpoint on any but the
5421 first subinstruction of a VLIW instruction. When a breakpoint is
5422 set, GDB will adjust the breakpoint address to the beginning of
5423 the VLIW instruction. Thus, we need to make the corresponding
5424 adjustment here when computing the stop address. */
5425
5426 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
5427 {
5428 ecs->stop_func_start
5429 = gdbarch_adjust_breakpoint_address (gdbarch,
5430 ecs->stop_func_start);
5431 }
5432
5433 if (ecs->stop_func_start == stop_pc)
5434 {
5435 /* We are already there: stop now. */
5436 end_stepping_range (ecs);
5437 return;
5438 }
5439 else
5440 {
5441 /* Put the step-breakpoint there and go until there. */
5442 init_sal (&sr_sal); /* initialize to zeroes */
5443 sr_sal.pc = ecs->stop_func_start;
5444 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
5445 sr_sal.pspace = get_frame_program_space (get_current_frame ());
5446
5447 /* Do not specify what the fp should be when we stop since on
5448 some machines the prologue is where the new fp value is
5449 established. */
5450 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
5451
5452 /* And make sure stepping stops right away then. */
5453 ecs->event_thread->control.step_range_end
5454 = ecs->event_thread->control.step_range_start;
5455 }
5456 keep_going (ecs);
5457 }
5458
5459 /* Inferior has stepped backward into a subroutine call with source
5460 code that we should not step over. Step to the beginning of the
5461 last line of code in it. */
5462
5463 static void
5464 handle_step_into_function_backward (struct gdbarch *gdbarch,
5465 struct execution_control_state *ecs)
5466 {
5467 struct symtab *s;
5468 struct symtab_and_line stop_func_sal;
5469
5470 fill_in_stop_func (gdbarch, ecs);
5471
5472 s = find_pc_symtab (stop_pc);
5473 if (s && s->language != language_asm)
5474 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5475 ecs->stop_func_start);
5476
5477 stop_func_sal = find_pc_line (stop_pc, 0);
5478
5479 /* OK, we're just going to keep stepping here. */
5480 if (stop_func_sal.pc == stop_pc)
5481 {
5482 /* We're there already. Just stop stepping now. */
5483 end_stepping_range (ecs);
5484 }
5485 else
5486 {
5487 /* Else just reset the step range and keep going.
5488 No step-resume breakpoint; they don't work for
5489 epilogues, which can have multiple entry paths. */
5490 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5491 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5492 keep_going (ecs);
5493 }
5494 return;
5495 }
5496
5497 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5498 This is used both to skip over functions and to skip over other code. */
5499
5500 static void
5501 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5502 struct symtab_and_line sr_sal,
5503 struct frame_id sr_id,
5504 enum bptype sr_type)
5505 {
5506 /* There should never be more than one step-resume or longjmp-resume
5507 breakpoint per thread, so we should never be setting a new
5508 step_resume_breakpoint when one is already active. */
5509 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5510 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5511
5512 if (debug_infrun)
5513 fprintf_unfiltered (gdb_stdlog,
5514 "infrun: inserting step-resume breakpoint at %s\n",
5515 paddress (gdbarch, sr_sal.pc));
5516
5517 inferior_thread ()->control.step_resume_breakpoint
5518 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5519 }
5520
5521 void
5522 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5523 struct symtab_and_line sr_sal,
5524 struct frame_id sr_id)
5525 {
5526 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5527 sr_sal, sr_id,
5528 bp_step_resume);
5529 }
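
/* A minimal call-site sketch (mirroring the call sites earlier in
   this file): the caller builds the SAL by hand and passes a null
   frame ID when no specific frame is required:

       struct symtab_and_line sr_sal;

       init_sal (&sr_sal);
       sr_sal.pc = ecs->stop_func_start;
       sr_sal.pspace = get_frame_program_space (frame);
       insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
                                             null_frame_id);  */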
5530
5531 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5532 This is used to skip a potential signal handler.
5533
5534 This is called with the interrupted function's frame. The signal
5535 handler, when it returns, will resume the interrupted function at
5536 RETURN_FRAME.pc. */
5537
5538 static void
5539 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5540 {
5541 struct symtab_and_line sr_sal;
5542 struct gdbarch *gdbarch;
5543
5544 gdb_assert (return_frame != NULL);
5545 init_sal (&sr_sal); /* initialize to zeros */
5546
5547 gdbarch = get_frame_arch (return_frame);
5548 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5549 sr_sal.section = find_pc_overlay (sr_sal.pc);
5550 sr_sal.pspace = get_frame_program_space (return_frame);
5551
5552 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
5553 get_stack_frame_id (return_frame),
5554 bp_hp_step_resume);
5555 }
5556
5557 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
5558 is used to skip a function after stepping into it (for "next" or if
5559 the called function has no debugging information).
5560
5561 The current function has almost always been reached by single
5562 stepping a call or return instruction. NEXT_FRAME belongs to the
5563 current function, and the breakpoint will be set at the caller's
5564 resume address.
5565
5566 This is a separate function rather than reusing
5567 insert_hp_step_resume_breakpoint_at_frame in order to avoid
5568 get_prev_frame, which may stop prematurely (see the implementation
5569 of frame_unwind_caller_id for an example). */
5570
5571 static void
5572 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5573 {
5574 struct symtab_and_line sr_sal;
5575 struct gdbarch *gdbarch;
5576
5577 /* We shouldn't have gotten here if we don't know where the call site
5578 is. */
5579 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5580
5581 init_sal (&sr_sal); /* initialize to zeros */
5582
5583 gdbarch = frame_unwind_caller_arch (next_frame);
5584 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5585 frame_unwind_caller_pc (next_frame));
5586 sr_sal.section = find_pc_overlay (sr_sal.pc);
5587 sr_sal.pspace = frame_unwind_program_space (next_frame);
5588
5589 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5590 frame_unwind_caller_id (next_frame));
5591 }
5592
5593 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5594 new breakpoint at the target of a jmp_buf. The handling of
5595 longjmp-resume uses the same mechanisms used for handling
5596 "step-resume" breakpoints. */
5597
5598 static void
5599 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5600 {
5601 /* There should never be more than one longjmp-resume breakpoint per
5602 thread, so we should never be setting a new
5603 longjmp_resume_breakpoint when one is already active. */
5604 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
5605
5606 if (debug_infrun)
5607 fprintf_unfiltered (gdb_stdlog,
5608 "infrun: inserting longjmp-resume breakpoint at %s\n",
5609 paddress (gdbarch, pc));
5610
5611 inferior_thread ()->control.exception_resume_breakpoint =
5612 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5613 }
5614
5615 /* Insert an exception resume breakpoint. TP is the thread throwing
5616 the exception. The block B is the block of the unwinder debug hook
5617 function. FRAME is the frame corresponding to the call to this
5618 function. SYM is the symbol of the function argument holding the
5619 target PC of the exception. */
5620
5621 static void
5622 insert_exception_resume_breakpoint (struct thread_info *tp,
5623 const struct block *b,
5624 struct frame_info *frame,
5625 struct symbol *sym)
5626 {
5627 volatile struct gdb_exception e;
5628
5629 /* We want to ignore errors here. */
5630 TRY_CATCH (e, RETURN_MASK_ERROR)
5631 {
5632 struct symbol *vsym;
5633 struct value *value;
5634 CORE_ADDR handler;
5635 struct breakpoint *bp;
5636
5637 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
5638 value = read_var_value (vsym, frame);
5639 /* If the value was optimized out, revert to the old behavior. */
5640 if (! value_optimized_out (value))
5641 {
5642 handler = value_as_address (value);
5643
5644 if (debug_infrun)
5645 fprintf_unfiltered (gdb_stdlog,
5646 "infrun: exception resume at %lx\n",
5647 (unsigned long) handler);
5648
5649 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5650 handler, bp_exception_resume);
5651
5652 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
5653 frame = NULL;
5654
5655 bp->thread = tp->num;
5656 inferior_thread ()->control.exception_resume_breakpoint = bp;
5657 }
5658 }
5659 }
5660
5661 /* A helper for check_exception_resume that sets an
5662 exception-breakpoint based on a SystemTap probe. */
5663
5664 static void
5665 insert_exception_resume_from_probe (struct thread_info *tp,
5666 const struct bound_probe *probe,
5667 struct frame_info *frame)
5668 {
5669 struct value *arg_value;
5670 CORE_ADDR handler;
5671 struct breakpoint *bp;
5672
5673 arg_value = probe_safe_evaluate_at_pc (frame, 1);
5674 if (!arg_value)
5675 return;
5676
5677 handler = value_as_address (arg_value);
5678
5679 if (debug_infrun)
5680 fprintf_unfiltered (gdb_stdlog,
5681 "infrun: exception resume at %s\n",
5682 paddress (get_objfile_arch (probe->objfile),
5683 handler));
5684
5685 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5686 handler, bp_exception_resume);
5687 bp->thread = tp->num;
5688 inferior_thread ()->control.exception_resume_breakpoint = bp;
5689 }
5690
5691 /* This is called when an exception has been intercepted. Check to
5692 see whether the exception's destination is of interest, and if so,
5693 set an exception resume breakpoint there. */
5694
5695 static void
5696 check_exception_resume (struct execution_control_state *ecs,
5697 struct frame_info *frame)
5698 {
5699 volatile struct gdb_exception e;
5700 struct bound_probe probe;
5701 struct symbol *func;
5702
5703 /* First see if this exception unwinding breakpoint was set via a
5704 SystemTap probe point. If so, the probe has two arguments: the
5705 CFA and the HANDLER. We ignore the CFA, extract the handler, and
5706 set a breakpoint there. */
5707 probe = find_probe_by_pc (get_frame_pc (frame));
5708 if (probe.probe)
5709 {
5710 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
5711 return;
5712 }
5713
5714 func = get_frame_function (frame);
5715 if (!func)
5716 return;
5717
5718 TRY_CATCH (e, RETURN_MASK_ERROR)
5719 {
5720 const struct block *b;
5721 struct block_iterator iter;
5722 struct symbol *sym;
5723 int argno = 0;
5724
5725 /* The exception breakpoint is a thread-specific breakpoint on
5726 the unwinder's debug hook, declared as:
5727
5728 void _Unwind_DebugHook (void *cfa, void *handler);
5729
5730 The CFA argument indicates the frame to which control is
5731 about to be transferred. HANDLER is the destination PC.
5732
5733 We ignore the CFA and set a temporary breakpoint at HANDLER.
5734 This is not extremely efficient but it avoids issues in gdb
5735 with computing the DWARF CFA, and it also works even in weird
5736 cases such as throwing an exception from inside a signal
5737 handler. */
5738
5739 b = SYMBOL_BLOCK_VALUE (func);
5740 ALL_BLOCK_SYMBOLS (b, iter, sym)
5741 {
5742 if (!SYMBOL_IS_ARGUMENT (sym))
5743 continue;
5744
5745 if (argno == 0)
5746 ++argno;
5747 else
5748 {
5749 insert_exception_resume_breakpoint (ecs->event_thread,
5750 b, frame, sym);
5751 break;
5752 }
5753 }
5754 }
5755 }
5756
5757 static void
5758 stop_waiting (struct execution_control_state *ecs)
5759 {
5760 if (debug_infrun)
5761 fprintf_unfiltered (gdb_stdlog, "infrun: stop_waiting\n");
5762
5763 clear_step_over_info ();
5764
5765 /* Let callers know we don't want to wait for the inferior anymore. */
5766 ecs->wait_some_more = 0;
5767 }
5768
5769 /* Called when we should continue running the inferior, because the
5770 current event doesn't cause a user visible stop. This does the
5771 resuming part; waiting for the next event is done elsewhere. */
5772
5773 static void
5774 keep_going (struct execution_control_state *ecs)
5775 {
5776 /* Make sure normal_stop is called if we get a QUIT handled before
5777 reaching resume. */
5778 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5779
5780 /* Save the pc before execution, to compare with pc after stop. */
5781 ecs->event_thread->prev_pc
5782 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5783
5784 if (ecs->event_thread->control.trap_expected
5785 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5786 {
5787 /* We haven't yet gotten our trap, and either: intercepted a
5788 non-signal event (e.g., a fork); or took a signal which we
5789 are supposed to pass through to the inferior. Simply
5790 continue. */
5791 discard_cleanups (old_cleanups);
5792 resume (currently_stepping (ecs->event_thread),
5793 ecs->event_thread->suspend.stop_signal);
5794 }
5795 else
5796 {
5797 volatile struct gdb_exception e;
5798 struct regcache *regcache = get_current_regcache ();
5799
5800 /* Either the trap was not expected, but we are continuing
5801 anyway (if we got a signal, the user asked it be passed to
5802 the child)
5803 -- or --
5804 We got our expected trap, but decided we should resume from
5805 it.
5806
5807 We're going to run this baby now!
5808
5809 Note that insert_breakpoints won't try to re-insert
5810 already inserted breakpoints. Therefore, we don't
5811 care if breakpoints were already inserted, or not. */
5812
5813 /* If we need to step over a breakpoint, and we're not using
5814 displaced stepping to do so, insert all breakpoints
5815 (watchpoints, etc.) but the one we're stepping over, step one
5816 instruction, and then re-insert the breakpoint when that step
5817 is finished. */
5818 if ((ecs->hit_singlestep_breakpoint
5819 || thread_still_needs_step_over (ecs->event_thread))
5820 && !use_displaced_stepping (get_regcache_arch (regcache)))
5821 {
5822 set_step_over_info (get_regcache_aspace (regcache),
5823 regcache_read_pc (regcache));
5824 }
5825 else
5826 clear_step_over_info ();
5827
5828 /* Stop stepping if inserting breakpoints fails. */
5829 TRY_CATCH (e, RETURN_MASK_ERROR)
5830 {
5831 insert_breakpoints ();
5832 }
5833 if (e.reason < 0)
5834 {
5835 exception_print (gdb_stderr, e);
5836 stop_waiting (ecs);
5837 return;
5838 }
5839
5840 ecs->event_thread->control.trap_expected
5841 = (ecs->event_thread->stepping_over_breakpoint
5842 || ecs->hit_singlestep_breakpoint);
5843
5844 /* Do not deliver GDB_SIGNAL_TRAP (except when the user
5845 explicitly specifies that such a signal should be delivered
5846 to the target program). Typically, that would occur when a
5847 user is debugging a target monitor on a simulator: the target
5848 monitor sets a breakpoint; the simulator encounters this
5849 breakpoint and halts the simulation handing control to GDB;
5850 GDB, noting that the stop address doesn't map to any known
5851 breakpoint, returns control back to the simulator; the
5852 simulator then delivers the hardware equivalent of a
5853 GDB_SIGNAL_TRAP to the program being debugged. */
5854 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5855 && !signal_program[ecs->event_thread->suspend.stop_signal])
5856 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5857
5858 discard_cleanups (old_cleanups);
5859 resume (currently_stepping (ecs->event_thread),
5860 ecs->event_thread->suspend.stop_signal);
5861 }
5862
5863 prepare_to_wait (ecs);
5864 }
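
/* Sketch of the in-line (non-displaced) step-over sequence driven by
   keep_going above: record the breakpoint's address space and PC with
   set_step_over_info, let insert_breakpoints skip that one location,
   resume the thread for a single step with trap_expected set, and
   once that step finishes the step-over info is cleared and the
   breakpoint is re-inserted on the next resume. */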
5865
5866 /* This function normally comes after a resume, before
5867 handle_inferior_event exits. It takes care of any last bits of
5868 housekeeping, and sets the all-important wait_some_more flag. */
5869
5870 static void
5871 prepare_to_wait (struct execution_control_state *ecs)
5872 {
5873 if (debug_infrun)
5874 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5875
5876 /* This is the old end of the while loop. Let everybody know we
5877 want to wait for the inferior some more and get called again
5878 soon. */
5879 ecs->wait_some_more = 1;
5880 }
5881
5882 /* We are done with the step range of a step/next/si/ni command.
5883 Called once for each n of a "step n" operation. Notify observers
5884 if not in the middle of doing a "step N" operation for N > 1. */
5885
5886 static void
5887 end_stepping_range (struct execution_control_state *ecs)
5888 {
5889 ecs->event_thread->control.stop_step = 1;
5890 if (!ecs->event_thread->step_multi)
5891 observer_notify_end_stepping_range ();
5892 stop_waiting (ecs);
5893 }
5894
5895 /* Several print_*_reason functions to print why the inferior has stopped.
5896 We always print something when the inferior exits, or receives a signal.
5897 The rest of the cases are dealt with later on in normal_stop and
5898 print_it_typical. Ideally there should be a call to one of these
5899 print_*_reason functions from handle_inferior_event each time
5900 stop_waiting is called.
5901
5902 Note that we don't call these directly, instead we delegate that to
5903 the interpreters, through observers. Interpreters then call these
5904 with whatever uiout is right. */
5905
5906 void
5907 print_end_stepping_range_reason (struct ui_out *uiout)
5908 {
5909 /* For CLI-like interpreters, print nothing. */
5910
5911 if (ui_out_is_mi_like_p (uiout))
5912 {
5913 ui_out_field_string (uiout, "reason",
5914 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5915 }
5916 }
5917
5918 void
5919 print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
5920 {
5921 annotate_signalled ();
5922 if (ui_out_is_mi_like_p (uiout))
5923 ui_out_field_string
5924 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5925 ui_out_text (uiout, "\nProgram terminated with signal ");
5926 annotate_signal_name ();
5927 ui_out_field_string (uiout, "signal-name",
5928 gdb_signal_to_name (siggnal));
5929 annotate_signal_name_end ();
5930 ui_out_text (uiout, ", ");
5931 annotate_signal_string ();
5932 ui_out_field_string (uiout, "signal-meaning",
5933 gdb_signal_to_string (siggnal));
5934 annotate_signal_string_end ();
5935 ui_out_text (uiout, ".\n");
5936 ui_out_text (uiout, "The program no longer exists.\n");
5937 }
5938
5939 void
5940 print_exited_reason (struct ui_out *uiout, int exitstatus)
5941 {
5942 struct inferior *inf = current_inferior ();
5943 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
5944
5945 annotate_exited (exitstatus);
5946 if (exitstatus)
5947 {
5948 if (ui_out_is_mi_like_p (uiout))
5949 ui_out_field_string (uiout, "reason",
5950 async_reason_lookup (EXEC_ASYNC_EXITED));
5951 ui_out_text (uiout, "[Inferior ");
5952 ui_out_text (uiout, plongest (inf->num));
5953 ui_out_text (uiout, " (");
5954 ui_out_text (uiout, pidstr);
5955 ui_out_text (uiout, ") exited with code ");
5956 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
5957 ui_out_text (uiout, "]\n");
5958 }
5959 else
5960 {
5961 if (ui_out_is_mi_like_p (uiout))
5962 ui_out_field_string
5963 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5964 ui_out_text (uiout, "[Inferior ");
5965 ui_out_text (uiout, plongest (inf->num));
5966 ui_out_text (uiout, " (");
5967 ui_out_text (uiout, pidstr);
5968 ui_out_text (uiout, ") exited normally]\n");
5969 }
5970 }
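
/* For illustration only, the CLI text produced above looks like
   (inferior number, pid and exit code are made-up examples):

       [Inferior 1 (process 1234) exited with code 01]
       [Inferior 1 (process 1234) exited normally]

   Note the exit code is printed in octal (the "0%o" format above). */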
5971
5972 void
5973 print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
5974 {
5975 annotate_signal ();
5976
5977 if (siggnal == GDB_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5978 {
5979 struct thread_info *t = inferior_thread ();
5980
5981 ui_out_text (uiout, "\n[");
5982 ui_out_field_string (uiout, "thread-name",
5983 target_pid_to_str (t->ptid));
5984 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5985 ui_out_text (uiout, " stopped");
5986 }
5987 else
5988 {
5989 ui_out_text (uiout, "\nProgram received signal ");
5990 annotate_signal_name ();
5991 if (ui_out_is_mi_like_p (uiout))
5992 ui_out_field_string
5993 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5994 ui_out_field_string (uiout, "signal-name",
5995 gdb_signal_to_name (siggnal));
5996 annotate_signal_name_end ();
5997 ui_out_text (uiout, ", ");
5998 annotate_signal_string ();
5999 ui_out_field_string (uiout, "signal-meaning",
6000 gdb_signal_to_string (siggnal));
6001 annotate_signal_string_end ();
6002 }
6003 ui_out_text (uiout, ".\n");
6004 }
6005
6006 void
6007 print_no_history_reason (struct ui_out *uiout)
6008 {
6009 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
6010 }
6011
6012 /* Print current location without a level number, if we have changed
6013 functions or hit a breakpoint. Print source line if we have one.
6014 bpstat_print contains the logic deciding in detail what to print,
6015 based on the event(s) that just occurred. */
6016
6017 void
6018 print_stop_event (struct target_waitstatus *ws)
6019 {
6020 int bpstat_ret;
6021 int source_flag;
6022 int do_frame_printing = 1;
6023 struct thread_info *tp = inferior_thread ();
6024
6025 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
6026 switch (bpstat_ret)
6027 {
6028 case PRINT_UNKNOWN:
6029 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
6030 should) carry around the function and does (or should) use
6031 that when doing a frame comparison. */
6032 if (tp->control.stop_step
6033 && frame_id_eq (tp->control.step_frame_id,
6034 get_frame_id (get_current_frame ()))
6035 && step_start_function == find_pc_function (stop_pc))
6036 {
6037 /* Finished step, just print source line. */
6038 source_flag = SRC_LINE;
6039 }
6040 else
6041 {
6042 /* Print location and source line. */
6043 source_flag = SRC_AND_LOC;
6044 }
6045 break;
6046 case PRINT_SRC_AND_LOC:
6047 /* Print location and source line. */
6048 source_flag = SRC_AND_LOC;
6049 break;
6050 case PRINT_SRC_ONLY:
6051 source_flag = SRC_LINE;
6052 break;
6053 case PRINT_NOTHING:
6054 /* Something bogus. */
6055 source_flag = SRC_LINE;
6056 do_frame_printing = 0;
6057 break;
6058 default:
6059 internal_error (__FILE__, __LINE__, _("Unknown value."));
6060 }
6061
6062 /* The behavior of this routine with respect to the source
6063 flag is:
6064 SRC_LINE: Print only source line
6065 LOCATION: Print only location
6066 SRC_AND_LOC: Print location and source line. */
6067 if (do_frame_printing)
6068 print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
6069
6070 /* Display the auto-display expressions. */
6071 do_displays ();
6072 }
6073
6074 /* Here to return control to GDB when the inferior stops for real.
6075 Print appropriate messages, remove breakpoints, give terminal our modes.
6076
6077 STOP_PRINT_FRAME nonzero means print the executing frame
6078 (pc, function, args, file, line number and line text).
6079 BREAKPOINTS_FAILED nonzero means stop was due to error
6080 attempting to insert breakpoints. */
6081
6082 void
6083 normal_stop (void)
6084 {
6085 struct target_waitstatus last;
6086 ptid_t last_ptid;
6087 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
6088
6089 get_last_target_status (&last_ptid, &last);
6090
6091 /* If an exception is thrown from this point on, make sure to
6092 propagate GDB's knowledge of the executing state to the
6093 frontend/user running state. A QUIT is an easy exception to see
6094 here, so do this before any filtered output. */
6095 if (!non_stop)
6096 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
6097 else if (last.kind != TARGET_WAITKIND_SIGNALLED
6098 && last.kind != TARGET_WAITKIND_EXITED
6099 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6100 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
6101
6102 /* As with the notification of thread events, we want to delay
6103 notifying the user that we've switched thread context until
6104 the inferior actually stops.
6105
6106 There's no point in saying anything if the inferior has exited.
6107 Note that SIGNALLED here means "exited with a signal", not
6108 "received a signal".
6109
6110 Also skip saying anything in non-stop mode. In that mode, as we
6111 don't want GDB to switch threads behind the user's back, to avoid
6112 races where the user is typing a command to apply to thread x,
6113 but GDB switches to thread y before the user finishes entering
6114 the command, fetch_inferior_event installs a cleanup to restore
6115 the current thread back to the thread the user had selected right
6116 after this event is handled, so we're not really switching, only
6117 informing of a stop. */
6118 if (!non_stop
6119 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
6120 && target_has_execution
6121 && last.kind != TARGET_WAITKIND_SIGNALLED
6122 && last.kind != TARGET_WAITKIND_EXITED
6123 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6124 {
6125 target_terminal_ours_for_output ();
6126 printf_filtered (_("[Switching to %s]\n"),
6127 target_pid_to_str (inferior_ptid));
6128 annotate_thread_changed ();
6129 previous_inferior_ptid = inferior_ptid;
6130 }
6131
6132 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
6133 {
6134 gdb_assert (sync_execution || !target_can_async_p ());
6135
6136 target_terminal_ours_for_output ();
6137 printf_filtered (_("No unwaited-for children left.\n"));
6138 }
6139
6140 if (!breakpoints_always_inserted_mode () && target_has_execution)
6141 {
6142 if (remove_breakpoints ())
6143 {
6144 target_terminal_ours_for_output ();
6145 printf_filtered (_("Cannot remove breakpoints because "
6146 "program is no longer writable.\nFurther "
6147 "execution is probably impossible.\n"));
6148 }
6149 }
6150
6151 /* If an auto-display called a function and that got a signal,
6152 delete that auto-display to avoid an infinite recursion. */
6153
6154 if (stopped_by_random_signal)
6155 disable_current_display ();
6156
6157 /* Don't print a message if in the middle of doing a "step n"
6158 operation for n > 1 */
6159 if (target_has_execution
6160 && last.kind != TARGET_WAITKIND_SIGNALLED
6161 && last.kind != TARGET_WAITKIND_EXITED
6162 && inferior_thread ()->step_multi
6163 && inferior_thread ()->control.stop_step)
6164 goto done;
6165
6166 target_terminal_ours ();
6167 async_enable_stdin ();
6168
6169 /* Set the current source location. This will also happen if we
6170 display the frame below, but the current SAL will be incorrect
6171 during a user hook-stop function. */
6172 if (has_stack_frames () && !stop_stack_dummy)
6173 set_current_sal_from_frame (get_current_frame ());
6174
6175 /* Let the user/frontend see the threads as stopped, but do nothing
6176 if the thread was running an infcall. We may be e.g., evaluating
6177 a breakpoint condition. In that case, the thread had state
6178 THREAD_RUNNING before the infcall, and shall remain set to
6179 running, all without informing the user/frontend about state
6180 transition changes. If this is actually a call command, then the
6181 thread was originally already stopped, so there's no state to
6182 finish either. */
6183 if (target_has_execution && inferior_thread ()->control.in_infcall)
6184 discard_cleanups (old_chain);
6185 else
6186 do_cleanups (old_chain);
6187
6188 /* Look up the hook_stop and run it (CLI internally handles problem
6189 of stop_command's pre-hook not existing). */
6190 if (stop_command)
6191 catch_errors (hook_stop_stub, stop_command,
6192 "Error while running hook_stop:\n", RETURN_MASK_ALL);
6193
6194 if (!has_stack_frames ())
6195 goto done;
6196
6197 if (last.kind == TARGET_WAITKIND_SIGNALLED
6198 || last.kind == TARGET_WAITKIND_EXITED)
6199 goto done;
6200
6201 /* Select innermost stack frame - i.e., current frame is frame 0,
6202 and current location is based on that.
6203 Don't do this on return from a stack dummy routine,
6204 or if the program has exited. */
6205
6206 if (!stop_stack_dummy)
6207 {
6208 select_frame (get_current_frame ());
6209
6210 /* If --batch-silent is enabled then there's no need to print the current
6211 source location, and trying to do so risks causing an error message about
6212 missing source files. */
6213 if (stop_print_frame && !batch_silent)
6214 print_stop_event (&last);
6215 }
6216
6217 /* Save the function value return registers, if we care.
6218 We might be about to restore their previous contents. */
6219 if (inferior_thread ()->control.proceed_to_finish
6220 && execution_direction != EXEC_REVERSE)
6221 {
6222 /* This should not be necessary. */
6223 if (stop_registers)
6224 regcache_xfree (stop_registers);
6225
6226 /* NB: The copy goes through to the target picking up the value of
6227 all the registers. */
6228 stop_registers = regcache_dup (get_current_regcache ());
6229 }
6230
6231 if (stop_stack_dummy == STOP_STACK_DUMMY)
6232 {
6233 /* Pop the empty frame that contains the stack dummy.
6234 This also restores inferior state prior to the call
6235 (struct infcall_suspend_state). */
6236 struct frame_info *frame = get_current_frame ();
6237
6238 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
6239 frame_pop (frame);
6240 /* frame_pop() calls reinit_frame_cache as the last thing it
6241 does which means there's currently no selected frame. We
6242 don't need to re-establish a selected frame if the dummy call
6243 returns normally, that will be done by
6244 restore_infcall_control_state. However, we do have to handle
6245 the case where the dummy call is returning after being
6246 stopped (e.g. the dummy call previously hit a breakpoint).
6247 We can't know which case we have so just always re-establish
6248 a selected frame here. */
6249 select_frame (get_current_frame ());
6250 }
6251
6252 done:
6253 annotate_stopped ();
6254
6255 /* Suppress the stop observer if we're in the middle of:
6256
6257 - a step n (n > 1), as there are still more steps to be done.
6258
6259 - a "finish" command, as the observer will be called in
6260 finish_command_continuation, so it can include the inferior
6261 function's return value.
6262
6263 - calling an inferior function, as we pretend the inferior didn't
6264 run at all. The return value of the call is handled by the
6265 expression evaluator, through call_function_by_hand. */
6266
6267 if (!target_has_execution
6268 || last.kind == TARGET_WAITKIND_SIGNALLED
6269 || last.kind == TARGET_WAITKIND_EXITED
6270 || last.kind == TARGET_WAITKIND_NO_RESUMED
6271 || (!(inferior_thread ()->step_multi
6272 && inferior_thread ()->control.stop_step)
6273 && !(inferior_thread ()->control.stop_bpstat
6274 && inferior_thread ()->control.proceed_to_finish)
6275 && !inferior_thread ()->control.in_infcall))
6276 {
6277 if (!ptid_equal (inferior_ptid, null_ptid))
6278 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
6279 stop_print_frame);
6280 else
6281 observer_notify_normal_stop (NULL, stop_print_frame);
6282 }
6283
6284 if (target_has_execution)
6285 {
6286 if (last.kind != TARGET_WAITKIND_SIGNALLED
6287 && last.kind != TARGET_WAITKIND_EXITED)
6288 /* Delete the breakpoint we stopped at, if it wants to be deleted.
6289 Delete any breakpoint that is to be deleted at the next stop. */
6290 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
6291 }
6292
6293 /* Try to get rid of automatically added inferiors that are no
6294 longer needed. Keeping those around slows down things linearly.
6295 Note that this never removes the current inferior. */
6296 prune_inferiors ();
6297 }
6298
6299 static int
6300 hook_stop_stub (void *cmd)
6301 {
6302 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
6303 return (0);
6304 }
6305 \f
6306 int
6307 signal_stop_state (int signo)
6308 {
6309 return signal_stop[signo];
6310 }
6311
6312 int
6313 signal_print_state (int signo)
6314 {
6315 return signal_print[signo];
6316 }
6317
6318 int
6319 signal_pass_state (int signo)
6320 {
6321 return signal_program[signo];
6322 }
6323
6324 static void
6325 signal_cache_update (int signo)
6326 {
6327 if (signo == -1)
6328 {
6329 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
6330 signal_cache_update (signo);
6331
6332 return;
6333 }
6334
6335 signal_pass[signo] = (signal_stop[signo] == 0
6336 && signal_print[signo] == 0
6337 && signal_program[signo] == 1
6338 && signal_catch[signo] == 0);
6339 }
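
/* Worked example (hypothetical user setting): after
   "handle SIGPIPE nostop noprint pass", signal_stop[GDB_SIGNAL_PIPE]
   and signal_print[GDB_SIGNAL_PIPE] are 0 while
   signal_program[GDB_SIGNAL_PIPE] is 1 and signal_catch[GDB_SIGNAL_PIPE]
   is 0, so the formula above yields signal_pass[GDB_SIGNAL_PIPE] == 1:
   the target may deliver SIGPIPE straight to the inferior without
   reporting a stop to GDB. */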
6340
6341 int
6342 signal_stop_update (int signo, int state)
6343 {
6344 int ret = signal_stop[signo];
6345
6346 signal_stop[signo] = state;
6347 signal_cache_update (signo);
6348 return ret;
6349 }
6350
6351 int
6352 signal_print_update (int signo, int state)
6353 {
6354 int ret = signal_print[signo];
6355
6356 signal_print[signo] = state;
6357 signal_cache_update (signo);
6358 return ret;
6359 }
6360
6361 int
6362 signal_pass_update (int signo, int state)
6363 {
6364 int ret = signal_program[signo];
6365
6366 signal_program[signo] = state;
6367 signal_cache_update (signo);
6368 return ret;
6369 }
6370
6371 /* Update the global 'signal_catch' from INFO and notify the
6372 target. */
6373
6374 void
6375 signal_catch_update (const unsigned int *info)
6376 {
6377 int i;
6378
6379 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
6380 signal_catch[i] = info[i] > 0;
6381 signal_cache_update (-1);
6382 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6383 }
6384
6385 static void
6386 sig_print_header (void)
6387 {
6388 printf_filtered (_("Signal Stop\tPrint\tPass "
6389 "to program\tDescription\n"));
6390 }
6391
6392 static void
6393 sig_print_info (enum gdb_signal oursig)
6394 {
6395 const char *name = gdb_signal_to_name (oursig);
6396 int name_padding = 13 - strlen (name);
6397
6398 if (name_padding <= 0)
6399 name_padding = 0;
6400
6401 printf_filtered ("%s", name);
6402 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6403 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6404 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6405 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6406 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
6407 }
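
/* Shape of the resulting "info signals" table, with illustrative
   (not authoritative) values:

       Signal        Stop      Print   Pass to program Description
       SIGINT        Yes       Yes     No              Interrupt

   The signal name is padded to 13 columns by the code above; the
   remaining columns are tab-separated. */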
6408
6409 /* Specify how various signals in the inferior should be handled. */
6410
6411 static void
6412 handle_command (char *args, int from_tty)
6413 {
6414 char **argv;
6415 int digits, wordlen;
6416 int sigfirst, signum, siglast;
6417 enum gdb_signal oursig;
6418 int allsigs;
6419 int nsigs;
6420 unsigned char *sigs;
6421 struct cleanup *old_chain;
6422
6423 if (args == NULL)
6424 {
6425 error_no_arg (_("signal to handle"));
6426 }
6427
6428 /* Allocate and zero an array of flags for which signals to handle. */
6429
6430 nsigs = (int) GDB_SIGNAL_LAST;
6431 sigs = (unsigned char *) alloca (nsigs);
6432 memset (sigs, 0, nsigs);
6433
6434 /* Break the command line up into args. */
6435
6436 argv = gdb_buildargv (args);
6437 old_chain = make_cleanup_freeargv (argv);
6438
6439 /* Walk through the args, looking for signal oursigs, signal names, and
6440 actions. Signal numbers and signal names may be interspersed with
6441 actions, with the actions being performed for all signals cumulatively
6442 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
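
/* Illustrative command lines (examples only, matching the parsing
   below): "handle SIGUSR1 nostop noprint pass" applies all three
   actions to SIGUSR1, while "handle 5-10 stop print" applies the
   actions to the whole numeric range 5-10; numeric arguments use
   GDB's internal signal numbering (see the comment further down). */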
6443
6444 while (*argv != NULL)
6445 {
6446 wordlen = strlen (*argv);
6447 for (digits = 0; isdigit ((*argv)[digits]); digits++)
6448 {;
6449 }
6450 allsigs = 0;
6451 sigfirst = siglast = -1;
6452
6453 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
6454 {
6455 /* Apply action to all signals except those used by the
6456 debugger. Silently skip those. */
6457 allsigs = 1;
6458 sigfirst = 0;
6459 siglast = nsigs - 1;
6460 }
6461 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
6462 {
6463 SET_SIGS (nsigs, sigs, signal_stop);
6464 SET_SIGS (nsigs, sigs, signal_print);
6465 }
6466 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
6467 {
6468 UNSET_SIGS (nsigs, sigs, signal_program);
6469 }
6470 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
6471 {
6472 SET_SIGS (nsigs, sigs, signal_print);
6473 }
6474 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
6475 {
6476 SET_SIGS (nsigs, sigs, signal_program);
6477 }
6478 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
6479 {
6480 UNSET_SIGS (nsigs, sigs, signal_stop);
6481 }
6482 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
6483 {
6484 SET_SIGS (nsigs, sigs, signal_program);
6485 }
6486 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
6487 {
6488 UNSET_SIGS (nsigs, sigs, signal_print);
6489 UNSET_SIGS (nsigs, sigs, signal_stop);
6490 }
6491 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
6492 {
6493 UNSET_SIGS (nsigs, sigs, signal_program);
6494 }
6495 else if (digits > 0)
6496 {
6497 /* It is numeric. The numeric signal refers to our own
6498 internal signal numbering from target.h, not to host/target
6499 signal number. This is a feature; users really should be
6500 using symbolic names anyway, and the common ones like
6501 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
6502
6503 sigfirst = siglast = (int)
6504 gdb_signal_from_command (atoi (*argv));
6505 if ((*argv)[digits] == '-')
6506 {
6507 siglast = (int)
6508 gdb_signal_from_command (atoi ((*argv) + digits + 1));
6509 }
6510 if (sigfirst > siglast)
6511 {
6512 /* Bet he didn't figure we'd think of this case... */
6513 signum = sigfirst;
6514 sigfirst = siglast;
6515 siglast = signum;
6516 }
6517 }
6518 else
6519 {
6520 oursig = gdb_signal_from_name (*argv);
6521 if (oursig != GDB_SIGNAL_UNKNOWN)
6522 {
6523 sigfirst = siglast = (int) oursig;
6524 }
6525 else
6526 {
6527 /* Not a number and not a recognized flag word => complain. */
6528 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
6529 }
6530 }
6531
6532 /* If any signal numbers or symbol names were found, set flags for
6533 which signals to apply actions to. */
6534
6535 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
6536 {
6537 switch ((enum gdb_signal) signum)
6538 {
6539 case GDB_SIGNAL_TRAP:
6540 case GDB_SIGNAL_INT:
6541 if (!allsigs && !sigs[signum])
6542 {
6543 if (query (_("%s is used by the debugger.\n\
6544 Are you sure you want to change it? "),
6545 gdb_signal_to_name ((enum gdb_signal) signum)))
6546 {
6547 sigs[signum] = 1;
6548 }
6549 else
6550 {
6551 printf_unfiltered (_("Not confirmed, unchanged.\n"));
6552 gdb_flush (gdb_stdout);
6553 }
6554 }
6555 break;
6556 case GDB_SIGNAL_0:
6557 case GDB_SIGNAL_DEFAULT:
6558 case GDB_SIGNAL_UNKNOWN:
6559 /* Make sure that "all" doesn't print these. */
6560 break;
6561 default:
6562 sigs[signum] = 1;
6563 break;
6564 }
6565 }
6566
6567 argv++;
6568 }
6569
6570 for (signum = 0; signum < nsigs; signum++)
6571 if (sigs[signum])
6572 {
6573 signal_cache_update (-1);
6574 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6575 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
6576
6577 if (from_tty)
6578 {
6579 /* Show the results. */
6580 sig_print_header ();
6581 for (; signum < nsigs; signum++)
6582 if (sigs[signum])
6583 sig_print_info (signum);
6584 }
6585
6586 break;
6587 }
6588
6589 do_cleanups (old_chain);
6590 }
6591
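/* For example (illustrative invocations only; the authoritative syntax is
   the "handle" help text registered in _initialize_infrun below):

     (gdb) handle SIGUSR1 SIGUSR2 nostop noprint pass

   clears signal_stop and signal_print and sets signal_program for both
   signals, while

     (gdb) handle 14-15 stop

   applies "stop" (and, implicitly, "print") to internal signal numbers
   14 and 15, since actions apply to the signals cumulatively specified
   before them on the line.  */
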
6592 /* Complete the "handle" command. */
6593
6594 static VEC (char_ptr) *
6595 handle_completer (struct cmd_list_element *ignore,
6596 const char *text, const char *word)
6597 {
6598 VEC (char_ptr) *vec_signals, *vec_keywords, *return_val;
6599 static const char * const keywords[] =
6600 {
6601 "all",
6602 "stop",
6603 "ignore",
6604 "print",
6605 "pass",
6606 "nostop",
6607 "noignore",
6608 "noprint",
6609 "nopass",
6610 NULL,
6611 };
6612
6613 vec_signals = signal_completer (ignore, text, word);
6614 vec_keywords = complete_on_enum (keywords, word, word);
6615
6616 return_val = VEC_merge (char_ptr, vec_signals, vec_keywords);
6617 VEC_free (char_ptr, vec_signals);
6618 VEC_free (char_ptr, vec_keywords);
6619 return return_val;
6620 }
6621
6622 static void
6623 xdb_handle_command (char *args, int from_tty)
6624 {
6625 char **argv;
6626 struct cleanup *old_chain;
6627
6628 if (args == NULL)
6629 error_no_arg (_("xdb command"));
6630
6631 /* Break the command line up into args. */
6632
6633 argv = gdb_buildargv (args);
6634 old_chain = make_cleanup_freeargv (argv);
6635 if (argv[1] != (char *) NULL)
6636 {
6637 char *argBuf;
6638 int bufLen;
6639
6640 bufLen = strlen (argv[0]) + 20;
6641 argBuf = (char *) xmalloc (bufLen);
6642 if (argBuf)
6643 {
6644 int validFlag = 1;
6645 enum gdb_signal oursig;
6646
6647 oursig = gdb_signal_from_name (argv[0]);
6648 memset (argBuf, 0, bufLen);
6649 if (strcmp (argv[1], "Q") == 0)
6650 sprintf (argBuf, "%s %s", argv[0], "noprint");
6651 else
6652 {
6653 if (strcmp (argv[1], "s") == 0)
6654 {
6655 if (!signal_stop[oursig])
6656 sprintf (argBuf, "%s %s", argv[0], "stop");
6657 else
6658 sprintf (argBuf, "%s %s", argv[0], "nostop");
6659 }
6660 else if (strcmp (argv[1], "i") == 0)
6661 {
6662 if (!signal_program[oursig])
6663 sprintf (argBuf, "%s %s", argv[0], "pass");
6664 else
6665 sprintf (argBuf, "%s %s", argv[0], "nopass");
6666 }
6667 else if (strcmp (argv[1], "r") == 0)
6668 {
6669 if (!signal_print[oursig])
6670 sprintf (argBuf, "%s %s", argv[0], "print");
6671 else
6672 sprintf (argBuf, "%s %s", argv[0], "noprint");
6673 }
6674 else
6675 validFlag = 0;
6676 }
6677 if (validFlag)
6678 handle_command (argBuf, from_tty);
6679 else
6680 printf_filtered (_("Invalid signal handling flag.\n"));
6681 if (argBuf)
6682 xfree (argBuf);
6683 }
6684 }
6685 do_cleanups (old_chain);
6686 }
6687
6688 enum gdb_signal
6689 gdb_signal_from_command (int num)
6690 {
6691 if (num >= 1 && num <= 15)
6692 return (enum gdb_signal) num;
6693 error (_("Only signals 1-15 are valid as numeric signals.\n\
6694 Use \"info signals\" for a list of symbolic signals."));
6695 }
6696
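/* A hedged illustration: assuming the first fifteen gdb_signal values
   follow the traditional Unix numbering (which is what the comment in
   handle_command above relies on for SIGHUP, SIGINT, SIGALRM, etc.),
   gdb_signal_from_command (2) yields GDB_SIGNAL_INT, while
   gdb_signal_from_command (20) reports the error above instead of
   returning.  */
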
6697 /* Print current contents of the tables set by the handle command.
6698 It is possible we should just be printing signals actually used
6699 by the current target (but for things to work right when switching
6700 targets, all signals should be in the signal tables). */
6701
6702 static void
6703 signals_info (char *signum_exp, int from_tty)
6704 {
6705 enum gdb_signal oursig;
6706
6707 sig_print_header ();
6708
6709 if (signum_exp)
6710 {
6711 /* First see if this is a symbol name. */
6712 oursig = gdb_signal_from_name (signum_exp);
6713 if (oursig == GDB_SIGNAL_UNKNOWN)
6714 {
6715 /* No, try numeric. */
6716 oursig =
6717 gdb_signal_from_command (parse_and_eval_long (signum_exp));
6718 }
6719 sig_print_info (oursig);
6720 return;
6721 }
6722
6723 printf_filtered ("\n");
6724 /* These ugly casts brought to you by the native VAX compiler. */
6725 for (oursig = GDB_SIGNAL_FIRST;
6726 (int) oursig < (int) GDB_SIGNAL_LAST;
6727 oursig = (enum gdb_signal) ((int) oursig + 1))
6728 {
6729 QUIT;
6730
6731 if (oursig != GDB_SIGNAL_UNKNOWN
6732 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
6733 sig_print_info (oursig);
6734 }
6735
6736 printf_filtered (_("\nUse the \"handle\" command "
6737 "to change these tables.\n"));
6738 }
6739
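/* Illustrative uses of the command implemented above:

     (gdb) info signals SIGSEGV
         prints the header plus the single row for SIGSEGV, taking the
         symbolic-name branch;

     (gdb) info signals
         prints the whole table, skipping GDB_SIGNAL_0,
         GDB_SIGNAL_DEFAULT and GDB_SIGNAL_UNKNOWN as coded above.  */
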
6740 /* Check if it makes sense to read $_siginfo from the current thread
6741 at this point. If not, throw an error. */
6742
6743 static void
6744 validate_siginfo_access (void)
6745 {
6746 /* No current inferior, no siginfo. */
6747 if (ptid_equal (inferior_ptid, null_ptid))
6748 error (_("No thread selected."));
6749
6750 /* Don't try to read from a dead thread. */
6751 if (is_exited (inferior_ptid))
6752 error (_("The current thread has terminated"));
6753
6754 /* ... or from a spinning thread. */
6755 if (is_running (inferior_ptid))
6756 error (_("Selected thread is running."));
6757 }
6758
6759 /* The $_siginfo convenience variable is a bit special. We don't know
6760 for sure the type of the value until we actually have a chance to
6761 fetch the data. The type can change depending on gdbarch, so it is
6762 also dependent on which thread you have selected. This is handled by:
6763
6764 1. making $_siginfo be an internalvar that creates a new value on
6765 access.
6766
6767 2. making the value of $_siginfo be an lval_computed value. */
6768
6769 /* This function implements the lval_computed support for reading a
6770 $_siginfo value. */
6771
6772 static void
6773 siginfo_value_read (struct value *v)
6774 {
6775 LONGEST transferred;
6776
6777 validate_siginfo_access ();
6778
6779 transferred =
6780 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
6781 NULL,
6782 value_contents_all_raw (v),
6783 value_offset (v),
6784 TYPE_LENGTH (value_type (v)));
6785
6786 if (transferred != TYPE_LENGTH (value_type (v)))
6787 error (_("Unable to read siginfo"));
6788 }
6789
6790 /* This function implements the lval_computed support for writing a
6791 $_siginfo value. */
6792
6793 static void
6794 siginfo_value_write (struct value *v, struct value *fromval)
6795 {
6796 LONGEST transferred;
6797
6798 validate_siginfo_access ();
6799
6800 transferred = target_write (&current_target,
6801 TARGET_OBJECT_SIGNAL_INFO,
6802 NULL,
6803 value_contents_all_raw (fromval),
6804 value_offset (v),
6805 TYPE_LENGTH (value_type (fromval)));
6806
6807 if (transferred != TYPE_LENGTH (value_type (fromval)))
6808 error (_("Unable to write siginfo"));
6809 }
6810
6811 static const struct lval_funcs siginfo_value_funcs =
6812 {
6813 siginfo_value_read,
6814 siginfo_value_write
6815 };
6816
6817 /* Return a new value with the correct type for the siginfo object of
6818 the current thread using architecture GDBARCH. Return a void value
6819 if there's no object available. */
6820
6821 static struct value *
6822 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
6823 void *ignore)
6824 {
6825 if (target_has_stack
6826 && !ptid_equal (inferior_ptid, null_ptid)
6827 && gdbarch_get_siginfo_type_p (gdbarch))
6828 {
6829 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6830
6831 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
6832 }
6833
6834 return allocate_value (builtin_type (gdbarch)->builtin_void);
6835 }
6836
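/* Illustrative CLI use of the variable implemented by the functions
   above (and registered in _initialize_infrun below), assuming a
   stopped inferior on a target and gdbarch that provide
   TARGET_OBJECT_SIGNAL_INFO, e.g. GNU/Linux:

     (gdb) print $_siginfo.si_signo
     (gdb) print $_siginfo._sifields._sigfault.si_addr

   Both reads go lazily through siginfo_value_read above; on an
   architecture without a siginfo type the variable is simply void.  */
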
6837 \f
6838 /* infcall_suspend_state contains state about the program itself like its
6839 registers and any signal it received when it last stopped.
6840 This state must be restored regardless of how the inferior function call
6841 ends (either successfully, or after it hits a breakpoint or signal)
6842 if the program is to properly continue where it left off. */
6843
6844 struct infcall_suspend_state
6845 {
6846 struct thread_suspend_state thread_suspend;
6847 #if 0 /* Currently unused and empty structures are not valid C. */
6848 struct inferior_suspend_state inferior_suspend;
6849 #endif
6850
6851 /* Other fields: */
6852 CORE_ADDR stop_pc;
6853 struct regcache *registers;
6854
6855 /* Format of SIGINFO_DATA or NULL if it is not present. */
6856 struct gdbarch *siginfo_gdbarch;
6857
6858 /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
6859 TYPE_LENGTH (gdbarch_get_siginfo_type ()). For different gdbarch the
6860 content would be invalid. */
6861 gdb_byte *siginfo_data;
6862 };
6863
6864 struct infcall_suspend_state *
6865 save_infcall_suspend_state (void)
6866 {
6867 struct infcall_suspend_state *inf_state;
6868 struct thread_info *tp = inferior_thread ();
6869 #if 0
6870 struct inferior *inf = current_inferior ();
6871 #endif
6872 struct regcache *regcache = get_current_regcache ();
6873 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6874 gdb_byte *siginfo_data = NULL;
6875
6876 if (gdbarch_get_siginfo_type_p (gdbarch))
6877 {
6878 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6879 size_t len = TYPE_LENGTH (type);
6880 struct cleanup *back_to;
6881
6882 siginfo_data = xmalloc (len);
6883 back_to = make_cleanup (xfree, siginfo_data);
6884
6885 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6886 siginfo_data, 0, len) == len)
6887 discard_cleanups (back_to);
6888 else
6889 {
6890 /* Errors ignored. */
6891 do_cleanups (back_to);
6892 siginfo_data = NULL;
6893 }
6894 }
6895
6896 inf_state = XCNEW (struct infcall_suspend_state);
6897
6898 if (siginfo_data)
6899 {
6900 inf_state->siginfo_gdbarch = gdbarch;
6901 inf_state->siginfo_data = siginfo_data;
6902 }
6903
6904 inf_state->thread_suspend = tp->suspend;
6905 #if 0 /* Currently unused and empty structures are not valid C. */
6906 inf_state->inferior_suspend = inf->suspend;
6907 #endif
6908
6909 /* run_inferior_call will not use the signal due to its `proceed' call with
6910 GDB_SIGNAL_0 anyway. */
6911 tp->suspend.stop_signal = GDB_SIGNAL_0;
6912
6913 inf_state->stop_pc = stop_pc;
6914
6915 inf_state->registers = regcache_dup (regcache);
6916
6917 return inf_state;
6918 }
6919
6920 /* Restore inferior session state to INF_STATE. */
6921
6922 void
6923 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6924 {
6925 struct thread_info *tp = inferior_thread ();
6926 #if 0
6927 struct inferior *inf = current_inferior ();
6928 #endif
6929 struct regcache *regcache = get_current_regcache ();
6930 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6931
6932 tp->suspend = inf_state->thread_suspend;
6933 #if 0 /* Currently unused and empty structures are not valid C. */
6934 inf->suspend = inf_state->inferior_suspend;
6935 #endif
6936
6937 stop_pc = inf_state->stop_pc;
6938
6939 if (inf_state->siginfo_gdbarch == gdbarch)
6940 {
6941 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6942
6943 /* Errors ignored. */
6944 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6945 inf_state->siginfo_data, 0, TYPE_LENGTH (type));
6946 }
6947
6948 /* The inferior can be gone if the user types "print exit(0)"
6949 (and perhaps other times). */
6950 if (target_has_execution)
6951 /* NB: The register write goes through to the target. */
6952 regcache_cpy (regcache, inf_state->registers);
6953
6954 discard_infcall_suspend_state (inf_state);
6955 }
6956
6957 static void
6958 do_restore_infcall_suspend_state_cleanup (void *state)
6959 {
6960 restore_infcall_suspend_state (state);
6961 }
6962
6963 struct cleanup *
6964 make_cleanup_restore_infcall_suspend_state
6965 (struct infcall_suspend_state *inf_state)
6966 {
6967 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
6968 }
6969
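/* A minimal sketch of the intended pairing of these routines (the
   condition name is hypothetical, for illustration only):

     struct infcall_suspend_state *st = save_infcall_suspend_state ();
     struct cleanup *chain = make_cleanup_restore_infcall_suspend_state (st);
     ... run the inferior function call ...
     if (keep_current_state)
       {
         discard_cleanups (chain);
         discard_infcall_suspend_state (st);
       }
     else
       do_cleanups (chain);   <-- restores registers, stop_pc and siginfo

   restore_infcall_suspend_state frees ST itself, so the two branches
   must not be combined.  */
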
6970 void
6971 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6972 {
6973 regcache_xfree (inf_state->registers);
6974 xfree (inf_state->siginfo_data);
6975 xfree (inf_state);
6976 }
6977
6978 struct regcache *
6979 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
6980 {
6981 return inf_state->registers;
6982 }
6983
6984 /* infcall_control_state contains state regarding gdb's control of the
6985 inferior itself like stepping control. It also contains session state like
6986 the user's currently selected frame. */
6987
6988 struct infcall_control_state
6989 {
6990 struct thread_control_state thread_control;
6991 struct inferior_control_state inferior_control;
6992
6993 /* Other fields: */
6994 enum stop_stack_kind stop_stack_dummy;
6995 int stopped_by_random_signal;
6996 int stop_after_trap;
6997
6998 /* ID of the selected frame when the inferior function call was made. */
6999 struct frame_id selected_frame_id;
7000 };
7001
7002 /* Save all of the information associated with the inferior<==>gdb
7003 connection. */
7004
7005 struct infcall_control_state *
7006 save_infcall_control_state (void)
7007 {
7008 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
7009 struct thread_info *tp = inferior_thread ();
7010 struct inferior *inf = current_inferior ();
7011
7012 inf_status->thread_control = tp->control;
7013 inf_status->inferior_control = inf->control;
7014
7015 tp->control.step_resume_breakpoint = NULL;
7016 tp->control.exception_resume_breakpoint = NULL;
7017
7018 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
7019 chain. If caller's caller is walking the chain, they'll be happier if we
7020 hand them back the original chain when restore_infcall_control_state is
7021 called. */
7022 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
7023
7024 /* Other fields: */
7025 inf_status->stop_stack_dummy = stop_stack_dummy;
7026 inf_status->stopped_by_random_signal = stopped_by_random_signal;
7027 inf_status->stop_after_trap = stop_after_trap;
7028
7029 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
7030
7031 return inf_status;
7032 }
7033
7034 static int
7035 restore_selected_frame (void *args)
7036 {
7037 struct frame_id *fid = (struct frame_id *) args;
7038 struct frame_info *frame;
7039
7040 frame = frame_find_by_id (*fid);
7041
7042 /* If the previously selected frame can no longer be found, there is
7043 nothing to restore; warn and let the caller choose a frame. */
7044 if (frame == NULL)
7045 {
7046 warning (_("Unable to restore previously selected frame."));
7047 return 0;
7048 }
7049
7050 select_frame (frame);
7051
7052 return (1);
7053 }
7054
7055 /* Restore inferior session state to INF_STATUS. */
7056
7057 void
7058 restore_infcall_control_state (struct infcall_control_state *inf_status)
7059 {
7060 struct thread_info *tp = inferior_thread ();
7061 struct inferior *inf = current_inferior ();
7062
7063 if (tp->control.step_resume_breakpoint)
7064 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
7065
7066 if (tp->control.exception_resume_breakpoint)
7067 tp->control.exception_resume_breakpoint->disposition
7068 = disp_del_at_next_stop;
7069
7070 /* Handle the bpstat_copy of the chain. */
7071 bpstat_clear (&tp->control.stop_bpstat);
7072
7073 tp->control = inf_status->thread_control;
7074 inf->control = inf_status->inferior_control;
7075
7076 /* Other fields: */
7077 stop_stack_dummy = inf_status->stop_stack_dummy;
7078 stopped_by_random_signal = inf_status->stopped_by_random_signal;
7079 stop_after_trap = inf_status->stop_after_trap;
7080
7081 if (target_has_stack)
7082 {
7083 /* The point of catch_errors is that if the stack is clobbered,
7084 walking the stack might encounter a garbage pointer and
7085 error() trying to dereference it. */
7086 if (catch_errors
7087 (restore_selected_frame, &inf_status->selected_frame_id,
7088 "Unable to restore previously selected frame:\n",
7089 RETURN_MASK_ERROR) == 0)
7090 /* Error in restoring the selected frame. Select the innermost
7091 frame. */
7092 select_frame (get_current_frame ());
7093 }
7094
7095 xfree (inf_status);
7096 }
7097
7098 static void
7099 do_restore_infcall_control_state_cleanup (void *sts)
7100 {
7101 restore_infcall_control_state (sts);
7102 }
7103
7104 struct cleanup *
7105 make_cleanup_restore_infcall_control_state
7106 (struct infcall_control_state *inf_status)
7107 {
7108 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
7109 }
7110
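/* The control-state routines are intended to be paired the same way as
   the suspend-state routines above: save_infcall_control_state, then
   either restore_infcall_control_state (directly or via the cleanup
   returned here) to roll back stepping state, bpstat and the selected
   frame, or discard_infcall_control_state to keep the current state
   and only release the saved copy.  */
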
7111 void
7112 discard_infcall_control_state (struct infcall_control_state *inf_status)
7113 {
7114 if (inf_status->thread_control.step_resume_breakpoint)
7115 inf_status->thread_control.step_resume_breakpoint->disposition
7116 = disp_del_at_next_stop;
7117
7118 if (inf_status->thread_control.exception_resume_breakpoint)
7119 inf_status->thread_control.exception_resume_breakpoint->disposition
7120 = disp_del_at_next_stop;
7121
7122 /* See save_infcall_control_state for info on stop_bpstat. */
7123 bpstat_clear (&inf_status->thread_control.stop_bpstat);
7124
7125 xfree (inf_status);
7126 }
7127 \f
7128 /* restore_inferior_ptid() will be used by the cleanup machinery
7129 to restore the inferior_ptid value saved in a call to
7130 save_inferior_ptid(). */
7131
7132 static void
7133 restore_inferior_ptid (void *arg)
7134 {
7135 ptid_t *saved_ptid_ptr = arg;
7136
7137 inferior_ptid = *saved_ptid_ptr;
7138 xfree (arg);
7139 }
7140
7141 /* Save the value of inferior_ptid so that it may be restored by a
7142 later call to do_cleanups(). Returns the struct cleanup pointer
7143 needed for later doing the cleanup. */
7144
7145 struct cleanup *
7146 save_inferior_ptid (void)
7147 {
7148 ptid_t *saved_ptid_ptr;
7149
7150 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
7151 *saved_ptid_ptr = inferior_ptid;
7152 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
7153 }
7154
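/* A minimal usage sketch (the ptid value is a placeholder, for
   illustration only):

     struct cleanup *old_chain = save_inferior_ptid ();

     inferior_ptid = some_other_ptid;
     ... operate on that thread ...
     do_cleanups (old_chain);

   The cleanup both restores the original inferior_ptid and frees the
   heap copy allocated above.  */
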
7155 /* See inferior.h. */
7156
7157 void
7158 clear_exit_convenience_vars (void)
7159 {
7160 clear_internalvar (lookup_internalvar ("_exitsignal"));
7161 clear_internalvar (lookup_internalvar ("_exitcode"));
7162 }
7163 \f
7164
7165 /* User interface for reverse debugging:
7166 Set exec-direction / show exec-direction commands
7167 (reports an error unless the target supports reverse execution). */
7168
7169 int execution_direction = EXEC_FORWARD;
7170 static const char exec_forward[] = "forward";
7171 static const char exec_reverse[] = "reverse";
7172 static const char *exec_direction = exec_forward;
7173 static const char *const exec_direction_names[] = {
7174 exec_forward,
7175 exec_reverse,
7176 NULL
7177 };
7178
7179 static void
7180 set_exec_direction_func (char *args, int from_tty,
7181 struct cmd_list_element *cmd)
7182 {
7183 if (target_can_execute_reverse)
7184 {
7185 if (!strcmp (exec_direction, exec_forward))
7186 execution_direction = EXEC_FORWARD;
7187 else if (!strcmp (exec_direction, exec_reverse))
7188 execution_direction = EXEC_REVERSE;
7189 }
7190 else
7191 {
7192 exec_direction = exec_forward;
7193 error (_("Target does not support this operation."));
7194 }
7195 }
7196
7197 static void
7198 show_exec_direction_func (struct ui_file *out, int from_tty,
7199 struct cmd_list_element *cmd, const char *value)
7200 {
7201 switch (execution_direction) {
7202 case EXEC_FORWARD:
7203 fprintf_filtered (out, _("Forward.\n"));
7204 break;
7205 case EXEC_REVERSE:
7206 fprintf_filtered (out, _("Reverse.\n"));
7207 break;
7208 default:
7209 internal_error (__FILE__, __LINE__,
7210 _("bogus execution_direction value: %d"),
7211 (int) execution_direction);
7212 }
7213 }
7214
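/* An illustrative session (assumes a target that can execute in
   reverse, e.g. one running under "record"):

     (gdb) set exec-direction reverse
     (gdb) show exec-direction
     Reverse.

   On a target without reverse execution, set_exec_direction_func above
   resets the variable to "forward" and reports an error instead.  */
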
7215 static void
7216 show_schedule_multiple (struct ui_file *file, int from_tty,
7217 struct cmd_list_element *c, const char *value)
7218 {
7219 fprintf_filtered (file, _("Resuming the execution of threads "
7220 "of all processes is %s.\n"), value);
7221 }
7222
7223 /* Implementation of `siginfo' variable. */
7224
7225 static const struct internalvar_funcs siginfo_funcs =
7226 {
7227 siginfo_make_value,
7228 NULL,
7229 NULL
7230 };
7231
7232 void
7233 _initialize_infrun (void)
7234 {
7235 int i;
7236 int numsigs;
7237 struct cmd_list_element *c;
7238
7239 add_info ("signals", signals_info, _("\
7240 What debugger does when program gets various signals.\n\
7241 Specify a signal as argument to print info on that signal only."));
7242 add_info_alias ("handle", "signals", 0);
7243
7244 c = add_com ("handle", class_run, handle_command, _("\
7245 Specify how to handle signals.\n\
7246 Usage: handle SIGNAL [ACTIONS]\n\
7247 Args are signals and actions to apply to those signals.\n\
7248 If no actions are specified, the current settings for the specified signals\n\
7249 will be displayed instead.\n\
7250 \n\
7251 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7252 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7253 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7254 The special arg \"all\" is recognized to mean all signals except those\n\
7255 used by the debugger, typically SIGTRAP and SIGINT.\n\
7256 \n\
7257 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
7258 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
7259 Stop means reenter debugger if this signal happens (implies print).\n\
7260 Print means print a message if this signal happens.\n\
7261 Pass means let program see this signal; otherwise program doesn't know.\n\
7262 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7263 Pass and Stop may be combined.\n\
7264 \n\
7265 Multiple signals may be specified. Signal numbers and signal names\n\
7266 may be interspersed with actions, with the actions being performed for\n\
7267 all signals cumulatively specified."));
7268 set_cmd_completer (c, handle_completer);
7269
7270 if (xdb_commands)
7271 {
7272 add_com ("lz", class_info, signals_info, _("\
7273 What debugger does when program gets various signals.\n\
7274 Specify a signal as argument to print info on that signal only."));
7275 add_com ("z", class_run, xdb_handle_command, _("\
7276 Specify how to handle a signal.\n\
7277 Args are signals and actions to apply to those signals.\n\
7278 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7279 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7280 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7281 The special arg \"all\" is recognized to mean all signals except those\n\
7282 used by the debugger, typically SIGTRAP and SIGINT.\n\
7283 Recognized actions include \"s\" (toggles between stop and nostop),\n\
7284 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
7285 nopass), \"Q\" (noprint)\n\
7286 Stop means reenter debugger if this signal happens (implies print).\n\
7287 Print means print a message if this signal happens.\n\
7288 Pass means let program see this signal; otherwise program doesn't know.\n\
7289 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7290 Pass and Stop may be combined."));
7291 }
7292
7293 if (!dbx_commands)
7294 stop_command = add_cmd ("stop", class_obscure,
7295 not_just_help_class_command, _("\
7296 There is no `stop' command, but you can set a hook on `stop'.\n\
7297 This allows you to set a list of commands to be run each time execution\n\
7298 of the program stops."), &cmdlist);
7299
7300 add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
7301 Set inferior debugging."), _("\
7302 Show inferior debugging."), _("\
7303 When non-zero, inferior specific debugging is enabled."),
7304 NULL,
7305 show_debug_infrun,
7306 &setdebuglist, &showdebuglist);
7307
7308 add_setshow_boolean_cmd ("displaced", class_maintenance,
7309 &debug_displaced, _("\
7310 Set displaced stepping debugging."), _("\
7311 Show displaced stepping debugging."), _("\
7312 When non-zero, displaced stepping specific debugging is enabled."),
7313 NULL,
7314 show_debug_displaced,
7315 &setdebuglist, &showdebuglist);
7316
7317 add_setshow_boolean_cmd ("non-stop", no_class,
7318 &non_stop_1, _("\
7319 Set whether gdb controls the inferior in non-stop mode."), _("\
7320 Show whether gdb controls the inferior in non-stop mode."), _("\
7321 When debugging a multi-threaded program and this setting is\n\
7322 off (the default, also called all-stop mode), when one thread stops\n\
7323 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
7324 all other threads in the program while you interact with the thread of\n\
7325 interest. When you continue or step a thread, you can allow the other\n\
7326 threads to run, or have them remain stopped, but while you inspect any\n\
7327 thread's state, all threads stop.\n\
7328 \n\
7329 In non-stop mode, when one thread stops, other threads can continue\n\
7330 to run freely. You'll be able to step each thread independently,\n\
7331 leave it stopped or free to run as needed."),
7332 set_non_stop,
7333 show_non_stop,
7334 &setlist,
7335 &showlist);
7336
7337 numsigs = (int) GDB_SIGNAL_LAST;
7338 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
7339 signal_print = (unsigned char *)
7340 xmalloc (sizeof (signal_print[0]) * numsigs);
7341 signal_program = (unsigned char *)
7342 xmalloc (sizeof (signal_program[0]) * numsigs);
7343 signal_catch = (unsigned char *)
7344 xmalloc (sizeof (signal_catch[0]) * numsigs);
7345 signal_pass = (unsigned char *)
7346 xmalloc (sizeof (signal_pass[0]) * numsigs);
7347 for (i = 0; i < numsigs; i++)
7348 {
7349 signal_stop[i] = 1;
7350 signal_print[i] = 1;
7351 signal_program[i] = 1;
7352 signal_catch[i] = 0;
7353 }
7354
7355 /* Signals caused by debugger's own actions
7356 should not be given to the program afterwards. */
7357 signal_program[GDB_SIGNAL_TRAP] = 0;
7358 signal_program[GDB_SIGNAL_INT] = 0;
7359
7360 /* Signals that are not errors should not normally enter the debugger. */
7361 signal_stop[GDB_SIGNAL_ALRM] = 0;
7362 signal_print[GDB_SIGNAL_ALRM] = 0;
7363 signal_stop[GDB_SIGNAL_VTALRM] = 0;
7364 signal_print[GDB_SIGNAL_VTALRM] = 0;
7365 signal_stop[GDB_SIGNAL_PROF] = 0;
7366 signal_print[GDB_SIGNAL_PROF] = 0;
7367 signal_stop[GDB_SIGNAL_CHLD] = 0;
7368 signal_print[GDB_SIGNAL_CHLD] = 0;
7369 signal_stop[GDB_SIGNAL_IO] = 0;
7370 signal_print[GDB_SIGNAL_IO] = 0;
7371 signal_stop[GDB_SIGNAL_POLL] = 0;
7372 signal_print[GDB_SIGNAL_POLL] = 0;
7373 signal_stop[GDB_SIGNAL_URG] = 0;
7374 signal_print[GDB_SIGNAL_URG] = 0;
7375 signal_stop[GDB_SIGNAL_WINCH] = 0;
7376 signal_print[GDB_SIGNAL_WINCH] = 0;
7377 signal_stop[GDB_SIGNAL_PRIO] = 0;
7378 signal_print[GDB_SIGNAL_PRIO] = 0;
7379
7380 /* These signals are used internally by user-level thread
7381 implementations. (See signal(5) on Solaris.) Like the above
7382 signals, a healthy program receives and handles them as part of
7383 its normal operation. */
7384 signal_stop[GDB_SIGNAL_LWP] = 0;
7385 signal_print[GDB_SIGNAL_LWP] = 0;
7386 signal_stop[GDB_SIGNAL_WAITING] = 0;
7387 signal_print[GDB_SIGNAL_WAITING] = 0;
7388 signal_stop[GDB_SIGNAL_CANCEL] = 0;
7389 signal_print[GDB_SIGNAL_CANCEL] = 0;
7390
7391 /* Update cached state. */
7392 signal_cache_update (-1);
7393
7394 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
7395 &stop_on_solib_events, _("\
7396 Set stopping for shared library events."), _("\
7397 Show stopping for shared library events."), _("\
7398 If nonzero, gdb will give control to the user when the dynamic linker\n\
7399 notifies gdb of shared library events. The most common event of interest\n\
7400 to the user would be loading/unloading of a new library."),
7401 set_stop_on_solib_events,
7402 show_stop_on_solib_events,
7403 &setlist, &showlist);
7404
7405 add_setshow_enum_cmd ("follow-fork-mode", class_run,
7406 follow_fork_mode_kind_names,
7407 &follow_fork_mode_string, _("\
7408 Set debugger response to a program call of fork or vfork."), _("\
7409 Show debugger response to a program call of fork or vfork."), _("\
7410 A fork or vfork creates a new process. follow-fork-mode can be:\n\
7411 parent - the original process is debugged after a fork\n\
7412 child - the new process is debugged after a fork\n\
7413 The unfollowed process will continue to run.\n\
7414 By default, the debugger will follow the parent process."),
7415 NULL,
7416 show_follow_fork_mode_string,
7417 &setlist, &showlist);
7418
7419 add_setshow_enum_cmd ("follow-exec-mode", class_run,
7420 follow_exec_mode_names,
7421 &follow_exec_mode_string, _("\
7422 Set debugger response to a program call of exec."), _("\
7423 Show debugger response to a program call of exec."), _("\
7424 An exec call replaces the program image of a process.\n\
7425 \n\
7426 follow-exec-mode can be:\n\
7427 \n\
7428 new - the debugger creates a new inferior and rebinds the process\n\
7429 to this new inferior. The program the process was running before\n\
7430 the exec call can be restarted afterwards by restarting the original\n\
7431 inferior.\n\
7432 \n\
7433 same - the debugger keeps the process bound to the same inferior.\n\
7434 The new executable image replaces the previous executable loaded in\n\
7435 the inferior. Restarting the inferior after the exec call restarts\n\
7436 the executable the process was running after the exec call.\n\
7437 \n\
7438 By default, the debugger will use the same inferior."),
7439 NULL,
7440 show_follow_exec_mode_string,
7441 &setlist, &showlist);
7442
7443 add_setshow_enum_cmd ("scheduler-locking", class_run,
7444 scheduler_enums, &scheduler_mode, _("\
7445 Set mode for locking scheduler during execution."), _("\
7446 Show mode for locking scheduler during execution."), _("\
7447 off == no locking (threads may preempt at any time)\n\
7448 on == full locking (no thread except the current thread may run)\n\
7449 step == scheduler locked during every single-step operation.\n\
7450 In this mode, no other thread may run during a step command.\n\
7451 Other threads may run while stepping over a function call ('next')."),
7452 set_schedlock_func, /* traps on target vector */
7453 show_scheduler_mode,
7454 &setlist, &showlist);
7455
7456 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
7457 Set mode for resuming threads of all processes."), _("\
7458 Show mode for resuming threads of all processes."), _("\
7459 When on, execution commands (such as 'continue' or 'next') resume all\n\
7460 threads of all processes. When off (which is the default), execution\n\
7461 commands only resume the threads of the current process. The set of\n\
7462 threads that are resumed is further refined by the scheduler-locking\n\
7463 mode (see help set scheduler-locking)."),
7464 NULL,
7465 show_schedule_multiple,
7466 &setlist, &showlist);
7467
7468 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
7469 Set mode of the step operation."), _("\
7470 Show mode of the step operation."), _("\
7471 When set, doing a step over a function without debug line information\n\
7472 will stop at the first instruction of that function. Otherwise, the\n\
7473 function is skipped and the step command stops at a different source line."),
7474 NULL,
7475 show_step_stop_if_no_debug,
7476 &setlist, &showlist);
7477
7478 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
7479 &can_use_displaced_stepping, _("\
7480 Set debugger's willingness to use displaced stepping."), _("\
7481 Show debugger's willingness to use displaced stepping."), _("\
7482 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
7483 supported by the target architecture. If off, gdb will not use displaced\n\
7484 stepping to step over breakpoints, even if such is supported by the target\n\
7485 architecture. If auto (which is the default), gdb will use displaced stepping\n\
7486 if the target architecture supports it and non-stop mode is active, but will not\n\
7487 use it in all-stop mode (see help set non-stop)."),
7488 NULL,
7489 show_can_use_displaced_stepping,
7490 &setlist, &showlist);
7491
7492 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
7493 &exec_direction, _("Set direction of execution.\n\
7494 Options are 'forward' or 'reverse'."),
7495 _("Show direction of execution (forward/reverse)."),
7496 _("Tells gdb whether to execute forward or backward."),
7497 set_exec_direction_func, show_exec_direction_func,
7498 &setlist, &showlist);
7499
7500 /* Set/show detach-on-fork: user-settable mode. */
7501
7502 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
7503 Set whether gdb will detach the child of a fork."), _("\
7504 Show whether gdb will detach the child of a fork."), _("\
7505 Tells gdb whether to detach the child of a fork."),
7506 NULL, NULL, &setlist, &showlist);
7507
7508 /* Set/show disable address space randomization mode. */
7509
7510 add_setshow_boolean_cmd ("disable-randomization", class_support,
7511 &disable_randomization, _("\
7512 Set disabling of debuggee's virtual address space randomization."), _("\
7513 Show disabling of debuggee's virtual address space randomization."), _("\
7514 When this mode is on (which is the default), randomization of the virtual\n\
7515 address space is disabled. Standalone programs run with the randomization\n\
7516 enabled by default on some platforms."),
7517 &set_disable_randomization,
7518 &show_disable_randomization,
7519 &setlist, &showlist);
7520
7521 /* ptid initializations */
7522 inferior_ptid = null_ptid;
7523 target_last_wait_ptid = minus_one_ptid;
7524
7525 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
7526 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
7527 observer_attach_thread_exit (infrun_thread_thread_exit);
7528 observer_attach_inferior_exit (infrun_inferior_exit);
7529
7530 /* Explicitly create without lookup, since that tries to create a
7531 value with a void typed value, and when we get here, gdbarch
7532 isn't initialized yet. At this point, we're quite sure there
7533 isn't another convenience variable of the same name. */
7534 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);
7535
7536 add_setshow_boolean_cmd ("observer", no_class,
7537 &observer_mode_1, _("\
7538 Set whether gdb controls the inferior in observer mode."), _("\
7539 Show whether gdb controls the inferior in observer mode."), _("\
7540 In observer mode, GDB can get data from the inferior, but not\n\
7541 affect its execution. Registers and memory may not be changed,\n\
7542 breakpoints may not be set, and the program cannot be interrupted\n\
7543 or signalled."),
7544 set_observer_mode,
7545 show_observer_mode,
7546 &setlist,
7547 &showlist);
7548 }