1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2014 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "infrun.h"
23 #include <string.h>
24 #include <ctype.h>
25 #include "symtab.h"
26 #include "frame.h"
27 #include "inferior.h"
28 #include "exceptions.h"
29 #include "breakpoint.h"
30 #include "gdb_wait.h"
31 #include "gdbcore.h"
32 #include "gdbcmd.h"
33 #include "cli/cli-script.h"
34 #include "target.h"
35 #include "gdbthread.h"
36 #include "annotate.h"
37 #include "symfile.h"
38 #include "top.h"
39 #include <signal.h>
40 #include "inf-loop.h"
41 #include "regcache.h"
42 #include "value.h"
43 #include "observer.h"
44 #include "language.h"
45 #include "solib.h"
46 #include "main.h"
47 #include "dictionary.h"
48 #include "block.h"
49 #include "gdb_assert.h"
50 #include "mi/mi-common.h"
51 #include "event-top.h"
52 #include "record.h"
53 #include "record-full.h"
54 #include "inline-frame.h"
55 #include "jit.h"
56 #include "tracepoint.h"
57 #include "continuations.h"
58 #include "interps.h"
59 #include "skip.h"
60 #include "probe.h"
61 #include "objfiles.h"
62 #include "completer.h"
63 #include "target-descriptions.h"
64 #include "target-dcache.h"
65
66 /* Prototypes for local functions */
67
68 static void signals_info (char *, int);
69
70 static void handle_command (char *, int);
71
72 static void sig_print_info (enum gdb_signal);
73
74 static void sig_print_header (void);
75
76 static void resume_cleanups (void *);
77
78 static int hook_stop_stub (void *);
79
80 static int restore_selected_frame (void *);
81
82 static int follow_fork (void);
83
84 static void set_schedlock_func (char *args, int from_tty,
85 struct cmd_list_element *c);
86
87 static int currently_stepping (struct thread_info *tp);
88
89 static void xdb_handle_command (char *args, int from_tty);
90
91 void _initialize_infrun (void);
92
93 void nullify_last_target_wait_ptid (void);
94
95 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
96
97 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
98
99 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
100
101 /* When set, stop the 'step' command if we enter a function which has
102 no line number information. The normal behavior is that we step
103 over such a function. */
104 int step_stop_if_no_debug = 0;
105 static void
106 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
107 struct cmd_list_element *c, const char *value)
108 {
109 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
110 }
111
112 /* In asynchronous mode, but simulating synchronous execution. */
113
114 int sync_execution = 0;
115
116 /* proceed and normal_stop use this to notify the user when the
117 inferior stopped in a different thread than it had been running
118 in. */
119
120 static ptid_t previous_inferior_ptid;
121
122 /* If set (default for legacy reasons), when following a fork, GDB
123 will detach from one of the fork branches, child or parent.
124 Exactly which branch is detached depends on 'set follow-fork-mode'
125 setting. */
126
127 static int detach_fork = 1;
128
129 int debug_displaced = 0;
130 static void
131 show_debug_displaced (struct ui_file *file, int from_tty,
132 struct cmd_list_element *c, const char *value)
133 {
134 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
135 }
136
137 unsigned int debug_infrun = 0;
138 static void
139 show_debug_infrun (struct ui_file *file, int from_tty,
140 struct cmd_list_element *c, const char *value)
141 {
142 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
143 }
144
145
146 /* Support for disabling address space randomization. */
147
148 int disable_randomization = 1;
149
150 static void
151 show_disable_randomization (struct ui_file *file, int from_tty,
152 struct cmd_list_element *c, const char *value)
153 {
154 if (target_supports_disable_randomization ())
155 fprintf_filtered (file,
156 _("Disabling randomization of debuggee's "
157 "virtual address space is %s.\n"),
158 value);
159 else
160 fputs_filtered (_("Disabling randomization of debuggee's "
161 "virtual address space is unsupported on\n"
162 "this platform.\n"), file);
163 }
164
165 static void
166 set_disable_randomization (char *args, int from_tty,
167 struct cmd_list_element *c)
168 {
169 if (!target_supports_disable_randomization ())
170 error (_("Disabling randomization of debuggee's "
171 "virtual address space is unsupported on\n"
172 "this platform."));
173 }
174
175 /* User interface for non-stop mode. */
176
177 int non_stop = 0;
178 static int non_stop_1 = 0;
179
180 static void
181 set_non_stop (char *args, int from_tty,
182 struct cmd_list_element *c)
183 {
184 if (target_has_execution)
185 {
186 non_stop_1 = non_stop;
187 error (_("Cannot change this setting while the inferior is running."));
188 }
189
190 non_stop = non_stop_1;
191 }
192
193 static void
194 show_non_stop (struct ui_file *file, int from_tty,
195 struct cmd_list_element *c, const char *value)
196 {
197 fprintf_filtered (file,
198 _("Controlling the inferior in non-stop mode is %s.\n"),
199 value);
200 }
201
202 /* "Observer mode" is somewhat like a more extreme version of
203 non-stop, in which all GDB operations that might affect the
204 target's execution have been disabled. */
205
206 int observer_mode = 0;
207 static int observer_mode_1 = 0;
208
209 static void
210 set_observer_mode (char *args, int from_tty,
211 struct cmd_list_element *c)
212 {
213 if (target_has_execution)
214 {
215 observer_mode_1 = observer_mode;
216 error (_("Cannot change this setting while the inferior is running."));
217 }
218
219 observer_mode = observer_mode_1;
220
221 may_write_registers = !observer_mode;
222 may_write_memory = !observer_mode;
223 may_insert_breakpoints = !observer_mode;
224 may_insert_tracepoints = !observer_mode;
225 /* We can insert fast tracepoints in or out of observer mode,
226 but enable them if we're going into this mode. */
227 if (observer_mode)
228 may_insert_fast_tracepoints = 1;
229 may_stop = !observer_mode;
230 update_target_permissions ();
231
232 /* Going *into* observer mode we must force non-stop, then
233 going out we leave it that way. */
234 if (observer_mode)
235 {
236 pagination_enabled = 0;
237 non_stop = non_stop_1 = 1;
238 }
239
240 if (from_tty)
241 printf_filtered (_("Observer mode is now %s.\n"),
242 (observer_mode ? "on" : "off"));
243 }
244
245 static void
246 show_observer_mode (struct ui_file *file, int from_tty,
247 struct cmd_list_element *c, const char *value)
248 {
249 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
250 }
251
252 /* This updates the value of observer mode based on changes in
253 permissions. Note that we are deliberately ignoring the values of
254 may-write-registers and may-write-memory, since the user may have
255 reason to enable these during a session, for instance to turn on a
256 debugging-related global. */
257
258 void
259 update_observer_mode (void)
260 {
261 int newval;
262
263 newval = (!may_insert_breakpoints
264 && !may_insert_tracepoints
265 && may_insert_fast_tracepoints
266 && !may_stop
267 && non_stop);
268
269 /* Let the user know if things change. */
270 if (newval != observer_mode)
271 printf_filtered (_("Observer mode is now %s.\n"),
272 (newval ? "on" : "off"));
273
274 observer_mode = observer_mode_1 = newval;
275 }
276
277 /* Tables of how to react to signals; the user sets them. */
278
279 static unsigned char *signal_stop;
280 static unsigned char *signal_print;
281 static unsigned char *signal_program;
282
283 /* Table of signals that are registered with "catch signal". A
284 non-zero entry indicates that the signal is caught by some "catch
285 signal" command. This has size GDB_SIGNAL_LAST, to accommodate all
286 signals. */
287 static unsigned char *signal_catch;
288
289 /* Table of signals that the target may silently handle.
290 This is automatically determined from the flags above,
291 and simply cached here. */
292 static unsigned char *signal_pass;
293
294 #define SET_SIGS(nsigs,sigs,flags) \
295 do { \
296 int signum = (nsigs); \
297 while (signum-- > 0) \
298 if ((sigs)[signum]) \
299 (flags)[signum] = 1; \
300 } while (0)
301
302 #define UNSET_SIGS(nsigs,sigs,flags) \
303 do { \
304 int signum = (nsigs); \
305 while (signum-- > 0) \
306 if ((sigs)[signum]) \
307 (flags)[signum] = 0; \
308 } while (0)
309
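/* Editorial sketch, not part of the original source: the SET_SIGS and
   UNSET_SIGS helpers above operate on the per-signal tables declared
   earlier.  A caller handling something like "handle SIG stop print"
   would typically apply them along these lines (the surrounding caller
   is an assumption here):

     SET_SIGS (nsigs, sigs, signal_stop);
     SET_SIGS (nsigs, sigs, signal_print);

   where SIGS is an array with a nonzero entry for each selected signal
   and NSIGS is its length.  */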
310 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
311 this function is to avoid exporting `signal_program'. */
312
313 void
314 update_signals_program_target (void)
315 {
316 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
317 }
318
319 /* Value to pass to target_resume() to cause all threads to resume. */
320
321 #define RESUME_ALL minus_one_ptid
322
323 /* Command list pointer for the "stop" placeholder. */
324
325 static struct cmd_list_element *stop_command;
326
327 /* Function inferior was in as of last step command. */
328
329 static struct symbol *step_start_function;
330
331 /* Nonzero if we want to give control to the user when we're notified
332 of shared library events by the dynamic linker. */
333 int stop_on_solib_events;
334
335 /* Enable or disable optional shared library event breakpoints
336 as appropriate when the above flag is changed. */
337
338 static void
339 set_stop_on_solib_events (char *args, int from_tty, struct cmd_list_element *c)
340 {
341 update_solib_breakpoints ();
342 }
343
344 static void
345 show_stop_on_solib_events (struct ui_file *file, int from_tty,
346 struct cmd_list_element *c, const char *value)
347 {
348 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
349 value);
350 }
351
352 /* Nonzero means we are expecting a trace trap and should stop the
353 inferior and return silently when it happens. */
354
355 int stop_after_trap;
356
357 /* Save register contents here when executing a "finish" command or when
358 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
359 Thus this contains the return value from the called function (assuming
360 values are returned in a register). */
361
362 struct regcache *stop_registers;
363
364 /* Nonzero after stop if current stack frame should be printed. */
365
366 static int stop_print_frame;
367
368 /* This is a cached copy of the pid/waitstatus of the last event
369 returned by target_wait()/deprecated_target_wait_hook(). This
370 information is returned by get_last_target_status(). */
371 static ptid_t target_last_wait_ptid;
372 static struct target_waitstatus target_last_waitstatus;
373
374 static void context_switch (ptid_t ptid);
375
376 void init_thread_stepping_state (struct thread_info *tss);
377
378 static void init_infwait_state (void);
379
380 static const char follow_fork_mode_child[] = "child";
381 static const char follow_fork_mode_parent[] = "parent";
382
383 static const char *const follow_fork_mode_kind_names[] = {
384 follow_fork_mode_child,
385 follow_fork_mode_parent,
386 NULL
387 };
388
389 static const char *follow_fork_mode_string = follow_fork_mode_parent;
390 static void
391 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
392 struct cmd_list_element *c, const char *value)
393 {
394 fprintf_filtered (file,
395 _("Debugger response to a program "
396 "call of fork or vfork is \"%s\".\n"),
397 value);
398 }
399 \f
400
401 /* Tell the target to follow the fork we're stopped at. Returns true
402 if the inferior should be resumed; false, if the target for some
403 reason decided it's best not to resume. */
404
405 static int
406 follow_fork (void)
407 {
408 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
409 int should_resume = 1;
410 struct thread_info *tp;
411
412 /* Copy user stepping state to the new inferior thread. FIXME: the
413 followed fork child thread should have a copy of most of the
414 parent thread structure's run control related fields, not just these.
415 Initialized to avoid "may be used uninitialized" warnings from gcc. */
416 struct breakpoint *step_resume_breakpoint = NULL;
417 struct breakpoint *exception_resume_breakpoint = NULL;
418 CORE_ADDR step_range_start = 0;
419 CORE_ADDR step_range_end = 0;
420 struct frame_id step_frame_id = { 0 };
421 struct interp *command_interp = NULL;
422
423 if (!non_stop)
424 {
425 ptid_t wait_ptid;
426 struct target_waitstatus wait_status;
427
428 /* Get the last target status returned by target_wait(). */
429 get_last_target_status (&wait_ptid, &wait_status);
430
431 /* If not stopped at a fork event, then there's nothing else to
432 do. */
433 if (wait_status.kind != TARGET_WAITKIND_FORKED
434 && wait_status.kind != TARGET_WAITKIND_VFORKED)
435 return 1;
436
437 /* Check if we switched over from WAIT_PTID, since the event was
438 reported. */
439 if (!ptid_equal (wait_ptid, minus_one_ptid)
440 && !ptid_equal (inferior_ptid, wait_ptid))
441 {
442 /* We did. Switch back to WAIT_PTID thread, to tell the
443 target to follow it (in either direction). We'll
444 afterwards refuse to resume, and inform the user what
445 happened. */
446 switch_to_thread (wait_ptid);
447 should_resume = 0;
448 }
449 }
450
451 tp = inferior_thread ();
452
453 /* If there were any forks/vforks that were caught and are now to be
454 followed, then do so now. */
455 switch (tp->pending_follow.kind)
456 {
457 case TARGET_WAITKIND_FORKED:
458 case TARGET_WAITKIND_VFORKED:
459 {
460 ptid_t parent, child;
461
462 /* If the user did a next/step, etc, over a fork call,
463 preserve the stepping state in the fork child. */
464 if (follow_child && should_resume)
465 {
466 step_resume_breakpoint = clone_momentary_breakpoint
467 (tp->control.step_resume_breakpoint);
468 step_range_start = tp->control.step_range_start;
469 step_range_end = tp->control.step_range_end;
470 step_frame_id = tp->control.step_frame_id;
471 exception_resume_breakpoint
472 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
473 command_interp = tp->control.command_interp;
474
475 /* For now, delete the parent's sr breakpoint, otherwise,
476 parent/child sr breakpoints are considered duplicates,
477 and the child version will not be installed. Remove
478 this when the breakpoints module becomes aware of
479 inferiors and address spaces. */
480 delete_step_resume_breakpoint (tp);
481 tp->control.step_range_start = 0;
482 tp->control.step_range_end = 0;
483 tp->control.step_frame_id = null_frame_id;
484 delete_exception_resume_breakpoint (tp);
485 tp->control.command_interp = NULL;
486 }
487
488 parent = inferior_ptid;
489 child = tp->pending_follow.value.related_pid;
490
491 /* Tell the target to do whatever is necessary to follow
492 either parent or child. */
493 if (target_follow_fork (follow_child, detach_fork))
494 {
495 /* Target refused to follow, or there's some other reason
496 we shouldn't resume. */
497 should_resume = 0;
498 }
499 else
500 {
501 /* This pending follow fork event is now handled, one way
502 or another.  The previously selected thread may be gone
503 from the lists by now, but if it is still around, we need
504 to clear the pending follow request. */
505 tp = find_thread_ptid (parent);
506 if (tp)
507 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
508
509 /* This makes sure we don't try to apply the "Switched
510 over from WAIT_PTID" logic above. */
511 nullify_last_target_wait_ptid ();
512
513 /* If we followed the child, switch to it... */
514 if (follow_child)
515 {
516 switch_to_thread (child);
517
518 /* ... and preserve the stepping state, in case the
519 user was stepping over the fork call. */
520 if (should_resume)
521 {
522 tp = inferior_thread ();
523 tp->control.step_resume_breakpoint
524 = step_resume_breakpoint;
525 tp->control.step_range_start = step_range_start;
526 tp->control.step_range_end = step_range_end;
527 tp->control.step_frame_id = step_frame_id;
528 tp->control.exception_resume_breakpoint
529 = exception_resume_breakpoint;
530 tp->control.command_interp = command_interp;
531 }
532 else
533 {
534 /* If we get here, it was because we're trying to
535 resume from a fork catchpoint, but, the user
536 has switched threads away from the thread that
537 forked. In that case, the resume command
538 issued is most likely not applicable to the
539 child, so just warn, and refuse to resume. */
540 warning (_("Not resuming: switched threads "
541 "before following fork child.\n"));
542 }
543
544 /* Reset breakpoints in the child as appropriate. */
545 follow_inferior_reset_breakpoints ();
546 }
547 else
548 switch_to_thread (parent);
549 }
550 }
551 break;
552 case TARGET_WAITKIND_SPURIOUS:
553 /* Nothing to follow. */
554 break;
555 default:
556 internal_error (__FILE__, __LINE__,
557 "Unexpected pending_follow.kind %d\n",
558 tp->pending_follow.kind);
559 break;
560 }
561
562 return should_resume;
563 }
564
565 void
566 follow_inferior_reset_breakpoints (void)
567 {
568 struct thread_info *tp = inferior_thread ();
569
570 /* Was there a step_resume breakpoint? (There was if the user
571 did a "next" at the fork() call.) If so, explicitly reset its
572 thread number. Cloned step_resume breakpoints are disabled on
573 creation, so enable it here now that it is associated with the
574 correct thread.
575
576 step_resumes are a form of bp that are made to be per-thread.
577 Since we created the step_resume bp when the parent process
578 was being debugged, and now are switching to the child process,
579 from the breakpoint package's viewpoint, that's a switch of
580 "threads". We must update the bp's notion of which thread
581 it is for, or it'll be ignored when it triggers. */
582
583 if (tp->control.step_resume_breakpoint)
584 {
585 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
586 tp->control.step_resume_breakpoint->loc->enabled = 1;
587 }
588
589 /* Treat exception_resume breakpoints like step_resume breakpoints. */
590 if (tp->control.exception_resume_breakpoint)
591 {
592 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
593 tp->control.exception_resume_breakpoint->loc->enabled = 1;
594 }
595
596 /* Reinsert all breakpoints in the child. The user may have set
597 breakpoints after catching the fork, in which case those
598 were never set in the child, but only in the parent. This makes
599 sure the inserted breakpoints match the breakpoint list. */
600
601 breakpoint_re_set ();
602 insert_breakpoints ();
603 }
604
605 /* The child has exited or execed: resume threads of the parent the
606 user wanted to be executing. */
607
608 static int
609 proceed_after_vfork_done (struct thread_info *thread,
610 void *arg)
611 {
612 int pid = * (int *) arg;
613
614 if (ptid_get_pid (thread->ptid) == pid
615 && is_running (thread->ptid)
616 && !is_executing (thread->ptid)
617 && !thread->stop_requested
618 && thread->suspend.stop_signal == GDB_SIGNAL_0)
619 {
620 if (debug_infrun)
621 fprintf_unfiltered (gdb_stdlog,
622 "infrun: resuming vfork parent thread %s\n",
623 target_pid_to_str (thread->ptid));
624
625 switch_to_thread (thread->ptid);
626 clear_proceed_status (0);
627 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT, 0);
628 }
629
630 return 0;
631 }
632
633 /* Called whenever we notice an exec or exit event, to handle
634 detaching or resuming a vfork parent. */
635
636 static void
637 handle_vfork_child_exec_or_exit (int exec)
638 {
639 struct inferior *inf = current_inferior ();
640
641 if (inf->vfork_parent)
642 {
643 int resume_parent = -1;
644
645 /* This exec or exit marks the end of the shared memory region
646 between the parent and the child. If the user wanted to
647 detach from the parent, now is the time. */
648
649 if (inf->vfork_parent->pending_detach)
650 {
651 struct thread_info *tp;
652 struct cleanup *old_chain;
653 struct program_space *pspace;
654 struct address_space *aspace;
655
656 /* follow-fork child, detach-on-fork on. */
657
658 inf->vfork_parent->pending_detach = 0;
659
660 if (!exec)
661 {
662 /* If we're handling a child exit, then inferior_ptid
663 points at the inferior's pid, not to a thread. */
664 old_chain = save_inferior_ptid ();
665 save_current_program_space ();
666 save_current_inferior ();
667 }
668 else
669 old_chain = save_current_space_and_thread ();
670
671 /* We're letting go of the parent. */
672 tp = any_live_thread_of_process (inf->vfork_parent->pid);
673 switch_to_thread (tp->ptid);
674
675 /* We're about to detach from the parent, which implicitly
676 removes breakpoints from its address space. There's a
677 catch here: we want to reuse the spaces for the child,
678 but, parent/child are still sharing the pspace at this
679 point, although the exec in reality makes the kernel give
680 the child a fresh set of new pages. The problem here is
681 that the breakpoints module, being unaware of this, would
682 likely choose the child process to write to the parent
683 address space. Swapping the child temporarily away from
684 the spaces has the desired effect. Yes, this is "sort
685 of" a hack. */
686
687 pspace = inf->pspace;
688 aspace = inf->aspace;
689 inf->aspace = NULL;
690 inf->pspace = NULL;
691
692 if (debug_infrun || info_verbose)
693 {
694 target_terminal_ours ();
695
696 if (exec)
697 fprintf_filtered (gdb_stdlog,
698 "Detaching vfork parent process "
699 "%d after child exec.\n",
700 inf->vfork_parent->pid);
701 else
702 fprintf_filtered (gdb_stdlog,
703 "Detaching vfork parent process "
704 "%d after child exit.\n",
705 inf->vfork_parent->pid);
706 }
707
708 target_detach (NULL, 0);
709
710 /* Put it back. */
711 inf->pspace = pspace;
712 inf->aspace = aspace;
713
714 do_cleanups (old_chain);
715 }
716 else if (exec)
717 {
718 /* We're staying attached to the parent, so, really give the
719 child a new address space. */
720 inf->pspace = add_program_space (maybe_new_address_space ());
721 inf->aspace = inf->pspace->aspace;
722 inf->removable = 1;
723 set_current_program_space (inf->pspace);
724
725 resume_parent = inf->vfork_parent->pid;
726
727 /* Break the bonds. */
728 inf->vfork_parent->vfork_child = NULL;
729 }
730 else
731 {
732 struct cleanup *old_chain;
733 struct program_space *pspace;
734
735 /* If this is a vfork child exiting, then the pspace and
736 aspaces were shared with the parent. Since we're
737 reporting the process exit, we'll be mourning all that is
738 found in the address space, and switching to null_ptid,
739 preparing to start a new inferior. But, since we don't
740 want to clobber the parent's address/program spaces, we
741 go ahead and create a new one for this exiting
742 inferior. */
743
744 /* Switch to null_ptid, so that clone_program_space doesn't want
745 to read the selected frame of a dead process. */
746 old_chain = save_inferior_ptid ();
747 inferior_ptid = null_ptid;
748
749 /* This inferior is dead, so avoid giving the breakpoints
750 module the option to write through to it (cloning a
751 program space resets breakpoints). */
752 inf->aspace = NULL;
753 inf->pspace = NULL;
754 pspace = add_program_space (maybe_new_address_space ());
755 set_current_program_space (pspace);
756 inf->removable = 1;
757 inf->symfile_flags = SYMFILE_NO_READ;
758 clone_program_space (pspace, inf->vfork_parent->pspace);
759 inf->pspace = pspace;
760 inf->aspace = pspace->aspace;
761
762 /* Put back inferior_ptid. We'll continue mourning this
763 inferior. */
764 do_cleanups (old_chain);
765
766 resume_parent = inf->vfork_parent->pid;
767 /* Break the bonds. */
768 inf->vfork_parent->vfork_child = NULL;
769 }
770
771 inf->vfork_parent = NULL;
772
773 gdb_assert (current_program_space == inf->pspace);
774
775 if (non_stop && resume_parent != -1)
776 {
777 /* If the user wanted the parent to be running, let it go
778 free now. */
779 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
780
781 if (debug_infrun)
782 fprintf_unfiltered (gdb_stdlog,
783 "infrun: resuming vfork parent process %d\n",
784 resume_parent);
785
786 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
787
788 do_cleanups (old_chain);
789 }
790 }
791 }
792
793 /* Enum strings for "set|show follow-exec-mode". */
794
795 static const char follow_exec_mode_new[] = "new";
796 static const char follow_exec_mode_same[] = "same";
797 static const char *const follow_exec_mode_names[] =
798 {
799 follow_exec_mode_new,
800 follow_exec_mode_same,
801 NULL,
802 };
803
804 static const char *follow_exec_mode_string = follow_exec_mode_same;
805 static void
806 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
807 struct cmd_list_element *c, const char *value)
808 {
809 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
810 }
811
812 /* EXECD_PATHNAME is assumed to be non-NULL. */
813
814 static void
815 follow_exec (ptid_t pid, char *execd_pathname)
816 {
817 struct thread_info *th = inferior_thread ();
818 struct inferior *inf = current_inferior ();
819
820 /* This is an exec event that we actually wish to pay attention to.
821 Refresh our symbol table to the newly exec'd program, remove any
822 momentary bp's, etc.
823
824 If there are breakpoints, they aren't really inserted now,
825 since the exec() transformed our inferior into a fresh set
826 of instructions.
827
828 We want to preserve symbolic breakpoints on the list, since
829 we have hopes that they can be reset after the new a.out's
830 symbol table is read.
831
832 However, any "raw" breakpoints must be removed from the list
833 (e.g., the solib bp's), since their address is probably invalid
834 now.
835
836 And, we DON'T want to call delete_breakpoints() here, since
837 that may write the bp's "shadow contents" (the instruction
838 value that was overwritten with a TRAP instruction). Since
839 we now have a new a.out, those shadow contents aren't valid. */
840
841 mark_breakpoints_out ();
842
843 update_breakpoints_after_exec ();
844
845 /* If there was one, it's gone now. We cannot truly step-to-next
846 statement through an exec(). */
847 th->control.step_resume_breakpoint = NULL;
848 th->control.exception_resume_breakpoint = NULL;
849 th->control.step_range_start = 0;
850 th->control.step_range_end = 0;
851
852 /* The target reports the exec event to the main thread, even if
853 some other thread does the exec, and even if the main thread was
854 already stopped --- if debugging in non-stop mode, it's possible
855 the user had the main thread held stopped in the previous image
856 --- release it now. This is the same behavior as step-over-exec
857 with scheduler-locking on in all-stop mode. */
858 th->stop_requested = 0;
859
860 /* What is this a.out's name? */
861 printf_unfiltered (_("%s is executing new program: %s\n"),
862 target_pid_to_str (inferior_ptid),
863 execd_pathname);
864
865 /* We've followed the inferior through an exec. Therefore, the
866 inferior has essentially been killed & reborn. */
867
868 gdb_flush (gdb_stdout);
869
870 breakpoint_init_inferior (inf_execd);
871
872 if (gdb_sysroot && *gdb_sysroot)
873 {
874 char *name = alloca (strlen (gdb_sysroot)
875 + strlen (execd_pathname)
876 + 1);
877
878 strcpy (name, gdb_sysroot);
879 strcat (name, execd_pathname);
880 execd_pathname = name;
881 }
882
883 /* Reset the shared library package. This ensures that we get a
884 shlib event when the child reaches "_start", at which point the
885 dld will have had a chance to initialize the child. */
886 /* Also, loading a symbol file below may trigger symbol lookups, and
887 we don't want those to be satisfied by the libraries of the
888 previous incarnation of this process. */
889 no_shared_libraries (NULL, 0);
890
891 if (follow_exec_mode_string == follow_exec_mode_new)
892 {
893 struct program_space *pspace;
894
895 /* The user wants to keep the old inferior and program spaces
896 around. Create a new fresh one, and switch to it. */
897
898 inf = add_inferior (current_inferior ()->pid);
899 pspace = add_program_space (maybe_new_address_space ());
900 inf->pspace = pspace;
901 inf->aspace = pspace->aspace;
902
903 exit_inferior_num_silent (current_inferior ()->num);
904
905 set_current_inferior (inf);
906 set_current_program_space (pspace);
907 }
908 else
909 {
910 /* The old description may no longer be fit for the new image.
911 E.g, a 64-bit process exec'ed a 32-bit process. Clear the
912 old description; we'll read a new one below. No need to do
913 this on "follow-exec-mode new", as the old inferior stays
914 around (its description is later cleared/refetched on
915 restart). */
916 target_clear_description ();
917 }
918
919 gdb_assert (current_program_space == inf->pspace);
920
921 /* That a.out is now the one to use. */
922 exec_file_attach (execd_pathname, 0);
923
924 /* SYMFILE_DEFER_BP_RESET is used so that the proper displacement for a PIE
925 (Position Independent Executable) main symbol file will first get applied
926 by solib_create_inferior_hook below; breakpoint_re_set would otherwise
927 fail to insert the breakpoints with the zero displacement. */
928
929 symbol_file_add (execd_pathname,
930 (inf->symfile_flags
931 | SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET),
932 NULL, 0);
933
934 if ((inf->symfile_flags & SYMFILE_NO_READ) == 0)
935 set_initial_language ();
936
937 /* If the target can specify a description, read it. Must do this
938 after flipping to the new executable (because the target supplied
939 description must be compatible with the executable's
940 architecture, and the old executable may e.g., be 32-bit, while
941 the new one 64-bit), and before anything involving memory or
942 registers. */
943 target_find_description ();
944
945 solib_create_inferior_hook (0);
946
947 jit_inferior_created_hook ();
948
949 breakpoint_re_set ();
950
951 /* Reinsert all breakpoints. (Those which were symbolic have
952 been reset to the proper address in the new a.out, thanks
953 to symbol_file_command...). */
954 insert_breakpoints ();
955
956 /* The next resume of this inferior should bring it to the shlib
957 startup breakpoints. (If the user had also set bp's on
958 "main" from the old (parent) process, then they'll auto-
959 matically get reset there in the new process.). */
960 }
961
962 /* Non-zero if we are just simulating a single-step. This is needed
963 because we cannot remove the breakpoints in the inferior process
964 until after the `wait' in `wait_for_inferior'. */
965 static int singlestep_breakpoints_inserted_p = 0;
966
967 /* The thread we inserted single-step breakpoints for. */
968 static ptid_t singlestep_ptid;
969
970 /* PC when we started this single-step. */
971 static CORE_ADDR singlestep_pc;
972
973 /* Info about an instruction that is being stepped over. Invalid if
974 ASPACE is NULL. */
975
976 struct step_over_info
977 {
978 /* The instruction's address space. */
979 struct address_space *aspace;
980
981 /* The instruction's address. */
982 CORE_ADDR address;
983 };
984
985 /* The step-over info of the location that is being stepped over.
986
987 Note that with async/breakpoint always-inserted mode, a user might
988 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
989 being stepped over. As setting a new breakpoint inserts all
990 breakpoints, we need to make sure the breakpoint being stepped over
991 isn't inserted then. We do that by only clearing the step-over
992 info when the step-over is actually finished (or aborted).
993
994 Presently GDB can only step over one breakpoint at any given time.
995 Given that threads that can't run code in the same address space as the
996 breakpoint's can't really miss the breakpoint, GDB could be taught
997 to step-over at most one breakpoint per address space (so this info
998 could move to the address space object if/when GDB is extended).
999 The set of breakpoints being stepped over will normally be much
1000 smaller than the set of all breakpoints, so a flag in the
1001 breakpoint location structure would be wasteful. A separate list
1002 also saves complexity and run-time, as otherwise we'd have to go
1003 through all breakpoint locations clearing their flag whenever we
1004 start a new sequence. Similar considerations weigh against storing
1005 this info in the thread object. Plus, not all step overs actually
1006 have breakpoint locations -- e.g., stepping past a single-step
1007 breakpoint, or stepping to complete a non-continuable
1008 watchpoint. */
1009 static struct step_over_info step_over_info;
1010
1011 /* Record the address of the breakpoint/instruction we're currently
1012 stepping over. */
1013
1014 static void
1015 set_step_over_info (struct address_space *aspace, CORE_ADDR address)
1016 {
1017 step_over_info.aspace = aspace;
1018 step_over_info.address = address;
1019 }
1020
1021 /* Called when we're no longer stepping over a breakpoint / an
1022 instruction, so all breakpoints are free to be (re)inserted. */
1023
1024 static void
1025 clear_step_over_info (void)
1026 {
1027 step_over_info.aspace = NULL;
1028 step_over_info.address = 0;
1029 }
1030
1031 /* See inferior.h. */
1032
1033 int
1034 stepping_past_instruction_at (struct address_space *aspace,
1035 CORE_ADDR address)
1036 {
1037 return (step_over_info.aspace != NULL
1038 && breakpoint_address_match (aspace, address,
1039 step_over_info.aspace,
1040 step_over_info.address));
1041 }
1042
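/* Editorial sketch, an assumption rather than original source: the
   predicate above is meant for the breakpoint insertion path, which
   can consult it to avoid re-inserting the very location that is being
   stepped over while the step-over is still in progress, e.g.:

     if (stepping_past_instruction_at (bl_aspace, bl_address))
       return 0;

   so that the insertion of that location is deferred for now.
   BL_ASPACE and BL_ADDRESS stand for a breakpoint location's address
   space and address; the names are placeholders.  */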
1043 \f
1044 /* Displaced stepping. */
1045
1046 /* In non-stop debugging mode, we must take special care to manage
1047 breakpoints properly; in particular, the traditional strategy for
1048 stepping a thread past a breakpoint it has hit is unsuitable.
1049 'Displaced stepping' is a tactic for stepping one thread past a
1050 breakpoint it has hit while ensuring that other threads running
1051 concurrently will hit the breakpoint as they should.
1052
1053 The traditional way to step a thread T off a breakpoint in a
1054 multi-threaded program in all-stop mode is as follows:
1055
1056 a0) Initially, all threads are stopped, and breakpoints are not
1057 inserted.
1058 a1) We single-step T, leaving breakpoints uninserted.
1059 a2) We insert breakpoints, and resume all threads.
1060
1061 In non-stop debugging, however, this strategy is unsuitable: we
1062 don't want to have to stop all threads in the system in order to
1063 continue or step T past a breakpoint. Instead, we use displaced
1064 stepping:
1065
1066 n0) Initially, T is stopped, other threads are running, and
1067 breakpoints are inserted.
1068 n1) We copy the instruction "under" the breakpoint to a separate
1069 location, outside the main code stream, making any adjustments
1070 to the instruction, register, and memory state as directed by
1071 T's architecture.
1072 n2) We single-step T over the instruction at its new location.
1073 n3) We adjust the resulting register and memory state as directed
1074 by T's architecture. This includes resetting T's PC to point
1075 back into the main instruction stream.
1076 n4) We resume T.
1077
1078 This approach depends on the following gdbarch methods:
1079
1080 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1081 indicate where to copy the instruction, and how much space must
1082 be reserved there. We use these in step n1.
1083
1084 - gdbarch_displaced_step_copy_insn copies an instruction to a new
1085 address, and makes any necessary adjustments to the instruction,
1086 register contents, and memory. We use this in step n1.
1087
1088 - gdbarch_displaced_step_fixup adjusts registers and memory after
1089 we have successfully single-stepped the instruction, to yield the
1090 same effect the instruction would have had if we had executed it
1091 at its original address. We use this in step n3.
1092
1093 - gdbarch_displaced_step_free_closure provides cleanup.
1094
1095 The gdbarch_displaced_step_copy_insn and
1096 gdbarch_displaced_step_fixup functions must be written so that
1097 copying an instruction with gdbarch_displaced_step_copy_insn,
1098 single-stepping across the copied instruction, and then applying
1099 gdbarch_displaced_step_fixup should have the same effects on the
1100 thread's memory and registers as stepping the instruction in place
1101 would have. Exactly which responsibilities fall to the copy and
1102 which fall to the fixup is up to the author of those functions.
1103
1104 See the comments in gdbarch.sh for details.
1105
1106 Note that displaced stepping and software single-step cannot
1107 currently be used in combination, although with some care I think
1108 they could be made to. Software single-step works by placing
1109 breakpoints on all possible subsequent instructions; if the
1110 displaced instruction is a PC-relative jump, those breakpoints
1111 could fall in very strange places --- on pages that aren't
1112 executable, or at addresses that are not proper instruction
1113 boundaries. (We do generally let other threads run while we wait
1114 to hit the software single-step breakpoint, and they might
1115 encounter such a corrupted instruction.) One way to work around
1116 this would be to have gdbarch_displaced_step_copy_insn fully
1117 simulate the effect of PC-relative instructions (and return NULL)
1118 on architectures that use software single-stepping.
1119
1120 In non-stop mode, we can have independent and simultaneous step
1121 requests, so more than one thread may need to simultaneously step
1122 over a breakpoint. The current implementation assumes there is
1123 only one scratch space per process. In this case, we have to
1124 serialize access to the scratch space. If thread A wants to step
1125 over a breakpoint, but we are currently waiting for some other
1126 thread to complete a displaced step, we leave thread A stopped and
1127 place it in the displaced_step_request_queue. Whenever a displaced
1128 step finishes, we pick the next thread in the queue and start a new
1129 displaced step operation on it. See displaced_step_prepare and
1130 displaced_step_fixup for details. */
1131
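/* Editorial sketch: expressed with the gdbarch hooks named above, the
   n1-n4 sequence for a single thread boils down to roughly the
   following (error handling and request queueing omitted; see
   displaced_step_prepare and displaced_step_fixup below for the real
   logic):

     copy = gdbarch_displaced_step_location (gdbarch);
     closure = gdbarch_displaced_step_copy_insn (gdbarch, original,
                                                 copy, regcache);
     regcache_write_pc (regcache, copy);
     target_resume (ptid, 1, GDB_SIGNAL_0);
     ... wait for the single-step to report back ...
     gdbarch_displaced_step_fixup (gdbarch, closure, original, copy,
                                   regcache);  */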
1132 struct displaced_step_request
1133 {
1134 ptid_t ptid;
1135 struct displaced_step_request *next;
1136 };
1137
1138 /* Per-inferior displaced stepping state. */
1139 struct displaced_step_inferior_state
1140 {
1141 /* Pointer to next in linked list. */
1142 struct displaced_step_inferior_state *next;
1143
1144 /* The process this displaced step state refers to. */
1145 int pid;
1146
1147 /* A queue of pending displaced stepping requests. One entry per
1148 thread that needs to do a displaced step. */
1149 struct displaced_step_request *step_request_queue;
1150
1151 /* If this is not null_ptid, this is the thread carrying out a
1152 displaced single-step in process PID. This thread's state will
1153 require fixing up once it has completed its step. */
1154 ptid_t step_ptid;
1155
1156 /* The architecture the thread had when we stepped it. */
1157 struct gdbarch *step_gdbarch;
1158
1159 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1160 for post-step cleanup. */
1161 struct displaced_step_closure *step_closure;
1162
1163 /* The address of the original instruction, and the copy we
1164 made. */
1165 CORE_ADDR step_original, step_copy;
1166
1167 /* Saved contents of copy area. */
1168 gdb_byte *step_saved_copy;
1169 };
1170
1171 /* The list of states of processes involved in displaced stepping
1172 presently. */
1173 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1174
1175 /* Get the displaced stepping state of process PID. */
1176
1177 static struct displaced_step_inferior_state *
1178 get_displaced_stepping_state (int pid)
1179 {
1180 struct displaced_step_inferior_state *state;
1181
1182 for (state = displaced_step_inferior_states;
1183 state != NULL;
1184 state = state->next)
1185 if (state->pid == pid)
1186 return state;
1187
1188 return NULL;
1189 }
1190
1191 /* Add a new displaced stepping state for process PID to the displaced
1192 stepping state list, or return a pointer to the already existing
1193 entry, if one exists. Never returns NULL. */
1194
1195 static struct displaced_step_inferior_state *
1196 add_displaced_stepping_state (int pid)
1197 {
1198 struct displaced_step_inferior_state *state;
1199
1200 for (state = displaced_step_inferior_states;
1201 state != NULL;
1202 state = state->next)
1203 if (state->pid == pid)
1204 return state;
1205
1206 state = xcalloc (1, sizeof (*state));
1207 state->pid = pid;
1208 state->next = displaced_step_inferior_states;
1209 displaced_step_inferior_states = state;
1210
1211 return state;
1212 }
1213
1214 /* If the inferior is in displaced stepping, and ADDR equals the starting
1215 address of the copy area, return the corresponding displaced_step_closure.
1216 Otherwise, return NULL. */
1217
1218 struct displaced_step_closure*
1219 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1220 {
1221 struct displaced_step_inferior_state *displaced
1222 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1223
1224 /* If checking the mode of displaced instruction in copy area. */
1225 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1226 && (displaced->step_copy == addr))
1227 return displaced->step_closure;
1228
1229 return NULL;
1230 }
1231
1232 /* Remove the displaced stepping state of process PID. */
1233
1234 static void
1235 remove_displaced_stepping_state (int pid)
1236 {
1237 struct displaced_step_inferior_state *it, **prev_next_p;
1238
1239 gdb_assert (pid != 0);
1240
1241 it = displaced_step_inferior_states;
1242 prev_next_p = &displaced_step_inferior_states;
1243 while (it)
1244 {
1245 if (it->pid == pid)
1246 {
1247 *prev_next_p = it->next;
1248 xfree (it);
1249 return;
1250 }
1251
1252 prev_next_p = &it->next;
1253 it = *prev_next_p;
1254 }
1255 }
1256
1257 static void
1258 infrun_inferior_exit (struct inferior *inf)
1259 {
1260 remove_displaced_stepping_state (inf->pid);
1261 }
1262
1263 /* If ON, and the architecture supports it, GDB will use displaced
1264 stepping to step over breakpoints. If OFF, or if the architecture
1265 doesn't support it, GDB will instead use the traditional
1266 hold-and-step approach. If AUTO (which is the default), GDB will
1267 decide which technique to use to step over breakpoints depending on
1268 which of all-stop or non-stop mode is active --- displaced stepping
1269 in non-stop mode; hold-and-step in all-stop mode. */
1270
1271 static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1272
1273 static void
1274 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1275 struct cmd_list_element *c,
1276 const char *value)
1277 {
1278 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
1279 fprintf_filtered (file,
1280 _("Debugger's willingness to use displaced stepping "
1281 "to step over breakpoints is %s (currently %s).\n"),
1282 value, non_stop ? "on" : "off");
1283 else
1284 fprintf_filtered (file,
1285 _("Debugger's willingness to use displaced stepping "
1286 "to step over breakpoints is %s.\n"), value);
1287 }
1288
1289 /* Return non-zero if displaced stepping can/should be used to step
1290 over breakpoints. */
1291
1292 static int
1293 use_displaced_stepping (struct gdbarch *gdbarch)
1294 {
1295 return (((can_use_displaced_stepping == AUTO_BOOLEAN_AUTO && non_stop)
1296 || can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1297 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1298 && find_record_target () == NULL);
1299 }
1300
1301 /* Clean out any stray displaced stepping state. */
1302 static void
1303 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1304 {
1305 /* Indicate that there is no cleanup pending. */
1306 displaced->step_ptid = null_ptid;
1307
1308 if (displaced->step_closure)
1309 {
1310 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1311 displaced->step_closure);
1312 displaced->step_closure = NULL;
1313 }
1314 }
1315
1316 static void
1317 displaced_step_clear_cleanup (void *arg)
1318 {
1319 struct displaced_step_inferior_state *state = arg;
1320
1321 displaced_step_clear (state);
1322 }
1323
1324 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1325 void
1326 displaced_step_dump_bytes (struct ui_file *file,
1327 const gdb_byte *buf,
1328 size_t len)
1329 {
1330 int i;
1331
1332 for (i = 0; i < len; i++)
1333 fprintf_unfiltered (file, "%02x ", buf[i]);
1334 fputs_unfiltered ("\n", file);
1335 }
1336
1337 /* Prepare to single-step, using displaced stepping.
1338
1339 Note that we cannot use displaced stepping when we have a signal to
1340 deliver. If we have a signal to deliver and an instruction to step
1341 over, then after the step, there will be no indication from the
1342 target whether the thread entered a signal handler or ignored the
1343 signal and stepped over the instruction successfully --- both cases
1344 result in a simple SIGTRAP. In the first case we mustn't do a
1345 fixup, and in the second case we must --- but we can't tell which.
1346 Comments in the code for 'random signals' in handle_inferior_event
1347 explain how we handle this case instead.
1348
1349 Returns 1 if preparing was successful -- this thread is going to be
1350 stepped now; or 0 if displaced stepping this thread got queued. */
1351 static int
1352 displaced_step_prepare (ptid_t ptid)
1353 {
1354 struct cleanup *old_cleanups, *ignore_cleanups;
1355 struct thread_info *tp = find_thread_ptid (ptid);
1356 struct regcache *regcache = get_thread_regcache (ptid);
1357 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1358 CORE_ADDR original, copy;
1359 ULONGEST len;
1360 struct displaced_step_closure *closure;
1361 struct displaced_step_inferior_state *displaced;
1362 int status;
1363
1364 /* We should never reach this function if the architecture does not
1365 support displaced stepping. */
1366 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1367
1368 /* Disable range stepping while executing in the scratch pad. We
1369 want a single-step even if executing the displaced instruction in
1370 the scratch buffer lands within the stepping range (e.g., a
1371 jump/branch). */
1372 tp->control.may_range_step = 0;
1373
1374 /* We have to displaced step one thread at a time, as we only have
1375 access to a single scratch space per inferior. */
1376
1377 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1378
1379 if (!ptid_equal (displaced->step_ptid, null_ptid))
1380 {
1381 /* Already waiting for a displaced step to finish. Defer this
1382 request and place in queue. */
1383 struct displaced_step_request *req, *new_req;
1384
1385 if (debug_displaced)
1386 fprintf_unfiltered (gdb_stdlog,
1387 "displaced: deferring step of %s\n",
1388 target_pid_to_str (ptid));
1389
1390 new_req = xmalloc (sizeof (*new_req));
1391 new_req->ptid = ptid;
1392 new_req->next = NULL;
1393
1394 if (displaced->step_request_queue)
1395 {
1396 for (req = displaced->step_request_queue;
1397 req && req->next;
1398 req = req->next)
1399 ;
1400 req->next = new_req;
1401 }
1402 else
1403 displaced->step_request_queue = new_req;
1404
1405 return 0;
1406 }
1407 else
1408 {
1409 if (debug_displaced)
1410 fprintf_unfiltered (gdb_stdlog,
1411 "displaced: stepping %s now\n",
1412 target_pid_to_str (ptid));
1413 }
1414
1415 displaced_step_clear (displaced);
1416
1417 old_cleanups = save_inferior_ptid ();
1418 inferior_ptid = ptid;
1419
1420 original = regcache_read_pc (regcache);
1421
1422 copy = gdbarch_displaced_step_location (gdbarch);
1423 len = gdbarch_max_insn_length (gdbarch);
1424
1425 /* Save the original contents of the copy area. */
1426 displaced->step_saved_copy = xmalloc (len);
1427 ignore_cleanups = make_cleanup (free_current_contents,
1428 &displaced->step_saved_copy);
1429 status = target_read_memory (copy, displaced->step_saved_copy, len);
1430 if (status != 0)
1431 throw_error (MEMORY_ERROR,
1432 _("Error accessing memory address %s (%s) for "
1433 "displaced-stepping scratch space."),
1434 paddress (gdbarch, copy), safe_strerror (status));
1435 if (debug_displaced)
1436 {
1437 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1438 paddress (gdbarch, copy));
1439 displaced_step_dump_bytes (gdb_stdlog,
1440 displaced->step_saved_copy,
1441 len);
1442 };
1443
1444 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1445 original, copy, regcache);
1446
1447 /* We don't support the fully-simulated case at present. */
1448 gdb_assert (closure);
1449
1450 /* Save the information we need to fix things up if the step
1451 succeeds. */
1452 displaced->step_ptid = ptid;
1453 displaced->step_gdbarch = gdbarch;
1454 displaced->step_closure = closure;
1455 displaced->step_original = original;
1456 displaced->step_copy = copy;
1457
1458 make_cleanup (displaced_step_clear_cleanup, displaced);
1459
1460 /* Resume execution at the copy. */
1461 regcache_write_pc (regcache, copy);
1462
1463 discard_cleanups (ignore_cleanups);
1464
1465 do_cleanups (old_cleanups);
1466
1467 if (debug_displaced)
1468 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1469 paddress (gdbarch, copy));
1470
1471 return 1;
1472 }
1473
1474 static void
1475 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1476 const gdb_byte *myaddr, int len)
1477 {
1478 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1479
1480 inferior_ptid = ptid;
1481 write_memory (memaddr, myaddr, len);
1482 do_cleanups (ptid_cleanup);
1483 }
1484
1485 /* Restore the contents of the copy area for thread PTID. */
1486
1487 static void
1488 displaced_step_restore (struct displaced_step_inferior_state *displaced,
1489 ptid_t ptid)
1490 {
1491 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1492
1493 write_memory_ptid (ptid, displaced->step_copy,
1494 displaced->step_saved_copy, len);
1495 if (debug_displaced)
1496 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
1497 target_pid_to_str (ptid),
1498 paddress (displaced->step_gdbarch,
1499 displaced->step_copy));
1500 }
1501
1502 static void
1503 displaced_step_fixup (ptid_t event_ptid, enum gdb_signal signal)
1504 {
1505 struct cleanup *old_cleanups;
1506 struct displaced_step_inferior_state *displaced
1507 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1508
1509 /* Was any thread of this process doing a displaced step? */
1510 if (displaced == NULL)
1511 return;
1512
1513 /* Was this event for the pid we displaced? */
1514 if (ptid_equal (displaced->step_ptid, null_ptid)
1515 || ! ptid_equal (displaced->step_ptid, event_ptid))
1516 return;
1517
1518 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1519
1520 displaced_step_restore (displaced, displaced->step_ptid);
1521
1522 /* Did the instruction complete successfully? */
1523 if (signal == GDB_SIGNAL_TRAP)
1524 {
1525 /* Fix up the resulting state. */
1526 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1527 displaced->step_closure,
1528 displaced->step_original,
1529 displaced->step_copy,
1530 get_thread_regcache (displaced->step_ptid));
1531 }
1532 else
1533 {
1534 /* Since the instruction didn't complete, all we can do is
1535 relocate the PC. */
1536 struct regcache *regcache = get_thread_regcache (event_ptid);
1537 CORE_ADDR pc = regcache_read_pc (regcache);
1538
1539 pc = displaced->step_original + (pc - displaced->step_copy);
1540 regcache_write_pc (regcache, pc);
1541 }
1542
1543 do_cleanups (old_cleanups);
1544
1545 displaced->step_ptid = null_ptid;
1546
1547 /* Are there any pending displaced stepping requests? If so, run
1548 one now. Leave the state object around, since we're likely to
1549 need it again soon. */
1550 while (displaced->step_request_queue)
1551 {
1552 struct displaced_step_request *head;
1553 ptid_t ptid;
1554 struct regcache *regcache;
1555 struct gdbarch *gdbarch;
1556 CORE_ADDR actual_pc;
1557 struct address_space *aspace;
1558
1559 head = displaced->step_request_queue;
1560 ptid = head->ptid;
1561 displaced->step_request_queue = head->next;
1562 xfree (head);
1563
1564 context_switch (ptid);
1565
1566 regcache = get_thread_regcache (ptid);
1567 actual_pc = regcache_read_pc (regcache);
1568 aspace = get_regcache_aspace (regcache);
1569
1570 if (breakpoint_here_p (aspace, actual_pc))
1571 {
1572 if (debug_displaced)
1573 fprintf_unfiltered (gdb_stdlog,
1574 "displaced: stepping queued %s now\n",
1575 target_pid_to_str (ptid));
1576
1577 displaced_step_prepare (ptid);
1578
1579 gdbarch = get_regcache_arch (regcache);
1580
1581 if (debug_displaced)
1582 {
1583 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1584 gdb_byte buf[4];
1585
1586 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1587 paddress (gdbarch, actual_pc));
1588 read_memory (actual_pc, buf, sizeof (buf));
1589 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1590 }
1591
1592 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1593 displaced->step_closure))
1594 target_resume (ptid, 1, GDB_SIGNAL_0);
1595 else
1596 target_resume (ptid, 0, GDB_SIGNAL_0);
1597
1598 /* Done, we're stepping a thread. */
1599 break;
1600 }
1601 else
1602 {
1603 int step;
1604 struct thread_info *tp = inferior_thread ();
1605
1606 /* The breakpoint we were sitting under has since been
1607 removed. */
1608 tp->control.trap_expected = 0;
1609
1610 /* Go back to what we were trying to do. */
1611 step = currently_stepping (tp);
1612
1613 if (debug_displaced)
1614 fprintf_unfiltered (gdb_stdlog,
1615 "displaced: breakpoint is gone: %s, step(%d)\n",
1616 target_pid_to_str (tp->ptid), step);
1617
1618 target_resume (ptid, step, GDB_SIGNAL_0);
1619 tp->suspend.stop_signal = GDB_SIGNAL_0;
1620
1621 /* This request was discarded. See if there's any other
1622 thread waiting for its turn. */
1623 }
1624 }
1625 }
1626
1627 /* Update global variables holding ptids to hold NEW_PTID if they were
1628 holding OLD_PTID. */
1629 static void
1630 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1631 {
1632 struct displaced_step_request *it;
1633 struct displaced_step_inferior_state *displaced;
1634
1635 if (ptid_equal (inferior_ptid, old_ptid))
1636 inferior_ptid = new_ptid;
1637
1638 if (ptid_equal (singlestep_ptid, old_ptid))
1639 singlestep_ptid = new_ptid;
1640
1641 for (displaced = displaced_step_inferior_states;
1642 displaced;
1643 displaced = displaced->next)
1644 {
1645 if (ptid_equal (displaced->step_ptid, old_ptid))
1646 displaced->step_ptid = new_ptid;
1647
1648 for (it = displaced->step_request_queue; it; it = it->next)
1649 if (ptid_equal (it->ptid, old_ptid))
1650 it->ptid = new_ptid;
1651 }
1652 }
1653
1654 \f
1655 /* Resuming. */
1656
1657 /* Things to clean up if we QUIT out of resume (). */
1658 static void
1659 resume_cleanups (void *ignore)
1660 {
1661 normal_stop ();
1662 }
1663
1664 static const char schedlock_off[] = "off";
1665 static const char schedlock_on[] = "on";
1666 static const char schedlock_step[] = "step";
1667 static const char *const scheduler_enums[] = {
1668 schedlock_off,
1669 schedlock_on,
1670 schedlock_step,
1671 NULL
1672 };
1673 static const char *scheduler_mode = schedlock_off;
1674 static void
1675 show_scheduler_mode (struct ui_file *file, int from_tty,
1676 struct cmd_list_element *c, const char *value)
1677 {
1678 fprintf_filtered (file,
1679 _("Mode for locking scheduler "
1680 "during execution is \"%s\".\n"),
1681 value);
1682 }
1683
1684 static void
1685 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1686 {
1687 if (!target_can_lock_scheduler)
1688 {
1689 scheduler_mode = schedlock_off;
1690 error (_("Target '%s' cannot support this command."), target_shortname);
1691 }
1692 }
1693
1694 /* True if execution commands resume all threads of all processes by
1695 default; otherwise, resume only threads of the current inferior
1696 process. */
1697 int sched_multi = 0;
1698
1699 /* Try to set up for software single stepping over the specified location.
1700 Return 1 if target_resume() should use hardware single step.
1701
1702 GDBARCH the current gdbarch.
1703 PC the location to step over. */
1704
1705 static int
1706 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1707 {
1708 int hw_step = 1;
1709
1710 if (execution_direction == EXEC_FORWARD
1711 && gdbarch_software_single_step_p (gdbarch)
1712 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1713 {
1714 hw_step = 0;
1715 /* Do not pull these breakpoints until after a `wait' in
1716 `wait_for_inferior'. */
1717 singlestep_breakpoints_inserted_p = 1;
1718 singlestep_ptid = inferior_ptid;
1719 singlestep_pc = pc;
1720 }
1721 return hw_step;
1722 }
1723
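/* Return the ptid that an execution command should be reported as
   resuming, from the user/frontend's point of view: all threads of
   all processes, all threads of the current process, or only the
   current thread, depending on "set schedule-multiple", non-stop mode
   and "set scheduler-locking" (a summary only; the body below holds
   the authoritative rules).  */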
1724 ptid_t
1725 user_visible_resume_ptid (int step)
1726 {
1727 /* By default, resume all threads of all processes. */
1728 ptid_t resume_ptid = RESUME_ALL;
1729
1730 /* Maybe resume only all threads of the current process. */
1731 if (!sched_multi && target_supports_multi_process ())
1732 {
1733 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1734 }
1735
1736 /* Maybe resume a single thread after all. */
1737 if (non_stop)
1738 {
1739 /* With non-stop mode on, threads are always handled
1740 individually. */
1741 resume_ptid = inferior_ptid;
1742 }
1743 else if ((scheduler_mode == schedlock_on)
1744 || (scheduler_mode == schedlock_step
1745 && (step || singlestep_breakpoints_inserted_p)))
1746 {
1747 /* User-settable 'scheduler' mode requires solo thread resume. */
1748 resume_ptid = inferior_ptid;
1749 }
1750
1751 /* We may actually resume fewer threads at first, e.g., if a thread
1752 is stopped at a breakpoint that needs stepping-off, but that
1753 should not be visible to the user/frontend, and neither should
1754 the frontend/user be allowed to proceed any of the threads that
1755 happen to be stopped for internal run control handling, if a
1756 previous command wanted them resumed. */
1757 return resume_ptid;
1758 }
1759
1760 /* Resume the inferior, but allow a QUIT. This is useful if the user
1761 wants to interrupt some lengthy single-stepping operation
1762 (for child processes, the SIGINT goes to the inferior, and so
1763 we get a SIGINT random_signal, but for remote debugging and perhaps
1764 other targets, that's not true).
1765
1766 STEP nonzero if we should step (zero to continue instead).
1767 SIG is the signal to give the inferior (zero for none). */
1768 void
1769 resume (int step, enum gdb_signal sig)
1770 {
1771 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1772 struct regcache *regcache = get_current_regcache ();
1773 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1774 struct thread_info *tp = inferior_thread ();
1775 CORE_ADDR pc = regcache_read_pc (regcache);
1776 struct address_space *aspace = get_regcache_aspace (regcache);
1777 ptid_t resume_ptid;
1778 /* From here on, this represents the caller's step vs continue
1779 request, while STEP represents what we'll actually request the
1780 target to do. STEP can decay from a step to a continue, if e.g.,
1781 we need to implement single-stepping with breakpoints (software
1782 single-step). When deciding whether "set scheduler-locking step"
1783 applies, it's the caller's intention that counts. */
1784 const int entry_step = step;
1785
1786 QUIT;
1787
1788 if (current_inferior ()->waiting_for_vfork_done)
1789 {
1790 /* Don't try to single-step a vfork parent that is waiting for
1791 the child to get out of the shared memory region (by exec'ing
1792 or exiting). This is particularly important on software
1793 single-step archs, as the child process would trip on the
1794 software single step breakpoint inserted for the parent
1795 process. Since the parent will not actually execute any
1796 instruction until the child is out of the shared region (such
1797 are vfork's semantics), it is safe to simply continue it.
1798 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
1799 the parent, and tell it to `keep_going', which automatically
1800 re-sets it to stepping. */
1801 if (debug_infrun)
1802 fprintf_unfiltered (gdb_stdlog,
1803 "infrun: resume : clear step\n");
1804 step = 0;
1805 }
1806
1807 if (debug_infrun)
1808 fprintf_unfiltered (gdb_stdlog,
1809 "infrun: resume (step=%d, signal=%s), "
1810 "trap_expected=%d, current thread [%s] at %s\n",
1811 step, gdb_signal_to_symbol_string (sig),
1812 tp->control.trap_expected,
1813 target_pid_to_str (inferior_ptid),
1814 paddress (gdbarch, pc));
1815
1816 /* Normally, by the time we reach `resume', the breakpoints are either
1817 removed or inserted, as appropriate. The exception is if we're sitting
1818 at a permanent breakpoint; we need to step over it, but permanent
1819 breakpoints can't be removed. So we have to test for it here. */
1820 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1821 {
1822 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1823 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1824 else
1825 error (_("\
1826 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1827 how to step past a permanent breakpoint on this architecture. Try using\n\
1828 a command like `return' or `jump' to continue execution."));
1829 }
1830
1831 /* If we have a breakpoint to step over, make sure to do a single
1832 step only. Same if we have software watchpoints. */
1833 if (tp->control.trap_expected || bpstat_should_step ())
1834 tp->control.may_range_step = 0;
1835
1836 /* If enabled, step over breakpoints by executing a copy of the
1837 instruction at a different address.
1838
1839 We can't use displaced stepping when we have a signal to deliver;
1840 the comments for displaced_step_prepare explain why. The
1841 comments in the handle_inferior event for dealing with 'random
1842 signals' explain what we do instead.
1843
1844 We can't use displaced stepping when we are waiting for a vfork_done
1845 event; displaced stepping would break the vfork child in the same way
1846 a software single-step breakpoint does. */
1847 if (use_displaced_stepping (gdbarch)
1848 && (tp->control.trap_expected
1849 || (step && gdbarch_software_single_step_p (gdbarch)))
1850 && sig == GDB_SIGNAL_0
1851 && !current_inferior ()->waiting_for_vfork_done)
1852 {
1853 struct displaced_step_inferior_state *displaced;
1854
1855 if (!displaced_step_prepare (inferior_ptid))
1856 {
1857 /* Got placed in displaced stepping queue. Will be resumed
1858 later when all the currently queued displaced stepping
1859 requests finish. The thread is not executing at this
1860 point, and the call to set_executing will be made later.
1861 But we need to call set_running here, since from the
1862 user/frontend's point of view, threads were set running.
1863 Unless we're calling an inferior function, as in that
1864 case we pretend the inferior doesn't run at all. */
1865 if (!tp->control.in_infcall)
1866 set_running (user_visible_resume_ptid (entry_step), 1);
1867 discard_cleanups (old_cleanups);
1868 return;
1869 }
1870
1871 /* Update pc to reflect the new address from which we will execute
1872 instructions due to displaced stepping. */
1873 pc = regcache_read_pc (get_thread_regcache (inferior_ptid));
1874
1875 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1876 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1877 displaced->step_closure);
1878 }
1879
1880 /* Do we need to do it the hard way, w/temp breakpoints? */
1881 else if (step)
1882 step = maybe_software_singlestep (gdbarch, pc);
1883
1884 /* Currently, our software single-step implementation leads to different
1885 results than hardware single-stepping in one situation: when stepping
1886 into delivering a signal which has an associated signal handler,
1887 hardware single-step will stop at the first instruction of the handler,
1888 while software single-step will simply skip execution of the handler.
1889
1890 For now, this difference in behavior is accepted since there is no
1891 easy way to actually implement single-stepping into a signal handler
1892 without kernel support.
1893
1894 However, there is one scenario where this difference leads to follow-on
1895 problems: if we're stepping off a breakpoint by removing all breakpoints
1896 and then single-stepping. In this case, the software single-step
1897 behavior means that even if there is a *breakpoint* in the signal
1898 handler, GDB still would not stop.
1899
1900 Fortunately, we can at least fix this particular issue. We detect
1901 here the case where we are about to deliver a signal while software
1902 single-stepping with breakpoints removed. In this situation, we
1903 revert the decisions to remove all breakpoints and insert single-
1904 step breakpoints, and instead we install a step-resume breakpoint
1905 at the current address, deliver the signal without stepping, and
1906 once we arrive back at the step-resume breakpoint, actually step
1907 over the breakpoint we originally wanted to step over. */
1908 if (singlestep_breakpoints_inserted_p
1909 && tp->control.trap_expected && sig != GDB_SIGNAL_0)
1910 {
1911 /* If we have nested signals or a pending signal is delivered
1912 immediately after a handler returns, we might already have
1913 a step-resume breakpoint set on the earlier handler. We cannot
1914 set another step-resume breakpoint; just continue on until the
1915 original breakpoint is hit. */
1916 if (tp->control.step_resume_breakpoint == NULL)
1917 {
1918 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
1919 tp->step_after_step_resume_breakpoint = 1;
1920 }
1921
1922 remove_single_step_breakpoints ();
1923 singlestep_breakpoints_inserted_p = 0;
1924
1925 clear_step_over_info ();
1926 tp->control.trap_expected = 0;
1927
1928 insert_breakpoints ();
1929 }
1930
1931 /* If STEP is set, it's a request to use hardware stepping
1932 facilities. But in that case, we should never
1933 use singlestep breakpoints. */
1934 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1935
1936 /* Decide the set of threads to ask the target to resume. Start
1937 by assuming everything will be resumed, then narrow the set
1938 by applying increasingly restricting conditions. */
1939 resume_ptid = user_visible_resume_ptid (entry_step);
1940
1941 /* Even if RESUME_PTID is a wildcard, and we end up resuming less
1942 (e.g., we might need to step over a breakpoint), from the
1943 user/frontend's point of view, all threads in RESUME_PTID are now
1944 running. Unless we're calling an inferior function, as in that
1945 case we pretend the inferior doesn't run at all. */
1946 if (!tp->control.in_infcall)
1947 set_running (resume_ptid, 1);
1948
1949 /* Maybe resume a single thread after all. */
1950 if ((step || singlestep_breakpoints_inserted_p)
1951 && tp->control.trap_expected)
1952 {
1953 /* We're allowing a thread to run past a breakpoint it has
1954 hit, by single-stepping the thread with the breakpoint
1955 removed. In which case, we need to single-step only this
1956 thread, and keep others stopped, as they can miss this
1957 breakpoint if allowed to run. */
1958 resume_ptid = inferior_ptid;
1959 }
1960
1961 if (gdbarch_cannot_step_breakpoint (gdbarch))
1962 {
1963 /* Most targets can step a breakpoint instruction, thus
1964 executing it normally. But if this one cannot, just
1965 continue and we will hit it anyway. */
1966 if (step && breakpoint_inserted_here_p (aspace, pc))
1967 step = 0;
1968 }
1969
1970 if (debug_displaced
1971 && use_displaced_stepping (gdbarch)
1972 && tp->control.trap_expected)
1973 {
1974 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1975 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1976 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1977 gdb_byte buf[4];
1978
1979 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1980 paddress (resume_gdbarch, actual_pc));
1981 read_memory (actual_pc, buf, sizeof (buf));
1982 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1983 }
1984
1985 if (tp->control.may_range_step)
1986 {
1987 /* If we're resuming a thread with the PC out of the step
1988 range, then we're doing some nested/finer run control
1989 operation, like stepping the thread out of the dynamic
1990 linker or the displaced stepping scratch pad. We
1991 shouldn't have allowed a range step then. */
1992 gdb_assert (pc_in_thread_step_range (pc, tp));
1993 }
1994
1995 /* Install inferior's terminal modes. */
1996 target_terminal_inferior ();
1997
1998 /* Avoid confusing the next resume, if the next stop/resume
1999 happens to apply to another thread. */
2000 tp->suspend.stop_signal = GDB_SIGNAL_0;
2001
2002 /* Advise target which signals may be handled silently. If we have
2003 removed breakpoints because we are stepping over one (which can
2004 happen only if we are not using displaced stepping), we need to
2005 receive all signals to avoid accidentally skipping a breakpoint
2006 during execution of a signal handler. */
2007 if ((step || singlestep_breakpoints_inserted_p)
2008 && tp->control.trap_expected
2009 && !use_displaced_stepping (gdbarch))
2010 target_pass_signals (0, NULL);
2011 else
2012 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
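/* For reference, SIGNAL_PASS here reflects the user's "handle
   <signal> pass/nopass" settings, so a signal set to e.g. "nostop
   noprint pass" can normally be delivered by the target without
   reporting a stop to GDB; the (0, NULL) case above temporarily
   withdraws that shortcut while a breakpoint is being stepped over
   with breakpoints removed.  */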
2013
2014 target_resume (resume_ptid, step, sig);
2015
2016 discard_cleanups (old_cleanups);
2017 }
2018 \f
2019 /* Proceeding. */
2020
2021 /* Clear out all variables saying what to do when inferior is continued.
2022 First do this, then set the ones you want, then call `proceed'. */
2023
2024 static void
2025 clear_proceed_status_thread (struct thread_info *tp)
2026 {
2027 if (debug_infrun)
2028 fprintf_unfiltered (gdb_stdlog,
2029 "infrun: clear_proceed_status_thread (%s)\n",
2030 target_pid_to_str (tp->ptid));
2031
2032 /* If this signal should not be seen by the program, give it zero.
2033 Used for debugging signals. */
2034 if (!signal_pass_state (tp->suspend.stop_signal))
2035 tp->suspend.stop_signal = GDB_SIGNAL_0;
2036
2037 tp->control.trap_expected = 0;
2038 tp->control.step_range_start = 0;
2039 tp->control.step_range_end = 0;
2040 tp->control.may_range_step = 0;
2041 tp->control.step_frame_id = null_frame_id;
2042 tp->control.step_stack_frame_id = null_frame_id;
2043 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
2044 tp->stop_requested = 0;
2045
2046 tp->control.stop_step = 0;
2047
2048 tp->control.proceed_to_finish = 0;
2049
2050 tp->control.command_interp = NULL;
2051
2052 /* Discard any remaining commands or status from previous stop. */
2053 bpstat_clear (&tp->control.stop_bpstat);
2054 }
2055
2056 void
2057 clear_proceed_status (int step)
2058 {
2059 if (!non_stop)
2060 {
2061 struct thread_info *tp;
2062 ptid_t resume_ptid;
2063
2064 resume_ptid = user_visible_resume_ptid (step);
2065
2066 /* In all-stop mode, delete the per-thread status of all threads
2067 we're about to resume, implicitly and explicitly. */
2068 ALL_NON_EXITED_THREADS (tp)
2069 {
2070 if (!ptid_match (tp->ptid, resume_ptid))
2071 continue;
2072 clear_proceed_status_thread (tp);
2073 }
2074 }
2075
2076 if (!ptid_equal (inferior_ptid, null_ptid))
2077 {
2078 struct inferior *inferior;
2079
2080 if (non_stop)
2081 {
2082 /* If in non-stop mode, only delete the per-thread status of
2083 the current thread. */
2084 clear_proceed_status_thread (inferior_thread ());
2085 }
2086
2087 inferior = current_inferior ();
2088 inferior->control.stop_soon = NO_STOP_QUIETLY;
2089 }
2090
2091 stop_after_trap = 0;
2092
2093 clear_step_over_info ();
2094
2095 observer_notify_about_to_proceed ();
2096
2097 if (stop_registers)
2098 {
2099 regcache_xfree (stop_registers);
2100 stop_registers = NULL;
2101 }
2102 }
2103
2104 /* Returns true if TP is still stopped at a breakpoint that needs
2105 stepping-over in order to make progress. If the breakpoint is gone
2106 meanwhile, we can skip the whole step-over dance. */
2107
2108 static int
2109 thread_still_needs_step_over (struct thread_info *tp)
2110 {
2111 if (tp->stepping_over_breakpoint)
2112 {
2113 struct regcache *regcache = get_thread_regcache (tp->ptid);
2114
2115 if (breakpoint_here_p (get_regcache_aspace (regcache),
2116 regcache_read_pc (regcache)))
2117 return 1;
2118
2119 tp->stepping_over_breakpoint = 0;
2120 }
2121
2122 return 0;
2123 }
2124
2125 /* Returns true if scheduler locking applies. STEP indicates whether
2126 we're about to do a step/next-like command to a thread. */
2127
2128 static int
2129 schedlock_applies (int step)
2130 {
2131 return (scheduler_mode == schedlock_on
2132 || (scheduler_mode == schedlock_step
2133 && step));
2134 }
2135
2136 /* Look for a thread other than EXCEPT that has previously reported a
2137 breakpoint event, and thus needs a step-over in order to make
2138 progress. Returns NULL if none is found. STEP indicates whether
2139 we're about to step the current thread, in order to decide whether
2140 "set scheduler-locking step" applies. */
2141
2142 static struct thread_info *
2143 find_thread_needs_step_over (int step, struct thread_info *except)
2144 {
2145 struct thread_info *tp, *current;
2146
2147 /* With non-stop mode on, threads are always handled individually. */
2148 gdb_assert (! non_stop);
2149
2150 current = inferior_thread ();
2151
2152 /* If scheduler locking applies, we can avoid iterating over all
2153 threads. */
2154 if (schedlock_applies (step))
2155 {
2156 if (except != current
2157 && thread_still_needs_step_over (current))
2158 return current;
2159
2160 return NULL;
2161 }
2162
2163 ALL_NON_EXITED_THREADS (tp)
2164 {
2165 /* Ignore the EXCEPT thread. */
2166 if (tp == except)
2167 continue;
2168 /* Ignore threads of processes we're not resuming. */
2169 if (!sched_multi
2170 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
2171 continue;
2172
2173 if (thread_still_needs_step_over (tp))
2174 return tp;
2175 }
2176
2177 return NULL;
2178 }
2179
2180 /* Basic routine for continuing the program in various fashions.
2181
2182 ADDR is the address to resume at, or -1 for resume where stopped.
2183 SIGGNAL is the signal to give it, or 0 for none,
2184 or -1 for act according to how it stopped.
2185 STEP is nonzero if we should trap after one instruction.
2186 -1 means return after that and print nothing.
2187 You should probably set various step_... variables
2188 before calling here, if you are stepping.
2189
2190 You should call clear_proceed_status before calling proceed. */
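/* For illustration, a "continue"-style caller typically does roughly:

     clear_proceed_status (0);
     proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT, 0);

   i.e. resume where stopped, act on the signal according to how the
   thread stopped and the "handle" settings, and do not single-step.  */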
2191
2192 void
2193 proceed (CORE_ADDR addr, enum gdb_signal siggnal, int step)
2194 {
2195 struct regcache *regcache;
2196 struct gdbarch *gdbarch;
2197 struct thread_info *tp;
2198 CORE_ADDR pc;
2199 struct address_space *aspace;
2200
2201 /* If we're stopped at a fork/vfork, follow the branch set by the
2202 "set follow-fork-mode" command; otherwise, we'll just proceed
2203 resuming the current thread. */
2204 if (!follow_fork ())
2205 {
2206 /* The target for some reason decided not to resume. */
2207 normal_stop ();
2208 if (target_can_async_p ())
2209 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2210 return;
2211 }
2212
2213 /* We'll update this if & when we switch to a new thread. */
2214 previous_inferior_ptid = inferior_ptid;
2215
2216 regcache = get_current_regcache ();
2217 gdbarch = get_regcache_arch (regcache);
2218 aspace = get_regcache_aspace (regcache);
2219 pc = regcache_read_pc (regcache);
2220 tp = inferior_thread ();
2221
2222 if (step > 0)
2223 step_start_function = find_pc_function (pc);
2224 if (step < 0)
2225 stop_after_trap = 1;
2226
2227 /* Fill in with reasonable starting values. */
2228 init_thread_stepping_state (tp);
2229
2230 if (addr == (CORE_ADDR) -1)
2231 {
2232 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
2233 && execution_direction != EXEC_REVERSE)
2234 /* There is a breakpoint at the address we will resume at,
2235 step one instruction before inserting breakpoints so that
2236 we do not stop right away (and report a second hit at this
2237 breakpoint).
2238
2239 Note, we don't do this in reverse, because we won't
2240 actually be executing the breakpoint insn anyway.
2241 We'll be (un-)executing the previous instruction. */
2242 tp->stepping_over_breakpoint = 1;
2243 else if (gdbarch_single_step_through_delay_p (gdbarch)
2244 && gdbarch_single_step_through_delay (gdbarch,
2245 get_current_frame ()))
2246 /* We stepped onto an instruction that needs to be stepped
2247 again before re-inserting the breakpoint, do so. */
2248 tp->stepping_over_breakpoint = 1;
2249 }
2250 else
2251 {
2252 regcache_write_pc (regcache, addr);
2253 }
2254
2255 if (siggnal != GDB_SIGNAL_DEFAULT)
2256 tp->suspend.stop_signal = siggnal;
2257
2258 /* Record the interpreter that issued the execution command that
2259 caused this thread to resume. If the top level interpreter is
2260 MI/async, and the execution command was a CLI command
2261 (next/step/etc.), we'll want to print stop event output to the MI
2262 console channel (the stepped-to line, etc.), as if the user
2263 entered the execution command on a real GDB console. */
2264 inferior_thread ()->control.command_interp = command_interp ();
2265
2266 if (debug_infrun)
2267 fprintf_unfiltered (gdb_stdlog,
2268 "infrun: proceed (addr=%s, signal=%s, step=%d)\n",
2269 paddress (gdbarch, addr),
2270 gdb_signal_to_symbol_string (siggnal), step);
2271
2272 if (non_stop)
2273 /* In non-stop, each thread is handled individually. The context
2274 must already be set to the right thread here. */
2275 ;
2276 else
2277 {
2278 struct thread_info *step_over;
2279
2280 /* In a multi-threaded task we may select another thread and
2281 then continue or step.
2282
2283 But if the old thread was stopped at a breakpoint, it will
2284 immediately cause another breakpoint stop without any
2285 execution (i.e. it will report a breakpoint hit incorrectly).
2286 So we must step over it first.
2287
2288 Look for a thread other than the current (TP) that reported a
2289 breakpoint hit and hasn't been resumed yet since. */
2290 step_over = find_thread_needs_step_over (step, tp);
2291 if (step_over != NULL)
2292 {
2293 if (debug_infrun)
2294 fprintf_unfiltered (gdb_stdlog,
2295 "infrun: need to step-over [%s] first\n",
2296 target_pid_to_str (step_over->ptid));
2297
2298 /* Store the prev_pc for the stepping thread too, needed by
2299 switch_back_to_stepped_thread. */
2300 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2301 switch_to_thread (step_over->ptid);
2302 tp = step_over;
2303 }
2304 }
2305
2306 /* If we need to step over a breakpoint, and we're not using
2307 displaced stepping to do so, insert all breakpoints (watchpoints,
2308 etc.) but the one we're stepping over, step one instruction, and
2309 then re-insert the breakpoint when that step is finished. */
2310 if (tp->stepping_over_breakpoint && !use_displaced_stepping (gdbarch))
2311 {
2312 struct regcache *regcache = get_current_regcache ();
2313
2314 set_step_over_info (get_regcache_aspace (regcache),
2315 regcache_read_pc (regcache));
2316 }
2317 else
2318 clear_step_over_info ();
2319
2320 insert_breakpoints ();
2321
2322 tp->control.trap_expected = tp->stepping_over_breakpoint;
2323
2324 annotate_starting ();
2325
2326 /* Make sure that output from GDB appears before output from the
2327 inferior. */
2328 gdb_flush (gdb_stdout);
2329
2330 /* Refresh prev_pc value just prior to resuming. This used to be
2331 done in stop_waiting, however, setting prev_pc there did not handle
2332 scenarios such as inferior function calls or returning from
2333 a function via the return command. In those cases, the prev_pc
2334 value was not set properly for subsequent commands. The prev_pc value
2335 is used to initialize the starting line number in the ecs. With an
2336 invalid value, the gdb next command ends up stopping at the position
2337 represented by the next line table entry past our start position.
2338 On platforms that generate one line table entry per line, this
2339 is not a problem. However, on the ia64, the compiler generates
2340 extraneous line table entries that do not increase the line number.
2341 When we issue the gdb next command on the ia64 after an inferior call
2342 or a return command, we often end up a few instructions forward, still
2343 within the original line we started in.
2344
2345 An attempt was made to refresh the prev_pc at the same time the
2346 execution_control_state is initialized (for instance, just before
2347 waiting for an inferior event). But this approach did not work
2348 because of platforms that use ptrace, where the pc register cannot
2349 be read unless the inferior is stopped. At that point, we are not
2350 guaranteed the inferior is stopped and so the regcache_read_pc() call
2351 can fail. Setting the prev_pc value here ensures the value is updated
2352 correctly when the inferior is stopped. */
2353 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2354
2355 /* Reset to normal state. */
2356 init_infwait_state ();
2357
2358 /* Resume inferior. */
2359 resume (tp->control.trap_expected || step || bpstat_should_step (),
2360 tp->suspend.stop_signal);
2361
2362 /* Wait for it to stop (if not standalone)
2363 and in any case decode why it stopped, and act accordingly. */
2364 /* Do this only if we are not using the event loop, or if the target
2365 does not support asynchronous execution. */
2366 if (!target_can_async_p ())
2367 {
2368 wait_for_inferior ();
2369 normal_stop ();
2370 }
2371 }
2372 \f
2373
2374 /* Start remote-debugging of a machine over a serial link. */
2375
2376 void
2377 start_remote (int from_tty)
2378 {
2379 struct inferior *inferior;
2380
2381 inferior = current_inferior ();
2382 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2383
2384 /* Always go on waiting for the target, regardless of the mode. */
2385 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2386 indicate to wait_for_inferior that a target should timeout if
2387 nothing is returned (instead of just blocking). Because of this,
2388 targets expecting an immediate response need to, internally, set
2389 things up so that the target_wait() is forced to eventually
2390 timeout. */
2391 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2392 differentiate to its caller what the state of the target is after
2393 the initial open has been performed. Here we're assuming that
2394 the target has stopped. It should be possible to eventually have
2395 target_open() return to the caller an indication that the target
2396 is currently running and GDB state should be set to the same as
2397 for an async run. */
2398 wait_for_inferior ();
2399
2400 /* Now that the inferior has stopped, do any bookkeeping like
2401 loading shared libraries. We want to do this before normal_stop,
2402 so that the displayed frame is up to date. */
2403 post_create_inferior (&current_target, from_tty);
2404
2405 normal_stop ();
2406 }
2407
2408 /* Initialize static vars when a new inferior begins. */
2409
2410 void
2411 init_wait_for_inferior (void)
2412 {
2413 /* These are meaningless until the first time through wait_for_inferior. */
2414
2415 breakpoint_init_inferior (inf_starting);
2416
2417 clear_proceed_status (0);
2418
2419 target_last_wait_ptid = minus_one_ptid;
2420
2421 previous_inferior_ptid = inferior_ptid;
2422 init_infwait_state ();
2423
2424 /* Discard any skipped inlined frames. */
2425 clear_inline_frame_state (minus_one_ptid);
2426
2427 singlestep_ptid = null_ptid;
2428 singlestep_pc = 0;
2429 }
2430
2431 \f
2432 /* This enum encodes possible reasons for doing a target_wait, so that
2433 wfi can call target_wait in one place. (Ultimately the call will be
2434 moved out of the infinite loop entirely.) */
2435
2436 enum infwait_states
2437 {
2438 infwait_normal_state,
2439 infwait_step_watch_state,
2440 infwait_nonstep_watch_state
2441 };
2442
2443 /* The PTID we'll do a target_wait on. */
2444 ptid_t waiton_ptid;
2445
2446 /* Current inferior wait state. */
2447 static enum infwait_states infwait_state;
2448
2449 /* Data to be passed around while handling an event. This data is
2450 discarded between events. */
2451 struct execution_control_state
2452 {
2453 ptid_t ptid;
2454 /* The thread that got the event, if this was a thread event; NULL
2455 otherwise. */
2456 struct thread_info *event_thread;
2457
2458 struct target_waitstatus ws;
2459 int stop_func_filled_in;
2460 CORE_ADDR stop_func_start;
2461 CORE_ADDR stop_func_end;
2462 const char *stop_func_name;
2463 int wait_some_more;
2464
2465 /* We were in infwait_step_watch_state or
2466 infwait_nonstep_watch_state state, and the thread reported an
2467 event. */
2468 int stepped_after_stopped_by_watchpoint;
2469
2470 /* True if the event thread hit the single-step breakpoint of
2471 another thread. Thus the event doesn't cause a stop, the thread
2472 needs to be single-stepped past the single-step breakpoint before
2473 we can switch back to the original stepping thread. */
2474 int hit_singlestep_breakpoint;
2475 };
2476
2477 static void handle_inferior_event (struct execution_control_state *ecs);
2478
2479 static void handle_step_into_function (struct gdbarch *gdbarch,
2480 struct execution_control_state *ecs);
2481 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2482 struct execution_control_state *ecs);
2483 static void handle_signal_stop (struct execution_control_state *ecs);
2484 static void check_exception_resume (struct execution_control_state *,
2485 struct frame_info *);
2486
2487 static void end_stepping_range (struct execution_control_state *ecs);
2488 static void stop_waiting (struct execution_control_state *ecs);
2489 static void prepare_to_wait (struct execution_control_state *ecs);
2490 static void keep_going (struct execution_control_state *ecs);
2491 static void process_event_stop_test (struct execution_control_state *ecs);
2492 static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
2493
2494 /* Callback for iterate over threads. If the thread is stopped, but
2495 the user/frontend doesn't know about that yet, go through
2496 normal_stop, as if the thread had just stopped now. ARG points at
2497 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2498 ptid_is_pid(PTID) is true, applies to all threads of the process
2499 pointed at by PTID. Otherwise, apply only to the thread pointed at by
2500 PTID. */
2501
2502 static int
2503 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2504 {
2505 ptid_t ptid = * (ptid_t *) arg;
2506
2507 if ((ptid_equal (info->ptid, ptid)
2508 || ptid_equal (minus_one_ptid, ptid)
2509 || (ptid_is_pid (ptid)
2510 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2511 && is_running (info->ptid)
2512 && !is_executing (info->ptid))
2513 {
2514 struct cleanup *old_chain;
2515 struct execution_control_state ecss;
2516 struct execution_control_state *ecs = &ecss;
2517
2518 memset (ecs, 0, sizeof (*ecs));
2519
2520 old_chain = make_cleanup_restore_current_thread ();
2521
2522 overlay_cache_invalid = 1;
2523 /* Flush target cache before starting to handle each event.
2524 Target was running and cache could be stale. This is just a
2525 heuristic. Running threads may modify target memory, but we
2526 don't get any event. */
2527 target_dcache_invalidate ();
2528
2529 /* Go through handle_inferior_event/normal_stop, so we always
2530 have consistent output as if the stop event had been
2531 reported. */
2532 ecs->ptid = info->ptid;
2533 ecs->event_thread = find_thread_ptid (info->ptid);
2534 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2535 ecs->ws.value.sig = GDB_SIGNAL_0;
2536
2537 handle_inferior_event (ecs);
2538
2539 if (!ecs->wait_some_more)
2540 {
2541 struct thread_info *tp;
2542
2543 normal_stop ();
2544
2545 /* Finish off the continuations. */
2546 tp = inferior_thread ();
2547 do_all_intermediate_continuations_thread (tp, 1);
2548 do_all_continuations_thread (tp, 1);
2549 }
2550
2551 do_cleanups (old_chain);
2552 }
2553
2554 return 0;
2555 }
2556
2557 /* This function is attached as a "thread_stop_requested" observer.
2558 Cleanup local state that assumed the PTID was to be resumed, and
2559 report the stop to the frontend. */
2560
2561 static void
2562 infrun_thread_stop_requested (ptid_t ptid)
2563 {
2564 struct displaced_step_inferior_state *displaced;
2565
2566 /* PTID was requested to stop. Remove it from the displaced
2567 stepping queue, so we don't try to resume it automatically. */
2568
2569 for (displaced = displaced_step_inferior_states;
2570 displaced;
2571 displaced = displaced->next)
2572 {
2573 struct displaced_step_request *it, **prev_next_p;
2574
2575 it = displaced->step_request_queue;
2576 prev_next_p = &displaced->step_request_queue;
2577 while (it)
2578 {
2579 if (ptid_match (it->ptid, ptid))
2580 {
2581 *prev_next_p = it->next;
2582 it->next = NULL;
2583 xfree (it);
2584 }
2585 else
2586 {
2587 prev_next_p = &it->next;
2588 }
2589
2590 it = *prev_next_p;
2591 }
2592 }
2593
2594 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2595 }
2596
2597 static void
2598 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2599 {
2600 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2601 nullify_last_target_wait_ptid ();
2602 }
2603
2604 /* Callback for iterate_over_threads. */
2605
2606 static int
2607 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2608 {
2609 if (is_exited (info->ptid))
2610 return 0;
2611
2612 delete_step_resume_breakpoint (info);
2613 delete_exception_resume_breakpoint (info);
2614 return 0;
2615 }
2616
2617 /* In all-stop, delete the step resume breakpoint of any thread that
2618 had one. In non-stop, delete the step resume breakpoint of the
2619 thread that just stopped. */
2620
2621 static void
2622 delete_step_thread_step_resume_breakpoint (void)
2623 {
2624 if (!target_has_execution
2625 || ptid_equal (inferior_ptid, null_ptid))
2626 /* If the inferior has exited, we have already deleted the step
2627 resume breakpoints out of GDB's lists. */
2628 return;
2629
2630 if (non_stop)
2631 {
2632 /* If in non-stop mode, only delete the step-resume or
2633 longjmp-resume breakpoint of the thread that just stopped
2634 stepping. */
2635 struct thread_info *tp = inferior_thread ();
2636
2637 delete_step_resume_breakpoint (tp);
2638 delete_exception_resume_breakpoint (tp);
2639 }
2640 else
2641 /* In all-stop mode, delete all step-resume and longjmp-resume
2642 breakpoints of any thread that had them. */
2643 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2644 }
2645
2646 /* A cleanup wrapper. */
2647
2648 static void
2649 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2650 {
2651 delete_step_thread_step_resume_breakpoint ();
2652 }
2653
2654 /* Pretty print the results of target_wait, for debugging purposes. */
2655
2656 static void
2657 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2658 const struct target_waitstatus *ws)
2659 {
2660 char *status_string = target_waitstatus_to_string (ws);
2661 struct ui_file *tmp_stream = mem_fileopen ();
2662 char *text;
2663
2664 /* The text is split over several lines because it was getting too long.
2665 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2666 output as a unit; we want only one timestamp printed if debug_timestamp
2667 is set. */
2668
2669 fprintf_unfiltered (tmp_stream,
2670 "infrun: target_wait (%d", ptid_get_pid (waiton_ptid));
2671 if (ptid_get_pid (waiton_ptid) != -1)
2672 fprintf_unfiltered (tmp_stream,
2673 " [%s]", target_pid_to_str (waiton_ptid));
2674 fprintf_unfiltered (tmp_stream, ", status) =\n");
2675 fprintf_unfiltered (tmp_stream,
2676 "infrun: %d [%s],\n",
2677 ptid_get_pid (result_ptid),
2678 target_pid_to_str (result_ptid));
2679 fprintf_unfiltered (tmp_stream,
2680 "infrun: %s\n",
2681 status_string);
2682
2683 text = ui_file_xstrdup (tmp_stream, NULL);
2684
2685 /* This uses %s in part to handle %'s in the text, but also to avoid
2686 a gcc error: the format attribute requires a string literal. */
2687 fprintf_unfiltered (gdb_stdlog, "%s", text);
2688
2689 xfree (status_string);
2690 xfree (text);
2691 ui_file_delete (tmp_stream);
2692 }
2693
2694 /* Prepare and stabilize the inferior for detaching it. E.g.,
2695 detaching while a thread is displaced stepping is a recipe for
2696 crashing it, as nothing would readjust the PC out of the scratch
2697 pad. */
2698
2699 void
2700 prepare_for_detach (void)
2701 {
2702 struct inferior *inf = current_inferior ();
2703 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2704 struct cleanup *old_chain_1;
2705 struct displaced_step_inferior_state *displaced;
2706
2707 displaced = get_displaced_stepping_state (inf->pid);
2708
2709 /* Is any thread of this process displaced stepping? If not,
2710 there's nothing else to do. */
2711 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2712 return;
2713
2714 if (debug_infrun)
2715 fprintf_unfiltered (gdb_stdlog,
2716 "displaced-stepping in-process while detaching");
2717
2718 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2719 inf->detaching = 1;
2720
2721 while (!ptid_equal (displaced->step_ptid, null_ptid))
2722 {
2723 struct cleanup *old_chain_2;
2724 struct execution_control_state ecss;
2725 struct execution_control_state *ecs;
2726
2727 ecs = &ecss;
2728 memset (ecs, 0, sizeof (*ecs));
2729
2730 overlay_cache_invalid = 1;
2731 /* Flush target cache before starting to handle each event.
2732 Target was running and cache could be stale. This is just a
2733 heuristic. Running threads may modify target memory, but we
2734 don't get any event. */
2735 target_dcache_invalidate ();
2736
2737 if (deprecated_target_wait_hook)
2738 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2739 else
2740 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2741
2742 if (debug_infrun)
2743 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2744
2745 /* If an error happens while handling the event, propagate GDB's
2746 knowledge of the executing state to the frontend/user running
2747 state. */
2748 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
2749 &minus_one_ptid);
2750
2751 /* Now figure out what to do with the result. */
2752 handle_inferior_event (ecs);
2753
2754 /* No error, don't finish the state yet. */
2755 discard_cleanups (old_chain_2);
2756
2757 /* Breakpoints and watchpoints are not installed on the target
2758 at this point, and signals are passed directly to the
2759 inferior, so this must mean the process is gone. */
2760 if (!ecs->wait_some_more)
2761 {
2762 discard_cleanups (old_chain_1);
2763 error (_("Program exited while detaching"));
2764 }
2765 }
2766
2767 discard_cleanups (old_chain_1);
2768 }
2769
2770 /* Wait for control to return from inferior to debugger.
2771
2772 If inferior gets a signal, we may decide to start it up again
2773 instead of returning. That is why there is a loop in this function.
2774 When this function actually returns it means the inferior
2775 should be left stopped and GDB should read more commands. */
2776
2777 void
2778 wait_for_inferior (void)
2779 {
2780 struct cleanup *old_cleanups;
2781
2782 if (debug_infrun)
2783 fprintf_unfiltered
2784 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
2785
2786 old_cleanups =
2787 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2788
2789 while (1)
2790 {
2791 struct execution_control_state ecss;
2792 struct execution_control_state *ecs = &ecss;
2793 struct cleanup *old_chain;
2794
2795 memset (ecs, 0, sizeof (*ecs));
2796
2797 overlay_cache_invalid = 1;
2798
2799 /* Flush target cache before starting to handle each event.
2800 Target was running and cache could be stale. This is just a
2801 heuristic. Running threads may modify target memory, but we
2802 don't get any event. */
2803 target_dcache_invalidate ();
2804
2805 if (deprecated_target_wait_hook)
2806 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2807 else
2808 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2809
2810 if (debug_infrun)
2811 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2812
2813 /* If an error happens while handling the event, propagate GDB's
2814 knowledge of the executing state to the frontend/user running
2815 state. */
2816 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2817
2818 /* Now figure out what to do with the result. */
2819 handle_inferior_event (ecs);
2820
2821 /* No error, don't finish the state yet. */
2822 discard_cleanups (old_chain);
2823
2824 if (!ecs->wait_some_more)
2825 break;
2826 }
2827
2828 do_cleanups (old_cleanups);
2829 }
2830
2831 /* Asynchronous version of wait_for_inferior. It is called by the
2832 event loop whenever a change of state is detected on the file
2833 descriptor corresponding to the target. It can be called more than
2834 once to complete a single execution command. In such cases we need
2835 to keep the state in a global variable ECSS. If it is the last time
2836 that this function is called for a single execution command, then
2837 report to the user that the inferior has stopped, and do the
2838 necessary cleanups. */
2839
2840 void
2841 fetch_inferior_event (void *client_data)
2842 {
2843 struct execution_control_state ecss;
2844 struct execution_control_state *ecs = &ecss;
2845 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2846 struct cleanup *ts_old_chain;
2847 int was_sync = sync_execution;
2848 int cmd_done = 0;
2849
2850 memset (ecs, 0, sizeof (*ecs));
2851
2852 /* We're handling a live event, so make sure we're doing live
2853 debugging. If we're looking at traceframes while the target is
2854 running, we're going to need to get back to that mode after
2855 handling the event. */
2856 if (non_stop)
2857 {
2858 make_cleanup_restore_current_traceframe ();
2859 set_current_traceframe (-1);
2860 }
2861
2862 if (non_stop)
2863 /* In non-stop mode, the user/frontend should not notice a thread
2864 switch due to internal events. Make sure we revert to the
2865 user-selected thread and frame after handling the event and
2866 running any breakpoint commands. */
2867 make_cleanup_restore_current_thread ();
2868
2869 overlay_cache_invalid = 1;
2870 /* Flush target cache before starting to handle each event. Target
2871 was running and cache could be stale. This is just a heuristic.
2872 Running threads may modify target memory, but we don't get any
2873 event. */
2874 target_dcache_invalidate ();
2875
2876 make_cleanup_restore_integer (&execution_direction);
2877 execution_direction = target_execution_direction ();
2878
2879 if (deprecated_target_wait_hook)
2880 ecs->ptid =
2881 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2882 else
2883 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2884
2885 if (debug_infrun)
2886 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2887
2888 /* If an error happens while handling the event, propagate GDB's
2889 knowledge of the executing state to the frontend/user running
2890 state. */
2891 if (!non_stop)
2892 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2893 else
2894 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2895
2896 /* This cleanup gets executed before the make_cleanup_restore_current_thread
2897 cleanup above, so it still applies to the thread which threw the exception. */
2898 make_bpstat_clear_actions_cleanup ();
2899
2900 /* Now figure out what to do with the result. */
2901 handle_inferior_event (ecs);
2902
2903 if (!ecs->wait_some_more)
2904 {
2905 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2906
2907 delete_step_thread_step_resume_breakpoint ();
2908
2909 /* We may not find an inferior if this was a process exit. */
2910 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
2911 normal_stop ();
2912
2913 if (target_has_execution
2914 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
2915 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2916 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2917 && ecs->event_thread->step_multi
2918 && ecs->event_thread->control.stop_step)
2919 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2920 else
2921 {
2922 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2923 cmd_done = 1;
2924 }
2925 }
2926
2927 /* No error, don't finish the thread states yet. */
2928 discard_cleanups (ts_old_chain);
2929
2930 /* Revert thread and frame. */
2931 do_cleanups (old_chain);
2932
2933 /* If the inferior was in sync execution mode, and now isn't,
2934 restore the prompt (a synchronous execution command has finished,
2935 and we're ready for input). */
2936 if (interpreter_async && was_sync && !sync_execution)
2937 observer_notify_sync_execution_done ();
2938
2939 if (cmd_done
2940 && !was_sync
2941 && exec_done_display_p
2942 && (ptid_equal (inferior_ptid, null_ptid)
2943 || !is_running (inferior_ptid)))
2944 printf_unfiltered (_("completed.\n"));
2945 }
2946
2947 /* Record the frame and location we're currently stepping through. */
2948 void
2949 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2950 {
2951 struct thread_info *tp = inferior_thread ();
2952
2953 tp->control.step_frame_id = get_frame_id (frame);
2954 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
2955
2956 tp->current_symtab = sal.symtab;
2957 tp->current_line = sal.line;
2958 }
2959
2960 /* Clear context switchable stepping state. */
2961
2962 void
2963 init_thread_stepping_state (struct thread_info *tss)
2964 {
2965 tss->stepping_over_breakpoint = 0;
2966 tss->step_after_step_resume_breakpoint = 0;
2967 }
2968
2969 /* Set the cached copy of the last ptid/waitstatus. */
2970
2971 static void
2972 set_last_target_status (ptid_t ptid, struct target_waitstatus status)
2973 {
2974 target_last_wait_ptid = ptid;
2975 target_last_waitstatus = status;
2976 }
2977
2978 /* Return the cached copy of the last pid/waitstatus returned by
2979 target_wait()/deprecated_target_wait_hook(). The data is actually
2980 cached by handle_inferior_event(), which gets called immediately
2981 after target_wait()/deprecated_target_wait_hook(). */
2982
2983 void
2984 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2985 {
2986 *ptidp = target_last_wait_ptid;
2987 *status = target_last_waitstatus;
2988 }
2989
2990 void
2991 nullify_last_target_wait_ptid (void)
2992 {
2993 target_last_wait_ptid = minus_one_ptid;
2994 }
2995
2996 /* Switch thread contexts. */
2997
2998 static void
2999 context_switch (ptid_t ptid)
3000 {
3001 if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
3002 {
3003 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
3004 target_pid_to_str (inferior_ptid));
3005 fprintf_unfiltered (gdb_stdlog, "to %s\n",
3006 target_pid_to_str (ptid));
3007 }
3008
3009 switch_to_thread (ptid);
3010 }
3011
3012 static void
3013 adjust_pc_after_break (struct execution_control_state *ecs)
3014 {
3015 struct regcache *regcache;
3016 struct gdbarch *gdbarch;
3017 struct address_space *aspace;
3018 CORE_ADDR breakpoint_pc, decr_pc;
3019
3020 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
3021 we aren't, just return.
3022
3023 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
3024 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
3025 implemented by software breakpoints should be handled through the normal
3026 breakpoint layer.
3027
3028 NOTE drow/2004-01-31: On some targets, breakpoints may generate
3029 different signals (SIGILL or SIGEMT for instance), but it is less
3030 clear where the PC is pointing afterwards. It may not match
3031 gdbarch_decr_pc_after_break. I don't know any specific target that
3032 generates these signals at breakpoints (the code has been in GDB since at
3033 least 1992) so I can not guess how to handle them here.
3034
3035 In earlier versions of GDB, a target with
3036 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
3037 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
3038 target with both of these set in GDB history, and it seems unlikely to be
3039 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
3040
3041 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
3042 return;
3043
3044 if (ecs->ws.value.sig != GDB_SIGNAL_TRAP)
3045 return;
3046
3047 /* In reverse execution, when a breakpoint is hit, the instruction
3048 under it has already been de-executed. The reported PC always
3049 points at the breakpoint address, so adjusting it further would
3050 be wrong. E.g., consider this case on a decr_pc_after_break == 1
3051 architecture:
3052
3053 B1 0x08000000 : INSN1
3054 B2 0x08000001 : INSN2
3055 0x08000002 : INSN3
3056 PC -> 0x08000003 : INSN4
3057
3058 Say you're stopped at 0x08000003 as above. Reverse continuing
3059 from that point should hit B2 as below. Reading the PC when the
3060 SIGTRAP is reported should read 0x08000001 and INSN2 should have
3061 been de-executed already.
3062
3063 B1 0x08000000 : INSN1
3064 B2 PC -> 0x08000001 : INSN2
3065 0x08000002 : INSN3
3066 0x08000003 : INSN4
3067
3068 We can't apply the same logic as for forward execution, because
3069 we would wrongly adjust the PC to 0x08000000, since there's a
3070 breakpoint at PC - 1. We'd then report a hit on B1, although
3071 INSN1 hadn't been de-executed yet. Doing nothing is the correct
3072 behaviour. */
3073 if (execution_direction == EXEC_REVERSE)
3074 return;
3075
3076 /* If this target does not decrement the PC after breakpoints, then
3077 we have nothing to do. */
3078 regcache = get_thread_regcache (ecs->ptid);
3079 gdbarch = get_regcache_arch (regcache);
3080
3081 decr_pc = target_decr_pc_after_break (gdbarch);
3082 if (decr_pc == 0)
3083 return;
3084
3085 aspace = get_regcache_aspace (regcache);
3086
3087 /* Find the location where (if we've hit a breakpoint) the
3088 breakpoint would be. */
3089 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
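/* Concretely (addresses made up for the example): on i386 the
   breakpoint instruction is the one-byte int3 and the trap reports
   the PC just past it, so decr_pc is 1 and a SIGTRAP reported at
   0x08048401 corresponds to a breakpoint planted at 0x08048400.  */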
3090
3091 /* Check whether there actually is a software breakpoint inserted at
3092 that location.
3093
3094 If in non-stop mode, a race condition is possible where we've
3095 removed a breakpoint, but stop events for that breakpoint were
3096 already queued and arrive later. To suppress those spurious
3097 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
3098 and retire them after a number of stop events are reported. */
3099 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
3100 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
3101 {
3102 struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);
3103
3104 if (record_full_is_used ())
3105 record_full_gdb_operation_disable_set ();
3106
3107 /* When using hardware single-step, a SIGTRAP is reported for both
3108 a completed single-step and a software breakpoint. Need to
3109 differentiate between the two, as the latter needs adjusting
3110 but the former does not.
3111
3112 The SIGTRAP can be due to a completed hardware single-step only if
3113 - we didn't insert software single-step breakpoints
3114 - the thread to be examined is still the current thread
3115 - this thread is currently being stepped
3116
3117 If any of these events did not occur, we must have stopped due
3118 to hitting a software breakpoint, and have to back up to the
3119 breakpoint address.
3120
3121 As a special case, we could have hardware single-stepped a
3122 software breakpoint. In this case (prev_pc == breakpoint_pc),
3123 we also need to back up to the breakpoint address. */
3124
3125 if (singlestep_breakpoints_inserted_p
3126 || !ptid_equal (ecs->ptid, inferior_ptid)
3127 || !currently_stepping (ecs->event_thread)
3128 || ecs->event_thread->prev_pc == breakpoint_pc)
3129 regcache_write_pc (regcache, breakpoint_pc);
3130
3131 do_cleanups (old_cleanups);
3132 }
3133 }
3134
3135 static void
3136 init_infwait_state (void)
3137 {
3138 waiton_ptid = pid_to_ptid (-1);
3139 infwait_state = infwait_normal_state;
3140 }
3141
3142 static int
3143 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3144 {
3145 for (frame = get_prev_frame (frame);
3146 frame != NULL;
3147 frame = get_prev_frame (frame))
3148 {
3149 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3150 return 1;
3151 if (get_frame_type (frame) != INLINE_FRAME)
3152 break;
3153 }
3154
3155 return 0;
3156 }
3157
3158 /* Auxiliary function that handles syscall entry/return events.
3159 It returns 1 if the inferior should keep going (and GDB
3160 should ignore the event), or 0 if the event deserves to be
3161 processed. */
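/* For example, with "catch syscall close" in effect, a
   TARGET_WAITKIND_SYSCALL_ENTRY or TARGET_WAITKIND_SYSCALL_RETURN
   event for close makes the bpstat below cause a stop and we return
   0; the same events for any other syscall fall through to
   keep_going and we return 1.  */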
3162
3163 static int
3164 handle_syscall_event (struct execution_control_state *ecs)
3165 {
3166 struct regcache *regcache;
3167 int syscall_number;
3168
3169 if (!ptid_equal (ecs->ptid, inferior_ptid))
3170 context_switch (ecs->ptid);
3171
3172 regcache = get_thread_regcache (ecs->ptid);
3173 syscall_number = ecs->ws.value.syscall_number;
3174 stop_pc = regcache_read_pc (regcache);
3175
3176 if (catch_syscall_enabled () > 0
3177 && catching_syscall_number (syscall_number) > 0)
3178 {
3179 if (debug_infrun)
3180 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
3181 syscall_number);
3182
3183 ecs->event_thread->control.stop_bpstat
3184 = bpstat_stop_status (get_regcache_aspace (regcache),
3185 stop_pc, ecs->ptid, &ecs->ws);
3186
3187 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3188 {
3189 /* Catchpoint hit. */
3190 return 0;
3191 }
3192 }
3193
3194 /* If no catchpoint triggered for this, then keep going. */
3195 keep_going (ecs);
3196 return 1;
3197 }
3198
3199 /* Lazily fill in the execution_control_state's stop_func_* fields. */
3200
3201 static void
3202 fill_in_stop_func (struct gdbarch *gdbarch,
3203 struct execution_control_state *ecs)
3204 {
3205 if (!ecs->stop_func_filled_in)
3206 {
3207 /* Don't care about return value; stop_func_start and stop_func_name
3208 will both be 0 if it doesn't work. */
3209 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3210 &ecs->stop_func_start, &ecs->stop_func_end);
3211 ecs->stop_func_start
3212 += gdbarch_deprecated_function_start_offset (gdbarch);
3213
3214 if (gdbarch_skip_entrypoint_p (gdbarch))
3215 ecs->stop_func_start = gdbarch_skip_entrypoint (gdbarch,
3216 ecs->stop_func_start);
3217
3218 ecs->stop_func_filled_in = 1;
3219 }
3220 }
3221
3222
3223 /* Return the STOP_SOON field of the inferior pointed at by PTID. */
3224
3225 static enum stop_kind
3226 get_inferior_stop_soon (ptid_t ptid)
3227 {
3228 struct inferior *inf = find_inferior_pid (ptid_get_pid (ptid));
3229
3230 gdb_assert (inf != NULL);
3231 return inf->control.stop_soon;
3232 }
3233
3234 /* Given an execution control state that has been freshly filled in by
3235 an event from the inferior, figure out what it means and take
3236 appropriate action.
3237
3238 The alternatives are:
3239
3240 1) stop_waiting and return; to really stop and return to the
3241 debugger.
3242
3243 2) keep_going and return; to wait for the next event (set
3244 ecs->event_thread->stepping_over_breakpoint to 1 to single step
3245 once). */
3246
3247 static void
3248 handle_inferior_event (struct execution_control_state *ecs)
3249 {
3250 enum stop_kind stop_soon;
3251
3252 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
3253 {
3254 /* We had an event in the inferior, but we are not interested in
3255 handling it at this level. The lower layers have already
3256 done what needs to be done, if anything.
3257
3258 One of the possible circumstances for this is when the
3259 inferior produces output for the console. The inferior has
3260 not stopped, and we are ignoring the event. Another possible
3261 circumstance is any event which the lower level knows will be
3262 reported multiple times without an intervening resume. */
3263 if (debug_infrun)
3264 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
3265 prepare_to_wait (ecs);
3266 return;
3267 }
3268
3269 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
3270 && target_can_async_p () && !sync_execution)
3271 {
3272 /* There were no unwaited-for children left in the target, but,
3273 we're not synchronously waiting for events either. Just
3274 ignore. Otherwise, if we were running a synchronous
3275 execution command, we need to cancel it and give the user
3276 back the terminal. */
3277 if (debug_infrun)
3278 fprintf_unfiltered (gdb_stdlog,
3279 "infrun: TARGET_WAITKIND_NO_RESUMED (ignoring)\n");
3280 prepare_to_wait (ecs);
3281 return;
3282 }
3283
3284 /* Cache the last pid/waitstatus. */
3285 set_last_target_status (ecs->ptid, ecs->ws);
3286
3287 /* Always clear state belonging to the previous time we stopped. */
3288 stop_stack_dummy = STOP_NONE;
3289
3290 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
3291 {
3292 /* No unwaited-for children left. IOW, all resumed children
3293 have exited. */
3294 if (debug_infrun)
3295 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_RESUMED\n");
3296
3297 stop_print_frame = 0;
3298 stop_waiting (ecs);
3299 return;
3300 }
3301
3302 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3303 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
3304 {
3305 ecs->event_thread = find_thread_ptid (ecs->ptid);
3306 /* If it's a new thread, add it to the thread database. */
3307 if (ecs->event_thread == NULL)
3308 ecs->event_thread = add_thread (ecs->ptid);
3309
3310 /* Disable range stepping. If the next step request could use a
3311 range, it will end up re-enabled then. */
3312 ecs->event_thread->control.may_range_step = 0;
3313 }
3314
3315 /* Dependent on valid ECS->EVENT_THREAD. */
3316 adjust_pc_after_break (ecs);
3317
3318 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3319 reinit_frame_cache ();
3320
3321 breakpoint_retire_moribund ();
3322
3323 /* First, distinguish signals caused by the debugger from signals
3324 that have to do with the program's own actions. Note that
3325 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3326 on the operating system version. Here we detect when a SIGILL or
3327 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3328 something similar for SIGSEGV, since a SIGSEGV will be generated
3329 when we're trying to execute a breakpoint instruction on a
3330 non-executable stack. This happens for call dummy breakpoints
3331 for architectures like SPARC that place call dummies on the
3332 stack. */
3333 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3334 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
3335 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
3336 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
3337 {
3338 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3339
3340 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3341 regcache_read_pc (regcache)))
3342 {
3343 if (debug_infrun)
3344 fprintf_unfiltered (gdb_stdlog,
3345 "infrun: Treating signal as SIGTRAP\n");
3346 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
3347 }
3348 }
3349
3350 /* Mark the non-executing threads accordingly. In all-stop, all
3351 threads of all processes are stopped when we get any event
3352 reported. In non-stop mode, only the event thread stops. If
3353 we're handling a process exit in non-stop mode, there's nothing
3354 to do, as threads of the dead process are gone, and threads of
3355 any other process were left running. */
3356 if (!non_stop)
3357 set_executing (minus_one_ptid, 0);
3358 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3359 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3360 set_executing (ecs->ptid, 0);
3361
3362 switch (infwait_state)
3363 {
3364 case infwait_normal_state:
3365 if (debug_infrun)
3366 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
3367 break;
3368
3369 case infwait_step_watch_state:
3370 if (debug_infrun)
3371 fprintf_unfiltered (gdb_stdlog,
3372 "infrun: infwait_step_watch_state\n");
3373
3374 ecs->stepped_after_stopped_by_watchpoint = 1;
3375 break;
3376
3377 case infwait_nonstep_watch_state:
3378 if (debug_infrun)
3379 fprintf_unfiltered (gdb_stdlog,
3380 "infrun: infwait_nonstep_watch_state\n");
3381 insert_breakpoints ();
3382
3383 /* FIXME-maybe: is this cleaner than setting a flag? Does it
3384 handle things like signals arriving and other things happening
3385 in combination correctly? */
3386 ecs->stepped_after_stopped_by_watchpoint = 1;
3387 break;
3388
3389 default:
3390 internal_error (__FILE__, __LINE__, _("bad switch"));
3391 }
3392
3393 infwait_state = infwait_normal_state;
3394 waiton_ptid = pid_to_ptid (-1);
3395
3396 switch (ecs->ws.kind)
3397 {
3398 case TARGET_WAITKIND_LOADED:
3399 if (debug_infrun)
3400 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3401 if (!ptid_equal (ecs->ptid, inferior_ptid))
3402 context_switch (ecs->ptid);
3403 /* Ignore gracefully during startup of the inferior, as it might
3404 be the shell which has just loaded some objects; otherwise
3405 add the symbols for the newly loaded objects. Also ignore at
3406 the beginning of an attach or remote session; we will query
3407 the full list of libraries once the connection is
3408 established. */
3409
3410 stop_soon = get_inferior_stop_soon (ecs->ptid);
3411 if (stop_soon == NO_STOP_QUIETLY)
3412 {
3413 struct regcache *regcache;
3414
3415 regcache = get_thread_regcache (ecs->ptid);
3416
3417 handle_solib_event ();
3418
3419 ecs->event_thread->control.stop_bpstat
3420 = bpstat_stop_status (get_regcache_aspace (regcache),
3421 stop_pc, ecs->ptid, &ecs->ws);
3422
3423 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3424 {
3425 /* A catchpoint triggered. */
3426 process_event_stop_test (ecs);
3427 return;
3428 }
3429
3430 /* If requested, stop when the dynamic linker notifies
3431 gdb of events. This allows the user to get control
3432 and place breakpoints in initializer routines for
3433 dynamically loaded objects (among other things). */
3434 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3435 if (stop_on_solib_events)
3436 {
3437 /* Make sure we print "Stopped due to solib-event" in
3438 normal_stop. */
3439 stop_print_frame = 1;
3440
3441 stop_waiting (ecs);
3442 return;
3443 }
3444 }
3445
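/* A hedged sketch of how the stop_on_solib_events path above surfaces
   to the user (output wording approximate):

     (gdb) set stop-on-solib-events 1
     (gdb) run
     Stopped due to shared library event

   With the setting enabled, each load/unload notification from the
   dynamic linker stops the inferior here instead of being resumed
   transparently.  */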
3446 /* If we are skipping through a shell, or through shared library
3447 loading that we aren't interested in, resume the program. If
3448 we're running the program normally, also resume. */
3449 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3450 {
3451 /* Loading of shared libraries might have changed breakpoint
3452 addresses. Make sure new breakpoints are inserted. */
3453 if (stop_soon == NO_STOP_QUIETLY
3454 && !breakpoints_always_inserted_mode ())
3455 insert_breakpoints ();
3456 resume (0, GDB_SIGNAL_0);
3457 prepare_to_wait (ecs);
3458 return;
3459 }
3460
3461 /* But stop if we're attaching or setting up a remote
3462 connection. */
3463 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3464 || stop_soon == STOP_QUIETLY_REMOTE)
3465 {
3466 if (debug_infrun)
3467 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3468 stop_waiting (ecs);
3469 return;
3470 }
3471
3472 internal_error (__FILE__, __LINE__,
3473 _("unhandled stop_soon: %d"), (int) stop_soon);
3474
3475 case TARGET_WAITKIND_SPURIOUS:
3476 if (debug_infrun)
3477 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3478 if (!ptid_equal (ecs->ptid, inferior_ptid))
3479 context_switch (ecs->ptid);
3480 resume (0, GDB_SIGNAL_0);
3481 prepare_to_wait (ecs);
3482 return;
3483
3484 case TARGET_WAITKIND_EXITED:
3485 case TARGET_WAITKIND_SIGNALLED:
3486 if (debug_infrun)
3487 {
3488 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3489 fprintf_unfiltered (gdb_stdlog,
3490 "infrun: TARGET_WAITKIND_EXITED\n");
3491 else
3492 fprintf_unfiltered (gdb_stdlog,
3493 "infrun: TARGET_WAITKIND_SIGNALLED\n");
3494 }
3495
3496 inferior_ptid = ecs->ptid;
3497 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3498 set_current_program_space (current_inferior ()->pspace);
3499 handle_vfork_child_exec_or_exit (0);
3500 target_terminal_ours (); /* Must do this before mourn anyway. */
3501
3502 /* Clear any previous state of convenience variables. */
3503 clear_exit_convenience_vars ();
3504
3505 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3506 {
3507 /* Record the exit code in the convenience variable $_exitcode, so
3508 that the user can inspect this again later. */
3509 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3510 (LONGEST) ecs->ws.value.integer);
3511
3512 /* Also record this in the inferior itself. */
3513 current_inferior ()->has_exit_code = 1;
3514 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
3515
3516 /* Support the --return-child-result option. */
3517 return_child_result_value = ecs->ws.value.integer;
3518
3519 observer_notify_exited (ecs->ws.value.integer);
3520 }
3521 else
3522 {
3523 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3524 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3525
3526 if (gdbarch_gdb_signal_to_target_p (gdbarch))
3527 {
3528 /* Set the value of the internal variable $_exitsignal,
3529 which holds the signal uncaught by the inferior. */
3530 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
3531 gdbarch_gdb_signal_to_target (gdbarch,
3532 ecs->ws.value.sig));
3533 }
3534 else
3535 {
3536 /* We don't have access to the target's method used for
3537 converting between signal numbers (GDB's internal
3538 representation <-> target's representation).
3539 Therefore, we cannot do a good job at displaying this
3540 information to the user. It's better to just warn
3541 her about it (if infrun debugging is enabled), and
3542 give up. */
3543 if (debug_infrun)
3544 fprintf_filtered (gdb_stdlog, _("\
3545 Cannot fill $_exitsignal with the correct signal number.\n"));
3546 }
3547
3548 observer_notify_signal_exited (ecs->ws.value.sig);
3549 }
3550
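/* A hypothetical session sketch of how the convenience variables set
   above reach the user (process id and values illustrative only):

     (gdb) run
     [Inferior 1 (process 1234) exited with code 02]
     (gdb) print $_exitcode
     $1 = 2

   When the inferior is instead killed by an uncaught signal, and the
   gdbarch can translate GDB's signal numbering to the target's,
   $_exitsignal is set rather than $_exitcode.  */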
3551 gdb_flush (gdb_stdout);
3552 target_mourn_inferior ();
3553 singlestep_breakpoints_inserted_p = 0;
3554 cancel_single_step_breakpoints ();
3555 stop_print_frame = 0;
3556 stop_waiting (ecs);
3557 return;
3558
3559 /* The following are the only cases in which we keep going;
3560 the above cases all end in a return. */
3561 case TARGET_WAITKIND_FORKED:
3562 case TARGET_WAITKIND_VFORKED:
3563 if (debug_infrun)
3564 {
3565 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3566 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3567 else
3568 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORKED\n");
3569 }
3570
3571 /* Check whether the inferior is displaced stepping. */
3572 {
3573 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3574 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3575 struct displaced_step_inferior_state *displaced
3576 = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));
3577
3578 /* If displaced stepping is supported, and thread ecs->ptid is
3579 displaced stepping. */
3580 if (displaced && ptid_equal (displaced->step_ptid, ecs->ptid))
3581 {
3582 struct inferior *parent_inf
3583 = find_inferior_pid (ptid_get_pid (ecs->ptid));
3584 struct regcache *child_regcache;
3585 CORE_ADDR parent_pc;
3586
3587 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
3588 indicating that the displaced stepping of the syscall instruction
3589 has been done. Perform cleanup for parent process here. Note
3590 that this operation also cleans up the child process for vfork,
3591 because their pages are shared. */
3592 displaced_step_fixup (ecs->ptid, GDB_SIGNAL_TRAP);
3593
3594 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3595 {
3596 /* Restore scratch pad for child process. */
3597 displaced_step_restore (displaced, ecs->ws.value.related_pid);
3598 }
3599
3600 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
3601 the child's PC is also within the scratchpad. Set the child's PC
3602 to the parent's PC value, which has already been fixed up.
3603 FIXME: we use the parent's aspace here, although we're touching
3604 the child, because the child hasn't been added to the inferior
3605 list yet at this point. */
3606
3607 child_regcache
3608 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
3609 gdbarch,
3610 parent_inf->aspace);
3611 /* Read PC value of parent process. */
3612 parent_pc = regcache_read_pc (regcache);
3613
3614 if (debug_displaced)
3615 fprintf_unfiltered (gdb_stdlog,
3616 "displaced: write child pc from %s to %s\n",
3617 paddress (gdbarch,
3618 regcache_read_pc (child_regcache)),
3619 paddress (gdbarch, parent_pc));
3620
3621 regcache_write_pc (child_regcache, parent_pc);
3622 }
3623 }
3624
3625 if (!ptid_equal (ecs->ptid, inferior_ptid))
3626 context_switch (ecs->ptid);
3627
3628 /* Immediately detach breakpoints from the child before there's
3629 any chance of letting the user delete breakpoints from the
3630 breakpoint lists. If we don't do this early, it's easy to
3631 leave leftover traps in the child, viz: "break foo; catch
3632 fork; c; <fork>; del; c; <child calls foo>". We only follow
3633 the fork on the last `continue', and by that time the
3634 breakpoint at "foo" is long gone from the breakpoint table.
3635 If we vforked, then we don't need to unpatch here, since both
3636 parent and child are sharing the same memory pages; we'll
3637 need to unpatch at follow/detach time instead to be certain
3638 that new breakpoints added between catchpoint hit time and
3639 vfork follow are detached. */
3640 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3641 {
3642 /* This won't actually modify the breakpoint list, but will
3643 physically remove the breakpoints from the child. */
3644 detach_breakpoints (ecs->ws.value.related_pid);
3645 }
3646
3647 if (singlestep_breakpoints_inserted_p)
3648 {
3649 /* Pull the single step breakpoints out of the target. */
3650 remove_single_step_breakpoints ();
3651 singlestep_breakpoints_inserted_p = 0;
3652 }
3653
3654 /* In case the event is caught by a catchpoint, remember that
3655 the event is to be followed at the next resume of the thread,
3656 and not immediately. */
3657 ecs->event_thread->pending_follow = ecs->ws;
3658
3659 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3660
3661 ecs->event_thread->control.stop_bpstat
3662 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3663 stop_pc, ecs->ptid, &ecs->ws);
3664
3665 /* If no catchpoint triggered for this, then keep going. Note
3666 that we're interested in knowing the bpstat actually causes a
3667 stop, not just if it may explain the signal. Software
3668 watchpoints, for example, always appear in the bpstat. */
3669 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3670 {
3671 ptid_t parent;
3672 ptid_t child;
3673 int should_resume;
3674 int follow_child
3675 = (follow_fork_mode_string == follow_fork_mode_child);
3676
3677 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3678
3679 should_resume = follow_fork ();
3680
3681 parent = ecs->ptid;
3682 child = ecs->ws.value.related_pid;
3683
3684 /* In non-stop mode, also resume the other branch. */
3685 if (non_stop && !detach_fork)
3686 {
3687 if (follow_child)
3688 switch_to_thread (parent);
3689 else
3690 switch_to_thread (child);
3691
3692 ecs->event_thread = inferior_thread ();
3693 ecs->ptid = inferior_ptid;
3694 keep_going (ecs);
3695 }
3696
3697 if (follow_child)
3698 switch_to_thread (child);
3699 else
3700 switch_to_thread (parent);
3701
3702 ecs->event_thread = inferior_thread ();
3703 ecs->ptid = inferior_ptid;
3704
3705 if (should_resume)
3706 keep_going (ecs);
3707 else
3708 stop_waiting (ecs);
3709 return;
3710 }
3711 process_event_stop_test (ecs);
3712 return;
3713
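/* A hedged sketch of the user-level knobs behind the follow_fork
   handling above (exact output omitted; wording may vary by version):

     (gdb) set follow-fork-mode child
     (gdb) set detach-on-fork off
     (gdb) catch fork
     (gdb) run

   follow-fork-mode selects follow_fork_mode_string, and detach-on-fork
   controls detach_fork; with detach-on-fork off in non-stop mode, the
   branch that is not followed is resumed as well, as done in the block
   above.  */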
3714 case TARGET_WAITKIND_VFORK_DONE:
3715 /* Done with the shared memory region. Re-insert breakpoints in
3716 the parent, and keep going. */
3717
3718 if (debug_infrun)
3719 fprintf_unfiltered (gdb_stdlog,
3720 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3721
3722 if (!ptid_equal (ecs->ptid, inferior_ptid))
3723 context_switch (ecs->ptid);
3724
3725 current_inferior ()->waiting_for_vfork_done = 0;
3726 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3727 /* This also takes care of reinserting breakpoints in the
3728 previously locked inferior. */
3729 keep_going (ecs);
3730 return;
3731
3732 case TARGET_WAITKIND_EXECD:
3733 if (debug_infrun)
3734 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3735
3736 if (!ptid_equal (ecs->ptid, inferior_ptid))
3737 context_switch (ecs->ptid);
3738
3739 singlestep_breakpoints_inserted_p = 0;
3740 cancel_single_step_breakpoints ();
3741
3742 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3743
3744 /* Do whatever is necessary to the parent branch of the vfork. */
3745 handle_vfork_child_exec_or_exit (1);
3746
3747 /* This causes the eventpoints and symbol table to be reset.
3748 Must do this now, before trying to determine whether to
3749 stop. */
3750 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3751
3752 ecs->event_thread->control.stop_bpstat
3753 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3754 stop_pc, ecs->ptid, &ecs->ws);
3755
3756 /* Note that this may be referenced from inside
3757 bpstat_stop_status above, through inferior_has_execd. */
3758 xfree (ecs->ws.value.execd_pathname);
3759 ecs->ws.value.execd_pathname = NULL;
3760
3761 /* If no catchpoint triggered for this, then keep going. */
3762 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3763 {
3764 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3765 keep_going (ecs);
3766 return;
3767 }
3768 process_event_stop_test (ecs);
3769 return;
3770
3771 /* Be careful not to try to gather much state about a thread
3772 that's in a syscall. It's frequently a losing proposition. */
3773 case TARGET_WAITKIND_SYSCALL_ENTRY:
3774 if (debug_infrun)
3775 fprintf_unfiltered (gdb_stdlog,
3776 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3777 /* Get the current syscall number. */
3778 if (handle_syscall_event (ecs) == 0)
3779 process_event_stop_test (ecs);
3780 return;
3781
3782 /* Before examining the threads further, step this thread to
3783 get it entirely out of the syscall. (We get notice of the
3784 event when the thread is just on the verge of exiting a
3785 syscall. Stepping one instruction seems to get it back
3786 into user code.) */
3787 case TARGET_WAITKIND_SYSCALL_RETURN:
3788 if (debug_infrun)
3789 fprintf_unfiltered (gdb_stdlog,
3790 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3791 if (handle_syscall_event (ecs) == 0)
3792 process_event_stop_test (ecs);
3793 return;
3794
3795 case TARGET_WAITKIND_STOPPED:
3796 if (debug_infrun)
3797 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3798 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
3799 handle_signal_stop (ecs);
3800 return;
3801
3802 case TARGET_WAITKIND_NO_HISTORY:
3803 if (debug_infrun)
3804 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
3805 /* Reverse execution: target ran out of history info. */
3806
3807 /* Pull the single step breakpoints out of the target. */
3808 if (singlestep_breakpoints_inserted_p)
3809 {
3810 if (!ptid_equal (ecs->ptid, inferior_ptid))
3811 context_switch (ecs->ptid);
3812 remove_single_step_breakpoints ();
3813 singlestep_breakpoints_inserted_p = 0;
3814 }
3815 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3816 observer_notify_no_history ();
3817 stop_waiting (ecs);
3818 return;
3819 }
3820 }
3821
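/* The debug_infrun traces sprinkled through handle_inferior_event can
   be enabled from the CLI; a minimal sketch (log lines abridged, the
   address shown is illustrative only):

     (gdb) set debug infrun 1
     (gdb) continue
     infrun: TARGET_WAITKIND_STOPPED
     infrun: stop_pc = 0x400536

   These "infrun:" lines correspond to the fprintf_unfiltered
   (gdb_stdlog, ...) calls above; the exact output depends on the
   event received.  */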
3822 /* Come here when the program has stopped with a signal. */
3823
3824 static void
3825 handle_signal_stop (struct execution_control_state *ecs)
3826 {
3827 struct frame_info *frame;
3828 struct gdbarch *gdbarch;
3829 int stopped_by_watchpoint;
3830 enum stop_kind stop_soon;
3831 int random_signal;
3832
3833 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
3834
3835 /* Do we need to clean up the state of a thread that has
3836 completed a displaced single-step? (Doing so usually affects
3837 the PC, so do it here, before we set stop_pc.) */
3838 displaced_step_fixup (ecs->ptid,
3839 ecs->event_thread->suspend.stop_signal);
3840
3841 /* If we either finished a single-step or hit a breakpoint, but
3842 the user wanted this thread to be stopped, pretend we got a
3843 SIG0 (generic unsignaled stop). */
3844 if (ecs->event_thread->stop_requested
3845 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
3846 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3847
3848 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3849
3850 if (debug_infrun)
3851 {
3852 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3853 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3854 struct cleanup *old_chain = save_inferior_ptid ();
3855
3856 inferior_ptid = ecs->ptid;
3857
3858 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3859 paddress (gdbarch, stop_pc));
3860 if (target_stopped_by_watchpoint ())
3861 {
3862 CORE_ADDR addr;
3863
3864 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3865
3866 if (target_stopped_data_address (&current_target, &addr))
3867 fprintf_unfiltered (gdb_stdlog,
3868 "infrun: stopped data address = %s\n",
3869 paddress (gdbarch, addr));
3870 else
3871 fprintf_unfiltered (gdb_stdlog,
3872 "infrun: (no data address available)\n");
3873 }
3874
3875 do_cleanups (old_chain);
3876 }
3877
3878 /* This originates from start_remote(), start_inferior() and
3879 the shared library hook functions. */
3880 stop_soon = get_inferior_stop_soon (ecs->ptid);
3881 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
3882 {
3883 if (!ptid_equal (ecs->ptid, inferior_ptid))
3884 context_switch (ecs->ptid);
3885 if (debug_infrun)
3886 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3887 stop_print_frame = 1;
3888 stop_waiting (ecs);
3889 return;
3890 }
3891
3892 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
3893 && stop_after_trap)
3894 {
3895 if (!ptid_equal (ecs->ptid, inferior_ptid))
3896 context_switch (ecs->ptid);
3897 if (debug_infrun)
3898 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
3899 stop_print_frame = 0;
3900 stop_waiting (ecs);
3901 return;
3902 }
3903
3904 /* This originates from attach_command(). We need to overwrite
3905 the stop_signal here, because some kernels don't ignore a
3906 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
3907 See more comments in inferior.h. On the other hand, if we
3908 get a non-SIGSTOP, report it to the user - assume the backend
3909 will handle the SIGSTOP if it should show up later.
3910
3911 Also consider that the attach is complete when we see a
3912 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
3913 target extended-remote report it instead of a SIGSTOP
3914 (e.g. gdbserver). We already rely on SIGTRAP being our
3915 signal, so this is no exception.
3916
3917 Also consider that the attach is complete when we see a
3918 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
3919 the target to stop all threads of the inferior, in case the
3920 low level attach operation doesn't stop them implicitly. If
3921 they weren't stopped implicitly, then the stub will report a
3922 GDB_SIGNAL_0, meaning: stopped for no particular reason
3923 other than GDB's request. */
3924 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3925 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
3926 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
3927 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
3928 {
3929 stop_print_frame = 1;
3930 stop_waiting (ecs);
3931 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3932 return;
3933 }
3934
3935 /* See if something interesting happened to the non-current thread. If
3936 so, then switch to that thread. */
3937 if (!ptid_equal (ecs->ptid, inferior_ptid))
3938 {
3939 if (debug_infrun)
3940 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3941
3942 context_switch (ecs->ptid);
3943
3944 if (deprecated_context_hook)
3945 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3946 }
3947
3948 /* At this point, get hold of the now-current thread's frame. */
3949 frame = get_current_frame ();
3950 gdbarch = get_frame_arch (frame);
3951
3952 /* Pull the single step breakpoints out of the target. */
3953 if (singlestep_breakpoints_inserted_p)
3954 {
3955 /* However, before doing so, if this single-step breakpoint was
3956 actually for another thread, set this thread up for moving
3957 past it. */
3958 if (!ptid_equal (ecs->ptid, singlestep_ptid)
3959 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
3960 {
3961 struct regcache *regcache;
3962 struct address_space *aspace;
3963 CORE_ADDR pc;
3964
3965 regcache = get_thread_regcache (ecs->ptid);
3966 aspace = get_regcache_aspace (regcache);
3967 pc = regcache_read_pc (regcache);
3968 if (single_step_breakpoint_inserted_here_p (aspace, pc))
3969 {
3970 if (debug_infrun)
3971 {
3972 fprintf_unfiltered (gdb_stdlog,
3973 "infrun: [%s] hit step over single-step"
3974 " breakpoint of [%s]\n",
3975 target_pid_to_str (ecs->ptid),
3976 target_pid_to_str (singlestep_ptid));
3977 }
3978 ecs->hit_singlestep_breakpoint = 1;
3979 }
3980 }
3981
3982 remove_single_step_breakpoints ();
3983 singlestep_breakpoints_inserted_p = 0;
3984 }
3985
3986 if (ecs->stepped_after_stopped_by_watchpoint)
3987 stopped_by_watchpoint = 0;
3988 else
3989 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3990
3991 /* If necessary, step over this watchpoint. We'll be back to display
3992 it in a moment. */
3993 if (stopped_by_watchpoint
3994 && (target_have_steppable_watchpoint
3995 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3996 {
3997 /* At this point, we are stopped at an instruction which has
3998 attempted to write to a piece of memory under control of
3999 a watchpoint. The instruction hasn't actually executed
4000 yet. If we were to evaluate the watchpoint expression
4001 now, we would get the old value, and therefore no change
4002 would seem to have occurred.
4003
4004 In order to make watchpoints work `right', we really need
4005 to complete the memory write, and then evaluate the
4006 watchpoint expression. We do this by single-stepping the
4007 target.
4008
4009 It may not be necessary to disable the watchpoint to step over
4010 it. For example, the PA can (with some kernel cooperation)
4011 single step over a watchpoint without disabling the watchpoint.
4012
4013 It is far more common to need to disable a watchpoint to step
4014 the inferior over it. If we have non-steppable watchpoints,
4015 we must disable the current watchpoint; it's simplest to
4016 disable all watchpoints and breakpoints. */
4017 int hw_step = 1;
4018
4019 if (!target_have_steppable_watchpoint)
4020 {
4021 remove_breakpoints ();
4022 /* See comment in resume why we need to stop bypassing signals
4023 while breakpoints have been removed. */
4024 target_pass_signals (0, NULL);
4025 }
4026 /* Single step */
4027 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
4028 target_resume (ecs->ptid, hw_step, GDB_SIGNAL_0);
4029 waiton_ptid = ecs->ptid;
4030 if (target_have_steppable_watchpoint)
4031 infwait_state = infwait_step_watch_state;
4032 else
4033 infwait_state = infwait_nonstep_watch_state;
4034 prepare_to_wait (ecs);
4035 return;
4036 }
4037
4038 ecs->event_thread->stepping_over_breakpoint = 0;
4039 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4040 ecs->event_thread->control.stop_step = 0;
4041 stop_print_frame = 1;
4042 stopped_by_random_signal = 0;
4043
4044 /* Hide inlined functions starting here, unless we just performed stepi or
4045 nexti. After stepi and nexti, always show the innermost frame (not any
4046 inline function call sites). */
4047 if (ecs->event_thread->control.step_range_end != 1)
4048 {
4049 struct address_space *aspace =
4050 get_regcache_aspace (get_thread_regcache (ecs->ptid));
4051
4052 /* skip_inline_frames is expensive, so we avoid it if we can
4053 determine that the address is one where functions cannot have
4054 been inlined. This improves performance with inferiors that
4055 load a lot of shared libraries, because the solib event
4056 breakpoint is defined as the address of a function (i.e. not
4057 inline). Note that we have to check the previous PC as well
4058 as the current one to catch cases when we have just
4059 single-stepped off a breakpoint prior to reinstating it.
4060 Note that we're assuming that the code we single-step to is
4061 not inline, but that's not definitive: there's nothing
4062 preventing the event breakpoint function from containing
4063 inlined code, and the single-step ending up there. If the
4064 user had set a breakpoint on that inlined code, the missing
4065 skip_inline_frames call would break things. Fortunately
4066 that's an extremely unlikely scenario. */
4067 if (!pc_at_non_inline_function (aspace, stop_pc, &ecs->ws)
4068 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4069 && ecs->event_thread->control.trap_expected
4070 && pc_at_non_inline_function (aspace,
4071 ecs->event_thread->prev_pc,
4072 &ecs->ws)))
4073 {
4074 skip_inline_frames (ecs->ptid);
4075
4076 /* Re-fetch current thread's frame in case that invalidated
4077 the frame cache. */
4078 frame = get_current_frame ();
4079 gdbarch = get_frame_arch (frame);
4080 }
4081 }
4082
4083 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4084 && ecs->event_thread->control.trap_expected
4085 && gdbarch_single_step_through_delay_p (gdbarch)
4086 && currently_stepping (ecs->event_thread))
4087 {
4088 /* We're trying to step off a breakpoint. Turns out that we're
4089 also on an instruction that needs to be stepped multiple
4090 times before it has fully executed. E.g., architectures
4091 with a delay slot. It needs to be stepped twice, once for
4092 the instruction and once for the delay slot. */
4093 int step_through_delay
4094 = gdbarch_single_step_through_delay (gdbarch, frame);
4095
4096 if (debug_infrun && step_through_delay)
4097 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
4098 if (ecs->event_thread->control.step_range_end == 0
4099 && step_through_delay)
4100 {
4101 /* The user issued a continue when stopped at a breakpoint.
4102 Set up for another trap and get out of here. */
4103 ecs->event_thread->stepping_over_breakpoint = 1;
4104 keep_going (ecs);
4105 return;
4106 }
4107 else if (step_through_delay)
4108 {
4109 /* The user issued a step when stopped at a breakpoint.
4110 Maybe we should stop, maybe we should not - the delay
4111 slot *might* correspond to a line of source. In any
4112 case, don't decide that here, just set
4113 ecs->stepping_over_breakpoint, making sure we
4114 single-step again before breakpoints are re-inserted. */
4115 ecs->event_thread->stepping_over_breakpoint = 1;
4116 }
4117 }
4118
4119 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
4120 handles this event. */
4121 ecs->event_thread->control.stop_bpstat
4122 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4123 stop_pc, ecs->ptid, &ecs->ws);
4124
4125 /* The following is needed in case a breakpoint condition called
4126 a function. */
4127 stop_print_frame = 1;
4128
4129 /* This is where we handle "moribund" watchpoints. Unlike
4130 software breakpoint traps, hardware watchpoint traps are
4131 always distinguishable from random traps. If no high-level
4132 watchpoint is associated with the reported stop data address
4133 anymore, then the bpstat does not explain the signal ---
4134 simply make sure to ignore it if `stopped_by_watchpoint' is
4135 set. */
4136
4137 if (debug_infrun
4138 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4139 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4140 GDB_SIGNAL_TRAP)
4141 && stopped_by_watchpoint)
4142 fprintf_unfiltered (gdb_stdlog,
4143 "infrun: no user watchpoint explains "
4144 "watchpoint SIGTRAP, ignoring\n");
4145
4146 /* NOTE: cagney/2003-03-29: These checks for a random signal
4147 at one stage in the past included checks for an inferior
4148 function call's call dummy's return breakpoint. The original
4149 comment, that went with the test, read:
4150
4151 ``End of a stack dummy. Some systems (e.g. Sony news) give
4152 another signal besides SIGTRAP, so check here as well as
4153 above.''
4154
4155 If someone ever tries to get call dummies on a
4156 non-executable stack to work (where the target would stop
4157 with something like a SIGSEGV), then those tests might need
4158 to be re-instated. Given, however, that the tests were only
4159 enabled when momentary breakpoints were not being used, I
4160 suspect that it won't be the case.
4161
4162 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4163 be necessary for call dummies on a non-executable stack on
4164 SPARC. */
4165
4166 /* See if the breakpoints module can explain the signal. */
4167 random_signal
4168 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4169 ecs->event_thread->suspend.stop_signal);
4170
4171 /* If not, perhaps stepping/nexting can. */
4172 if (random_signal)
4173 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4174 && currently_stepping (ecs->event_thread));
4175
4176 /* Perhaps the thread hit a single-step breakpoint of _another_
4177 thread. Single-step breakpoints are transparent to the
4178 breakpoints module. */
4179 if (random_signal)
4180 random_signal = !ecs->hit_singlestep_breakpoint;
4181
4182 /* No? Perhaps we got a moribund watchpoint. */
4183 if (random_signal)
4184 random_signal = !stopped_by_watchpoint;
4185
4186 /* For the program's own signals, act according to
4187 the signal handling tables. */
4188
4189 if (random_signal)
4190 {
4191 /* Signal not for debugging purposes. */
4192 int printed = 0;
4193 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
4194 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
4195
4196 if (debug_infrun)
4197 fprintf_unfiltered (gdb_stdlog, "infrun: random signal (%s)\n",
4198 gdb_signal_to_symbol_string (stop_signal));
4199
4200 stopped_by_random_signal = 1;
4201
4202 if (signal_print[ecs->event_thread->suspend.stop_signal])
4203 {
4204 /* The signal table tells us to print about this signal. */
4205 printed = 1;
4206 target_terminal_ours_for_output ();
4207 observer_notify_signal_received (ecs->event_thread->suspend.stop_signal);
4208 }
4209 /* Always stop on signals if we're either just gaining control
4210 of the program, or the user explicitly requested this thread
4211 to remain stopped. */
4212 if (stop_soon != NO_STOP_QUIETLY
4213 || ecs->event_thread->stop_requested
4214 || (!inf->detaching
4215 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4216 {
4217 stop_waiting (ecs);
4218 return;
4219 }
4220 /* If not going to stop, give terminal back
4221 if we took it away. */
4222 else if (printed)
4223 target_terminal_inferior ();
4224
4225 /* Clear the signal if it should not be passed. */
4226 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4227 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4228
4229 if (ecs->event_thread->prev_pc == stop_pc
4230 && ecs->event_thread->control.trap_expected
4231 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4232 {
4233 /* We were just starting a new sequence, attempting to
4234 single-step off of a breakpoint and expecting a SIGTRAP.
4235 Instead this signal arrives. This signal will take us out
4236 of the stepping range so GDB needs to remember to, when
4237 the signal handler returns, resume stepping off that
4238 breakpoint. */
4239 /* To simplify things, "continue" is forced to use the same
4240 code paths as single-step - set a breakpoint at the
4241 signal return address and then, once hit, step off that
4242 breakpoint. */
4243 if (debug_infrun)
4244 fprintf_unfiltered (gdb_stdlog,
4245 "infrun: signal arrived while stepping over "
4246 "breakpoint\n");
4247
4248 insert_hp_step_resume_breakpoint_at_frame (frame);
4249 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4250 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4251 ecs->event_thread->control.trap_expected = 0;
4252
4253 /* If we were nexting/stepping some other thread, switch to
4254 it, so that we don't continue it, losing control. */
4255 if (!switch_back_to_stepped_thread (ecs))
4256 keep_going (ecs);
4257 return;
4258 }
4259
4260 if (ecs->event_thread->control.step_range_end != 0
4261 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
4262 && pc_in_thread_step_range (stop_pc, ecs->event_thread)
4263 && frame_id_eq (get_stack_frame_id (frame),
4264 ecs->event_thread->control.step_stack_frame_id)
4265 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4266 {
4267 /* The inferior is about to take a signal that will take it
4268 out of the single step range. Set a breakpoint at the
4269 current PC (which is presumably where the signal handler
4270 will eventually return) and then allow the inferior to
4271 run free.
4272
4273 Note that this is only needed for a signal delivered
4274 while in the single-step range. Nested signals aren't a
4275 problem as they eventually all return. */
4276 if (debug_infrun)
4277 fprintf_unfiltered (gdb_stdlog,
4278 "infrun: signal may take us out of "
4279 "single-step range\n");
4280
4281 insert_hp_step_resume_breakpoint_at_frame (frame);
4282 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4283 ecs->event_thread->control.trap_expected = 0;
4284 keep_going (ecs);
4285 return;
4286 }
4287
4288 /* Note: step_resume_breakpoint may be non-NULL. This occurs
4289 when either there's a nested signal, or when there's a
4290 pending signal enabled just as the signal handler returns
4291 (leaving the inferior at the step-resume-breakpoint without
4292 actually executing it). Either way continue until the
4293 breakpoint is really hit. */
4294
4295 if (!switch_back_to_stepped_thread (ecs))
4296 {
4297 if (debug_infrun)
4298 fprintf_unfiltered (gdb_stdlog,
4299 "infrun: random signal, keep going\n");
4300
4301 keep_going (ecs);
4302 }
4303 return;
4304 }
4305
4306 process_event_stop_test (ecs);
4307 }
4308
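/* The signal_stop / signal_print / signal_program tables consulted in
   the random-signal path above are driven by the "handle" command; a
   brief hedged sketch:

     (gdb) handle SIGUSR1 nostop noprint pass
     (gdb) handle SIGINT stop print nopass

   "stop"/"nostop" feeds signal_stop_state, "print"/"noprint" feeds
   signal_print, and "pass"/"nopass" decides whether the signal is
   forwarded to the inferior or cleared to GDB_SIGNAL_0 before
   resuming, as in the code above.  */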
4309 /* Come here when we've got some debug event / signal we can explain
4310 (IOW, not a random signal), and test whether it should cause a
4311 stop, or whether we should resume the inferior (transparently).
4312 E.g., could be a breakpoint whose condition evaluates false; we
4313 could be still stepping within the line; etc. */
4314
4315 static void
4316 process_event_stop_test (struct execution_control_state *ecs)
4317 {
4318 struct symtab_and_line stop_pc_sal;
4319 struct frame_info *frame;
4320 struct gdbarch *gdbarch;
4321 CORE_ADDR jmp_buf_pc;
4322 struct bpstat_what what;
4323
4324 /* Handle cases caused by hitting a breakpoint. */
4325
4326 frame = get_current_frame ();
4327 gdbarch = get_frame_arch (frame);
4328
4329 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4330
4331 if (what.call_dummy)
4332 {
4333 stop_stack_dummy = what.call_dummy;
4334 }
4335
4336 /* If we hit an internal event that triggers symbol changes, the
4337 current frame will be invalidated within bpstat_what (e.g., if we
4338 hit an internal solib event). Re-fetch it. */
4339 frame = get_current_frame ();
4340 gdbarch = get_frame_arch (frame);
4341
4342 switch (what.main_action)
4343 {
4344 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4345 /* If we hit the breakpoint at longjmp while stepping, we
4346 install a momentary breakpoint at the target of the
4347 jmp_buf. */
4348
4349 if (debug_infrun)
4350 fprintf_unfiltered (gdb_stdlog,
4351 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4352
4353 ecs->event_thread->stepping_over_breakpoint = 1;
4354
4355 if (what.is_longjmp)
4356 {
4357 struct value *arg_value;
4358
4359 /* If we set the longjmp breakpoint via a SystemTap probe,
4360 then use it to extract the arguments. The destination PC
4361 is the third argument to the probe. */
4362 arg_value = probe_safe_evaluate_at_pc (frame, 2);
4363 if (arg_value)
4364 jmp_buf_pc = value_as_address (arg_value);
4365 else if (!gdbarch_get_longjmp_target_p (gdbarch)
4366 || !gdbarch_get_longjmp_target (gdbarch,
4367 frame, &jmp_buf_pc))
4368 {
4369 if (debug_infrun)
4370 fprintf_unfiltered (gdb_stdlog,
4371 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4372 "(!gdbarch_get_longjmp_target)\n");
4373 keep_going (ecs);
4374 return;
4375 }
4376
4377 /* Insert a breakpoint at resume address. */
4378 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4379 }
4380 else
4381 check_exception_resume (ecs, frame);
4382 keep_going (ecs);
4383 return;
4384
4385 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4386 {
4387 struct frame_info *init_frame;
4388
4389 /* There are several cases to consider.
4390
4391 1. The initiating frame no longer exists. In this case we
4392 must stop, because the exception or longjmp has gone too
4393 far.
4394
4395 2. The initiating frame exists, and is the same as the
4396 current frame. We stop, because the exception or longjmp
4397 has been caught.
4398
4399 3. The initiating frame exists and is different from the
4400 current frame. This means the exception or longjmp has
4401 been caught beneath the initiating frame, so keep going.
4402
4403 4. The longjmp breakpoint has been placed just to protect
4404 against stale dummy frames, and the user is not interested in
4405 stopping around longjmps. */
4406
4407 if (debug_infrun)
4408 fprintf_unfiltered (gdb_stdlog,
4409 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4410
4411 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4412 != NULL);
4413 delete_exception_resume_breakpoint (ecs->event_thread);
4414
4415 if (what.is_longjmp)
4416 {
4417 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
4418
4419 if (!frame_id_p (ecs->event_thread->initiating_frame))
4420 {
4421 /* Case 4. */
4422 keep_going (ecs);
4423 return;
4424 }
4425 }
4426
4427 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
4428
4429 if (init_frame)
4430 {
4431 struct frame_id current_id
4432 = get_frame_id (get_current_frame ());
4433 if (frame_id_eq (current_id,
4434 ecs->event_thread->initiating_frame))
4435 {
4436 /* Case 2. Fall through. */
4437 }
4438 else
4439 {
4440 /* Case 3. */
4441 keep_going (ecs);
4442 return;
4443 }
4444 }
4445
4446 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
4447 exists. */
4448 delete_step_resume_breakpoint (ecs->event_thread);
4449
4450 end_stepping_range (ecs);
4451 }
4452 return;
4453
4454 case BPSTAT_WHAT_SINGLE:
4455 if (debug_infrun)
4456 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4457 ecs->event_thread->stepping_over_breakpoint = 1;
4458 /* Still need to check other stuff, at least the case where we
4459 are stepping and step out of the right range. */
4460 break;
4461
4462 case BPSTAT_WHAT_STEP_RESUME:
4463 if (debug_infrun)
4464 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4465
4466 delete_step_resume_breakpoint (ecs->event_thread);
4467 if (ecs->event_thread->control.proceed_to_finish
4468 && execution_direction == EXEC_REVERSE)
4469 {
4470 struct thread_info *tp = ecs->event_thread;
4471
4472 /* We are finishing a function in reverse, and just hit the
4473 step-resume breakpoint at the start address of the
4474 function, and we're almost there -- just need to back up
4475 by one more single-step, which should take us back to the
4476 function call. */
4477 tp->control.step_range_start = tp->control.step_range_end = 1;
4478 keep_going (ecs);
4479 return;
4480 }
4481 fill_in_stop_func (gdbarch, ecs);
4482 if (stop_pc == ecs->stop_func_start
4483 && execution_direction == EXEC_REVERSE)
4484 {
4485 /* We are stepping over a function call in reverse, and just
4486 hit the step-resume breakpoint at the start address of
4487 the function. Go back to single-stepping, which should
4488 take us back to the function call. */
4489 ecs->event_thread->stepping_over_breakpoint = 1;
4490 keep_going (ecs);
4491 return;
4492 }
4493 break;
4494
4495 case BPSTAT_WHAT_STOP_NOISY:
4496 if (debug_infrun)
4497 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4498 stop_print_frame = 1;
4499
4500 /* Assume the thread stopped for a breakpoint. We'll still check
4501 whether a/the breakpoint is there when the thread is next
4502 resumed. */
4503 ecs->event_thread->stepping_over_breakpoint = 1;
4504
4505 stop_waiting (ecs);
4506 return;
4507
4508 case BPSTAT_WHAT_STOP_SILENT:
4509 if (debug_infrun)
4510 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4511 stop_print_frame = 0;
4512
4513 /* Assume the thread stopped for a breakpoint. We'll still check
4514 whether a/the breakpoint is there when the thread is next
4515 resumed. */
4516 ecs->event_thread->stepping_over_breakpoint = 1;
4517 stop_waiting (ecs);
4518 return;
4519
4520 case BPSTAT_WHAT_HP_STEP_RESUME:
4521 if (debug_infrun)
4522 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4523
4524 delete_step_resume_breakpoint (ecs->event_thread);
4525 if (ecs->event_thread->step_after_step_resume_breakpoint)
4526 {
4527 /* Back when the step-resume breakpoint was inserted, we
4528 were trying to single-step off a breakpoint. Go back to
4529 doing that. */
4530 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4531 ecs->event_thread->stepping_over_breakpoint = 1;
4532 keep_going (ecs);
4533 return;
4534 }
4535 break;
4536
4537 case BPSTAT_WHAT_KEEP_CHECKING:
4538 break;
4539 }
4540
4541 /* We come here if we hit a breakpoint but should not stop for it.
4542 Possibly we also were stepping and should stop for that. So fall
4543 through and test for stepping. But, if not stepping, do not
4544 stop. */
4545
4546 /* In all-stop mode, if we're currently stepping but have stopped in
4547 some other thread, we need to switch back to the stepped thread. */
4548 if (switch_back_to_stepped_thread (ecs))
4549 return;
4550
4551 if (ecs->event_thread->control.step_resume_breakpoint)
4552 {
4553 if (debug_infrun)
4554 fprintf_unfiltered (gdb_stdlog,
4555 "infrun: step-resume breakpoint is inserted\n");
4556
4557 /* Having a step-resume breakpoint overrides anything
4558 else having to do with stepping commands until
4559 that breakpoint is reached. */
4560 keep_going (ecs);
4561 return;
4562 }
4563
4564 if (ecs->event_thread->control.step_range_end == 0)
4565 {
4566 if (debug_infrun)
4567 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4568 /* Likewise if we aren't even stepping. */
4569 keep_going (ecs);
4570 return;
4571 }
4572
4573 /* Re-fetch current thread's frame in case the code above caused
4574 the frame cache to be re-initialized, making our FRAME variable
4575 a dangling pointer. */
4576 frame = get_current_frame ();
4577 gdbarch = get_frame_arch (frame);
4578 fill_in_stop_func (gdbarch, ecs);
4579
4580 /* If stepping through a line, keep going if still within it.
4581
4582 Note that step_range_end is the address of the first instruction
4583 beyond the step range, and NOT the address of the last instruction
4584 within it!
4585
4586 Note also that during reverse execution, we may be stepping
4587 through a function epilogue and therefore must detect when
4588 the current-frame changes in the middle of a line. */
4589
4590 if (pc_in_thread_step_range (stop_pc, ecs->event_thread)
4591 && (execution_direction != EXEC_REVERSE
4592 || frame_id_eq (get_frame_id (frame),
4593 ecs->event_thread->control.step_frame_id)))
4594 {
4595 if (debug_infrun)
4596 fprintf_unfiltered
4597 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4598 paddress (gdbarch, ecs->event_thread->control.step_range_start),
4599 paddress (gdbarch, ecs->event_thread->control.step_range_end));
4600
4601 /* Tentatively re-enable range stepping; `resume' disables it if
4602 necessary (e.g., if we're stepping over a breakpoint or we
4603 have software watchpoints). */
4604 ecs->event_thread->control.may_range_step = 1;
4605
4606 /* When stepping backward, stop at beginning of line range
4607 (unless it's the function entry point, in which case
4608 keep going back to the call point). */
4609 if (stop_pc == ecs->event_thread->control.step_range_start
4610 && stop_pc != ecs->stop_func_start
4611 && execution_direction == EXEC_REVERSE)
4612 end_stepping_range (ecs);
4613 else
4614 keep_going (ecs);
4615
4616 return;
4617 }
4618
4619 /* We stepped out of the stepping range. */
4620
4621 /* If we are stepping at the source level and entered the runtime
4622 loader dynamic symbol resolution code...
4623
4624 EXEC_FORWARD: we keep on single stepping until we exit the run
4625 time loader code and reach the callee's address.
4626
4627 EXEC_REVERSE: we've already executed the callee (backward), and
4628 the runtime loader code is handled just like any other
4629 undebuggable function call. Now we need only keep stepping
4630 backward through the trampoline code, and that's handled further
4631 down, so there is nothing for us to do here. */
4632
4633 if (execution_direction != EXEC_REVERSE
4634 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4635 && in_solib_dynsym_resolve_code (stop_pc))
4636 {
4637 CORE_ADDR pc_after_resolver =
4638 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4639
4640 if (debug_infrun)
4641 fprintf_unfiltered (gdb_stdlog,
4642 "infrun: stepped into dynsym resolve code\n");
4643
4644 if (pc_after_resolver)
4645 {
4646 /* Set up a step-resume breakpoint at the address
4647 indicated by SKIP_SOLIB_RESOLVER. */
4648 struct symtab_and_line sr_sal;
4649
4650 init_sal (&sr_sal);
4651 sr_sal.pc = pc_after_resolver;
4652 sr_sal.pspace = get_frame_program_space (frame);
4653
4654 insert_step_resume_breakpoint_at_sal (gdbarch,
4655 sr_sal, null_frame_id);
4656 }
4657
4658 keep_going (ecs);
4659 return;
4660 }
4661
4662 if (ecs->event_thread->control.step_range_end != 1
4663 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4664 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4665 && get_frame_type (frame) == SIGTRAMP_FRAME)
4666 {
4667 if (debug_infrun)
4668 fprintf_unfiltered (gdb_stdlog,
4669 "infrun: stepped into signal trampoline\n");
4670 /* The inferior, while doing a "step" or "next", has ended up in
4671 a signal trampoline (either by a signal being delivered or by
4672 the signal handler returning). Just single-step until the
4673 inferior leaves the trampoline (either by calling the handler
4674 or returning). */
4675 keep_going (ecs);
4676 return;
4677 }
4678
4679 /* If we're in the return path from a shared library trampoline,
4680 we want to proceed through the trampoline when stepping. */
4681 /* macro/2012-04-25: This needs to come before the subroutine
4682 call check below as on some targets return trampolines look
4683 like subroutine calls (MIPS16 return thunks). */
4684 if (gdbarch_in_solib_return_trampoline (gdbarch,
4685 stop_pc, ecs->stop_func_name)
4686 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4687 {
4688 /* Determine where this trampoline returns. */
4689 CORE_ADDR real_stop_pc;
4690
4691 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4692
4693 if (debug_infrun)
4694 fprintf_unfiltered (gdb_stdlog,
4695 "infrun: stepped into solib return tramp\n");
4696
4697 /* Only proceed through if we know where it's going. */
4698 if (real_stop_pc)
4699 {
4700 /* And put the step-breakpoint there and go until there. */
4701 struct symtab_and_line sr_sal;
4702
4703 init_sal (&sr_sal); /* initialize to zeroes */
4704 sr_sal.pc = real_stop_pc;
4705 sr_sal.section = find_pc_overlay (sr_sal.pc);
4706 sr_sal.pspace = get_frame_program_space (frame);
4707
4708 /* Do not specify what the fp should be when we stop since
4709 on some machines the prologue is where the new fp value
4710 is established. */
4711 insert_step_resume_breakpoint_at_sal (gdbarch,
4712 sr_sal, null_frame_id);
4713
4714 /* Restart without fiddling with the step ranges or
4715 other state. */
4716 keep_going (ecs);
4717 return;
4718 }
4719 }
4720
4721 /* Check for subroutine calls. The check for the current frame
4722 equalling the step ID is not necessary - the check of the
4723 previous frame's ID is sufficient - but it is a common case and
4724 cheaper than checking the previous frame's ID.
4725
4726 NOTE: frame_id_eq will never report two invalid frame IDs as
4727 being equal, so to get into this block, both the current and
4728 previous frame must have valid frame IDs. */
4729 /* The outer_frame_id check is a heuristic to detect stepping
4730 through startup code. If we step over an instruction which
4731 sets the stack pointer from an invalid value to a valid value,
4732 we may detect that as a subroutine call from the mythical
4733 "outermost" function. This could be fixed by marking
4734 outermost frames as !stack_p,code_p,special_p. Then the
4735 initial outermost frame, before sp was valid, would
4736 have code_addr == &_start. See the comment in frame_id_eq
4737 for more. */
4738 if (!frame_id_eq (get_stack_frame_id (frame),
4739 ecs->event_thread->control.step_stack_frame_id)
4740 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4741 ecs->event_thread->control.step_stack_frame_id)
4742 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
4743 outer_frame_id)
4744 || step_start_function != find_pc_function (stop_pc))))
4745 {
4746 CORE_ADDR real_stop_pc;
4747
4748 if (debug_infrun)
4749 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4750
4751 if ((ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
4752 || ((ecs->event_thread->control.step_range_end == 1)
4753 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4754 ecs->stop_func_start)))
4755 {
4756 /* I presume that step_over_calls is only 0 when we're
4757 supposed to be stepping at the assembly language level
4758 ("stepi"). Just stop. */
4759 /* Also, maybe we just did a "nexti" inside a prolog, so we
4760 thought it was a subroutine call but it was not. Stop as
4761 well. FENN */
4762 /* And this works the same backward as frontward. MVS */
4763 end_stepping_range (ecs);
4764 return;
4765 }
4766
4767 /* Reverse stepping through solib trampolines. */
4768
4769 if (execution_direction == EXEC_REVERSE
4770 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
4771 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4772 || (ecs->stop_func_start == 0
4773 && in_solib_dynsym_resolve_code (stop_pc))))
4774 {
4775 /* Any solib trampoline code can be handled in reverse
4776 by simply continuing to single-step. We have already
4777 executed the solib function (backwards), and a few
4778 steps will take us back through the trampoline to the
4779 caller. */
4780 keep_going (ecs);
4781 return;
4782 }
4783
4784 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4785 {
4786 /* We're doing a "next".
4787
4788 Normal (forward) execution: set a breakpoint at the
4789 callee's return address (the address at which the caller
4790 will resume).
4791
4792 Reverse (backward) execution: set the step-resume
4793 breakpoint at the start of the function that we just
4794 stepped into (backwards), and continue to there. When we
4795 get there, we'll need to single-step back to the caller. */
4796
4797 if (execution_direction == EXEC_REVERSE)
4798 {
4799 /* If we're already at the start of the function, we've either
4800 just stepped backward into a single instruction function,
4801 or stepped back out of a signal handler to the first instruction
4802 of the function. Just keep going, which will single-step back
4803 to the caller. */
4804 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
4805 {
4806 struct symtab_and_line sr_sal;
4807
4808 /* Normal function call return (static or dynamic). */
4809 init_sal (&sr_sal);
4810 sr_sal.pc = ecs->stop_func_start;
4811 sr_sal.pspace = get_frame_program_space (frame);
4812 insert_step_resume_breakpoint_at_sal (gdbarch,
4813 sr_sal, null_frame_id);
4814 }
4815 }
4816 else
4817 insert_step_resume_breakpoint_at_caller (frame);
4818
4819 keep_going (ecs);
4820 return;
4821 }
4822
4823 /* If we are in a function call trampoline (a stub between the
4824 calling routine and the real function), locate the real
4825 function. That's what tells us (a) whether we want to step
4826 into it at all, and (b) what prologue we want to run to the
4827 end of, if we do step into it. */
4828 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4829 if (real_stop_pc == 0)
4830 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4831 if (real_stop_pc != 0)
4832 ecs->stop_func_start = real_stop_pc;
4833
4834 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4835 {
4836 struct symtab_and_line sr_sal;
4837
4838 init_sal (&sr_sal);
4839 sr_sal.pc = ecs->stop_func_start;
4840 sr_sal.pspace = get_frame_program_space (frame);
4841
4842 insert_step_resume_breakpoint_at_sal (gdbarch,
4843 sr_sal, null_frame_id);
4844 keep_going (ecs);
4845 return;
4846 }
4847
4848 /* If we have line number information for the function we are
4849 thinking of stepping into and the function isn't on the skip
4850 list, step into it.
4851
4852 If there are several symtabs at that PC (e.g. with include
4853 files), we just want to know whether *any* of them have line
4854 numbers. find_pc_line handles this. */
4855 {
4856 struct symtab_and_line tmp_sal;
4857
4858 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4859 if (tmp_sal.line != 0
4860 && !function_name_is_marked_for_skip (ecs->stop_func_name,
4861 &tmp_sal))
4862 {
4863 if (execution_direction == EXEC_REVERSE)
4864 handle_step_into_function_backward (gdbarch, ecs);
4865 else
4866 handle_step_into_function (gdbarch, ecs);
4867 return;
4868 }
4869 }
4870
4871 /* If we have no line number and the step-stop-if-no-debug is
4872 set, we stop the step so that the user has a chance to switch
4873 to assembly mode. */
4874 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4875 && step_stop_if_no_debug)
4876 {
4877 end_stepping_range (ecs);
4878 return;
4879 }
4880
4881 if (execution_direction == EXEC_REVERSE)
4882 {
4883 /* If we're already at the start of the function, we've either just
4884 stepped backward into a single instruction function without line
4885 number info, or stepped back out of a signal handler to the first
4886 instruction of the function without line number info. Just keep
4887 going, which will single-step back to the caller. */
4888 if (ecs->stop_func_start != stop_pc)
4889 {
4890 /* Set a breakpoint at callee's start address.
4891 From there we can step once and be back in the caller. */
4892 struct symtab_and_line sr_sal;
4893
4894 init_sal (&sr_sal);
4895 sr_sal.pc = ecs->stop_func_start;
4896 sr_sal.pspace = get_frame_program_space (frame);
4897 insert_step_resume_breakpoint_at_sal (gdbarch,
4898 sr_sal, null_frame_id);
4899 }
4900 }
4901 else
4902 /* Set a breakpoint at callee's return address (the address
4903 at which the caller will resume). */
4904 insert_step_resume_breakpoint_at_caller (frame);
4905
4906 keep_going (ecs);
4907 return;
4908 }
4909
4910 /* Reverse stepping through solib trampolines. */
4911
4912 if (execution_direction == EXEC_REVERSE
4913 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4914 {
4915 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4916 || (ecs->stop_func_start == 0
4917 && in_solib_dynsym_resolve_code (stop_pc)))
4918 {
4919 /* Any solib trampoline code can be handled in reverse
4920 by simply continuing to single-step. We have already
4921 executed the solib function (backwards), and a few
4922 steps will take us back through the trampoline to the
4923 caller. */
4924 keep_going (ecs);
4925 return;
4926 }
4927 else if (in_solib_dynsym_resolve_code (stop_pc))
4928 {
4929 /* Stepped backward into the solib dynsym resolver.
4930 Set a breakpoint at its start and continue, then
4931 one more step will take us out. */
4932 struct symtab_and_line sr_sal;
4933
4934 init_sal (&sr_sal);
4935 sr_sal.pc = ecs->stop_func_start;
4936 sr_sal.pspace = get_frame_program_space (frame);
4937 insert_step_resume_breakpoint_at_sal (gdbarch,
4938 sr_sal, null_frame_id);
4939 keep_going (ecs);
4940 return;
4941 }
4942 }
4943
4944 stop_pc_sal = find_pc_line (stop_pc, 0);
4945
4946 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4947 the trampoline processing logic; however, there are some trampolines
4948 that have no names, so we should do trampoline handling first. */
4949 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4950 && ecs->stop_func_name == NULL
4951 && stop_pc_sal.line == 0)
4952 {
4953 if (debug_infrun)
4954 fprintf_unfiltered (gdb_stdlog,
4955 "infrun: stepped into undebuggable function\n");
4956
4957 /* The inferior just stepped into, or returned to, an
4958 undebuggable function (where there is no debugging information
4959 and no line number corresponding to the address where the
4960 inferior stopped). Since we want to skip this kind of code,
4961 we keep going until the inferior returns from this
4962 function - unless the user has asked us not to (via
4963 set step-mode) or we no longer know how to get back
4964 to the call site. */
4965 if (step_stop_if_no_debug
4966 || !frame_id_p (frame_unwind_caller_id (frame)))
4967 {
4968 /* If we have no line number and the step-stop-if-no-debug
4969 is set, we stop the step so that the user has a chance to
4970 switch to assembly mode. */
4971 end_stepping_range (ecs);
4972 return;
4973 }
4974 else
4975 {
4976 /* Set a breakpoint at callee's return address (the address
4977 at which the caller will resume). */
4978 insert_step_resume_breakpoint_at_caller (frame);
4979 keep_going (ecs);
4980 return;
4981 }
4982 }
4983
4984 if (ecs->event_thread->control.step_range_end == 1)
4985 {
4986 /* It is stepi or nexti. We always want to stop stepping after
4987 one instruction. */
4988 if (debug_infrun)
4989 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
4990 end_stepping_range (ecs);
4991 return;
4992 }
4993
4994 if (stop_pc_sal.line == 0)
4995 {
4996 /* We have no line number information. That means to stop
4997 stepping (does this always happen right after one instruction,
4998 when we do "s" in a function with no line numbers,
4999 or can this happen as a result of a return or longjmp?). */
5000 if (debug_infrun)
5001 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
5002 end_stepping_range (ecs);
5003 return;
5004 }
5005
5006 /* Look for "calls" to inlined functions, part one. If the inline
5007 frame machinery detected some skipped call sites, we have entered
5008 a new inline function. */
5009
5010 if (frame_id_eq (get_frame_id (get_current_frame ()),
5011 ecs->event_thread->control.step_frame_id)
5012 && inline_skipped_frames (ecs->ptid))
5013 {
5014 struct symtab_and_line call_sal;
5015
5016 if (debug_infrun)
5017 fprintf_unfiltered (gdb_stdlog,
5018 "infrun: stepped into inlined function\n");
5019
5020 find_frame_sal (get_current_frame (), &call_sal);
5021
5022 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
5023 {
5024 /* For "step", we're going to stop. But if the call site
5025 for this inlined function is on the same source line as
5026 we were previously stepping, go down into the function
5027 first. Otherwise stop at the call site. */
5028
5029 if (call_sal.line == ecs->event_thread->current_line
5030 && call_sal.symtab == ecs->event_thread->current_symtab)
5031 step_into_inline_frame (ecs->ptid);
5032
5033 end_stepping_range (ecs);
5034 return;
5035 }
5036 else
5037 {
5038 /* For "next", we should stop at the call site if it is on a
5039 different source line. Otherwise continue through the
5040 inlined function. */
5041 if (call_sal.line == ecs->event_thread->current_line
5042 && call_sal.symtab == ecs->event_thread->current_symtab)
5043 keep_going (ecs);
5044 else
5045 end_stepping_range (ecs);
5046 return;
5047 }
5048 }
5049
5050 /* Look for "calls" to inlined functions, part two. If we are still
5051 in the same real function we were stepping through, but we have
5052 to go further up to find the exact frame ID, we are stepping
5053 through a more inlined call beyond its call site. */
5054
5055 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5056 && !frame_id_eq (get_frame_id (get_current_frame ()),
5057 ecs->event_thread->control.step_frame_id)
5058 && stepped_in_from (get_current_frame (),
5059 ecs->event_thread->control.step_frame_id))
5060 {
5061 if (debug_infrun)
5062 fprintf_unfiltered (gdb_stdlog,
5063 "infrun: stepping through inlined function\n");
5064
5065 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5066 keep_going (ecs);
5067 else
5068 end_stepping_range (ecs);
5069 return;
5070 }
5071
5072 if ((stop_pc == stop_pc_sal.pc)
5073 && (ecs->event_thread->current_line != stop_pc_sal.line
5074 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5075 {
5076 /* We are at the start of a different line. So stop. Note that
5077 we don't stop if we step into the middle of a different line.
5078 That is said to make things like for (;;) statements work
5079 better. */
5080 if (debug_infrun)
5081 fprintf_unfiltered (gdb_stdlog,
5082 "infrun: stepped to a different line\n");
5083 end_stepping_range (ecs);
5084 return;
5085 }
5086
5087 /* We aren't done stepping.
5088
5089 Optimize by setting the stepping range to the line.
5090 (We might not be in the original line, but if we entered a
5091 new line in mid-statement, we continue stepping. This makes
5092 things like for(;;) statements work better.) */
5093
5094 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5095 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5096 ecs->event_thread->control.may_range_step = 1;
5097 set_step_info (frame, stop_pc_sal);
5098
5099 if (debug_infrun)
5100 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5101 keep_going (ecs);
5102 }
5103
5104 /* In all-stop mode, if we're currently stepping but have stopped in
5105 some other thread, we may need to switch back to the stepped
5106 thread. Returns true if we set the inferior running, false if we left
5107 it stopped (and the event needs further processing). */
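/* Illustrative scenario (thread numbers are hypothetical): the user
   types "next" in thread 1, while thread 2 happens to hit a breakpoint
   whose condition evaluates to false.  handle_inferior_event ends up
   here with thread 2 as the event thread; once any pending step-overs
   have been dealt with, we switch back to thread 1 and set it running
   again so the original "next" can finish.  */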
5108
5109 static int
5110 switch_back_to_stepped_thread (struct execution_control_state *ecs)
5111 {
5112 if (!non_stop)
5113 {
5114 struct thread_info *tp;
5115 struct thread_info *stepping_thread;
5116 struct thread_info *step_over;
5117
5118 /* If any thread is blocked on some internal breakpoint, and we
5119 simply need to step over that breakpoint to get it going
5120 again, do that first. */
5121
5122 /* However, if we see an event for the stepping thread, then we
5123 know all other threads have been moved past their breakpoints
5124 already. Let the caller check whether the step is finished,
5125 etc., before deciding to move it past a breakpoint. */
5126 if (ecs->event_thread->control.step_range_end != 0)
5127 return 0;
5128
5129 /* Check if the current thread is blocked on an incomplete
5130 step-over, interrupted by a random signal. */
5131 if (ecs->event_thread->control.trap_expected
5132 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5133 {
5134 if (debug_infrun)
5135 {
5136 fprintf_unfiltered (gdb_stdlog,
5137 "infrun: need to finish step-over of [%s]\n",
5138 target_pid_to_str (ecs->event_thread->ptid));
5139 }
5140 keep_going (ecs);
5141 return 1;
5142 }
5143
5144 /* Check if the current thread is blocked by a single-step
5145 breakpoint of another thread. */
5146 if (ecs->hit_singlestep_breakpoint)
5147 {
5148 if (debug_infrun)
5149 {
5150 fprintf_unfiltered (gdb_stdlog,
5151 "infrun: need to step [%s] over single-step "
5152 "breakpoint\n",
5153 target_pid_to_str (ecs->ptid));
5154 }
5155 keep_going (ecs);
5156 return 1;
5157 }
5158
5159 /* Otherwise, we no longer expect a trap in the current thread.
5160 Clear the trap_expected flag before switching back -- this is
5161 what keep_going does as well, if we call it. */
5162 ecs->event_thread->control.trap_expected = 0;
5163
5164 /* Likewise, clear the signal if it should not be passed. */
5165 if (!signal_program[ecs->event_thread->suspend.stop_signal])
5166 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5167
5168 /* If scheduler locking applies even if not stepping, there's no
5169 need to walk over threads. Above we've checked whether the
5170 current thread is stepping. If some other thread not the
5171 event thread is stepping, then it must be that scheduler
5172 locking is not in effect. */
5173 if (schedlock_applies (0))
5174 return 0;
5175
5176 /* Look for the stepping/nexting thread, and check if any thread
5177 other than the stepping thread needs to start a
5178 step-over. Do all step-overs before actually proceeding with
5179 step/next/etc. */
5180 stepping_thread = NULL;
5181 step_over = NULL;
5182 ALL_NON_EXITED_THREADS (tp)
5183 {
5184 /* Ignore threads of processes we're not resuming. */
5185 if (!sched_multi
5186 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
5187 continue;
5188
5189 /* When stepping over a breakpoint, we lock all threads
5190 except the one that needs to move past the breakpoint.
5191 If a non-event thread has this set, the "incomplete
5192 step-over" check above should have caught it earlier. */
5193 gdb_assert (!tp->control.trap_expected);
5194
5195 /* Did we find the stepping thread? */
5196 if (tp->control.step_range_end)
5197 {
5198 /* Yep. There should only be one, though. */
5199 gdb_assert (stepping_thread == NULL);
5200
5201 /* The event thread is handled at the top, before we
5202 enter this loop. */
5203 gdb_assert (tp != ecs->event_thread);
5204
5205 /* If some thread other than the event thread is
5206 stepping, then scheduler locking can't be in effect,
5207 otherwise we wouldn't have resumed the current event
5208 thread in the first place. */
5209 gdb_assert (!schedlock_applies (1));
5210
5211 stepping_thread = tp;
5212 }
5213 else if (thread_still_needs_step_over (tp))
5214 {
5215 step_over = tp;
5216
5217 /* At the top we've returned early if the event thread
5218 is stepping. If some other thread not the event
5219 thread is stepping, then scheduler locking can't be
5220 in effect, and we can resume this thread. No need to
5221 keep looking for the stepping thread then. */
5222 break;
5223 }
5224 }
5225
5226 if (step_over != NULL)
5227 {
5228 tp = step_over;
5229 if (debug_infrun)
5230 {
5231 fprintf_unfiltered (gdb_stdlog,
5232 "infrun: need to step-over [%s]\n",
5233 target_pid_to_str (tp->ptid));
5234 }
5235
5236 /* Only the stepping thread should have this set. */
5237 gdb_assert (tp->control.step_range_end == 0);
5238
5239 ecs->ptid = tp->ptid;
5240 ecs->event_thread = tp;
5241 switch_to_thread (ecs->ptid);
5242 keep_going (ecs);
5243 return 1;
5244 }
5245
5246 if (stepping_thread != NULL)
5247 {
5248 struct frame_info *frame;
5249 struct gdbarch *gdbarch;
5250
5251 tp = stepping_thread;
5252
5253 /* If the stepping thread exited, then don't try to switch
5254 back and resume it, which could fail in several different
5255 ways depending on the target. Instead, just keep going.
5256
5257 We can find a stepping dead thread in the thread list in
5258 two cases:
5259
5260 - The target supports thread exit events, and when the
5261 target tries to delete the thread from the thread list,
5262 inferior_ptid pointed at the exiting thread. In such
5263 case, calling delete_thread does not really remove the
5264 thread from the list; instead, the thread is left listed,
5265 with 'exited' state.
5266
5267 - The target's debug interface does not support thread
5268 exit events, and so we have no idea whatsoever if the
5269 previously stepping thread is still alive. For that
5270 reason, we need to synchronously query the target
5271 now. */
5272 if (is_exited (tp->ptid)
5273 || !target_thread_alive (tp->ptid))
5274 {
5275 if (debug_infrun)
5276 fprintf_unfiltered (gdb_stdlog,
5277 "infrun: not switching back to "
5278 "stepped thread, it has vanished\n");
5279
5280 delete_thread (tp->ptid);
5281 keep_going (ecs);
5282 return 1;
5283 }
5284
5285 if (debug_infrun)
5286 fprintf_unfiltered (gdb_stdlog,
5287 "infrun: switching back to stepped thread\n");
5288
5289 ecs->event_thread = tp;
5290 ecs->ptid = tp->ptid;
5291 context_switch (ecs->ptid);
5292
5293 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
5294 frame = get_current_frame ();
5295 gdbarch = get_frame_arch (frame);
5296
5297 /* If the PC of the thread we were trying to single-step has
5298 changed, then that thread has trapped or been signaled,
5299 but the event has not been reported to GDB yet. Re-poll
5300 the target looking for this particular thread's event
5301 (i.e. temporarily enable schedlock) by:
5302
5303 - setting a break at the current PC
5304 - resuming that particular thread, only (by setting
5305 trap expected)
5306
5307 This prevents us from continuously moving the single-step
5308 breakpoint forward, one instruction at a time,
5309 overstepping. */
5310
5311 if (gdbarch_software_single_step_p (gdbarch)
5312 && stop_pc != tp->prev_pc)
5313 {
5314 if (debug_infrun)
5315 fprintf_unfiltered (gdb_stdlog,
5316 "infrun: expected thread advanced also\n");
5317
5318 insert_single_step_breakpoint (get_frame_arch (frame),
5319 get_frame_address_space (frame),
5320 stop_pc);
5321 singlestep_breakpoints_inserted_p = 1;
5322 ecs->event_thread->control.trap_expected = 1;
5323 singlestep_ptid = inferior_ptid;
5324 singlestep_pc = stop_pc;
5325
5326 resume (0, GDB_SIGNAL_0);
5327 prepare_to_wait (ecs);
5328 }
5329 else
5330 {
5331 if (debug_infrun)
5332 fprintf_unfiltered (gdb_stdlog,
5333 "infrun: expected thread still "
5334 "hasn't advanced\n");
5335 keep_going (ecs);
5336 }
5337
5338 return 1;
5339 }
5340 }
5341 return 0;
5342 }
5343
5344 /* Is thread TP in the middle of single-stepping? */
5345
5346 static int
5347 currently_stepping (struct thread_info *tp)
5348 {
5349 return ((tp->control.step_range_end
5350 && tp->control.step_resume_breakpoint == NULL)
5351 || tp->control.trap_expected
5352 || bpstat_should_step ());
5353 }
5354
5355 /* Inferior has stepped into a subroutine call with source code that
5356 we should not step over. Step to the first line of code in
5357 it. */
5358
5359 static void
5360 handle_step_into_function (struct gdbarch *gdbarch,
5361 struct execution_control_state *ecs)
5362 {
5363 struct symtab *s;
5364 struct symtab_and_line stop_func_sal, sr_sal;
5365
5366 fill_in_stop_func (gdbarch, ecs);
5367
5368 s = find_pc_symtab (stop_pc);
5369 if (s && s->language != language_asm)
5370 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5371 ecs->stop_func_start);
5372
5373 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
5374 /* Use the step_resume_break to step until the end of the prologue,
5375 even if that involves jumps (as it seems to on the vax under
5376 4.2). */
5377 /* If the prologue ends in the middle of a source line, continue to
5378 the end of that source line (if it is still within the function).
5379 Otherwise, just go to end of prologue. */
5380 if (stop_func_sal.end
5381 && stop_func_sal.pc != ecs->stop_func_start
5382 && stop_func_sal.end < ecs->stop_func_end)
5383 ecs->stop_func_start = stop_func_sal.end;
5384
5385 /* Architectures which require breakpoint adjustment might not be able
5386 to place a breakpoint at the computed address. If so, the test
5387 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
5388 ecs->stop_func_start to an address at which a breakpoint may be
5389 legitimately placed.
5390
5391 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
5392 made, GDB will enter an infinite loop when stepping through
5393 optimized code consisting of VLIW instructions which contain
5394 subinstructions corresponding to different source lines. On
5395 FR-V, it's not permitted to place a breakpoint on any but the
5396 first subinstruction of a VLIW instruction. When a breakpoint is
5397 set, GDB will adjust the breakpoint address to the beginning of
5398 the VLIW instruction. Thus, we need to make the corresponding
5399 adjustment here when computing the stop address. */
5400
5401 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
5402 {
5403 ecs->stop_func_start
5404 = gdbarch_adjust_breakpoint_address (gdbarch,
5405 ecs->stop_func_start);
5406 }
5407
5408 if (ecs->stop_func_start == stop_pc)
5409 {
5410 /* We are already there: stop now. */
5411 end_stepping_range (ecs);
5412 return;
5413 }
5414 else
5415 {
5416 /* Put the step-resume breakpoint there and go until we reach it. */
5417 init_sal (&sr_sal); /* initialize to zeroes */
5418 sr_sal.pc = ecs->stop_func_start;
5419 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
5420 sr_sal.pspace = get_frame_program_space (get_current_frame ());
5421
5422 /* Do not specify what the fp should be when we stop since on
5423 some machines the prologue is where the new fp value is
5424 established. */
5425 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
5426
5427 /* And make sure stepping stops right away then. */
5428 ecs->event_thread->control.step_range_end
5429 = ecs->event_thread->control.step_range_start;
5430 }
5431 keep_going (ecs);
5432 }
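/* Illustrative walkthrough (program and names hypothetical): a "step"
   from main() lands on the first instruction of foo().  fill_in_stop_func
   supplies foo's bounds, gdbarch_skip_prologue advances stop_func_start
   past the prologue, and a momentary bp_step_resume breakpoint is planted
   there.  keep_going resumes the thread; when that breakpoint is hit,
   stepping stops and GDB reports foo's first post-prologue source
   line.  */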
5433
5434 /* Inferior has stepped backward into a subroutine call with source
5435 code that we should not step over. Step to the beginning of the
5436 last line of code in it. */
5437
5438 static void
5439 handle_step_into_function_backward (struct gdbarch *gdbarch,
5440 struct execution_control_state *ecs)
5441 {
5442 struct symtab *s;
5443 struct symtab_and_line stop_func_sal;
5444
5445 fill_in_stop_func (gdbarch, ecs);
5446
5447 s = find_pc_symtab (stop_pc);
5448 if (s && s->language != language_asm)
5449 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5450 ecs->stop_func_start);
5451
5452 stop_func_sal = find_pc_line (stop_pc, 0);
5453
5454 /* OK, we're just going to keep stepping here. */
5455 if (stop_func_sal.pc == stop_pc)
5456 {
5457 /* We're there already. Just stop stepping now. */
5458 end_stepping_range (ecs);
5459 }
5460 else
5461 {
5462 /* Else just reset the step range and keep going.
5463 No step-resume breakpoint; they don't work for
5464 epilogues, which can have multiple entry paths. */
5465 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5466 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5467 keep_going (ecs);
5468 }
5469 return;
5470 }
5471
5472 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5473 This is used both to skip over functions and to skip over other code. */
5474
5475 static void
5476 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5477 struct symtab_and_line sr_sal,
5478 struct frame_id sr_id,
5479 enum bptype sr_type)
5480 {
5481 /* There should never be more than one step-resume or longjmp-resume
5482 breakpoint per thread, so we should never be setting a new
5483 step_resume_breakpoint when one is already active. */
5484 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5485 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5486
5487 if (debug_infrun)
5488 fprintf_unfiltered (gdb_stdlog,
5489 "infrun: inserting step-resume breakpoint at %s\n",
5490 paddress (gdbarch, sr_sal.pc));
5491
5492 inferior_thread ()->control.step_resume_breakpoint
5493 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5494 }
5495
5496 void
5497 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5498 struct symtab_and_line sr_sal,
5499 struct frame_id sr_id)
5500 {
5501 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5502 sr_sal, sr_id,
5503 bp_step_resume);
5504 }
5505
5506 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5507 This is used to skip a potential signal handler.
5508
5509 This is called with the interrupted function's frame. The signal
5510 handler, when it returns, will resume the interrupted function at
5511 RETURN_FRAME.pc. */
5512
5513 static void
5514 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5515 {
5516 struct symtab_and_line sr_sal;
5517 struct gdbarch *gdbarch;
5518
5519 gdb_assert (return_frame != NULL);
5520 init_sal (&sr_sal); /* initialize to zeros */
5521
5522 gdbarch = get_frame_arch (return_frame);
5523 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5524 sr_sal.section = find_pc_overlay (sr_sal.pc);
5525 sr_sal.pspace = get_frame_program_space (return_frame);
5526
5527 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
5528 get_stack_frame_id (return_frame),
5529 bp_hp_step_resume);
5530 }
5531
5532 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
5533 is used to skip a function after stepping into it (for "next" or if
5534 the called function has no debugging information).
5535
5536 The current function has almost always been reached by single
5537 stepping a call or return instruction. NEXT_FRAME belongs to the
5538 current function, and the breakpoint will be set at the caller's
5539 resume address.
5540
5541 This is a separate function rather than reusing
5542 insert_hp_step_resume_breakpoint_at_frame in order to avoid
5543 get_prev_frame, which may stop prematurely (see the implementation
5544 of frame_unwind_caller_id for an example). */
5545
5546 static void
5547 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5548 {
5549 struct symtab_and_line sr_sal;
5550 struct gdbarch *gdbarch;
5551
5552 /* We shouldn't have gotten here if we don't know where the call site
5553 is. */
5554 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5555
5556 init_sal (&sr_sal); /* initialize to zeros */
5557
5558 gdbarch = frame_unwind_caller_arch (next_frame);
5559 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5560 frame_unwind_caller_pc (next_frame));
5561 sr_sal.section = find_pc_overlay (sr_sal.pc);
5562 sr_sal.pspace = frame_unwind_program_space (next_frame);
5563
5564 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5565 frame_unwind_caller_id (next_frame));
5566 }
5567
5568 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5569 new breakpoint at the target of a jmp_buf. The handling of
5570 longjmp-resume uses the same mechanisms used for handling
5571 "step-resume" breakpoints. */
5572
5573 static void
5574 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5575 {
5576 /* There should never be more than one longjmp-resume breakpoint per
5577 thread, so we should never be setting a new
5578 longjmp_resume_breakpoint when one is already active. */
5579 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
5580
5581 if (debug_infrun)
5582 fprintf_unfiltered (gdb_stdlog,
5583 "infrun: inserting longjmp-resume breakpoint at %s\n",
5584 paddress (gdbarch, pc));
5585
5586 inferior_thread ()->control.exception_resume_breakpoint =
5587 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5588 }
5589
5590 /* Insert an exception resume breakpoint. TP is the thread throwing
5591 the exception. The block B is the block of the unwinder debug hook
5592 function. FRAME is the frame corresponding to the call to this
5593 function. SYM is the symbol of the function argument holding the
5594 target PC of the exception. */
5595
5596 static void
5597 insert_exception_resume_breakpoint (struct thread_info *tp,
5598 const struct block *b,
5599 struct frame_info *frame,
5600 struct symbol *sym)
5601 {
5602 volatile struct gdb_exception e;
5603
5604 /* We want to ignore errors here. */
5605 TRY_CATCH (e, RETURN_MASK_ERROR)
5606 {
5607 struct symbol *vsym;
5608 struct value *value;
5609 CORE_ADDR handler;
5610 struct breakpoint *bp;
5611
5612 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
5613 value = read_var_value (vsym, frame);
5614 /* If the value was optimized out, revert to the old behavior. */
5615 if (! value_optimized_out (value))
5616 {
5617 handler = value_as_address (value);
5618
5619 if (debug_infrun)
5620 fprintf_unfiltered (gdb_stdlog,
5621 "infrun: exception resume at %lx\n",
5622 (unsigned long) handler);
5623
5624 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5625 handler, bp_exception_resume);
5626
5627 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
5628 frame = NULL;
5629
5630 bp->thread = tp->num;
5631 inferior_thread ()->control.exception_resume_breakpoint = bp;
5632 }
5633 }
5634 }
5635
5636 /* A helper for check_exception_resume that sets an
5637 exception-breakpoint based on a SystemTap probe. */
5638
5639 static void
5640 insert_exception_resume_from_probe (struct thread_info *tp,
5641 const struct bound_probe *probe,
5642 struct frame_info *frame)
5643 {
5644 struct value *arg_value;
5645 CORE_ADDR handler;
5646 struct breakpoint *bp;
5647
5648 arg_value = probe_safe_evaluate_at_pc (frame, 1);
5649 if (!arg_value)
5650 return;
5651
5652 handler = value_as_address (arg_value);
5653
5654 if (debug_infrun)
5655 fprintf_unfiltered (gdb_stdlog,
5656 "infrun: exception resume at %s\n",
5657 paddress (get_objfile_arch (probe->objfile),
5658 handler));
5659
5660 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5661 handler, bp_exception_resume);
5662 bp->thread = tp->num;
5663 inferior_thread ()->control.exception_resume_breakpoint = bp;
5664 }
5665
5666 /* This is called when an exception has been intercepted. Check to
5667 see whether the exception's destination is of interest, and if so,
5668 set an exception resume breakpoint there. */
5669
5670 static void
5671 check_exception_resume (struct execution_control_state *ecs,
5672 struct frame_info *frame)
5673 {
5674 volatile struct gdb_exception e;
5675 struct bound_probe probe;
5676 struct symbol *func;
5677
5678 /* First see if this exception unwinding breakpoint was set via a
5679 SystemTap probe point. If so, the probe has two arguments: the
5680 CFA and the HANDLER. We ignore the CFA, extract the handler, and
5681 set a breakpoint there. */
5682 probe = find_probe_by_pc (get_frame_pc (frame));
5683 if (probe.probe)
5684 {
5685 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
5686 return;
5687 }
5688
5689 func = get_frame_function (frame);
5690 if (!func)
5691 return;
5692
5693 TRY_CATCH (e, RETURN_MASK_ERROR)
5694 {
5695 const struct block *b;
5696 struct block_iterator iter;
5697 struct symbol *sym;
5698 int argno = 0;
5699
5700 /* The exception breakpoint is a thread-specific breakpoint on
5701 the unwinder's debug hook, declared as:
5702
5703 void _Unwind_DebugHook (void *cfa, void *handler);
5704
5705 The CFA argument indicates the frame to which control is
5706 about to be transferred. HANDLER is the destination PC.
5707
5708 We ignore the CFA and set a temporary breakpoint at HANDLER.
5709 This is not extremely efficient but it avoids issues in gdb
5710 with computing the DWARF CFA, and it also works even in weird
5711 cases such as throwing an exception from inside a signal
5712 handler. */
5713
5714 b = SYMBOL_BLOCK_VALUE (func);
5715 ALL_BLOCK_SYMBOLS (b, iter, sym)
5716 {
5717 if (!SYMBOL_IS_ARGUMENT (sym))
5718 continue;
5719
5720 if (argno == 0)
5721 ++argno;
5722 else
5723 {
5724 insert_exception_resume_breakpoint (ecs->event_thread,
5725 b, frame, sym);
5726 break;
5727 }
5728 }
5729 }
5730 }
5731
5732 static void
5733 stop_waiting (struct execution_control_state *ecs)
5734 {
5735 if (debug_infrun)
5736 fprintf_unfiltered (gdb_stdlog, "infrun: stop_waiting\n");
5737
5738 clear_step_over_info ();
5739
5740 /* Let callers know we don't want to wait for the inferior anymore. */
5741 ecs->wait_some_more = 0;
5742 }
5743
5744 /* Called when we should continue running the inferior, because the
5745 current event doesn't cause a user visible stop. This does the
5746 resuming part; waiting for the next event is done elsewhere. */
5747
5748 static void
5749 keep_going (struct execution_control_state *ecs)
5750 {
5751 /* Make sure normal_stop is called if we get a QUIT handled before
5752 reaching resume. */
5753 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5754
5755 /* Save the pc before execution, to compare with pc after stop. */
5756 ecs->event_thread->prev_pc
5757 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5758
5759 if (ecs->event_thread->control.trap_expected
5760 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5761 {
5762 /* We haven't yet gotten our trap, and either: intercepted a
5763 non-signal event (e.g., a fork); or took a signal which we
5764 are supposed to pass through to the inferior. Simply
5765 continue. */
5766 discard_cleanups (old_cleanups);
5767 resume (currently_stepping (ecs->event_thread),
5768 ecs->event_thread->suspend.stop_signal);
5769 }
5770 else
5771 {
5772 volatile struct gdb_exception e;
5773 struct regcache *regcache = get_current_regcache ();
5774
5775 /* Either the trap was not expected, but we are continuing
5776 anyway (if we got a signal, the user asked it be passed to
5777 the child)
5778 -- or --
5779 We got our expected trap, but decided we should resume from
5780 it.
5781
5782 We're going to run this baby now!
5783
5784 Note that insert_breakpoints won't try to re-insert
5785 already inserted breakpoints. Therefore, we don't
5786 care if breakpoints were already inserted, or not. */
5787
5788 /* If we need to step over a breakpoint, and we're not using
5789 displaced stepping to do so, insert all breakpoints
5790 (watchpoints, etc.) but the one we're stepping over, step one
5791 instruction, and then re-insert the breakpoint when that step
5792 is finished. */
5793 if ((ecs->hit_singlestep_breakpoint
5794 || thread_still_needs_step_over (ecs->event_thread))
5795 && !use_displaced_stepping (get_regcache_arch (regcache)))
5796 {
5797 set_step_over_info (get_regcache_aspace (regcache),
5798 regcache_read_pc (regcache));
5799 }
5800 else
5801 clear_step_over_info ();
5802
5803 /* Stop stepping if inserting breakpoints fails. */
5804 TRY_CATCH (e, RETURN_MASK_ERROR)
5805 {
5806 insert_breakpoints ();
5807 }
5808 if (e.reason < 0)
5809 {
5810 exception_print (gdb_stderr, e);
5811 stop_waiting (ecs);
5812 return;
5813 }
5814
5815 ecs->event_thread->control.trap_expected
5816 = (ecs->event_thread->stepping_over_breakpoint
5817 || ecs->hit_singlestep_breakpoint);
5818
5819 /* Do not deliver GDB_SIGNAL_TRAP (except when the user
5820 explicitly specifies that such a signal should be delivered
5821 to the target program). Typically, that would occur when a
5822 user is debugging a target monitor on a simulator: the target
5823 monitor sets a breakpoint; the simulator encounters this
5824 breakpoint and halts the simulation handing control to GDB;
5825 GDB, noting that the stop address doesn't map to any known
5826 breakpoint, returns control back to the simulator; the
5827 simulator then delivers the hardware equivalent of a
5828 GDB_SIGNAL_TRAP to the program being debugged. */
5829 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5830 && !signal_program[ecs->event_thread->suspend.stop_signal])
5831 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5832
5833 discard_cleanups (old_cleanups);
5834 resume (currently_stepping (ecs->event_thread),
5835 ecs->event_thread->suspend.stop_signal);
5836 }
5837
5838 prepare_to_wait (ecs);
5839 }
5840
5841 /* This function normally comes after a resume, before
5842 handle_inferior_event exits. It takes care of any last bits of
5843 housekeeping, and sets the all-important wait_some_more flag. */
5844
5845 static void
5846 prepare_to_wait (struct execution_control_state *ecs)
5847 {
5848 if (debug_infrun)
5849 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5850
5851 /* This is the old end of the while loop. Let everybody know we
5852 want to wait for the inferior some more and get called again
5853 soon. */
5854 ecs->wait_some_more = 1;
5855 }
5856
5857 /* We are done with the step range of a step/next/si/ni command.
5858 Called once for each n of a "step n" operation. Notify observers
5859 if not in the middle of doing a "step N" operation for N > 1. */
5860
5861 static void
5862 end_stepping_range (struct execution_control_state *ecs)
5863 {
5864 ecs->event_thread->control.stop_step = 1;
5865 if (!ecs->event_thread->step_multi)
5866 observer_notify_end_stepping_range ();
5867 stop_waiting (ecs);
5868 }
5869
5870 /* Several print_*_reason functions to print why the inferior has stopped.
5871 We always print something when the inferior exits, or receives a signal.
5872 The rest of the cases are dealt with later on in normal_stop and
5873 print_it_typical. Ideally there should be a call to one of these
5874 print_*_reason functions from handle_inferior_event each time
5875 stop_waiting is called.
5876
5877 Note that we don't call these directly; instead we delegate that to
5878 the interpreters, through observers. Interpreters then call these
5879 with whatever uiout is right. */
5880
5881 void
5882 print_end_stepping_range_reason (struct ui_out *uiout)
5883 {
5884 /* For CLI-like interpreters, print nothing. */
5885
5886 if (ui_out_is_mi_like_p (uiout))
5887 {
5888 ui_out_field_string (uiout, "reason",
5889 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5890 }
5891 }
5892
5893 void
5894 print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
5895 {
5896 annotate_signalled ();
5897 if (ui_out_is_mi_like_p (uiout))
5898 ui_out_field_string
5899 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5900 ui_out_text (uiout, "\nProgram terminated with signal ");
5901 annotate_signal_name ();
5902 ui_out_field_string (uiout, "signal-name",
5903 gdb_signal_to_name (siggnal));
5904 annotate_signal_name_end ();
5905 ui_out_text (uiout, ", ");
5906 annotate_signal_string ();
5907 ui_out_field_string (uiout, "signal-meaning",
5908 gdb_signal_to_string (siggnal));
5909 annotate_signal_string_end ();
5910 ui_out_text (uiout, ".\n");
5911 ui_out_text (uiout, "The program no longer exists.\n");
5912 }
5913
5914 void
5915 print_exited_reason (struct ui_out *uiout, int exitstatus)
5916 {
5917 struct inferior *inf = current_inferior ();
5918 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
5919
5920 annotate_exited (exitstatus);
5921 if (exitstatus)
5922 {
5923 if (ui_out_is_mi_like_p (uiout))
5924 ui_out_field_string (uiout, "reason",
5925 async_reason_lookup (EXEC_ASYNC_EXITED));
5926 ui_out_text (uiout, "[Inferior ");
5927 ui_out_text (uiout, plongest (inf->num));
5928 ui_out_text (uiout, " (");
5929 ui_out_text (uiout, pidstr);
5930 ui_out_text (uiout, ") exited with code ");
5931 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
5932 ui_out_text (uiout, "]\n");
5933 }
5934 else
5935 {
5936 if (ui_out_is_mi_like_p (uiout))
5937 ui_out_field_string
5938 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5939 ui_out_text (uiout, "[Inferior ");
5940 ui_out_text (uiout, plongest (inf->num));
5941 ui_out_text (uiout, " (");
5942 ui_out_text (uiout, pidstr);
5943 ui_out_text (uiout, ") exited normally]\n");
5944 }
5945 }
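/* For example (pid and inferior number are placeholders), the console
   output produced above looks like:

     [Inferior 1 (process 1234) exited with code 01]
     [Inferior 1 (process 1234) exited normally]

   and MI consumers additionally get the "reason" field,
   e.g. reason="exited-normally".  */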
5946
5947 void
5948 print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
5949 {
5950 annotate_signal ();
5951
5952 if (siggnal == GDB_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5953 {
5954 struct thread_info *t = inferior_thread ();
5955
5956 ui_out_text (uiout, "\n[");
5957 ui_out_field_string (uiout, "thread-name",
5958 target_pid_to_str (t->ptid));
5959 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5960 ui_out_text (uiout, " stopped");
5961 }
5962 else
5963 {
5964 ui_out_text (uiout, "\nProgram received signal ");
5965 annotate_signal_name ();
5966 if (ui_out_is_mi_like_p (uiout))
5967 ui_out_field_string
5968 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5969 ui_out_field_string (uiout, "signal-name",
5970 gdb_signal_to_name (siggnal));
5971 annotate_signal_name_end ();
5972 ui_out_text (uiout, ", ");
5973 annotate_signal_string ();
5974 ui_out_field_string (uiout, "signal-meaning",
5975 gdb_signal_to_string (siggnal));
5976 annotate_signal_string_end ();
5977 }
5978 ui_out_text (uiout, ".\n");
5979 }
5980
5981 void
5982 print_no_history_reason (struct ui_out *uiout)
5983 {
5984 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
5985 }
5986
5987 /* Print current location without a level number, if we have changed
5988 functions or hit a breakpoint. Print source line if we have one.
5989 bpstat_print contains the logic deciding in detail what to print,
5990 based on the event(s) that just occurred. */
5991
5992 void
5993 print_stop_event (struct target_waitstatus *ws)
5994 {
5995 int bpstat_ret;
5996 int source_flag;
5997 int do_frame_printing = 1;
5998 struct thread_info *tp = inferior_thread ();
5999
6000 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
6001 switch (bpstat_ret)
6002 {
6003 case PRINT_UNKNOWN:
6004 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
6005 should) carry around the function and does (or should) use
6006 that when doing a frame comparison. */
6007 if (tp->control.stop_step
6008 && frame_id_eq (tp->control.step_frame_id,
6009 get_frame_id (get_current_frame ()))
6010 && step_start_function == find_pc_function (stop_pc))
6011 {
6012 /* Finished step, just print source line. */
6013 source_flag = SRC_LINE;
6014 }
6015 else
6016 {
6017 /* Print location and source line. */
6018 source_flag = SRC_AND_LOC;
6019 }
6020 break;
6021 case PRINT_SRC_AND_LOC:
6022 /* Print location and source line. */
6023 source_flag = SRC_AND_LOC;
6024 break;
6025 case PRINT_SRC_ONLY:
6026 source_flag = SRC_LINE;
6027 break;
6028 case PRINT_NOTHING:
6029 /* Something bogus. */
6030 source_flag = SRC_LINE;
6031 do_frame_printing = 0;
6032 break;
6033 default:
6034 internal_error (__FILE__, __LINE__, _("Unknown value."));
6035 }
6036
6037 /* The behavior of this routine with respect to the source
6038 flag is:
6039 SRC_LINE: Print only source line
6040 LOCATION: Print only location
6041 SRC_AND_LOC: Print location and source line. */
6042 if (do_frame_printing)
6043 print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
6044
6045 /* Display the auto-display expressions. */
6046 do_displays ();
6047 }
6048
6049 /* Here to return control to GDB when the inferior stops for real.
6050 Print appropriate messages, remove breakpoints, give terminal our modes.
6051
6052 STOP_PRINT_FRAME nonzero means print the executing frame
6053 (pc, function, args, file, line number and line text).
6054 BREAKPOINTS_FAILED nonzero means stop was due to error
6055 attempting to insert breakpoints. */
6056
6057 void
6058 normal_stop (void)
6059 {
6060 struct target_waitstatus last;
6061 ptid_t last_ptid;
6062 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
6063
6064 get_last_target_status (&last_ptid, &last);
6065
6066 /* If an exception is thrown from this point on, make sure to
6067 propagate GDB's knowledge of the executing state to the
6068 frontend/user running state. A QUIT is an easy exception to see
6069 here, so do this before any filtered output. */
6070 if (!non_stop)
6071 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
6072 else if (last.kind != TARGET_WAITKIND_SIGNALLED
6073 && last.kind != TARGET_WAITKIND_EXITED
6074 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6075 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
6076
6077 /* As with the notification of thread events, we want to delay
6078 notifying the user that we've switched thread context until
6079 the inferior actually stops.
6080
6081 There's no point in saying anything if the inferior has exited.
6082 Note that SIGNALLED here means "exited with a signal", not
6083 "received a signal".
6084
6085 Also skip saying anything in non-stop mode. In that mode, as we
6086 don't want GDB to switch threads behind the user's back, to avoid
6087 races where the user is typing a command to apply to thread x,
6088 but GDB switches to thread y before the user finishes entering
6089 the command, fetch_inferior_event installs a cleanup to restore
6090 the current thread back to the thread the user had selected right
6091 after this event is handled, so we're not really switching, only
6092 informing of a stop. */
6093 if (!non_stop
6094 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
6095 && target_has_execution
6096 && last.kind != TARGET_WAITKIND_SIGNALLED
6097 && last.kind != TARGET_WAITKIND_EXITED
6098 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6099 {
6100 target_terminal_ours_for_output ();
6101 printf_filtered (_("[Switching to %s]\n"),
6102 target_pid_to_str (inferior_ptid));
6103 annotate_thread_changed ();
6104 previous_inferior_ptid = inferior_ptid;
6105 }
6106
6107 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
6108 {
6109 gdb_assert (sync_execution || !target_can_async_p ());
6110
6111 target_terminal_ours_for_output ();
6112 printf_filtered (_("No unwaited-for children left.\n"));
6113 }
6114
6115 if (!breakpoints_always_inserted_mode () && target_has_execution)
6116 {
6117 if (remove_breakpoints ())
6118 {
6119 target_terminal_ours_for_output ();
6120 printf_filtered (_("Cannot remove breakpoints because "
6121 "program is no longer writable.\nFurther "
6122 "execution is probably impossible.\n"));
6123 }
6124 }
6125
6126 /* If an auto-display called a function and that got a signal,
6127 delete that auto-display to avoid an infinite recursion. */
6128
6129 if (stopped_by_random_signal)
6130 disable_current_display ();
6131
6132 /* Don't print a message if in the middle of doing a "step n"
6133 operation for n > 1 */
6134 if (target_has_execution
6135 && last.kind != TARGET_WAITKIND_SIGNALLED
6136 && last.kind != TARGET_WAITKIND_EXITED
6137 && inferior_thread ()->step_multi
6138 && inferior_thread ()->control.stop_step)
6139 goto done;
6140
6141 target_terminal_ours ();
6142 async_enable_stdin ();
6143
6144 /* Set the current source location. This will also happen if we
6145 display the frame below, but the current SAL will be incorrect
6146 during a user hook-stop function. */
6147 if (has_stack_frames () && !stop_stack_dummy)
6148 set_current_sal_from_frame (get_current_frame ());
6149
6150 /* Let the user/frontend see the threads as stopped, but do nothing
6151 if the thread was running an infcall. We may be e.g., evaluating
6152 a breakpoint condition. In that case, the thread had state
6153 THREAD_RUNNING before the infcall, and shall remain set to
6154 running, all without informing the user/frontend about state
6155 transition changes. If this is actually a call command, then the
6156 thread was originally already stopped, so there's no state to
6157 finish either. */
6158 if (target_has_execution && inferior_thread ()->control.in_infcall)
6159 discard_cleanups (old_chain);
6160 else
6161 do_cleanups (old_chain);
6162
6163 /* Look up the hook_stop and run it (CLI internally handles problem
6164 of stop_command's pre-hook not existing). */
6165 if (stop_command)
6166 catch_errors (hook_stop_stub, stop_command,
6167 "Error while running hook_stop:\n", RETURN_MASK_ALL);
6168
6169 if (!has_stack_frames ())
6170 goto done;
6171
6172 if (last.kind == TARGET_WAITKIND_SIGNALLED
6173 || last.kind == TARGET_WAITKIND_EXITED)
6174 goto done;
6175
6176 /* Select innermost stack frame - i.e., current frame is frame 0,
6177 and current location is based on that.
6178 Don't do this on return from a stack dummy routine,
6179 or if the program has exited. */
6180
6181 if (!stop_stack_dummy)
6182 {
6183 select_frame (get_current_frame ());
6184
6185 /* If --batch-silent is enabled then there's no need to print the current
6186 source location, and trying to do so risks causing an error message about
6187 missing source files. */
6188 if (stop_print_frame && !batch_silent)
6189 print_stop_event (&last);
6190 }
6191
6192 /* Save the function value return registers, if we care.
6193 We might be about to restore their previous contents. */
6194 if (inferior_thread ()->control.proceed_to_finish
6195 && execution_direction != EXEC_REVERSE)
6196 {
6197 /* This should not be necessary. */
6198 if (stop_registers)
6199 regcache_xfree (stop_registers);
6200
6201 /* NB: The copy goes through to the target picking up the value of
6202 all the registers. */
6203 stop_registers = regcache_dup (get_current_regcache ());
6204 }
6205
6206 if (stop_stack_dummy == STOP_STACK_DUMMY)
6207 {
6208 /* Pop the empty frame that contains the stack dummy.
6209 This also restores inferior state prior to the call
6210 (struct infcall_suspend_state). */
6211 struct frame_info *frame = get_current_frame ();
6212
6213 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
6214 frame_pop (frame);
6215 /* frame_pop() calls reinit_frame_cache as the last thing it
6216 does which means there's currently no selected frame. We
6217 don't need to re-establish a selected frame if the dummy call
6218 returns normally, that will be done by
6219 restore_infcall_control_state. However, we do have to handle
6220 the case where the dummy call is returning after being
6221 stopped (e.g. the dummy call previously hit a breakpoint).
6222 We can't know which case we have so just always re-establish
6223 a selected frame here. */
6224 select_frame (get_current_frame ());
6225 }
6226
6227 done:
6228 annotate_stopped ();
6229
6230 /* Suppress the stop observer if we're in the middle of:
6231
6232 - a step n (n > 1), as there are still more steps to be done.
6233
6234 - a "finish" command, as the observer will be called in
6235 finish_command_continuation, so it can include the inferior
6236 function's return value.
6237
6238 - calling an inferior function, as we pretend the inferior didn't
6239 run at all. The return value of the call is handled by the
6240 expression evaluator, through call_function_by_hand. */
6241
6242 if (!target_has_execution
6243 || last.kind == TARGET_WAITKIND_SIGNALLED
6244 || last.kind == TARGET_WAITKIND_EXITED
6245 || last.kind == TARGET_WAITKIND_NO_RESUMED
6246 || (!(inferior_thread ()->step_multi
6247 && inferior_thread ()->control.stop_step)
6248 && !(inferior_thread ()->control.stop_bpstat
6249 && inferior_thread ()->control.proceed_to_finish)
6250 && !inferior_thread ()->control.in_infcall))
6251 {
6252 if (!ptid_equal (inferior_ptid, null_ptid))
6253 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
6254 stop_print_frame);
6255 else
6256 observer_notify_normal_stop (NULL, stop_print_frame);
6257 }
6258
6259 if (target_has_execution)
6260 {
6261 if (last.kind != TARGET_WAITKIND_SIGNALLED
6262 && last.kind != TARGET_WAITKIND_EXITED)
6263 /* Delete the breakpoint we stopped at, if it wants to be deleted.
6264 Delete any breakpoint that is to be deleted at the next stop. */
6265 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
6266 }
6267
6268 /* Try to get rid of automatically added inferiors that are no
6269 longer needed. Keeping those around slows down things linearly.
6270 Note that this never removes the current inferior. */
6271 prune_inferiors ();
6272 }
6273
6274 static int
6275 hook_stop_stub (void *cmd)
6276 {
6277 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
6278 return (0);
6279 }
6280 \f
6281 int
6282 signal_stop_state (int signo)
6283 {
6284 return signal_stop[signo];
6285 }
6286
6287 int
6288 signal_print_state (int signo)
6289 {
6290 return signal_print[signo];
6291 }
6292
6293 int
6294 signal_pass_state (int signo)
6295 {
6296 return signal_program[signo];
6297 }
6298
6299 static void
6300 signal_cache_update (int signo)
6301 {
6302 if (signo == -1)
6303 {
6304 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
6305 signal_cache_update (signo);
6306
6307 return;
6308 }
6309
6310 signal_pass[signo] = (signal_stop[signo] == 0
6311 && signal_print[signo] == 0
6312 && signal_program[signo] == 1
6313 && signal_catch[signo] == 0);
6314 }
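/* For instance, after "handle SIGALRM nostop noprint pass" (and with no
   "catch signal" in effect), signal_stop and signal_print are 0 and
   signal_program is 1 for that signal, so signal_pass[GDB_SIGNAL_ALRM]
   becomes 1: the target backend is then free to deliver SIGALRM to the
   inferior without reporting the event to GDB at all.  (Illustrative
   note only.)  */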
6315
6316 int
6317 signal_stop_update (int signo, int state)
6318 {
6319 int ret = signal_stop[signo];
6320
6321 signal_stop[signo] = state;
6322 signal_cache_update (signo);
6323 return ret;
6324 }
6325
6326 int
6327 signal_print_update (int signo, int state)
6328 {
6329 int ret = signal_print[signo];
6330
6331 signal_print[signo] = state;
6332 signal_cache_update (signo);
6333 return ret;
6334 }
6335
6336 int
6337 signal_pass_update (int signo, int state)
6338 {
6339 int ret = signal_program[signo];
6340
6341 signal_program[signo] = state;
6342 signal_cache_update (signo);
6343 return ret;
6344 }
6345
6346 /* Update the global 'signal_catch' from INFO and notify the
6347 target. */
6348
6349 void
6350 signal_catch_update (const unsigned int *info)
6351 {
6352 int i;
6353
6354 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
6355 signal_catch[i] = info[i] > 0;
6356 signal_cache_update (-1);
6357 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6358 }
6359
6360 static void
6361 sig_print_header (void)
6362 {
6363 printf_filtered (_("Signal Stop\tPrint\tPass "
6364 "to program\tDescription\n"));
6365 }
6366
6367 static void
6368 sig_print_info (enum gdb_signal oursig)
6369 {
6370 const char *name = gdb_signal_to_name (oursig);
6371 int name_padding = 13 - strlen (name);
6372
6373 if (name_padding <= 0)
6374 name_padding = 0;
6375
6376 printf_filtered ("%s", name);
6377 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6378 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6379 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6380 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6381 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
6382 }
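/* With the default settings for SIGINT this prints a row roughly like
   (purely illustrative; columns are tab-separated as above):

     SIGINT        Yes   Yes   No    Interrupt

   i.e. the signal name padded to 14 columns, the Stop/Print/Pass
   answers, then the human-readable description.  */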
6383
6384 /* Specify how various signals in the inferior should be handled. */
6385
6386 static void
6387 handle_command (char *args, int from_tty)
6388 {
6389 char **argv;
6390 int digits, wordlen;
6391 int sigfirst, signum, siglast;
6392 enum gdb_signal oursig;
6393 int allsigs;
6394 int nsigs;
6395 unsigned char *sigs;
6396 struct cleanup *old_chain;
6397
6398 if (args == NULL)
6399 {
6400 error_no_arg (_("signal to handle"));
6401 }
6402
6403 /* Allocate and zero an array of flags for which signals to handle. */
6404
6405 nsigs = (int) GDB_SIGNAL_LAST;
6406 sigs = (unsigned char *) alloca (nsigs);
6407 memset (sigs, 0, nsigs);
6408
6409 /* Break the command line up into args. */
6410
6411 argv = gdb_buildargv (args);
6412 old_chain = make_cleanup_freeargv (argv);
6413
6414 /* Walk through the args, looking for signal numbers, signal names, and
6415 actions. Signal numbers and signal names may be interspersed with
6416 actions, with the actions being performed for all signals cumulatively
6417 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
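/* Purely as illustration of what this parser accepts (not exercised
   here), typical invocations look like:

     (gdb) handle SIGUSR1 nostop noprint pass
     (gdb) handle 14-15 stop print
     (gdb) handle all print

   Keywords may be abbreviated down to the minimum lengths tested
   below, e.g. "nost" for "nostop".  */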
6418
6419 while (*argv != NULL)
6420 {
6421 wordlen = strlen (*argv);
6422 for (digits = 0; isdigit ((*argv)[digits]); digits++)
6423 {;
6424 }
6425 allsigs = 0;
6426 sigfirst = siglast = -1;
6427
6428 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
6429 {
6430 /* Apply action to all signals except those used by the
6431 debugger. Silently skip those. */
6432 allsigs = 1;
6433 sigfirst = 0;
6434 siglast = nsigs - 1;
6435 }
6436 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
6437 {
6438 SET_SIGS (nsigs, sigs, signal_stop);
6439 SET_SIGS (nsigs, sigs, signal_print);
6440 }
6441 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
6442 {
6443 UNSET_SIGS (nsigs, sigs, signal_program);
6444 }
6445 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
6446 {
6447 SET_SIGS (nsigs, sigs, signal_print);
6448 }
6449 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
6450 {
6451 SET_SIGS (nsigs, sigs, signal_program);
6452 }
6453 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
6454 {
6455 UNSET_SIGS (nsigs, sigs, signal_stop);
6456 }
6457 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
6458 {
6459 SET_SIGS (nsigs, sigs, signal_program);
6460 }
6461 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
6462 {
6463 UNSET_SIGS (nsigs, sigs, signal_print);
6464 UNSET_SIGS (nsigs, sigs, signal_stop);
6465 }
6466 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
6467 {
6468 UNSET_SIGS (nsigs, sigs, signal_program);
6469 }
6470 else if (digits > 0)
6471 {
6472 /* It is numeric. The numeric signal refers to our own
6473 internal signal numbering from target.h, not to host/target
6474 signal number. This is a feature; users really should be
6475 using symbolic names anyway, and the common ones like
6476 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
6477
6478 sigfirst = siglast = (int)
6479 gdb_signal_from_command (atoi (*argv));
6480 if ((*argv)[digits] == '-')
6481 {
6482 siglast = (int)
6483 gdb_signal_from_command (atoi ((*argv) + digits + 1));
6484 }
6485 if (sigfirst > siglast)
6486 {
6487 /* Bet he didn't figure we'd think of this case... */
6488 signum = sigfirst;
6489 sigfirst = siglast;
6490 siglast = signum;
6491 }
6492 }
6493 else
6494 {
6495 oursig = gdb_signal_from_name (*argv);
6496 if (oursig != GDB_SIGNAL_UNKNOWN)
6497 {
6498 sigfirst = siglast = (int) oursig;
6499 }
6500 else
6501 {
6502 /* Not a number and not a recognized flag word => complain. */
6503 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
6504 }
6505 }
6506
6507 /* If any signal numbers or symbol names were found, set flags for
6508 which signals to apply actions to. */
6509
6510 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
6511 {
6512 switch ((enum gdb_signal) signum)
6513 {
6514 case GDB_SIGNAL_TRAP:
6515 case GDB_SIGNAL_INT:
6516 if (!allsigs && !sigs[signum])
6517 {
6518 if (query (_("%s is used by the debugger.\n\
6519 Are you sure you want to change it? "),
6520 gdb_signal_to_name ((enum gdb_signal) signum)))
6521 {
6522 sigs[signum] = 1;
6523 }
6524 else
6525 {
6526 printf_unfiltered (_("Not confirmed, unchanged.\n"));
6527 gdb_flush (gdb_stdout);
6528 }
6529 }
6530 break;
6531 case GDB_SIGNAL_0:
6532 case GDB_SIGNAL_DEFAULT:
6533 case GDB_SIGNAL_UNKNOWN:
6534 /* Make sure that "all" doesn't print these. */
6535 break;
6536 default:
6537 sigs[signum] = 1;
6538 break;
6539 }
6540 }
6541
6542 argv++;
6543 }
6544
6545 for (signum = 0; signum < nsigs; signum++)
6546 if (sigs[signum])
6547 {
6548 signal_cache_update (-1);
6549 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6550 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
6551
6552 if (from_tty)
6553 {
6554 /* Show the results. */
6555 sig_print_header ();
6556 for (; signum < nsigs; signum++)
6557 if (sigs[signum])
6558 sig_print_info (signum);
6559 }
6560
6561 break;
6562 }
6563
6564 do_cleanups (old_chain);
6565 }
6566
6567 /* Complete the "handle" command. */
6568
6569 static VEC (char_ptr) *
6570 handle_completer (struct cmd_list_element *ignore,
6571 const char *text, const char *word)
6572 {
6573 VEC (char_ptr) *vec_signals, *vec_keywords, *return_val;
6574 static const char * const keywords[] =
6575 {
6576 "all",
6577 "stop",
6578 "ignore",
6579 "print",
6580 "pass",
6581 "nostop",
6582 "noignore",
6583 "noprint",
6584 "nopass",
6585 NULL,
6586 };
6587
6588 vec_signals = signal_completer (ignore, text, word);
6589 vec_keywords = complete_on_enum (keywords, word, word);
6590
6591 return_val = VEC_merge (char_ptr, vec_signals, vec_keywords);
6592 VEC_free (char_ptr, vec_signals);
6593 VEC_free (char_ptr, vec_keywords);
6594 return return_val;
6595 }
6596
6597 static void
6598 xdb_handle_command (char *args, int from_tty)
6599 {
6600 char **argv;
6601 struct cleanup *old_chain;
6602
6603 if (args == NULL)
6604 error_no_arg (_("xdb command"));
6605
6606 /* Break the command line up into args. */
6607
6608 argv = gdb_buildargv (args);
6609 old_chain = make_cleanup_freeargv (argv);
6610 if (argv[1] != (char *) NULL)
6611 {
6612 char *argBuf;
6613 int bufLen;
6614
6615 bufLen = strlen (argv[0]) + 20;
6616 argBuf = (char *) xmalloc (bufLen);
6617 if (argBuf)
6618 {
6619 int validFlag = 1;
6620 enum gdb_signal oursig;
6621
6622 oursig = gdb_signal_from_name (argv[0]);
6623 memset (argBuf, 0, bufLen);
6624 if (strcmp (argv[1], "Q") == 0)
6625 sprintf (argBuf, "%s %s", argv[0], "noprint");
6626 else
6627 {
6628 if (strcmp (argv[1], "s") == 0)
6629 {
6630 if (!signal_stop[oursig])
6631 sprintf (argBuf, "%s %s", argv[0], "stop");
6632 else
6633 sprintf (argBuf, "%s %s", argv[0], "nostop");
6634 }
6635 else if (strcmp (argv[1], "i") == 0)
6636 {
6637 if (!signal_program[oursig])
6638 sprintf (argBuf, "%s %s", argv[0], "pass");
6639 else
6640 sprintf (argBuf, "%s %s", argv[0], "nopass");
6641 }
6642 else if (strcmp (argv[1], "r") == 0)
6643 {
6644 if (!signal_print[oursig])
6645 sprintf (argBuf, "%s %s", argv[0], "print");
6646 else
6647 sprintf (argBuf, "%s %s", argv[0], "noprint");
6648 }
6649 else
6650 validFlag = 0;
6651 }
6652 if (validFlag)
6653 handle_command (argBuf, from_tty);
6654 else
6655 printf_filtered (_("Invalid signal handling flag.\n"));
6656 if (argBuf)
6657 xfree (argBuf);
6658 }
6659 }
6660 do_cleanups (old_chain);
6661 }
6662
6663 enum gdb_signal
6664 gdb_signal_from_command (int num)
6665 {
6666 if (num >= 1 && num <= 15)
6667 return (enum gdb_signal) num;
6668 error (_("Only signals 1-15 are valid as numeric signals.\n\
6669 Use \"info signals\" for a list of symbolic signals."));
6670 }
6671
6672 /* Print current contents of the tables set by the handle command.
6673 It is possible we should just be printing signals actually used
6674 by the current target (but for things to work right when switching
6675 targets, all signals should be in the signal tables). */
6676
6677 static void
6678 signals_info (char *signum_exp, int from_tty)
6679 {
6680 enum gdb_signal oursig;
6681
6682 sig_print_header ();
6683
6684 if (signum_exp)
6685 {
6686 /* First see if this is a symbol name. */
6687 oursig = gdb_signal_from_name (signum_exp);
6688 if (oursig == GDB_SIGNAL_UNKNOWN)
6689 {
6690 /* No, try numeric. */
6691 oursig =
6692 gdb_signal_from_command (parse_and_eval_long (signum_exp));
6693 }
6694 sig_print_info (oursig);
6695 return;
6696 }
6697
6698 printf_filtered ("\n");
6699 /* These ugly casts brought to you by the native VAX compiler. */
6700 for (oursig = GDB_SIGNAL_FIRST;
6701 (int) oursig < (int) GDB_SIGNAL_LAST;
6702 oursig = (enum gdb_signal) ((int) oursig + 1))
6703 {
6704 QUIT;
6705
6706 if (oursig != GDB_SIGNAL_UNKNOWN
6707 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
6708 sig_print_info (oursig);
6709 }
6710
6711 printf_filtered (_("\nUse the \"handle\" command "
6712 "to change these tables.\n"));
6713 }
6714
6715 /* Check if it makes sense to read $_siginfo from the current thread
6716 at this point. If not, throw an error. */
6717
6718 static void
6719 validate_siginfo_access (void)
6720 {
6721 /* No current inferior, no siginfo. */
6722 if (ptid_equal (inferior_ptid, null_ptid))
6723 error (_("No thread selected."));
6724
6725 /* Don't try to read from a dead thread. */
6726 if (is_exited (inferior_ptid))
6727 error (_("The current thread has terminated"));
6728
6729 /* ... or from a spinning thread. */
6730 if (is_running (inferior_ptid))
6731 error (_("Selected thread is running."));
6732 }
6733
6734 /* The $_siginfo convenience variable is a bit special.  We don't know
6735 for sure the type of the value until we actually have a chance to
6736 fetch the data.  The type can change depending on gdbarch, so it is
6737 also dependent on which thread you have selected.  We handle this by:
6738
6739 1. making $_siginfo be an internalvar that creates a new value on
6740 access, and
6741
6742 2. making the value of $_siginfo be an lval_computed value.  */
6743
6744 /* This function implements the lval_computed support for reading a
6745 $_siginfo value. */
6746
6747 static void
6748 siginfo_value_read (struct value *v)
6749 {
6750 LONGEST transferred;
6751
6752 validate_siginfo_access ();
6753
6754 transferred =
6755 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
6756 NULL,
6757 value_contents_all_raw (v),
6758 value_offset (v),
6759 TYPE_LENGTH (value_type (v)));
6760
6761 if (transferred != TYPE_LENGTH (value_type (v)))
6762 error (_("Unable to read siginfo"));
6763 }
6764
6765 /* This function implements the lval_computed support for writing a
6766 $_siginfo value. */
6767
6768 static void
6769 siginfo_value_write (struct value *v, struct value *fromval)
6770 {
6771 LONGEST transferred;
6772
6773 validate_siginfo_access ();
6774
6775 transferred = target_write (&current_target,
6776 TARGET_OBJECT_SIGNAL_INFO,
6777 NULL,
6778 value_contents_all_raw (fromval),
6779 value_offset (v),
6780 TYPE_LENGTH (value_type (fromval)));
6781
6782 if (transferred != TYPE_LENGTH (value_type (fromval)))
6783 error (_("Unable to write siginfo"));
6784 }
6785
6786 static const struct lval_funcs siginfo_value_funcs =
6787 {
6788 siginfo_value_read,
6789 siginfo_value_write
6790 };
6791
6792 /* Return a new value with the correct type for the siginfo object of
6793 the current thread using architecture GDBARCH. Return a void value
6794 if there's no object available. */
6795
6796 static struct value *
6797 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
6798 void *ignore)
6799 {
6800 if (target_has_stack
6801 && !ptid_equal (inferior_ptid, null_ptid)
6802 && gdbarch_get_siginfo_type_p (gdbarch))
6803 {
6804 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6805
6806 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
6807 }
6808
6809 return allocate_value (builtin_type (gdbarch)->builtin_void);
6810 }
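
#if 0
/* Illustrative sketch, not part of the original source: the same
   lval_computed pattern applied to a hypothetical "$_example"
   convenience variable.  All of the example_* names below are made up;
   allocate_computed_value, builtin_type and create_internalvar_type_lazy
   are the same APIs used above and below for $_siginfo.  */

static void
example_value_read (struct value *v)
{
  /* Data is fetched only when the value is actually read; a real
     implementation would call target_read here, as siginfo_value_read
     does above.  */
  memset (value_contents_all_raw (v), 0, TYPE_LENGTH (value_type (v)));
}

static const struct lval_funcs example_value_funcs =
{
  example_value_read,
  NULL				/* Read-only; no write method.  */
};

static struct value *
example_make_value (struct gdbarch *gdbarch, struct internalvar *var,
		    void *ignore)
{
  /* The type is chosen lazily, at access time, so it can depend on the
     current gdbarch.  */
  struct type *type = builtin_type (gdbarch)->builtin_int;

  return allocate_computed_value (type, &example_value_funcs, NULL);
}

static const struct internalvar_funcs example_funcs =
{
  example_make_value,
  NULL,
  NULL
};

/* Registration, normally done from an _initialize_* function:

     create_internalvar_type_lazy ("_example", &example_funcs, NULL);  */
#endif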
6811
6812 \f
6813 /* infcall_suspend_state contains state about the program itself like its
6814 registers and any signal it received when it last stopped.
6815 This state must be restored regardless of how the inferior function call
6816 ends (either successfully, or after it hits a breakpoint or signal)
6817 if the program is to properly continue where it left off. */
6818
6819 struct infcall_suspend_state
6820 {
6821 struct thread_suspend_state thread_suspend;
6822 #if 0 /* Currently unused and empty structures are not valid C. */
6823 struct inferior_suspend_state inferior_suspend;
6824 #endif
6825
6826 /* Other fields: */
6827 CORE_ADDR stop_pc;
6828 struct regcache *registers;
6829
6830 /* Format of SIGINFO_DATA or NULL if it is not present. */
6831 struct gdbarch *siginfo_gdbarch;
6832
6833 /* The siginfo data itself, in the layout defined by SIGINFO_GDBARCH;
6834 its length is TYPE_LENGTH (gdbarch_get_siginfo_type ()).  The
6835 contents are only meaningful for that gdbarch.  */
6836 gdb_byte *siginfo_data;
6837 };
6838
6839 struct infcall_suspend_state *
6840 save_infcall_suspend_state (void)
6841 {
6842 struct infcall_suspend_state *inf_state;
6843 struct thread_info *tp = inferior_thread ();
6844 #if 0
6845 struct inferior *inf = current_inferior ();
6846 #endif
6847 struct regcache *regcache = get_current_regcache ();
6848 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6849 gdb_byte *siginfo_data = NULL;
6850
6851 if (gdbarch_get_siginfo_type_p (gdbarch))
6852 {
6853 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6854 size_t len = TYPE_LENGTH (type);
6855 struct cleanup *back_to;
6856
6857 siginfo_data = xmalloc (len);
6858 back_to = make_cleanup (xfree, siginfo_data);
6859
6860 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6861 siginfo_data, 0, len) == len)
6862 discard_cleanups (back_to);
6863 else
6864 {
6865 /* Errors ignored. */
6866 do_cleanups (back_to);
6867 siginfo_data = NULL;
6868 }
6869 }
6870
6871 inf_state = XCNEW (struct infcall_suspend_state);
6872
6873 if (siginfo_data)
6874 {
6875 inf_state->siginfo_gdbarch = gdbarch;
6876 inf_state->siginfo_data = siginfo_data;
6877 }
6878
6879 inf_state->thread_suspend = tp->suspend;
6880 #if 0 /* Currently unused and empty structures are not valid C. */
6881 inf_state->inferior_suspend = inf->suspend;
6882 #endif
6883
6884 /* run_inferior_call will not use the signal due to its `proceed' call with
6885 GDB_SIGNAL_0 anyway. */
6886 tp->suspend.stop_signal = GDB_SIGNAL_0;
6887
6888 inf_state->stop_pc = stop_pc;
6889
6890 inf_state->registers = regcache_dup (regcache);
6891
6892 return inf_state;
6893 }
6894
6895 /* Restore inferior session state to INF_STATE. */
6896
6897 void
6898 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6899 {
6900 struct thread_info *tp = inferior_thread ();
6901 #if 0
6902 struct inferior *inf = current_inferior ();
6903 #endif
6904 struct regcache *regcache = get_current_regcache ();
6905 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6906
6907 tp->suspend = inf_state->thread_suspend;
6908 #if 0 /* Currently unused and empty structures are not valid C. */
6909 inf->suspend = inf_state->inferior_suspend;
6910 #endif
6911
6912 stop_pc = inf_state->stop_pc;
6913
6914 if (inf_state->siginfo_gdbarch == gdbarch)
6915 {
6916 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6917
6918 /* Errors ignored. */
6919 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6920 inf_state->siginfo_data, 0, TYPE_LENGTH (type));
6921 }
6922
6923 /* The inferior can be gone if the user types "print exit(0)"
6924 (and perhaps other times). */
6925 if (target_has_execution)
6926 /* NB: The register write goes through to the target. */
6927 regcache_cpy (regcache, inf_state->registers);
6928
6929 discard_infcall_suspend_state (inf_state);
6930 }
6931
6932 static void
6933 do_restore_infcall_suspend_state_cleanup (void *state)
6934 {
6935 restore_infcall_suspend_state (state);
6936 }
6937
6938 struct cleanup *
6939 make_cleanup_restore_infcall_suspend_state
6940 (struct infcall_suspend_state *inf_state)
6941 {
6942 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
6943 }
6944
6945 void
6946 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6947 {
6948 regcache_xfree (inf_state->registers);
6949 xfree (inf_state->siginfo_data);
6950 xfree (inf_state);
6951 }
6952
6953 struct regcache *
6954 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
6955 {
6956 return inf_state->registers;
6957 }
6958
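#if 0
/* Illustrative sketch, not part of the original source: how a caller
   might use the suspend-state API above around an inferior function
   call.  The example_* name is made up; the point is that the cleanup
   restores the registers, stop_pc and siginfo no matter how the call
   ends.  */

static void
example_call_preserving_suspend_state (void)
{
  struct infcall_suspend_state *caller_state;
  struct cleanup *old_chain;

  caller_state = save_infcall_suspend_state ();
  old_chain = make_cleanup_restore_infcall_suspend_state (caller_state);

  /* ... set up and run the inferior function call here; an error
     thrown anywhere below still restores the saved state ...  */

  do_cleanups (old_chain);	/* Restores and frees CALLER_STATE.  */
}
#endif
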
6959 /* infcall_control_state contains state regarding gdb's control of the
6960 inferior itself like stepping control. It also contains session state like
6961 the user's currently selected frame. */
6962
6963 struct infcall_control_state
6964 {
6965 struct thread_control_state thread_control;
6966 struct inferior_control_state inferior_control;
6967
6968 /* Other fields: */
6969 enum stop_stack_kind stop_stack_dummy;
6970 int stopped_by_random_signal;
6971 int stop_after_trap;
6972
6973 /* ID of the selected frame when the inferior function call was made.  */
6974 struct frame_id selected_frame_id;
6975 };
6976
6977 /* Save all of the information associated with the inferior<==>gdb
6978 connection. */
6979
6980 struct infcall_control_state *
6981 save_infcall_control_state (void)
6982 {
6983 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
6984 struct thread_info *tp = inferior_thread ();
6985 struct inferior *inf = current_inferior ();
6986
6987 inf_status->thread_control = tp->control;
6988 inf_status->inferior_control = inf->control;
6989
6990 tp->control.step_resume_breakpoint = NULL;
6991 tp->control.exception_resume_breakpoint = NULL;
6992
6993 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
6994 chain. If caller's caller is walking the chain, they'll be happier if we
6995 hand them back the original chain when restore_infcall_control_state is
6996 called. */
6997 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
6998
6999 /* Other fields: */
7000 inf_status->stop_stack_dummy = stop_stack_dummy;
7001 inf_status->stopped_by_random_signal = stopped_by_random_signal;
7002 inf_status->stop_after_trap = stop_after_trap;
7003
7004 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
7005
7006 return inf_status;
7007 }
7008
7009 static int
7010 restore_selected_frame (void *args)
7011 {
7012 struct frame_id *fid = (struct frame_id *) args;
7013 struct frame_info *frame;
7014
7015 frame = frame_find_by_id (*fid);
7016
7017 /* If the frame can't be found, the previously selected frame no
7018 longer exists in the current backtrace; warn and give up.  */
7019 if (frame == NULL)
7020 {
7021 warning (_("Unable to restore previously selected frame."));
7022 return 0;
7023 }
7024
7025 select_frame (frame);
7026
7027 return (1);
7028 }
7029
7030 /* Restore inferior session state to INF_STATUS. */
7031
7032 void
7033 restore_infcall_control_state (struct infcall_control_state *inf_status)
7034 {
7035 struct thread_info *tp = inferior_thread ();
7036 struct inferior *inf = current_inferior ();
7037
7038 if (tp->control.step_resume_breakpoint)
7039 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
7040
7041 if (tp->control.exception_resume_breakpoint)
7042 tp->control.exception_resume_breakpoint->disposition
7043 = disp_del_at_next_stop;
7044
7045 /* Handle the bpstat_copy of the chain. */
7046 bpstat_clear (&tp->control.stop_bpstat);
7047
7048 tp->control = inf_status->thread_control;
7049 inf->control = inf_status->inferior_control;
7050
7051 /* Other fields: */
7052 stop_stack_dummy = inf_status->stop_stack_dummy;
7053 stopped_by_random_signal = inf_status->stopped_by_random_signal;
7054 stop_after_trap = inf_status->stop_after_trap;
7055
7056 if (target_has_stack)
7057 {
7058 /* The point of catch_errors is that if the stack is clobbered,
7059 walking the stack might encounter a garbage pointer and
7060 error() trying to dereference it. */
7061 if (catch_errors
7062 (restore_selected_frame, &inf_status->selected_frame_id,
7063 "Unable to restore previously selected frame:\n",
7064 RETURN_MASK_ERROR) == 0)
7065 /* Error in restoring the selected frame. Select the innermost
7066 frame. */
7067 select_frame (get_current_frame ());
7068 }
7069
7070 xfree (inf_status);
7071 }
7072
7073 static void
7074 do_restore_infcall_control_state_cleanup (void *sts)
7075 {
7076 restore_infcall_control_state (sts);
7077 }
7078
7079 struct cleanup *
7080 make_cleanup_restore_infcall_control_state
7081 (struct infcall_control_state *inf_status)
7082 {
7083 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
7084 }
7085
7086 void
7087 discard_infcall_control_state (struct infcall_control_state *inf_status)
7088 {
7089 if (inf_status->thread_control.step_resume_breakpoint)
7090 inf_status->thread_control.step_resume_breakpoint->disposition
7091 = disp_del_at_next_stop;
7092
7093 if (inf_status->thread_control.exception_resume_breakpoint)
7094 inf_status->thread_control.exception_resume_breakpoint->disposition
7095 = disp_del_at_next_stop;
7096
7097 /* See save_infcall_control_state for info on stop_bpstat. */
7098 bpstat_clear (&inf_status->thread_control.stop_bpstat);
7099
7100 xfree (inf_status);
7101 }
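
#if 0
/* Illustrative sketch, not part of the original source: one way a
   caller could use the control-state API above.  Whether the saved
   state is restored or discarded on the normal path is the caller's
   choice; here the current control state is kept and the saved copy is
   freed.  The example_* name is made up.  */

static void
example_control_state_usage (void)
{
  struct infcall_control_state *inf_status;
  struct cleanup *inf_status_cleanup;

  inf_status = save_infcall_control_state ();
  inf_status_cleanup = make_cleanup_restore_infcall_control_state (inf_status);

  /* ... run the inferior call; an error restores the stepping state
     and selected frame via the cleanup ...  */

  /* Normal completion: keep the current control state and just free
     the saved copy.  */
  discard_cleanups (inf_status_cleanup);
  discard_infcall_control_state (inf_status);
}
#endif
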
7102 \f
7103 /* restore_inferior_ptid() will be used by the cleanup machinery
7104 to restore the inferior_ptid value saved in a call to
7105 save_inferior_ptid(). */
7106
7107 static void
7108 restore_inferior_ptid (void *arg)
7109 {
7110 ptid_t *saved_ptid_ptr = arg;
7111
7112 inferior_ptid = *saved_ptid_ptr;
7113 xfree (arg);
7114 }
7115
7116 /* Save the value of inferior_ptid so that it may be restored by a
7117 later call to do_cleanups(). Returns the struct cleanup pointer
7118 needed for later doing the cleanup. */
7119
7120 struct cleanup *
7121 save_inferior_ptid (void)
7122 {
7123 ptid_t *saved_ptid_ptr;
7124
7125 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
7126 *saved_ptid_ptr = inferior_ptid;
7127 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
7128 }
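
#if 0
/* Illustrative sketch, not part of the original source: the usual
   save_inferior_ptid pattern for temporarily switching threads.  The
   example_* name and TEMP_PTID parameter are made up.  */

static void
example_with_temporary_ptid (ptid_t temp_ptid)
{
  struct cleanup *old_chain = save_inferior_ptid ();

  inferior_ptid = temp_ptid;

  /* ... operate on TEMP_PTID; even if an error is thrown, running the
     cleanup chain puts the original inferior_ptid back ...  */

  do_cleanups (old_chain);
}
#endif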
7129
7130 /* See inferior.h. */
7131
7132 void
7133 clear_exit_convenience_vars (void)
7134 {
7135 clear_internalvar (lookup_internalvar ("_exitsignal"));
7136 clear_internalvar (lookup_internalvar ("_exitcode"));
7137 }
7138 \f
7139
7140 /* User interface for reverse debugging:
7141 Set exec-direction / show exec-direction commands
7142 (the set command errors out unless the target can execute in reverse).  */
7143
7144 int execution_direction = EXEC_FORWARD;
7145 static const char exec_forward[] = "forward";
7146 static const char exec_reverse[] = "reverse";
7147 static const char *exec_direction = exec_forward;
7148 static const char *const exec_direction_names[] = {
7149 exec_forward,
7150 exec_reverse,
7151 NULL
7152 };
7153
7154 static void
7155 set_exec_direction_func (char *args, int from_tty,
7156 struct cmd_list_element *cmd)
7157 {
7158 if (target_can_execute_reverse)
7159 {
7160 if (!strcmp (exec_direction, exec_forward))
7161 execution_direction = EXEC_FORWARD;
7162 else if (!strcmp (exec_direction, exec_reverse))
7163 execution_direction = EXEC_REVERSE;
7164 }
7165 else
7166 {
7167 exec_direction = exec_forward;
7168 error (_("Target does not support this operation."));
7169 }
7170 }
7171
7172 static void
7173 show_exec_direction_func (struct ui_file *out, int from_tty,
7174 struct cmd_list_element *cmd, const char *value)
7175 {
7176 switch (execution_direction) {
7177 case EXEC_FORWARD:
7178 fprintf_filtered (out, _("Forward.\n"));
7179 break;
7180 case EXEC_REVERSE:
7181 fprintf_filtered (out, _("Reverse.\n"));
7182 break;
7183 default:
7184 internal_error (__FILE__, __LINE__,
7185 _("bogus execution_direction value: %d"),
7186 (int) execution_direction);
7187 }
7188 }
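
#if 0
/* Illustrative sketch, not part of the original source: execution code
   does not call these set/show functions directly; it simply consults
   the EXECUTION_DIRECTION global that they maintain, e.g.:  */

static int
example_resuming_backwards (void)
{
  return execution_direction == EXEC_REVERSE;
}
#endif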
7189
7190 static void
7191 show_schedule_multiple (struct ui_file *file, int from_tty,
7192 struct cmd_list_element *c, const char *value)
7193 {
7194 fprintf_filtered (file, _("Resuming the execution of threads "
7195 "of all processes is %s.\n"), value);
7196 }
7197
7198 /* Implementation of `siginfo' variable. */
7199
7200 static const struct internalvar_funcs siginfo_funcs =
7201 {
7202 siginfo_make_value,
7203 NULL,
7204 NULL
7205 };
7206
7207 void
7208 _initialize_infrun (void)
7209 {
7210 int i;
7211 int numsigs;
7212 struct cmd_list_element *c;
7213
7214 add_info ("signals", signals_info, _("\
7215 What debugger does when program gets various signals.\n\
7216 Specify a signal as argument to print info on that signal only."));
7217 add_info_alias ("handle", "signals", 0);
7218
7219 c = add_com ("handle", class_run, handle_command, _("\
7220 Specify how to handle signals.\n\
7221 Usage: handle SIGNAL [ACTIONS]\n\
7222 Args are signals and actions to apply to those signals.\n\
7223 If no actions are specified, the current settings for the specified signals\n\
7224 will be displayed instead.\n\
7225 \n\
7226 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7227 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7228 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7229 The special arg \"all\" is recognized to mean all signals except those\n\
7230 used by the debugger, typically SIGTRAP and SIGINT.\n\
7231 \n\
7232 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
7233 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
7234 Stop means reenter debugger if this signal happens (implies print).\n\
7235 Print means print a message if this signal happens.\n\
7236 Pass means let program see this signal; otherwise program doesn't know.\n\
7237 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7238 Pass and Stop may be combined.\n\
7239 \n\
7240 Multiple signals may be specified. Signal numbers and signal names\n\
7241 may be interspersed with actions, with the actions being performed for\n\
7242 all signals cumulatively specified."));
7243 set_cmd_completer (c, handle_completer);
7244
7245 if (xdb_commands)
7246 {
7247 add_com ("lz", class_info, signals_info, _("\
7248 What debugger does when program gets various signals.\n\
7249 Specify a signal as argument to print info on that signal only."));
7250 add_com ("z", class_run, xdb_handle_command, _("\
7251 Specify how to handle a signal.\n\
7252 Args are signals and actions to apply to those signals.\n\
7253 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7254 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7255 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7256 The special arg \"all\" is recognized to mean all signals except those\n\
7257 used by the debugger, typically SIGTRAP and SIGINT.\n\
7258 Recognized actions include \"s\" (toggles between stop and nostop),\n\
7259 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
7260 nopass), \"Q\" (noprint)\n\
7261 Stop means reenter debugger if this signal happens (implies print).\n\
7262 Print means print a message if this signal happens.\n\
7263 Pass means let program see this signal; otherwise program doesn't know.\n\
7264 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7265 Pass and Stop may be combined."));
7266 }
7267
7268 if (!dbx_commands)
7269 stop_command = add_cmd ("stop", class_obscure,
7270 not_just_help_class_command, _("\
7271 There is no `stop' command, but you can set a hook on `stop'.\n\
7272 This allows you to set a list of commands to be run each time execution\n\
7273 of the program stops."), &cmdlist);
7274
7275 add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
7276 Set inferior debugging."), _("\
7277 Show inferior debugging."), _("\
7278 When non-zero, inferior specific debugging is enabled."),
7279 NULL,
7280 show_debug_infrun,
7281 &setdebuglist, &showdebuglist);
7282
7283 add_setshow_boolean_cmd ("displaced", class_maintenance,
7284 &debug_displaced, _("\
7285 Set displaced stepping debugging."), _("\
7286 Show displaced stepping debugging."), _("\
7287 When non-zero, displaced stepping specific debugging is enabled."),
7288 NULL,
7289 show_debug_displaced,
7290 &setdebuglist, &showdebuglist);
7291
7292 add_setshow_boolean_cmd ("non-stop", no_class,
7293 &non_stop_1, _("\
7294 Set whether gdb controls the inferior in non-stop mode."), _("\
7295 Show whether gdb controls the inferior in non-stop mode."), _("\
7296 When debugging a multi-threaded program and this setting is\n\
7297 off (the default, also called all-stop mode), when one thread stops\n\
7298 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
7299 all other threads in the program while you interact with the thread of\n\
7300 interest. When you continue or step a thread, you can allow the other\n\
7301 threads to run, or have them remain stopped, but while you inspect any\n\
7302 thread's state, all threads stop.\n\
7303 \n\
7304 In non-stop mode, when one thread stops, other threads can continue\n\
7305 to run freely. You'll be able to step each thread independently,\n\
7306 leave it stopped or free to run as needed."),
7307 set_non_stop,
7308 show_non_stop,
7309 &setlist,
7310 &showlist);
7311
7312 numsigs = (int) GDB_SIGNAL_LAST;
7313 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
7314 signal_print = (unsigned char *)
7315 xmalloc (sizeof (signal_print[0]) * numsigs);
7316 signal_program = (unsigned char *)
7317 xmalloc (sizeof (signal_program[0]) * numsigs);
7318 signal_catch = (unsigned char *)
7319 xmalloc (sizeof (signal_catch[0]) * numsigs);
7320 signal_pass = (unsigned char *)
7321 xmalloc (sizeof (signal_pass[0]) * numsigs);
7322 for (i = 0; i < numsigs; i++)
7323 {
7324 signal_stop[i] = 1;
7325 signal_print[i] = 1;
7326 signal_program[i] = 1;
7327 signal_catch[i] = 0;
7328 }
7329
7330 /* Signals caused by debugger's own actions
7331 should not be given to the program afterwards. */
7332 signal_program[GDB_SIGNAL_TRAP] = 0;
7333 signal_program[GDB_SIGNAL_INT] = 0;
7334
7335 /* Signals that are not errors should not normally enter the debugger. */
7336 signal_stop[GDB_SIGNAL_ALRM] = 0;
7337 signal_print[GDB_SIGNAL_ALRM] = 0;
7338 signal_stop[GDB_SIGNAL_VTALRM] = 0;
7339 signal_print[GDB_SIGNAL_VTALRM] = 0;
7340 signal_stop[GDB_SIGNAL_PROF] = 0;
7341 signal_print[GDB_SIGNAL_PROF] = 0;
7342 signal_stop[GDB_SIGNAL_CHLD] = 0;
7343 signal_print[GDB_SIGNAL_CHLD] = 0;
7344 signal_stop[GDB_SIGNAL_IO] = 0;
7345 signal_print[GDB_SIGNAL_IO] = 0;
7346 signal_stop[GDB_SIGNAL_POLL] = 0;
7347 signal_print[GDB_SIGNAL_POLL] = 0;
7348 signal_stop[GDB_SIGNAL_URG] = 0;
7349 signal_print[GDB_SIGNAL_URG] = 0;
7350 signal_stop[GDB_SIGNAL_WINCH] = 0;
7351 signal_print[GDB_SIGNAL_WINCH] = 0;
7352 signal_stop[GDB_SIGNAL_PRIO] = 0;
7353 signal_print[GDB_SIGNAL_PRIO] = 0;
7354
7355 /* These signals are used internally by user-level thread
7356 implementations. (See signal(5) on Solaris.) Like the above
7357 signals, a healthy program receives and handles them as part of
7358 its normal operation. */
7359 signal_stop[GDB_SIGNAL_LWP] = 0;
7360 signal_print[GDB_SIGNAL_LWP] = 0;
7361 signal_stop[GDB_SIGNAL_WAITING] = 0;
7362 signal_print[GDB_SIGNAL_WAITING] = 0;
7363 signal_stop[GDB_SIGNAL_CANCEL] = 0;
7364 signal_print[GDB_SIGNAL_CANCEL] = 0;
7365
7366 /* Update cached state. */
7367 signal_cache_update (-1);
7368
7369 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
7370 &stop_on_solib_events, _("\
7371 Set stopping for shared library events."), _("\
7372 Show stopping for shared library events."), _("\
7373 If nonzero, gdb will give control to the user when the dynamic linker\n\
7374 notifies gdb of shared library events. The most common event of interest\n\
7375 to the user would be loading/unloading of a new library."),
7376 set_stop_on_solib_events,
7377 show_stop_on_solib_events,
7378 &setlist, &showlist);
7379
7380 add_setshow_enum_cmd ("follow-fork-mode", class_run,
7381 follow_fork_mode_kind_names,
7382 &follow_fork_mode_string, _("\
7383 Set debugger response to a program call of fork or vfork."), _("\
7384 Show debugger response to a program call of fork or vfork."), _("\
7385 A fork or vfork creates a new process. follow-fork-mode can be:\n\
7386 parent - the original process is debugged after a fork\n\
7387 child - the new process is debugged after a fork\n\
7388 The unfollowed process will continue to run.\n\
7389 By default, the debugger will follow the parent process."),
7390 NULL,
7391 show_follow_fork_mode_string,
7392 &setlist, &showlist);
7393
7394 add_setshow_enum_cmd ("follow-exec-mode", class_run,
7395 follow_exec_mode_names,
7396 &follow_exec_mode_string, _("\
7397 Set debugger response to a program call of exec."), _("\
7398 Show debugger response to a program call of exec."), _("\
7399 An exec call replaces the program image of a process.\n\
7400 \n\
7401 follow-exec-mode can be:\n\
7402 \n\
7403 new - the debugger creates a new inferior and rebinds the process\n\
7404 to this new inferior. The program the process was running before\n\
7405 the exec call can be restarted afterwards by restarting the original\n\
7406 inferior.\n\
7407 \n\
7408 same - the debugger keeps the process bound to the same inferior.\n\
7409 The new executable image replaces the previous executable loaded in\n\
7410 the inferior. Restarting the inferior after the exec call restarts\n\
7411 the executable the process was running after the exec call.\n\
7412 \n\
7413 By default, the debugger will use the same inferior."),
7414 NULL,
7415 show_follow_exec_mode_string,
7416 &setlist, &showlist);
7417
7418 add_setshow_enum_cmd ("scheduler-locking", class_run,
7419 scheduler_enums, &scheduler_mode, _("\
7420 Set mode for locking scheduler during execution."), _("\
7421 Show mode for locking scheduler during execution."), _("\
7422 off == no locking (threads may preempt at any time)\n\
7423 on == full locking (no thread except the current thread may run)\n\
7424 step == scheduler locked during every single-step operation.\n\
7425 In this mode, no other thread may run during a step command.\n\
7426 Other threads may run while stepping over a function call ('next')."),
7427 set_schedlock_func, /* traps on target vector */
7428 show_scheduler_mode,
7429 &setlist, &showlist);
7430
7431 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
7432 Set mode for resuming threads of all processes."), _("\
7433 Show mode for resuming threads of all processes."), _("\
7434 When on, execution commands (such as 'continue' or 'next') resume all\n\
7435 threads of all processes. When off (which is the default), execution\n\
7436 commands only resume the threads of the current process. The set of\n\
7437 threads that are resumed is further refined by the scheduler-locking\n\
7438 mode (see help set scheduler-locking)."),
7439 NULL,
7440 show_schedule_multiple,
7441 &setlist, &showlist);
7442
7443 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
7444 Set mode of the step operation."), _("\
7445 Show mode of the step operation."), _("\
7446 When set, doing a step over a function without debug line information\n\
7447 will stop at the first instruction of that function. Otherwise, the\n\
7448 function is skipped and the step command stops at a different source line."),
7449 NULL,
7450 show_step_stop_if_no_debug,
7451 &setlist, &showlist);
7452
7453 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
7454 &can_use_displaced_stepping, _("\
7455 Set debugger's willingness to use displaced stepping."), _("\
7456 Show debugger's willingness to use displaced stepping."), _("\
7457 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
7458 supported by the target architecture. If off, gdb will not use displaced\n\
7459 stepping to step over breakpoints, even if such is supported by the target\n\
7460 architecture. If auto (which is the default), gdb will use displaced stepping\n\
7461 if the target architecture supports it and non-stop mode is active, but will not\n\
7462 use it in all-stop mode (see help set non-stop)."),
7463 NULL,
7464 show_can_use_displaced_stepping,
7465 &setlist, &showlist);
7466
7467 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
7468 &exec_direction, _("Set direction of execution.\n\
7469 Options are 'forward' or 'reverse'."),
7470 _("Show direction of execution (forward/reverse)."),
7471 _("Tells gdb whether to execute forward or backward."),
7472 set_exec_direction_func, show_exec_direction_func,
7473 &setlist, &showlist);
7474
7475 /* Set/show detach-on-fork: user-settable mode. */
7476
7477 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
7478 Set whether gdb will detach the child of a fork."), _("\
7479 Show whether gdb will detach the child of a fork."), _("\
7480 Tells gdb whether to detach the child of a fork."),
7481 NULL, NULL, &setlist, &showlist);
7482
7483 /* Set/show disable address space randomization mode. */
7484
7485 add_setshow_boolean_cmd ("disable-randomization", class_support,
7486 &disable_randomization, _("\
7487 Set disabling of debuggee's virtual address space randomization."), _("\
7488 Show disabling of debuggee's virtual address space randomization."), _("\
7489 When this mode is on (which is the default), randomization of the virtual\n\
7490 address space is disabled. Standalone programs run with the randomization\n\
7491 enabled by default on some platforms."),
7492 &set_disable_randomization,
7493 &show_disable_randomization,
7494 &setlist, &showlist);
7495
7496 /* ptid initializations */
7497 inferior_ptid = null_ptid;
7498 target_last_wait_ptid = minus_one_ptid;
7499
7500 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
7501 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
7502 observer_attach_thread_exit (infrun_thread_thread_exit);
7503 observer_attach_inferior_exit (infrun_inferior_exit);
7504
7505 /* Explicitly create without lookup, since that tries to create a
7506 value with a void typed value, and when we get here, gdbarch
7507 isn't initialized yet. At this point, we're quite sure there
7508 isn't another convenience variable of the same name. */
7509 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);
7510
7511 add_setshow_boolean_cmd ("observer", no_class,
7512 &observer_mode_1, _("\
7513 Set whether gdb controls the inferior in observer mode."), _("\
7514 Show whether gdb controls the inferior in observer mode."), _("\
7515 In observer mode, GDB can get data from the inferior, but not\n\
7516 affect its execution. Registers and memory may not be changed,\n\
7517 breakpoints may not be set, and the program cannot be interrupted\n\
7518 or signalled."),
7519 set_observer_mode,
7520 show_observer_mode,
7521 &setlist,
7522 &showlist);
7523 }