gdb/infrun.c
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2014 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "infrun.h"
23 #include <string.h>
24 #include <ctype.h>
25 #include "symtab.h"
26 #include "frame.h"
27 #include "inferior.h"
28 #include "exceptions.h"
29 #include "breakpoint.h"
30 #include "gdb_wait.h"
31 #include "gdbcore.h"
32 #include "gdbcmd.h"
33 #include "cli/cli-script.h"
34 #include "target.h"
35 #include "gdbthread.h"
36 #include "annotate.h"
37 #include "symfile.h"
38 #include "top.h"
39 #include <signal.h>
40 #include "inf-loop.h"
41 #include "regcache.h"
42 #include "value.h"
43 #include "observer.h"
44 #include "language.h"
45 #include "solib.h"
46 #include "main.h"
47 #include "dictionary.h"
48 #include "block.h"
49 #include "gdb_assert.h"
50 #include "mi/mi-common.h"
51 #include "event-top.h"
52 #include "record.h"
53 #include "record-full.h"
54 #include "inline-frame.h"
55 #include "jit.h"
56 #include "tracepoint.h"
57 #include "continuations.h"
58 #include "interps.h"
59 #include "skip.h"
60 #include "probe.h"
61 #include "objfiles.h"
62 #include "completer.h"
63 #include "target-descriptions.h"
64 #include "target-dcache.h"
65
66 /* Prototypes for local functions */
67
68 static void signals_info (char *, int);
69
70 static void handle_command (char *, int);
71
72 static void sig_print_info (enum gdb_signal);
73
74 static void sig_print_header (void);
75
76 static void resume_cleanups (void *);
77
78 static int hook_stop_stub (void *);
79
80 static int restore_selected_frame (void *);
81
82 static int follow_fork (void);
83
84 static void set_schedlock_func (char *args, int from_tty,
85 struct cmd_list_element *c);
86
87 static int currently_stepping (struct thread_info *tp);
88
89 static void xdb_handle_command (char *args, int from_tty);
90
91 static void end_stepping_range (void);
92
93 void _initialize_infrun (void);
94
95 void nullify_last_target_wait_ptid (void);
96
97 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
98
99 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
100
101 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
102
103 /* When set, stop the 'step' command if we enter a function which has
104 no line number information. The normal behavior is that we step
105 over such functions. */
106 int step_stop_if_no_debug = 0;
107 static void
108 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
109 struct cmd_list_element *c, const char *value)
110 {
111 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
112 }
113
114 /* In asynchronous mode, but simulating synchronous execution. */
115
116 int sync_execution = 0;
117
118 /* proceed and normal_stop use this to notify the user when the
119 inferior stopped in a different thread than it had been running
120 in. */
121
122 static ptid_t previous_inferior_ptid;
123
124 /* If set (default for legacy reasons), when following a fork, GDB
125 will detach from one of the fork branches, child or parent.
126 Exactly which branch is detached depends on 'set follow-fork-mode'
127 setting. */
128
129 static int detach_fork = 1;
130
131 int debug_displaced = 0;
132 static void
133 show_debug_displaced (struct ui_file *file, int from_tty,
134 struct cmd_list_element *c, const char *value)
135 {
136 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
137 }
138
139 unsigned int debug_infrun = 0;
140 static void
141 show_debug_infrun (struct ui_file *file, int from_tty,
142 struct cmd_list_element *c, const char *value)
143 {
144 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
145 }
146
147
148 /* Support for disabling address space randomization. */
149
150 int disable_randomization = 1;
151
152 static void
153 show_disable_randomization (struct ui_file *file, int from_tty,
154 struct cmd_list_element *c, const char *value)
155 {
156 if (target_supports_disable_randomization ())
157 fprintf_filtered (file,
158 _("Disabling randomization of debuggee's "
159 "virtual address space is %s.\n"),
160 value);
161 else
162 fputs_filtered (_("Disabling randomization of debuggee's "
163 "virtual address space is unsupported on\n"
164 "this platform.\n"), file);
165 }
166
167 static void
168 set_disable_randomization (char *args, int from_tty,
169 struct cmd_list_element *c)
170 {
171 if (!target_supports_disable_randomization ())
172 error (_("Disabling randomization of debuggee's "
173 "virtual address space is unsupported on\n"
174 "this platform."));
175 }
176
177 /* User interface for non-stop mode. */
178
179 int non_stop = 0;
180 static int non_stop_1 = 0;
181
182 static void
183 set_non_stop (char *args, int from_tty,
184 struct cmd_list_element *c)
185 {
186 if (target_has_execution)
187 {
188 non_stop_1 = non_stop;
189 error (_("Cannot change this setting while the inferior is running."));
190 }
191
192 non_stop = non_stop_1;
193 }
194
195 static void
196 show_non_stop (struct ui_file *file, int from_tty,
197 struct cmd_list_element *c, const char *value)
198 {
199 fprintf_filtered (file,
200 _("Controlling the inferior in non-stop mode is %s.\n"),
201 value);
202 }
203
204 /* "Observer mode" is somewhat like a more extreme version of
205 non-stop, in which all GDB operations that might affect the
206 target's execution have been disabled. */
207
208 int observer_mode = 0;
209 static int observer_mode_1 = 0;
210
211 static void
212 set_observer_mode (char *args, int from_tty,
213 struct cmd_list_element *c)
214 {
215 if (target_has_execution)
216 {
217 observer_mode_1 = observer_mode;
218 error (_("Cannot change this setting while the inferior is running."));
219 }
220
221 observer_mode = observer_mode_1;
222
223 may_write_registers = !observer_mode;
224 may_write_memory = !observer_mode;
225 may_insert_breakpoints = !observer_mode;
226 may_insert_tracepoints = !observer_mode;
227 /* We can insert fast tracepoints in or out of observer mode,
228 but enable them if we're going into this mode. */
229 if (observer_mode)
230 may_insert_fast_tracepoints = 1;
231 may_stop = !observer_mode;
232 update_target_permissions ();
233
234 /* Going *into* observer mode we must force non-stop, then
235 going out we leave it that way. */
236 if (observer_mode)
237 {
238 target_async_permitted = 1;
239 pagination_enabled = 0;
240 non_stop = non_stop_1 = 1;
241 }
242
243 if (from_tty)
244 printf_filtered (_("Observer mode is now %s.\n"),
245 (observer_mode ? "on" : "off"));
246 }
247
248 static void
249 show_observer_mode (struct ui_file *file, int from_tty,
250 struct cmd_list_element *c, const char *value)
251 {
252 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
253 }
254
255 /* This updates the value of observer mode based on changes in
256 permissions. Note that we are deliberately ignoring the values of
257 may-write-registers and may-write-memory, since the user may have
258 reason to enable these during a session, for instance to turn on a
259 debugging-related global. */
260
261 void
262 update_observer_mode (void)
263 {
264 int newval;
265
266 newval = (!may_insert_breakpoints
267 && !may_insert_tracepoints
268 && may_insert_fast_tracepoints
269 && !may_stop
270 && non_stop);
271
272 /* Let the user know if things change. */
273 if (newval != observer_mode)
274 printf_filtered (_("Observer mode is now %s.\n"),
275 (newval ? "on" : "off"));
276
277 observer_mode = observer_mode_1 = newval;
278 }
279
280 /* Tables of how to react to signals; the user sets them. */
281
282 static unsigned char *signal_stop;
283 static unsigned char *signal_print;
284 static unsigned char *signal_program;
285
286 /* Table of signals that are registered with "catch signal". A
287 non-zero entry indicates that the signal is caught by some "catch
288 signal" command. This has size GDB_SIGNAL_LAST, to accommodate all
289 signals. */
290 static unsigned char *signal_catch;
291
292 /* Table of signals that the target may silently handle.
293 This is automatically determined from the flags above,
294 and simply cached here. */
295 static unsigned char *signal_pass;
296
297 #define SET_SIGS(nsigs,sigs,flags) \
298 do { \
299 int signum = (nsigs); \
300 while (signum-- > 0) \
301 if ((sigs)[signum]) \
302 (flags)[signum] = 1; \
303 } while (0)
304
305 #define UNSET_SIGS(nsigs,sigs,flags) \
306 do { \
307 int signum = (nsigs); \
308 while (signum-- > 0) \
309 if ((sigs)[signum]) \
310 (flags)[signum] = 0; \
311 } while (0)
312
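313 /* Illustrative sketch, not actual GDB code: a caller such as the "handle"
    command builds a `sigs' array marking the signals a command named, then
    flips the per-signal flag tables with these macros.  The `nsigs'/`sigs'
    names below are only for the example.

        int nsigs = (int) GDB_SIGNAL_LAST;
        unsigned char sigs[GDB_SIGNAL_LAST] = { 0 };

        sigs[GDB_SIGNAL_INT] = 1;
        SET_SIGS (nsigs, sigs, signal_stop);      e.g. "handle SIGINT stop"
        UNSET_SIGS (nsigs, sigs, signal_program); e.g. "handle SIGINT nopass"  */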
313 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
314 this function is to avoid exporting `signal_program'. */
315
316 void
317 update_signals_program_target (void)
318 {
319 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
320 }
321
322 /* Value to pass to target_resume() to cause all threads to resume. */
323
324 #define RESUME_ALL minus_one_ptid
325
326 /* Command list pointer for the "stop" placeholder. */
327
328 static struct cmd_list_element *stop_command;
329
330 /* Function inferior was in as of last step command. */
331
332 static struct symbol *step_start_function;
333
334 /* Nonzero if we want to give control to the user when we're notified
335 of shared library events by the dynamic linker. */
336 int stop_on_solib_events;
337
338 /* Enable or disable optional shared library event breakpoints
339 as appropriate when the above flag is changed. */
340
341 static void
342 set_stop_on_solib_events (char *args, int from_tty, struct cmd_list_element *c)
343 {
344 update_solib_breakpoints ();
345 }
346
347 static void
348 show_stop_on_solib_events (struct ui_file *file, int from_tty,
349 struct cmd_list_element *c, const char *value)
350 {
351 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
352 value);
353 }
354
355 /* Nonzero means expecting a trace trap
356 and should stop the inferior and return silently when it happens. */
357
358 int stop_after_trap;
359
360 /* Save register contents here when executing a "finish" command, or when
361 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
362 Thus this contains the return value from the called function (assuming
363 values are returned in a register). */
364
365 struct regcache *stop_registers;
366
367 /* Nonzero after stop if current stack frame should be printed. */
368
369 static int stop_print_frame;
370
371 /* This is a cached copy of the pid/waitstatus of the last event
372 returned by target_wait()/deprecated_target_wait_hook(). This
373 information is returned by get_last_target_status(). */
374 static ptid_t target_last_wait_ptid;
375 static struct target_waitstatus target_last_waitstatus;
376
377 static void context_switch (ptid_t ptid);
378
379 void init_thread_stepping_state (struct thread_info *tss);
380
381 static void init_infwait_state (void);
382
383 static const char follow_fork_mode_child[] = "child";
384 static const char follow_fork_mode_parent[] = "parent";
385
386 static const char *const follow_fork_mode_kind_names[] = {
387 follow_fork_mode_child,
388 follow_fork_mode_parent,
389 NULL
390 };
391
392 static const char *follow_fork_mode_string = follow_fork_mode_parent;
393 static void
394 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
395 struct cmd_list_element *c, const char *value)
396 {
397 fprintf_filtered (file,
398 _("Debugger response to a program "
399 "call of fork or vfork is \"%s\".\n"),
400 value);
401 }
402 \f
403
404 /* Tell the target to follow the fork we're stopped at. Returns true
405 if the inferior should be resumed; false, if the target for some
406 reason decided it's best not to resume. */
407
408 static int
409 follow_fork (void)
410 {
411 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
412 int should_resume = 1;
413 struct thread_info *tp;
414
415 /* Copy user stepping state to the new inferior thread. FIXME: the
416 followed fork child thread should have a copy of most of the
417 parent thread structure's run control related fields, not just these.
418 Initialized to avoid "may be used uninitialized" warnings from gcc. */
419 struct breakpoint *step_resume_breakpoint = NULL;
420 struct breakpoint *exception_resume_breakpoint = NULL;
421 CORE_ADDR step_range_start = 0;
422 CORE_ADDR step_range_end = 0;
423 struct frame_id step_frame_id = { 0 };
424 struct interp *command_interp = NULL;
425
426 if (!non_stop)
427 {
428 ptid_t wait_ptid;
429 struct target_waitstatus wait_status;
430
431 /* Get the last target status returned by target_wait(). */
432 get_last_target_status (&wait_ptid, &wait_status);
433
434 /* If not stopped at a fork event, then there's nothing else to
435 do. */
436 if (wait_status.kind != TARGET_WAITKIND_FORKED
437 && wait_status.kind != TARGET_WAITKIND_VFORKED)
438 return 1;
439
440 /* Check if we switched over from WAIT_PTID, since the event was
441 reported. */
442 if (!ptid_equal (wait_ptid, minus_one_ptid)
443 && !ptid_equal (inferior_ptid, wait_ptid))
444 {
445 /* We did. Switch back to WAIT_PTID thread, to tell the
446 target to follow it (in either direction). We'll
447 afterwards refuse to resume, and inform the user what
448 happened. */
449 switch_to_thread (wait_ptid);
450 should_resume = 0;
451 }
452 }
453
454 tp = inferior_thread ();
455
456 /* If there were any forks/vforks that were caught and are now to be
457 followed, then do so now. */
458 switch (tp->pending_follow.kind)
459 {
460 case TARGET_WAITKIND_FORKED:
461 case TARGET_WAITKIND_VFORKED:
462 {
463 ptid_t parent, child;
464
465 /* If the user did a next/step, etc, over a fork call,
466 preserve the stepping state in the fork child. */
467 if (follow_child && should_resume)
468 {
469 step_resume_breakpoint = clone_momentary_breakpoint
470 (tp->control.step_resume_breakpoint);
471 step_range_start = tp->control.step_range_start;
472 step_range_end = tp->control.step_range_end;
473 step_frame_id = tp->control.step_frame_id;
474 exception_resume_breakpoint
475 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
476 command_interp = tp->control.command_interp;
477
478 /* For now, delete the parent's sr breakpoint, otherwise,
479 parent/child sr breakpoints are considered duplicates,
480 and the child version will not be installed. Remove
481 this when the breakpoints module becomes aware of
482 inferiors and address spaces. */
483 delete_step_resume_breakpoint (tp);
484 tp->control.step_range_start = 0;
485 tp->control.step_range_end = 0;
486 tp->control.step_frame_id = null_frame_id;
487 delete_exception_resume_breakpoint (tp);
488 tp->control.command_interp = NULL;
489 }
490
491 parent = inferior_ptid;
492 child = tp->pending_follow.value.related_pid;
493
494 /* Tell the target to do whatever is necessary to follow
495 either parent or child. */
496 if (target_follow_fork (follow_child, detach_fork))
497 {
498 /* Target refused to follow, or there's some other reason
499 we shouldn't resume. */
500 should_resume = 0;
501 }
502 else
503 {
504 /* This pending follow fork event is now handled, one way
505 or another. The previously selected thread may be gone
506 from the lists by now, but if it is still around, we need
507 to clear the pending follow request. */
508 tp = find_thread_ptid (parent);
509 if (tp)
510 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
511
512 /* This makes sure we don't try to apply the "Switched
513 over from WAIT_PTID" logic above. */
514 nullify_last_target_wait_ptid ();
515
516 /* If we followed the child, switch to it... */
517 if (follow_child)
518 {
519 switch_to_thread (child);
520
521 /* ... and preserve the stepping state, in case the
522 user was stepping over the fork call. */
523 if (should_resume)
524 {
525 tp = inferior_thread ();
526 tp->control.step_resume_breakpoint
527 = step_resume_breakpoint;
528 tp->control.step_range_start = step_range_start;
529 tp->control.step_range_end = step_range_end;
530 tp->control.step_frame_id = step_frame_id;
531 tp->control.exception_resume_breakpoint
532 = exception_resume_breakpoint;
533 tp->control.command_interp = command_interp;
534 }
535 else
536 {
537 /* If we get here, it was because we're trying to
538 resume from a fork catchpoint, but, the user
539 has switched threads away from the thread that
540 forked. In that case, the resume command
541 issued is most likely not applicable to the
542 child, so just warn, and refuse to resume. */
543 warning (_("Not resuming: switched threads "
544 "before following fork child.\n"));
545 }
546
547 /* Reset breakpoints in the child as appropriate. */
548 follow_inferior_reset_breakpoints ();
549 }
550 else
551 switch_to_thread (parent);
552 }
553 }
554 break;
555 case TARGET_WAITKIND_SPURIOUS:
556 /* Nothing to follow. */
557 break;
558 default:
559 internal_error (__FILE__, __LINE__,
560 "Unexpected pending_follow.kind %d\n",
561 tp->pending_follow.kind);
562 break;
563 }
564
565 return should_resume;
566 }
567
568 void
569 follow_inferior_reset_breakpoints (void)
570 {
571 struct thread_info *tp = inferior_thread ();
572
573 /* Was there a step_resume breakpoint? (There was if the user
574 did a "next" at the fork() call.) If so, explicitly reset its
575 thread number.
576
577 step_resumes are a form of bp that are made to be per-thread.
578 Since we created the step_resume bp when the parent process
579 was being debugged, and now are switching to the child process,
580 from the breakpoint package's viewpoint, that's a switch of
581 "threads". We must update the bp's notion of which thread
582 it is for, or it'll be ignored when it triggers. */
583
584 if (tp->control.step_resume_breakpoint)
585 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
586
587 if (tp->control.exception_resume_breakpoint)
588 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
589
590 /* Reinsert all breakpoints in the child. The user may have set
591 breakpoints after catching the fork, in which case those
592 were never set in the child, but only in the parent. This makes
593 sure the inserted breakpoints match the breakpoint list. */
594
595 breakpoint_re_set ();
596 insert_breakpoints ();
597 }
598
599 /* The child has exited or execed: resume threads of the parent the
600 user wanted to be executing. */
601
602 static int
603 proceed_after_vfork_done (struct thread_info *thread,
604 void *arg)
605 {
606 int pid = * (int *) arg;
607
608 if (ptid_get_pid (thread->ptid) == pid
609 && is_running (thread->ptid)
610 && !is_executing (thread->ptid)
611 && !thread->stop_requested
612 && thread->suspend.stop_signal == GDB_SIGNAL_0)
613 {
614 if (debug_infrun)
615 fprintf_unfiltered (gdb_stdlog,
616 "infrun: resuming vfork parent thread %s\n",
617 target_pid_to_str (thread->ptid));
618
619 switch_to_thread (thread->ptid);
620 clear_proceed_status ();
621 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT, 0);
622 }
623
624 return 0;
625 }
626
627 /* Called whenever we notice an exec or exit event, to handle
628 detaching or resuming a vfork parent. */
629
630 static void
631 handle_vfork_child_exec_or_exit (int exec)
632 {
633 struct inferior *inf = current_inferior ();
634
635 if (inf->vfork_parent)
636 {
637 int resume_parent = -1;
638
639 /* This exec or exit marks the end of the shared memory region
640 between the parent and the child. If the user wanted to
641 detach from the parent, now is the time. */
642
643 if (inf->vfork_parent->pending_detach)
644 {
645 struct thread_info *tp;
646 struct cleanup *old_chain;
647 struct program_space *pspace;
648 struct address_space *aspace;
649
650 /* follow-fork child, detach-on-fork on. */
651
652 inf->vfork_parent->pending_detach = 0;
653
654 if (!exec)
655 {
656 /* If we're handling a child exit, then inferior_ptid
657 points at the inferior's pid, not to a thread. */
658 old_chain = save_inferior_ptid ();
659 save_current_program_space ();
660 save_current_inferior ();
661 }
662 else
663 old_chain = save_current_space_and_thread ();
664
665 /* We're letting go of the parent. */
666 tp = any_live_thread_of_process (inf->vfork_parent->pid);
667 switch_to_thread (tp->ptid);
668
669 /* We're about to detach from the parent, which implicitly
670 removes breakpoints from its address space. There's a
671 catch here: we want to reuse the spaces for the child,
672 but, parent/child are still sharing the pspace at this
673 point, although the exec in reality makes the kernel give
674 the child a fresh set of new pages. The problem here is
675 that the breakpoints module, being unaware of this, would
676 likely choose the child process to write to the parent
677 address space. Swapping the child temporarily away from
678 the spaces has the desired effect. Yes, this is "sort
679 of" a hack. */
680
681 pspace = inf->pspace;
682 aspace = inf->aspace;
683 inf->aspace = NULL;
684 inf->pspace = NULL;
685
686 if (debug_infrun || info_verbose)
687 {
688 target_terminal_ours ();
689
690 if (exec)
691 fprintf_filtered (gdb_stdlog,
692 "Detaching vfork parent process "
693 "%d after child exec.\n",
694 inf->vfork_parent->pid);
695 else
696 fprintf_filtered (gdb_stdlog,
697 "Detaching vfork parent process "
698 "%d after child exit.\n",
699 inf->vfork_parent->pid);
700 }
701
702 target_detach (NULL, 0);
703
704 /* Put it back. */
705 inf->pspace = pspace;
706 inf->aspace = aspace;
707
708 do_cleanups (old_chain);
709 }
710 else if (exec)
711 {
712 /* We're staying attached to the parent, so, really give the
713 child a new address space. */
714 inf->pspace = add_program_space (maybe_new_address_space ());
715 inf->aspace = inf->pspace->aspace;
716 inf->removable = 1;
717 set_current_program_space (inf->pspace);
718
719 resume_parent = inf->vfork_parent->pid;
720
721 /* Break the bonds. */
722 inf->vfork_parent->vfork_child = NULL;
723 }
724 else
725 {
726 struct cleanup *old_chain;
727 struct program_space *pspace;
728
729 /* If this is a vfork child exiting, then the pspace and
730 aspaces were shared with the parent. Since we're
731 reporting the process exit, we'll be mourning all that is
732 found in the address space, and switching to null_ptid,
733 preparing to start a new inferior. But, since we don't
734 want to clobber the parent's address/program spaces, we
735 go ahead and create a new one for this exiting
736 inferior. */
737
738 /* Switch to null_ptid, so that clone_program_space doesn't want
739 to read the selected frame of a dead process. */
740 old_chain = save_inferior_ptid ();
741 inferior_ptid = null_ptid;
742
743 /* This inferior is dead, so avoid giving the breakpoints
744 module the option to write through to it (cloning a
745 program space resets breakpoints). */
746 inf->aspace = NULL;
747 inf->pspace = NULL;
748 pspace = add_program_space (maybe_new_address_space ());
749 set_current_program_space (pspace);
750 inf->removable = 1;
751 inf->symfile_flags = SYMFILE_NO_READ;
752 clone_program_space (pspace, inf->vfork_parent->pspace);
753 inf->pspace = pspace;
754 inf->aspace = pspace->aspace;
755
756 /* Put back inferior_ptid. We'll continue mourning this
757 inferior. */
758 do_cleanups (old_chain);
759
760 resume_parent = inf->vfork_parent->pid;
761 /* Break the bonds. */
762 inf->vfork_parent->vfork_child = NULL;
763 }
764
765 inf->vfork_parent = NULL;
766
767 gdb_assert (current_program_space == inf->pspace);
768
769 if (non_stop && resume_parent != -1)
770 {
771 /* If the user wanted the parent to be running, let it go
772 free now. */
773 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
774
775 if (debug_infrun)
776 fprintf_unfiltered (gdb_stdlog,
777 "infrun: resuming vfork parent process %d\n",
778 resume_parent);
779
780 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
781
782 do_cleanups (old_chain);
783 }
784 }
785 }
786
787 /* Enum strings for "set|show follow-exec-mode". */
788
789 static const char follow_exec_mode_new[] = "new";
790 static const char follow_exec_mode_same[] = "same";
791 static const char *const follow_exec_mode_names[] =
792 {
793 follow_exec_mode_new,
794 follow_exec_mode_same,
795 NULL,
796 };
797
798 static const char *follow_exec_mode_string = follow_exec_mode_same;
799 static void
800 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
801 struct cmd_list_element *c, const char *value)
802 {
803 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
804 }
805
806 /* EXECD_PATHNAME is assumed to be non-NULL. */
807
808 static void
809 follow_exec (ptid_t pid, char *execd_pathname)
810 {
811 struct thread_info *th = inferior_thread ();
812 struct inferior *inf = current_inferior ();
813
814 /* This is an exec event that we actually wish to pay attention to.
815 Refresh our symbol table to the newly exec'd program, remove any
816 momentary bp's, etc.
817
818 If there are breakpoints, they aren't really inserted now,
819 since the exec() transformed our inferior into a fresh set
820 of instructions.
821
822 We want to preserve symbolic breakpoints on the list, since
823 we have hopes that they can be reset after the new a.out's
824 symbol table is read.
825
826 However, any "raw" breakpoints must be removed from the list
827 (e.g., the solib bp's), since their address is probably invalid
828 now.
829
830 And, we DON'T want to call delete_breakpoints() here, since
831 that may write the bp's "shadow contents" (the instruction
832 value that was overwritten with a TRAP instruction). Since
833 we now have a new a.out, those shadow contents aren't valid. */
834
835 mark_breakpoints_out ();
836
837 update_breakpoints_after_exec ();
838
839 /* If there was one, it's gone now. We cannot truly step-to-next
840 statement through an exec(). */
841 th->control.step_resume_breakpoint = NULL;
842 th->control.exception_resume_breakpoint = NULL;
843 th->control.step_range_start = 0;
844 th->control.step_range_end = 0;
845
846 /* The target reports the exec event to the main thread, even if
847 some other thread does the exec, and even if the main thread was
848 already stopped --- if debugging in non-stop mode, it's possible
849 the user had the main thread held stopped in the previous image
850 --- release it now. This is the same behavior as step-over-exec
851 with scheduler-locking on in all-stop mode. */
852 th->stop_requested = 0;
853
854 /* What is this a.out's name? */
855 printf_unfiltered (_("%s is executing new program: %s\n"),
856 target_pid_to_str (inferior_ptid),
857 execd_pathname);
858
859 /* We've followed the inferior through an exec. Therefore, the
860 inferior has essentially been killed & reborn. */
861
862 gdb_flush (gdb_stdout);
863
864 breakpoint_init_inferior (inf_execd);
865
866 if (gdb_sysroot && *gdb_sysroot)
867 {
868 char *name = alloca (strlen (gdb_sysroot)
869 + strlen (execd_pathname)
870 + 1);
871
872 strcpy (name, gdb_sysroot);
873 strcat (name, execd_pathname);
874 execd_pathname = name;
875 }
876
877 /* Reset the shared library package. This ensures that we get a
878 shlib event when the child reaches "_start", at which point the
879 dld will have had a chance to initialize the child. */
880 /* Also, loading a symbol file below may trigger symbol lookups, and
881 we don't want those to be satisfied by the libraries of the
882 previous incarnation of this process. */
883 no_shared_libraries (NULL, 0);
884
885 if (follow_exec_mode_string == follow_exec_mode_new)
886 {
887 struct program_space *pspace;
888
889 /* The user wants to keep the old inferior and program spaces
890 around. Create a new fresh one, and switch to it. */
891
892 inf = add_inferior (current_inferior ()->pid);
893 pspace = add_program_space (maybe_new_address_space ());
894 inf->pspace = pspace;
895 inf->aspace = pspace->aspace;
896
897 exit_inferior_num_silent (current_inferior ()->num);
898
899 set_current_inferior (inf);
900 set_current_program_space (pspace);
901 }
902 else
903 {
904 /* The old description may no longer be fit for the new image.
905 E.g., a 64-bit process exec'd a 32-bit process. Clear the
906 old description; we'll read a new one below. No need to do
907 this on "follow-exec-mode new", as the old inferior stays
908 around (its description is later cleared/refetched on
909 restart). */
910 target_clear_description ();
911 }
912
913 gdb_assert (current_program_space == inf->pspace);
914
915 /* That a.out is now the one to use. */
916 exec_file_attach (execd_pathname, 0);
917
918 /* SYMFILE_DEFER_BP_RESET is used because the proper displacement for a PIE
919 (Position Independent Executable) main symbol file will only get applied by
920 solib_create_inferior_hook below. breakpoint_re_set would fail to insert
921 the breakpoints with the zero displacement. */
922
923 symbol_file_add (execd_pathname,
924 (inf->symfile_flags
925 | SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET),
926 NULL, 0);
927
928 if ((inf->symfile_flags & SYMFILE_NO_READ) == 0)
929 set_initial_language ();
930
931 /* If the target can specify a description, read it. Must do this
932 after flipping to the new executable (because the target supplied
933 description must be compatible with the executable's
934 architecture, and the old executable may e.g., be 32-bit, while
935 the new one 64-bit), and before anything involving memory or
936 registers. */
937 target_find_description ();
938
939 solib_create_inferior_hook (0);
940
941 jit_inferior_created_hook ();
942
943 breakpoint_re_set ();
944
945 /* Reinsert all breakpoints. (Those which were symbolic have
946 been reset to the proper address in the new a.out, thanks
947 to symbol_file_command...). */
948 insert_breakpoints ();
949
950 /* The next resume of this inferior should bring it to the shlib
951 startup breakpoints. (If the user had also set bp's on
952 "main" from the old (parent) process, then they'll auto-
953 matically get reset there in the new process.). */
954 }
955
956 /* Non-zero if we are just simulating a single-step. This is needed
957 because we cannot remove the breakpoints in the inferior process
958 until after the `wait' in `wait_for_inferior'. */
959 static int singlestep_breakpoints_inserted_p = 0;
960
961 /* The thread we inserted single-step breakpoints for. */
962 static ptid_t singlestep_ptid;
963
964 /* PC when we started this single-step. */
965 static CORE_ADDR singlestep_pc;
966
967 /* Info about an instruction that is being stepped over. Invalid if
968 ASPACE is NULL. */
969
970 struct step_over_info
971 {
972 /* The instruction's address space. */
973 struct address_space *aspace;
974
975 /* The instruction's address. */
976 CORE_ADDR address;
977 };
978
979 /* The step-over info of the location that is being stepped over.
980
981 Note that with async/breakpoint always-inserted mode, a user might
982 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
983 being stepped over. As setting a new breakpoint inserts all
984 breakpoints, we need to make sure the breakpoint being stepped over
985 isn't inserted then. We do that by only clearing the step-over
986 info when the step-over is actually finished (or aborted).
987
988 Presently GDB can only step over one breakpoint at any given time.
989 Given that threads that can't run code in the same address space as the
990 breakpoint's can't really miss the breakpoint, GDB could be taught
991 to step-over at most one breakpoint per address space (so this info
992 could move to the address space object if/when GDB is extended).
993 The set of breakpoints being stepped over will normally be much
994 smaller than the set of all breakpoints, so a flag in the
995 breakpoint location structure would be wasteful. A separate list
996 also saves complexity and run-time, as otherwise we'd have to go
997 through all breakpoint locations clearing their flag whenever we
998 start a new sequence. Similar considerations weigh against storing
999 this info in the thread object. Plus, not all step overs actually
1000 have breakpoint locations -- e.g., stepping past a single-step
1001 breakpoint, or stepping to complete a non-continuable
1002 watchpoint. */
1003 static struct step_over_info step_over_info;
1004
1005 /* Record the address of the breakpoint/instruction we're currently
1006 stepping over. */
1007
1008 static void
1009 set_step_over_info (struct address_space *aspace, CORE_ADDR address)
1010 {
1011 step_over_info.aspace = aspace;
1012 step_over_info.address = address;
1013 }
1014
1015 /* Called when we're no longer stepping over a breakpoint / an
1016 instruction, so all breakpoints are free to be (re)inserted. */
1017
1018 static void
1019 clear_step_over_info (void)
1020 {
1021 step_over_info.aspace = NULL;
1022 step_over_info.address = 0;
1023 }
1024
1025 /* See inferior.h. */
1026
1027 int
1028 stepping_past_instruction_at (struct address_space *aspace,
1029 CORE_ADDR address)
1030 {
1031 return (step_over_info.aspace != NULL
1032 && breakpoint_address_match (aspace, address,
1033 step_over_info.aspace,
1034 step_over_info.address));
1035 }
1036
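1037 /* Illustrative use, under the assumption described in the comment above:
     while a step-over is in flight, breakpoint insertion is expected to
     consult this predicate and skip the location being stepped over,
     roughly:

         if (stepping_past_instruction_at (bl_aspace, bl_address))
           continue;    -- don't re-insert the breakpoint being stepped over

     where `bl_aspace'/`bl_address' stand in for a breakpoint location's
     address space and address (the exact structures live in breakpoint.c).
     clear_step_over_info runs only once the step-over finishes or aborts.  */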
1037 \f
1038 /* Displaced stepping. */
1039
1040 /* In non-stop debugging mode, we must take special care to manage
1041 breakpoints properly; in particular, the traditional strategy for
1042 stepping a thread past a breakpoint it has hit is unsuitable.
1043 'Displaced stepping' is a tactic for stepping one thread past a
1044 breakpoint it has hit while ensuring that other threads running
1045 concurrently will hit the breakpoint as they should.
1046
1047 The traditional way to step a thread T off a breakpoint in a
1048 multi-threaded program in all-stop mode is as follows:
1049
1050 a0) Initially, all threads are stopped, and breakpoints are not
1051 inserted.
1052 a1) We single-step T, leaving breakpoints uninserted.
1053 a2) We insert breakpoints, and resume all threads.
1054
1055 In non-stop debugging, however, this strategy is unsuitable: we
1056 don't want to have to stop all threads in the system in order to
1057 continue or step T past a breakpoint. Instead, we use displaced
1058 stepping:
1059
1060 n0) Initially, T is stopped, other threads are running, and
1061 breakpoints are inserted.
1062 n1) We copy the instruction "under" the breakpoint to a separate
1063 location, outside the main code stream, making any adjustments
1064 to the instruction, register, and memory state as directed by
1065 T's architecture.
1066 n2) We single-step T over the instruction at its new location.
1067 n3) We adjust the resulting register and memory state as directed
1068 by T's architecture. This includes resetting T's PC to point
1069 back into the main instruction stream.
1070 n4) We resume T.
1071
1072 This approach depends on the following gdbarch methods:
1073
1074 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1075 indicate where to copy the instruction, and how much space must
1076 be reserved there. We use these in step n1.
1077
1078 - gdbarch_displaced_step_copy_insn copies an instruction to a new
1079 address, and makes any necessary adjustments to the instruction,
1080 register contents, and memory. We use this in step n1.
1081
1082 - gdbarch_displaced_step_fixup adjusts registers and memory after
1083 we have successfully single-stepped the instruction, to yield the
1084 same effect the instruction would have had if we had executed it
1085 at its original address. We use this in step n3.
1086
1087 - gdbarch_displaced_step_free_closure provides cleanup.
1088
1089 The gdbarch_displaced_step_copy_insn and
1090 gdbarch_displaced_step_fixup functions must be written so that
1091 copying an instruction with gdbarch_displaced_step_copy_insn,
1092 single-stepping across the copied instruction, and then applying
1093 gdbarch_displaced_step_fixup should have the same effects on the
1094 thread's memory and registers as stepping the instruction in place
1095 would have. Exactly which responsibilities fall to the copy and
1096 which fall to the fixup is up to the author of those functions.
1097
1098 See the comments in gdbarch.sh for details.
1099
1100 Note that displaced stepping and software single-step cannot
1101 currently be used in combination, although with some care I think
1102 they could be made to. Software single-step works by placing
1103 breakpoints on all possible subsequent instructions; if the
1104 displaced instruction is a PC-relative jump, those breakpoints
1105 could fall in very strange places --- on pages that aren't
1106 executable, or at addresses that are not proper instruction
1107 boundaries. (We do generally let other threads run while we wait
1108 to hit the software single-step breakpoint, and they might
1109 encounter such a corrupted instruction.) One way to work around
1110 this would be to have gdbarch_displaced_step_copy_insn fully
1111 simulate the effect of PC-relative instructions (and return NULL)
1112 on architectures that use software single-stepping.
1113
1114 In non-stop mode, we can have independent and simultaneous step
1115 requests, so more than one thread may need to simultaneously step
1116 over a breakpoint. The current implementation assumes there is
1117 only one scratch space per process. In this case, we have to
1118 serialize access to the scratch space. If thread A wants to step
1119 over a breakpoint, but we are currently waiting for some other
1120 thread to complete a displaced step, we leave thread A stopped and
1121 place it in the displaced_step_request_queue. Whenever a displaced
1122 step finishes, we pick the next thread in the queue and start a new
1123 displaced step operation on it. See displaced_step_prepare and
1124 displaced_step_fixup for details. */
1125
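1126 /* Illustrative sketch of steps n1-n4 above for a single thread, condensed
     from displaced_step_prepare and displaced_step_fixup below (error
     handling, request queueing and the scratch-area save/restore are
     omitted):

         copy = gdbarch_displaced_step_location (gdbarch);           [n1]
         len = gdbarch_max_insn_length (gdbarch);
         closure = gdbarch_displaced_step_copy_insn (gdbarch, original,
                                                     copy, regcache);
         regcache_write_pc (regcache, copy);
         target_resume (ptid, 1, GDB_SIGNAL_0);                      [n2]
         ... the target reports the completed step via target_wait ...
         gdbarch_displaced_step_fixup (gdbarch, closure, original,   [n3]
                                       copy, regcache);
         gdbarch_displaced_step_free_closure (gdbarch, closure);
         ... and the thread is then resumed for real                 [n4]  */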
1126 struct displaced_step_request
1127 {
1128 ptid_t ptid;
1129 struct displaced_step_request *next;
1130 };
1131
1132 /* Per-inferior displaced stepping state. */
1133 struct displaced_step_inferior_state
1134 {
1135 /* Pointer to next in linked list. */
1136 struct displaced_step_inferior_state *next;
1137
1138 /* The process this displaced step state refers to. */
1139 int pid;
1140
1141 /* A queue of pending displaced stepping requests. One entry per
1142 thread that needs to do a displaced step. */
1143 struct displaced_step_request *step_request_queue;
1144
1145 /* If this is not null_ptid, this is the thread carrying out a
1146 displaced single-step in process PID. This thread's state will
1147 require fixing up once it has completed its step. */
1148 ptid_t step_ptid;
1149
1150 /* The architecture the thread had when we stepped it. */
1151 struct gdbarch *step_gdbarch;
1152
1153 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1154 for post-step cleanup. */
1155 struct displaced_step_closure *step_closure;
1156
1157 /* The address of the original instruction, and the copy we
1158 made. */
1159 CORE_ADDR step_original, step_copy;
1160
1161 /* Saved contents of copy area. */
1162 gdb_byte *step_saved_copy;
1163 };
1164
1165 /* The list of states of processes involved in displaced stepping
1166 presently. */
1167 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1168
1169 /* Get the displaced stepping state of process PID. */
1170
1171 static struct displaced_step_inferior_state *
1172 get_displaced_stepping_state (int pid)
1173 {
1174 struct displaced_step_inferior_state *state;
1175
1176 for (state = displaced_step_inferior_states;
1177 state != NULL;
1178 state = state->next)
1179 if (state->pid == pid)
1180 return state;
1181
1182 return NULL;
1183 }
1184
1185 /* Add a new displaced stepping state for process PID to the displaced
1186 stepping state list, or return a pointer to an already existing
1187 entry, if it already exists. Never returns NULL. */
1188
1189 static struct displaced_step_inferior_state *
1190 add_displaced_stepping_state (int pid)
1191 {
1192 struct displaced_step_inferior_state *state;
1193
1194 for (state = displaced_step_inferior_states;
1195 state != NULL;
1196 state = state->next)
1197 if (state->pid == pid)
1198 return state;
1199
1200 state = xcalloc (1, sizeof (*state));
1201 state->pid = pid;
1202 state->next = displaced_step_inferior_states;
1203 displaced_step_inferior_states = state;
1204
1205 return state;
1206 }
1207
1208 /* If the inferior is displaced stepping, and ADDR equals the starting address
1209 of the copy area, return the corresponding displaced_step_closure. Otherwise,
1210 return NULL. */
1211
1212 struct displaced_step_closure*
1213 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1214 {
1215 struct displaced_step_inferior_state *displaced
1216 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1217
1218 /* If checking the mode of displaced instruction in copy area. */
1219 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1220 && (displaced->step_copy == addr))
1221 return displaced->step_closure;
1222
1223 return NULL;
1224 }
1225
1226 /* Remove the displaced stepping state of process PID. */
1227
1228 static void
1229 remove_displaced_stepping_state (int pid)
1230 {
1231 struct displaced_step_inferior_state *it, **prev_next_p;
1232
1233 gdb_assert (pid != 0);
1234
1235 it = displaced_step_inferior_states;
1236 prev_next_p = &displaced_step_inferior_states;
1237 while (it)
1238 {
1239 if (it->pid == pid)
1240 {
1241 *prev_next_p = it->next;
1242 xfree (it);
1243 return;
1244 }
1245
1246 prev_next_p = &it->next;
1247 it = *prev_next_p;
1248 }
1249 }
1250
1251 static void
1252 infrun_inferior_exit (struct inferior *inf)
1253 {
1254 remove_displaced_stepping_state (inf->pid);
1255 }
1256
1257 /* If ON, and the architecture supports it, GDB will use displaced
1258 stepping to step over breakpoints. If OFF, or if the architecture
1259 doesn't support it, GDB will instead use the traditional
1260 hold-and-step approach. If AUTO (which is the default), GDB will
1261 decide which technique to use to step over breakpoints depending on
1262 which of all-stop or non-stop mode is active --- displaced stepping
1263 in non-stop mode; hold-and-step in all-stop mode. */
1264
1265 static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1266
1267 static void
1268 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1269 struct cmd_list_element *c,
1270 const char *value)
1271 {
1272 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
1273 fprintf_filtered (file,
1274 _("Debugger's willingness to use displaced stepping "
1275 "to step over breakpoints is %s (currently %s).\n"),
1276 value, non_stop ? "on" : "off");
1277 else
1278 fprintf_filtered (file,
1279 _("Debugger's willingness to use displaced stepping "
1280 "to step over breakpoints is %s.\n"), value);
1281 }
1282
1283 /* Return non-zero if displaced stepping can/should be used to step
1284 over breakpoints. */
1285
1286 static int
1287 use_displaced_stepping (struct gdbarch *gdbarch)
1288 {
1289 return (((can_use_displaced_stepping == AUTO_BOOLEAN_AUTO && non_stop)
1290 || can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1291 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1292 && find_record_target () == NULL);
1293 }
1294
1295 /* Clean out any stray displaced stepping state. */
1296 static void
1297 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1298 {
1299 /* Indicate that there is no cleanup pending. */
1300 displaced->step_ptid = null_ptid;
1301
1302 if (displaced->step_closure)
1303 {
1304 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1305 displaced->step_closure);
1306 displaced->step_closure = NULL;
1307 }
1308 }
1309
1310 static void
1311 displaced_step_clear_cleanup (void *arg)
1312 {
1313 struct displaced_step_inferior_state *state = arg;
1314
1315 displaced_step_clear (state);
1316 }
1317
1318 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1319 void
1320 displaced_step_dump_bytes (struct ui_file *file,
1321 const gdb_byte *buf,
1322 size_t len)
1323 {
1324 int i;
1325
1326 for (i = 0; i < len; i++)
1327 fprintf_unfiltered (file, "%02x ", buf[i]);
1328 fputs_unfiltered ("\n", file);
1329 }
1330
1331 /* Prepare to single-step, using displaced stepping.
1332
1333 Note that we cannot use displaced stepping when we have a signal to
1334 deliver. If we have a signal to deliver and an instruction to step
1335 over, then after the step, there will be no indication from the
1336 target whether the thread entered a signal handler or ignored the
1337 signal and stepped over the instruction successfully --- both cases
1338 result in a simple SIGTRAP. In the first case we mustn't do a
1339 fixup, and in the second case we must --- but we can't tell which.
1340 Comments in the code for 'random signals' in handle_inferior_event
1341 explain how we handle this case instead.
1342
1343 Returns 1 if preparing was successful -- this thread is going to be
1344 stepped now; or 0 if displaced stepping this thread got queued. */
1345 static int
1346 displaced_step_prepare (ptid_t ptid)
1347 {
1348 struct cleanup *old_cleanups, *ignore_cleanups;
1349 struct thread_info *tp = find_thread_ptid (ptid);
1350 struct regcache *regcache = get_thread_regcache (ptid);
1351 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1352 CORE_ADDR original, copy;
1353 ULONGEST len;
1354 struct displaced_step_closure *closure;
1355 struct displaced_step_inferior_state *displaced;
1356 int status;
1357
1358 /* We should never reach this function if the architecture does not
1359 support displaced stepping. */
1360 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1361
1362 /* Disable range stepping while executing in the scratch pad. We
1363 want a single-step even if executing the displaced instruction in
1364 the scratch buffer lands within the stepping range (e.g., a
1365 jump/branch). */
1366 tp->control.may_range_step = 0;
1367
1368 /* We have to displaced step one thread at a time, as we only have
1369 access to a single scratch space per inferior. */
1370
1371 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1372
1373 if (!ptid_equal (displaced->step_ptid, null_ptid))
1374 {
1375 /* Already waiting for a displaced step to finish. Defer this
1376 request and place in queue. */
1377 struct displaced_step_request *req, *new_req;
1378
1379 if (debug_displaced)
1380 fprintf_unfiltered (gdb_stdlog,
1381 "displaced: defering step of %s\n",
1382 target_pid_to_str (ptid));
1383
1384 new_req = xmalloc (sizeof (*new_req));
1385 new_req->ptid = ptid;
1386 new_req->next = NULL;
1387
1388 if (displaced->step_request_queue)
1389 {
1390 for (req = displaced->step_request_queue;
1391 req && req->next;
1392 req = req->next)
1393 ;
1394 req->next = new_req;
1395 }
1396 else
1397 displaced->step_request_queue = new_req;
1398
1399 return 0;
1400 }
1401 else
1402 {
1403 if (debug_displaced)
1404 fprintf_unfiltered (gdb_stdlog,
1405 "displaced: stepping %s now\n",
1406 target_pid_to_str (ptid));
1407 }
1408
1409 displaced_step_clear (displaced);
1410
1411 old_cleanups = save_inferior_ptid ();
1412 inferior_ptid = ptid;
1413
1414 original = regcache_read_pc (regcache);
1415
1416 copy = gdbarch_displaced_step_location (gdbarch);
1417 len = gdbarch_max_insn_length (gdbarch);
1418
1419 /* Save the original contents of the copy area. */
1420 displaced->step_saved_copy = xmalloc (len);
1421 ignore_cleanups = make_cleanup (free_current_contents,
1422 &displaced->step_saved_copy);
1423 status = target_read_memory (copy, displaced->step_saved_copy, len);
1424 if (status != 0)
1425 throw_error (MEMORY_ERROR,
1426 _("Error accessing memory address %s (%s) for "
1427 "displaced-stepping scratch space."),
1428 paddress (gdbarch, copy), safe_strerror (status));
1429 if (debug_displaced)
1430 {
1431 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1432 paddress (gdbarch, copy));
1433 displaced_step_dump_bytes (gdb_stdlog,
1434 displaced->step_saved_copy,
1435 len);
1436 }
1437
1438 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1439 original, copy, regcache);
1440
1441 /* We don't support the fully-simulated case at present. */
1442 gdb_assert (closure);
1443
1444 /* Save the information we need to fix things up if the step
1445 succeeds. */
1446 displaced->step_ptid = ptid;
1447 displaced->step_gdbarch = gdbarch;
1448 displaced->step_closure = closure;
1449 displaced->step_original = original;
1450 displaced->step_copy = copy;
1451
1452 make_cleanup (displaced_step_clear_cleanup, displaced);
1453
1454 /* Resume execution at the copy. */
1455 regcache_write_pc (regcache, copy);
1456
1457 discard_cleanups (ignore_cleanups);
1458
1459 do_cleanups (old_cleanups);
1460
1461 if (debug_displaced)
1462 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1463 paddress (gdbarch, copy));
1464
1465 return 1;
1466 }
1467
1468 static void
1469 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1470 const gdb_byte *myaddr, int len)
1471 {
1472 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1473
1474 inferior_ptid = ptid;
1475 write_memory (memaddr, myaddr, len);
1476 do_cleanups (ptid_cleanup);
1477 }
1478
1479 /* Restore the contents of the copy area for thread PTID. */
1480
1481 static void
1482 displaced_step_restore (struct displaced_step_inferior_state *displaced,
1483 ptid_t ptid)
1484 {
1485 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1486
1487 write_memory_ptid (ptid, displaced->step_copy,
1488 displaced->step_saved_copy, len);
1489 if (debug_displaced)
1490 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
1491 target_pid_to_str (ptid),
1492 paddress (displaced->step_gdbarch,
1493 displaced->step_copy));
1494 }
1495
1496 static void
1497 displaced_step_fixup (ptid_t event_ptid, enum gdb_signal signal)
1498 {
1499 struct cleanup *old_cleanups;
1500 struct displaced_step_inferior_state *displaced
1501 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1502
1503 /* Was any thread of this process doing a displaced step? */
1504 if (displaced == NULL)
1505 return;
1506
1507 /* Was this event for the pid we displaced? */
1508 if (ptid_equal (displaced->step_ptid, null_ptid)
1509 || ! ptid_equal (displaced->step_ptid, event_ptid))
1510 return;
1511
1512 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1513
1514 displaced_step_restore (displaced, displaced->step_ptid);
1515
1516 /* Did the instruction complete successfully? */
1517 if (signal == GDB_SIGNAL_TRAP)
1518 {
1519 /* Fix up the resulting state. */
1520 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1521 displaced->step_closure,
1522 displaced->step_original,
1523 displaced->step_copy,
1524 get_thread_regcache (displaced->step_ptid));
1525 }
1526 else
1527 {
1528 /* Since the instruction didn't complete, all we can do is
1529 relocate the PC. */
1530 struct regcache *regcache = get_thread_regcache (event_ptid);
1531 CORE_ADDR pc = regcache_read_pc (regcache);
1532
1533 pc = displaced->step_original + (pc - displaced->step_copy);
1534 regcache_write_pc (regcache, pc);
1535 }
1536
1537 do_cleanups (old_cleanups);
1538
1539 displaced->step_ptid = null_ptid;
1540
1541 /* Are there any pending displaced stepping requests? If so, run
1542 one now. Leave the state object around, since we're likely to
1543 need it again soon. */
1544 while (displaced->step_request_queue)
1545 {
1546 struct displaced_step_request *head;
1547 ptid_t ptid;
1548 struct regcache *regcache;
1549 struct gdbarch *gdbarch;
1550 CORE_ADDR actual_pc;
1551 struct address_space *aspace;
1552
1553 head = displaced->step_request_queue;
1554 ptid = head->ptid;
1555 displaced->step_request_queue = head->next;
1556 xfree (head);
1557
1558 context_switch (ptid);
1559
1560 regcache = get_thread_regcache (ptid);
1561 actual_pc = regcache_read_pc (regcache);
1562 aspace = get_regcache_aspace (regcache);
1563
1564 if (breakpoint_here_p (aspace, actual_pc))
1565 {
1566 if (debug_displaced)
1567 fprintf_unfiltered (gdb_stdlog,
1568 "displaced: stepping queued %s now\n",
1569 target_pid_to_str (ptid));
1570
1571 displaced_step_prepare (ptid);
1572
1573 gdbarch = get_regcache_arch (regcache);
1574
1575 if (debug_displaced)
1576 {
1577 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1578 gdb_byte buf[4];
1579
1580 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1581 paddress (gdbarch, actual_pc));
1582 read_memory (actual_pc, buf, sizeof (buf));
1583 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1584 }
1585
1586 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1587 displaced->step_closure))
1588 target_resume (ptid, 1, GDB_SIGNAL_0);
1589 else
1590 target_resume (ptid, 0, GDB_SIGNAL_0);
1591
1592 /* Done, we're stepping a thread. */
1593 break;
1594 }
1595 else
1596 {
1597 int step;
1598 struct thread_info *tp = inferior_thread ();
1599
1600 /* The breakpoint we were sitting under has since been
1601 removed. */
1602 tp->control.trap_expected = 0;
1603
1604 /* Go back to what we were trying to do. */
1605 step = currently_stepping (tp);
1606
1607 if (debug_displaced)
1608 fprintf_unfiltered (gdb_stdlog,
1609 "displaced: breakpoint is gone: %s, step(%d)\n",
1610 target_pid_to_str (tp->ptid), step);
1611
1612 target_resume (ptid, step, GDB_SIGNAL_0);
1613 tp->suspend.stop_signal = GDB_SIGNAL_0;
1614
1615 /* This request was discarded. See if there's any other
1616 thread waiting for its turn. */
1617 }
1618 }
1619 }
1620
1621 /* Update global variables holding ptids to hold NEW_PTID if they were
1622 holding OLD_PTID. */
1623 static void
1624 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1625 {
1626 struct displaced_step_request *it;
1627 struct displaced_step_inferior_state *displaced;
1628
1629 if (ptid_equal (inferior_ptid, old_ptid))
1630 inferior_ptid = new_ptid;
1631
1632 if (ptid_equal (singlestep_ptid, old_ptid))
1633 singlestep_ptid = new_ptid;
1634
1635 for (displaced = displaced_step_inferior_states;
1636 displaced;
1637 displaced = displaced->next)
1638 {
1639 if (ptid_equal (displaced->step_ptid, old_ptid))
1640 displaced->step_ptid = new_ptid;
1641
1642 for (it = displaced->step_request_queue; it; it = it->next)
1643 if (ptid_equal (it->ptid, old_ptid))
1644 it->ptid = new_ptid;
1645 }
1646 }
1647
1648 \f
1649 /* Resuming. */
1650
1651 /* Things to clean up if we QUIT out of resume (). */
1652 static void
1653 resume_cleanups (void *ignore)
1654 {
1655 normal_stop ();
1656 }
1657
1658 static const char schedlock_off[] = "off";
1659 static const char schedlock_on[] = "on";
1660 static const char schedlock_step[] = "step";
1661 static const char *const scheduler_enums[] = {
1662 schedlock_off,
1663 schedlock_on,
1664 schedlock_step,
1665 NULL
1666 };
1667 static const char *scheduler_mode = schedlock_off;
1668 static void
1669 show_scheduler_mode (struct ui_file *file, int from_tty,
1670 struct cmd_list_element *c, const char *value)
1671 {
1672 fprintf_filtered (file,
1673 _("Mode for locking scheduler "
1674 "during execution is \"%s\".\n"),
1675 value);
1676 }
1677
1678 static void
1679 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1680 {
1681 if (!target_can_lock_scheduler)
1682 {
1683 scheduler_mode = schedlock_off;
1684 error (_("Target '%s' cannot support this command."), target_shortname);
1685 }
1686 }
1687
1688 /* True if execution commands resume all threads of all processes by
1689 default; otherwise, resume only threads of the current inferior
1690 process. */
1691 int sched_multi = 0;
1692
1693 /* Try to set up for software single stepping over the specified location.
1694 Return 1 if target_resume() should use hardware single step.
1695
1696 GDBARCH the current gdbarch.
1697 PC the location to step over. */
1698
1699 static int
1700 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1701 {
1702 int hw_step = 1;
1703
1704 if (execution_direction == EXEC_FORWARD
1705 && gdbarch_software_single_step_p (gdbarch)
1706 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1707 {
1708 hw_step = 0;
1709 /* Do not pull these breakpoints until after a `wait' in
1710 `wait_for_inferior'. */
1711 singlestep_breakpoints_inserted_p = 1;
1712 singlestep_ptid = inferior_ptid;
1713 singlestep_pc = pc;
1714 }
1715 return hw_step;
1716 }
1717
1718 /* Return a ptid representing the set of threads that we will proceed,
1719 in the perspective of the user/frontend. We may actually resume
1720 fewer threads at first, e.g., if a thread is stopped at a
1721 breakpoint that needs stepping-off, but that should not be visible
1722 to the user/frontend, and neither should the frontend/user be
1723 allowed to proceed any of the threads that happen to be stopped for
1724 internal run control handling, if a previous command wanted them
1725 resumed. */
1726
1727 ptid_t
1728 user_visible_resume_ptid (int step)
1729 {
1730 /* By default, resume all threads of all processes. */
1731 ptid_t resume_ptid = RESUME_ALL;
1732
1733 /* Maybe resume only the threads of the current process. */
1734 if (!sched_multi && target_supports_multi_process ())
1735 {
1736 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1737 }
1738
1739 /* Maybe resume a single thread after all. */
1740 if (non_stop)
1741 {
1742 /* With non-stop mode on, threads are always handled
1743 individually. */
1744 resume_ptid = inferior_ptid;
1745 }
1746 else if ((scheduler_mode == schedlock_on)
1747 || (scheduler_mode == schedlock_step
1748 && (step || singlestep_breakpoints_inserted_p)))
1749 {
1750 /* User-settable 'scheduler' mode requires solo thread resume. */
1751 resume_ptid = inferior_ptid;
1752 }
1753
1754 return resume_ptid;
1755 }
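/* For illustration only (a hypothetical, non-compiled sketch assuming
   all-stop mode, the default "set schedule-multiple off", and a target
   that supports multi-process):  */
#if 0
static void
example_user_visible_resume_ptid (void)
{
  /* Default "set scheduler-locking off": resume every thread of the
     current process.  */
  gdb_assert (ptid_equal (user_visible_resume_ptid (0),
                          pid_to_ptid (ptid_get_pid (inferior_ptid))));

  /* "set scheduler-locking on": resume only the current thread.  */
  scheduler_mode = schedlock_on;
  gdb_assert (ptid_equal (user_visible_resume_ptid (0), inferior_ptid));
}
#endif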
1756
1757 /* Resume the inferior, but allow a QUIT. This is useful if the user
1758 wants to interrupt some lengthy single-stepping operation
1759 (for child processes, the SIGINT goes to the inferior, and so
1760 we get a SIGINT random_signal, but for remote debugging and perhaps
1761 other targets, that's not true).
1762
1763 STEP nonzero if we should step (zero to continue instead).
1764 SIG is the signal to give the inferior (zero for none). */
1765 void
1766 resume (int step, enum gdb_signal sig)
1767 {
1768 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1769 struct regcache *regcache = get_current_regcache ();
1770 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1771 struct thread_info *tp = inferior_thread ();
1772 CORE_ADDR pc = regcache_read_pc (regcache);
1773 struct address_space *aspace = get_regcache_aspace (regcache);
1774 ptid_t resume_ptid;
1775 int hw_step = step;
1776
1777 QUIT;
1778
1779 if (current_inferior ()->waiting_for_vfork_done)
1780 {
1781 /* Don't try to single-step a vfork parent that is waiting for
1782 the child to get out of the shared memory region (by exec'ing
1783 or exiting). This is particularly important on software
1784 single-step archs, as the child process would trip on the
1785 software single step breakpoint inserted for the parent
1786 process. Since the parent will not actually execute any
1787 instruction until the child is out of the shared region (such
1788 are vfork's semantics), it is safe to simply continue it.
1789 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
1790 the parent, and tell it to `keep_going', which automatically
1791 sets it stepping again. */
1792 if (debug_infrun)
1793 fprintf_unfiltered (gdb_stdlog,
1794 "infrun: resume : clear step\n");
1795 hw_step = 0;
1796 }
1797
1798 if (debug_infrun)
1799 fprintf_unfiltered (gdb_stdlog,
1800 "infrun: resume (step=%d, signal=%s), "
1801 "trap_expected=%d, current thread [%s] at %s\n",
1802 step, gdb_signal_to_symbol_string (sig),
1803 tp->control.trap_expected,
1804 target_pid_to_str (inferior_ptid),
1805 paddress (gdbarch, pc));
1806
1807 /* Normally, by the time we reach `resume', the breakpoints are either
1808 removed or inserted, as appropriate. The exception is if we're sitting
1809 at a permanent breakpoint; we need to step over it, but permanent
1810 breakpoints can't be removed. So we have to test for it here. */
1811 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1812 {
1813 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1814 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1815 else
1816 error (_("\
1817 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1818 how to step past a permanent breakpoint on this architecture. Try using\n\
1819 a command like `return' or `jump' to continue execution."));
1820 }
1821
1822 /* If we have a breakpoint to step over, make sure to do a single
1823 step only. Same if we have software watchpoints. */
1824 if (tp->control.trap_expected || bpstat_should_step ())
1825 tp->control.may_range_step = 0;
1826
1827 /* If enabled, step over breakpoints by executing a copy of the
1828 instruction at a different address.
1829
1830 We can't use displaced stepping when we have a signal to deliver;
1831 the comments for displaced_step_prepare explain why. The
1832 comments in the handle_inferior event for dealing with 'random
1833 signals' explain what we do instead.
1834
1835 We can't use displaced stepping when we are waiting for a vfork_done
1836 event; displaced stepping would break the vfork child in the same way
1837 that a software single-step breakpoint would. */
1838 if (use_displaced_stepping (gdbarch)
1839 && (tp->control.trap_expected
1840 || (hw_step && gdbarch_software_single_step_p (gdbarch)))
1841 && sig == GDB_SIGNAL_0
1842 && !current_inferior ()->waiting_for_vfork_done)
1843 {
1844 struct displaced_step_inferior_state *displaced;
1845
1846 if (!displaced_step_prepare (inferior_ptid))
1847 {
1848 /* Got placed in displaced stepping queue. Will be resumed
1849 later when all the currently queued displaced stepping
1850 requests finish. The thread is not executing at this
1851 point, and the call to set_executing will be made later.
1852 But we need to call set_running here, since from the
1853 user/frontend's point of view, threads were set running.
1854 Unless we're calling an inferior function, as in that
1855 case we pretend the inferior doesn't run at all. */
1856 if (!tp->control.in_infcall)
1857 set_running (user_visible_resume_ptid (step), 1);
1858 discard_cleanups (old_cleanups);
1859 return;
1860 }
1861
1862 /* Update pc to reflect the new address from which we will execute
1863 instructions due to displaced stepping. */
1864 pc = regcache_read_pc (get_thread_regcache (inferior_ptid));
1865
1866 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1867 hw_step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1868 displaced->step_closure);
1869 }
1870
1871 /* Do we need to do it the hard way, w/temp breakpoints? */
1872 else if (step)
1873 step = maybe_software_singlestep (gdbarch, pc);
1874
1875 /* Currently, our software single-step implementation leads to different
1876 results than hardware single-stepping in one situation: when stepping
1877 into delivering a signal which has an associated signal handler,
1878 hardware single-step will stop at the first instruction of the handler,
1879 while software single-step will simply skip execution of the handler.
1880
1881 For now, this difference in behavior is accepted since there is no
1882 easy way to actually implement single-stepping into a signal handler
1883 without kernel support.
1884
1885 However, there is one scenario where this difference leads to follow-on
1886 problems: if we're stepping off a breakpoint by removing all breakpoints
1887 and then single-stepping. In this case, the software single-step
1888 behavior means that even if there is a *breakpoint* in the signal
1889 handler, GDB still would not stop.
1890
1891 Fortunately, we can at least fix this particular issue. We detect
1892 here the case where we are about to deliver a signal while software
1893 single-stepping with breakpoints removed. In this situation, we
1894 revert the decisions to remove all breakpoints and insert single-
1895 step breakpoints, and instead we install a step-resume breakpoint
1896 at the current address, deliver the signal without stepping, and
1897 once we arrive back at the step-resume breakpoint, actually step
1898 over the breakpoint we originally wanted to step over. */
1899 if (singlestep_breakpoints_inserted_p
1900 && tp->control.trap_expected && sig != GDB_SIGNAL_0)
1901 {
1902 /* If we have nested signals or a pending signal is delivered
1903 immediately after a handler returns, we might already have
1904 a step-resume breakpoint set on the earlier handler. We cannot
1905 set another step-resume breakpoint; just continue on until the
1906 original breakpoint is hit. */
1907 if (tp->control.step_resume_breakpoint == NULL)
1908 {
1909 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
1910 tp->step_after_step_resume_breakpoint = 1;
1911 }
1912
1913 remove_single_step_breakpoints ();
1914 singlestep_breakpoints_inserted_p = 0;
1915
1916 clear_step_over_info ();
1917 tp->control.trap_expected = 0;
1918
1919 insert_breakpoints ();
1920 }
1921
1922 /* If STEP is set, it's a request to use hardware stepping
1923 facilities. But in that case, we should never
1924 use singlestep breakpoint. */
1925 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1926
1927 /* Decide the set of threads to ask the target to resume. Start
1928 by assuming everything will be resumed, then narrow the set
1929 by applying increasingly restrictive conditions. */
1930 resume_ptid = user_visible_resume_ptid (step);
1931
1932 /* Even if RESUME_PTID is a wildcard, and we end up resuming less
1933 (e.g., we might need to step over a breakpoint), from the
1934 user/frontend's point of view, all threads in RESUME_PTID are now
1935 running. Unless we're calling an inferior function, as in that
1936 case we pretend the inferior doesn't run at all. */
1937 if (!tp->control.in_infcall)
1938 set_running (resume_ptid, 1);
1939
1940 /* Maybe resume a single thread after all. */
1941 if ((step || singlestep_breakpoints_inserted_p)
1942 && tp->control.trap_expected)
1943 {
1944 /* We're allowing a thread to run past a breakpoint it has
1945 hit, by single-stepping the thread with the breakpoint
1946 removed. In which case, we need to single-step only this
1947 thread, and keep others stopped, as they can miss this
1948 breakpoint if allowed to run. */
1949 resume_ptid = inferior_ptid;
1950 }
1951
1952 if (gdbarch_cannot_step_breakpoint (gdbarch))
1953 {
1954 /* Most targets can step a breakpoint instruction, thus
1955 executing it normally. But if this one cannot, just
1956 continue and we will hit it anyway. */
1957 if (step && breakpoint_inserted_here_p (aspace, pc))
1958 step = 0;
1959 }
1960
1961 if (debug_displaced
1962 && use_displaced_stepping (gdbarch)
1963 && tp->control.trap_expected)
1964 {
1965 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1966 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1967 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1968 gdb_byte buf[4];
1969
1970 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1971 paddress (resume_gdbarch, actual_pc));
1972 read_memory (actual_pc, buf, sizeof (buf));
1973 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1974 }
1975
1976 if (tp->control.may_range_step)
1977 {
1978 /* If we're resuming a thread with the PC out of the step
1979 range, then we're doing some nested/finer run control
1980 operation, like stepping the thread out of the dynamic
1981 linker or the displaced stepping scratch pad. We
1982 shouldn't have allowed a range step then. */
1983 gdb_assert (pc_in_thread_step_range (pc, tp));
1984 }
1985
1986 /* Install inferior's terminal modes. */
1987 target_terminal_inferior ();
1988
1989 /* Avoid confusing the next resume, if the next stop/resume
1990 happens to apply to another thread. */
1991 tp->suspend.stop_signal = GDB_SIGNAL_0;
1992
1993 /* Advise target which signals may be handled silently. If we have
1994 removed breakpoints because we are stepping over one (which can
1995 happen only if we are not using displaced stepping), we need to
1996 receive all signals to avoid accidentally skipping a breakpoint
1997 during execution of a signal handler. */
1998 if ((step || singlestep_breakpoints_inserted_p)
1999 && tp->control.trap_expected
2000 && !use_displaced_stepping (gdbarch))
2001 target_pass_signals (0, NULL);
2002 else
2003 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
2004
2005 target_resume (resume_ptid, step, sig);
2006
2007 discard_cleanups (old_cleanups);
2008 }
2009 \f
2010 /* Proceeding. */
2011
2012 /* Clear out all variables saying what to do when inferior is continued.
2013 First do this, then set the ones you want, then call `proceed'. */
2014
2015 static void
2016 clear_proceed_status_thread (struct thread_info *tp)
2017 {
2018 if (debug_infrun)
2019 fprintf_unfiltered (gdb_stdlog,
2020 "infrun: clear_proceed_status_thread (%s)\n",
2021 target_pid_to_str (tp->ptid));
2022
2023 tp->control.trap_expected = 0;
2024 tp->control.step_range_start = 0;
2025 tp->control.step_range_end = 0;
2026 tp->control.may_range_step = 0;
2027 tp->control.step_frame_id = null_frame_id;
2028 tp->control.step_stack_frame_id = null_frame_id;
2029 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
2030 tp->stop_requested = 0;
2031
2032 tp->control.stop_step = 0;
2033
2034 tp->control.proceed_to_finish = 0;
2035
2036 tp->control.command_interp = NULL;
2037
2038 /* Discard any remaining commands or status from previous stop. */
2039 bpstat_clear (&tp->control.stop_bpstat);
2040 }
2041
2042 static int
2043 clear_proceed_status_callback (struct thread_info *tp, void *data)
2044 {
2045 if (is_exited (tp->ptid))
2046 return 0;
2047
2048 clear_proceed_status_thread (tp);
2049 return 0;
2050 }
2051
2052 void
2053 clear_proceed_status (void)
2054 {
2055 if (!non_stop)
2056 {
2057 /* In all-stop mode, delete the per-thread status of all
2058 threads, even if inferior_ptid is null_ptid, there may be
2059 threads on the list. E.g., we may be launching a new
2060 process, while selecting the executable. */
2061 iterate_over_threads (clear_proceed_status_callback, NULL);
2062 }
2063
2064 if (!ptid_equal (inferior_ptid, null_ptid))
2065 {
2066 struct inferior *inferior;
2067
2068 if (non_stop)
2069 {
2070 /* If in non-stop mode, only delete the per-thread status of
2071 the current thread. */
2072 clear_proceed_status_thread (inferior_thread ());
2073 }
2074
2075 inferior = current_inferior ();
2076 inferior->control.stop_soon = NO_STOP_QUIETLY;
2077 }
2078
2079 stop_after_trap = 0;
2080
2081 clear_step_over_info ();
2082
2083 observer_notify_about_to_proceed ();
2084
2085 if (stop_registers)
2086 {
2087 regcache_xfree (stop_registers);
2088 stop_registers = NULL;
2089 }
2090 }
2091
2092 /* Returns true if TP is still stopped at a breakpoint that needs
2093 stepping-over in order to make progress. If the breakpoint is gone
2094 meanwhile, we can skip the whole step-over dance. */
2095
2096 static int
2097 thread_still_needs_step_over (struct thread_info *tp)
2098 {
2099 if (tp->stepping_over_breakpoint)
2100 {
2101 struct regcache *regcache = get_thread_regcache (tp->ptid);
2102
2103 if (breakpoint_here_p (get_regcache_aspace (regcache),
2104 regcache_read_pc (regcache)))
2105 return 1;
2106
2107 tp->stepping_over_breakpoint = 0;
2108 }
2109
2110 return 0;
2111 }
2112
2113 /* Returns true if scheduler locking applies. STEP indicates whether
2114 we're about to do a step/next-like command to a thread. */
2115
2116 static int
2117 schedlock_applies (int step)
2118 {
2119 return (scheduler_mode == schedlock_on
2120 || (scheduler_mode == schedlock_step
2121 && step));
2122 }
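/* Illustration only (hypothetical, non-compiled sketch): with
   "set scheduler-locking step", locking applies to stepping commands but
   not to continuation commands.  */
#if 0
static void
example_schedlock_applies (void)
{
  scheduler_mode = schedlock_step;
  gdb_assert (schedlock_applies (1));   /* step/next-like command.  */
  gdb_assert (!schedlock_applies (0));  /* continue-like command.  */
}
#endif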
2123
2124 /* Look for a thread other than EXCEPT that has previously reported a
2125 breakpoint event, and thus needs a step-over in order to make
2126 progress. Returns NULL if none is found. STEP indicates whether
2127 we're about to step the current thread, in order to decide whether
2128 "set scheduler-locking step" applies. */
2129
2130 static struct thread_info *
2131 find_thread_needs_step_over (int step, struct thread_info *except)
2132 {
2133 struct thread_info *tp, *current;
2134
2135 /* With non-stop mode on, threads are always handled individually. */
2136 gdb_assert (! non_stop);
2137
2138 current = inferior_thread ();
2139
2140 /* If scheduler locking applies, we can avoid iterating over all
2141 threads. */
2142 if (schedlock_applies (step))
2143 {
2144 if (except != current
2145 && thread_still_needs_step_over (current))
2146 return current;
2147
2148 return NULL;
2149 }
2150
2151 ALL_THREADS (tp)
2152 {
2153 /* Ignore the EXCEPT thread. */
2154 if (tp == except)
2155 continue;
2156 /* Ignore threads of processes we're not resuming. */
2157 if (!sched_multi
2158 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
2159 continue;
2160
2161 if (thread_still_needs_step_over (tp))
2162 return tp;
2163 }
2164
2165 return NULL;
2166 }
2167
2168 /* Basic routine for continuing the program in various fashions.
2169
2170 ADDR is the address to resume at, or -1 for resume where stopped.
2171 SIGGNAL is the signal to give it (GDB_SIGNAL_0 for none, or
2172 GDB_SIGNAL_DEFAULT to act according to how it stopped).
2173 STEP is nonzero if we should trap after one instruction;
2174 -1 means return after that trap and print nothing.
2175 You should probably set various step_... variables
2176 before calling here, if you are stepping.
2177
2178 You should call clear_proceed_status before calling proceed. */
2179
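/* Minimal sketch of the canonical calling sequence described above
   (compare the execution commands in infcmd.c); a hypothetical,
   non-compiled illustration only.  */
#if 0
static void
example_continue_where_stopped (void)
{
  clear_proceed_status ();
  /* Resume where stopped, with the signal chosen by how the thread
     stopped, and no single-instruction trap.  */
  proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT, 0);
}
#endif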
2180 void
2181 proceed (CORE_ADDR addr, enum gdb_signal siggnal, int step)
2182 {
2183 struct regcache *regcache;
2184 struct gdbarch *gdbarch;
2185 struct thread_info *tp;
2186 CORE_ADDR pc;
2187 struct address_space *aspace;
2188
2189 /* If we're stopped at a fork/vfork, follow the branch set by the
2190 "set follow-fork-mode" command; otherwise, we'll just proceed
2191 resuming the current thread. */
2192 if (!follow_fork ())
2193 {
2194 /* The target for some reason decided not to resume. */
2195 normal_stop ();
2196 if (target_can_async_p ())
2197 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2198 return;
2199 }
2200
2201 /* We'll update this if & when we switch to a new thread. */
2202 previous_inferior_ptid = inferior_ptid;
2203
2204 regcache = get_current_regcache ();
2205 gdbarch = get_regcache_arch (regcache);
2206 aspace = get_regcache_aspace (regcache);
2207 pc = regcache_read_pc (regcache);
2208 tp = inferior_thread ();
2209
2210 if (step > 0)
2211 step_start_function = find_pc_function (pc);
2212 if (step < 0)
2213 stop_after_trap = 1;
2214
2215 /* Fill in with reasonable starting values. */
2216 init_thread_stepping_state (tp);
2217
2218 if (addr == (CORE_ADDR) -1)
2219 {
2220 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
2221 && execution_direction != EXEC_REVERSE)
2222 /* There is a breakpoint at the address we will resume at,
2223 step one instruction before inserting breakpoints so that
2224 we do not stop right away (and report a second hit at this
2225 breakpoint).
2226
2227 Note, we don't do this in reverse, because we won't
2228 actually be executing the breakpoint insn anyway.
2229 We'll be (un-)executing the previous instruction. */
2230 tp->stepping_over_breakpoint = 1;
2231 else if (gdbarch_single_step_through_delay_p (gdbarch)
2232 && gdbarch_single_step_through_delay (gdbarch,
2233 get_current_frame ()))
2234 /* We stepped onto an instruction that needs to be stepped
2235 again before re-inserting the breakpoint, do so. */
2236 tp->stepping_over_breakpoint = 1;
2237 }
2238 else
2239 {
2240 regcache_write_pc (regcache, addr);
2241 }
2242
2243 /* Record the interpreter that issued the execution command that
2244 caused this thread to resume. If the top level interpreter is
2245 MI/async, and the execution command was a CLI command
2246 (next/step/etc.), we'll want to print stop event output to the MI
2247 console channel (the stepped-to line, etc.), as if the user
2248 entered the execution command on a real GDB console. */
2249 inferior_thread ()->control.command_interp = command_interp ();
2250
2251 if (debug_infrun)
2252 fprintf_unfiltered (gdb_stdlog,
2253 "infrun: proceed (addr=%s, signal=%s, step=%d)\n",
2254 paddress (gdbarch, addr),
2255 gdb_signal_to_symbol_string (siggnal), step);
2256
2257 if (non_stop)
2258 /* In non-stop, each thread is handled individually. The context
2259 must already be set to the right thread here. */
2260 ;
2261 else
2262 {
2263 struct thread_info *step_over;
2264
2265 /* In a multi-threaded task we may select another thread and
2266 then continue or step.
2267
2268 But if the old thread was stopped at a breakpoint, it will
2269 immediately cause another breakpoint stop without any
2270 execution (i.e. it will report a breakpoint hit incorrectly).
2271 So we must step over it first.
2272
2273 Look for a thread other than the current (TP) that reported a
2274 breakpoint hit and hasn't been resumed yet since. */
2275 step_over = find_thread_needs_step_over (step, tp);
2276 if (step_over != NULL)
2277 {
2278 if (debug_infrun)
2279 fprintf_unfiltered (gdb_stdlog,
2280 "infrun: need to step-over [%s] first\n",
2281 target_pid_to_str (step_over->ptid));
2282
2283 /* Store the prev_pc for the stepping thread too, needed by
2284 switch_back_to_stepped_thread. */
2285 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2286 switch_to_thread (step_over->ptid);
2287 tp = step_over;
2288 }
2289 }
2290
2291 /* If we need to step over a breakpoint, and we're not using
2292 displaced stepping to do so, insert all breakpoints (watchpoints,
2293 etc.) but the one we're stepping over, step one instruction, and
2294 then re-insert the breakpoint when that step is finished. */
2295 if (tp->stepping_over_breakpoint && !use_displaced_stepping (gdbarch))
2296 {
2297 struct regcache *regcache = get_current_regcache ();
2298
2299 set_step_over_info (get_regcache_aspace (regcache),
2300 regcache_read_pc (regcache));
2301 }
2302 else
2303 clear_step_over_info ();
2304
2305 insert_breakpoints ();
2306
2307 tp->control.trap_expected = tp->stepping_over_breakpoint;
2308
2309 if (!non_stop)
2310 {
2311 /* Pass the last stop signal to the thread we're resuming,
2312 irrespective of whether the current thread is the thread that
2313 got the last event or not. This was historically GDB's
2314 behaviour before keeping a stop_signal per thread. */
2315
2316 struct thread_info *last_thread;
2317 ptid_t last_ptid;
2318 struct target_waitstatus last_status;
2319
2320 get_last_target_status (&last_ptid, &last_status);
2321 if (!ptid_equal (inferior_ptid, last_ptid)
2322 && !ptid_equal (last_ptid, null_ptid)
2323 && !ptid_equal (last_ptid, minus_one_ptid))
2324 {
2325 last_thread = find_thread_ptid (last_ptid);
2326 if (last_thread)
2327 {
2328 tp->suspend.stop_signal = last_thread->suspend.stop_signal;
2329 last_thread->suspend.stop_signal = GDB_SIGNAL_0;
2330 }
2331 }
2332 }
2333
2334 if (siggnal != GDB_SIGNAL_DEFAULT)
2335 tp->suspend.stop_signal = siggnal;
2336 /* If this signal should not be seen by program,
2337 give it zero. Used for debugging signals. */
2338 else if (!signal_program[tp->suspend.stop_signal])
2339 tp->suspend.stop_signal = GDB_SIGNAL_0;
2340
2341 annotate_starting ();
2342
2343 /* Make sure that output from GDB appears before output from the
2344 inferior. */
2345 gdb_flush (gdb_stdout);
2346
2347 /* Refresh prev_pc value just prior to resuming. This used to be
2348 done in stop_stepping; however, setting prev_pc there did not handle
2349 scenarios such as inferior function calls or returning from
2350 a function via the return command. In those cases, the prev_pc
2351 value was not set properly for subsequent commands. The prev_pc value
2352 is used to initialize the starting line number in the ecs. With an
2353 invalid value, the gdb next command ends up stopping at the position
2354 represented by the next line table entry past our start position.
2355 On platforms that generate one line table entry per line, this
2356 is not a problem. However, on the ia64, the compiler generates
2357 extraneous line table entries that do not increase the line number.
2358 When we issue the gdb next command on the ia64 after an inferior call
2359 or a return command, we often end up a few instructions forward, still
2360 within the original line we started in.
2361
2362 An attempt was made to refresh the prev_pc at the same time the
2363 execution_control_state is initialized (for instance, just before
2364 waiting for an inferior event). But this approach did not work
2365 because of platforms that use ptrace, where the pc register cannot
2366 be read unless the inferior is stopped. At that point, we are not
2367 guaranteed the inferior is stopped and so the regcache_read_pc() call
2368 can fail. Setting the prev_pc value here ensures the value is updated
2369 correctly when the inferior is stopped. */
2370 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2371
2372 /* Reset to normal state. */
2373 init_infwait_state ();
2374
2375 /* Resume inferior. */
2376 resume (tp->control.trap_expected || step || bpstat_should_step (),
2377 tp->suspend.stop_signal);
2378
2379 /* Wait for it to stop (if not standalone)
2380 and in any case decode why it stopped, and act accordingly. */
2381 /* Do this only if we are not using the event loop, or if the target
2382 does not support asynchronous execution. */
2383 if (!target_can_async_p ())
2384 {
2385 wait_for_inferior ();
2386 normal_stop ();
2387 }
2388 }
2389 \f
2390
2391 /* Start remote-debugging of a machine over a serial link. */
2392
2393 void
2394 start_remote (int from_tty)
2395 {
2396 struct inferior *inferior;
2397
2398 inferior = current_inferior ();
2399 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2400
2401 /* Always go on waiting for the target, regardless of the mode. */
2402 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2403 indicate to wait_for_inferior that a target should timeout if
2404 nothing is returned (instead of just blocking). Because of this,
2405 targets expecting an immediate response need to, internally, set
2406 things up so that the target_wait() is forced to eventually
2407 timeout. */
2408 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2409 differentiate to its caller what the state of the target is after
2410 the initial open has been performed. Here we're assuming that
2411 the target has stopped. It should be possible to eventually have
2412 target_open() return to the caller an indication that the target
2413 is currently running and GDB state should be set to the same as
2414 for an async run. */
2415 wait_for_inferior ();
2416
2417 /* Now that the inferior has stopped, do any bookkeeping like
2418 loading shared libraries. We want to do this before normal_stop,
2419 so that the displayed frame is up to date. */
2420 post_create_inferior (&current_target, from_tty);
2421
2422 normal_stop ();
2423 }
2424
2425 /* Initialize static vars when a new inferior begins. */
2426
2427 void
2428 init_wait_for_inferior (void)
2429 {
2430 /* These are meaningless until the first time through wait_for_inferior. */
2431
2432 breakpoint_init_inferior (inf_starting);
2433
2434 clear_proceed_status ();
2435
2436 target_last_wait_ptid = minus_one_ptid;
2437
2438 previous_inferior_ptid = inferior_ptid;
2439 init_infwait_state ();
2440
2441 /* Discard any skipped inlined frames. */
2442 clear_inline_frame_state (minus_one_ptid);
2443
2444 singlestep_ptid = null_ptid;
2445 singlestep_pc = 0;
2446 }
2447
2448 \f
2449 /* This enum encodes possible reasons for doing a target_wait, so that
2450 wfi can call target_wait in one place. (Ultimately the call will be
2451 moved out of the infinite loop entirely.) */
2452
2453 enum infwait_states
2454 {
2455 infwait_normal_state,
2456 infwait_step_watch_state,
2457 infwait_nonstep_watch_state
2458 };
2459
2460 /* The PTID we'll do a target_wait on. */
2461 ptid_t waiton_ptid;
2462
2463 /* Current inferior wait state. */
2464 static enum infwait_states infwait_state;
2465
2466 /* Data to be passed around while handling an event. This data is
2467 discarded between events. */
2468 struct execution_control_state
2469 {
2470 ptid_t ptid;
2471 /* The thread that got the event, if this was a thread event; NULL
2472 otherwise. */
2473 struct thread_info *event_thread;
2474
2475 struct target_waitstatus ws;
2476 int stop_func_filled_in;
2477 CORE_ADDR stop_func_start;
2478 CORE_ADDR stop_func_end;
2479 const char *stop_func_name;
2480 int wait_some_more;
2481
2482 /* We were in infwait_step_watch_state or
2483 infwait_nonstep_watch_state state, and the thread reported an
2484 event. */
2485 int stepped_after_stopped_by_watchpoint;
2486
2487 /* True if the event thread hit the single-step breakpoint of
2488 another thread. Thus the event doesn't cause a stop, the thread
2489 needs to be single-stepped past the single-step breakpoint before
2490 we can switch back to the original stepping thread. */
2491 int hit_singlestep_breakpoint;
2492 };
2493
2494 static void handle_inferior_event (struct execution_control_state *ecs);
2495
2496 static void handle_step_into_function (struct gdbarch *gdbarch,
2497 struct execution_control_state *ecs);
2498 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2499 struct execution_control_state *ecs);
2500 static void handle_signal_stop (struct execution_control_state *ecs);
2501 static void check_exception_resume (struct execution_control_state *,
2502 struct frame_info *);
2503
2504 static void stop_stepping (struct execution_control_state *ecs);
2505 static void prepare_to_wait (struct execution_control_state *ecs);
2506 static void keep_going (struct execution_control_state *ecs);
2507 static void process_event_stop_test (struct execution_control_state *ecs);
2508 static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
2509
2510 /* Callback for iterate_over_threads. If the thread is stopped, but
2511 the user/frontend doesn't know about that yet, go through
2512 normal_stop, as if the thread had just stopped now. ARG points at
2513 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2514 ptid_is_pid(PTID) is true, applies to all threads of the process
2515 pointed at by PTID. Otherwise, apply only to the thread pointed by
2516 PTID. */
2517
2518 static int
2519 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2520 {
2521 ptid_t ptid = * (ptid_t *) arg;
2522
2523 if ((ptid_equal (info->ptid, ptid)
2524 || ptid_equal (minus_one_ptid, ptid)
2525 || (ptid_is_pid (ptid)
2526 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2527 && is_running (info->ptid)
2528 && !is_executing (info->ptid))
2529 {
2530 struct cleanup *old_chain;
2531 struct execution_control_state ecss;
2532 struct execution_control_state *ecs = &ecss;
2533
2534 memset (ecs, 0, sizeof (*ecs));
2535
2536 old_chain = make_cleanup_restore_current_thread ();
2537
2538 overlay_cache_invalid = 1;
2539 /* Flush target cache before starting to handle each event.
2540 Target was running and cache could be stale. This is just a
2541 heuristic. Running threads may modify target memory, but we
2542 don't get any event. */
2543 target_dcache_invalidate ();
2544
2545 /* Go through handle_inferior_event/normal_stop, so we always
2546 have consistent output as if the stop event had been
2547 reported. */
2548 ecs->ptid = info->ptid;
2549 ecs->event_thread = find_thread_ptid (info->ptid);
2550 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2551 ecs->ws.value.sig = GDB_SIGNAL_0;
2552
2553 handle_inferior_event (ecs);
2554
2555 if (!ecs->wait_some_more)
2556 {
2557 struct thread_info *tp;
2558
2559 normal_stop ();
2560
2561 /* Finish off the continuations. */
2562 tp = inferior_thread ();
2563 do_all_intermediate_continuations_thread (tp, 1);
2564 do_all_continuations_thread (tp, 1);
2565 }
2566
2567 do_cleanups (old_chain);
2568 }
2569
2570 return 0;
2571 }
2572
2573 /* This function is attached as a "thread_stop_requested" observer.
2574 Clean up local state that assumed the PTID was to be resumed, and
2575 report the stop to the frontend. */
2576
2577 static void
2578 infrun_thread_stop_requested (ptid_t ptid)
2579 {
2580 struct displaced_step_inferior_state *displaced;
2581
2582 /* PTID was requested to stop. Remove it from the displaced
2583 stepping queue, so we don't try to resume it automatically. */
2584
2585 for (displaced = displaced_step_inferior_states;
2586 displaced;
2587 displaced = displaced->next)
2588 {
2589 struct displaced_step_request *it, **prev_next_p;
2590
2591 it = displaced->step_request_queue;
2592 prev_next_p = &displaced->step_request_queue;
2593 while (it)
2594 {
2595 if (ptid_match (it->ptid, ptid))
2596 {
2597 *prev_next_p = it->next;
2598 it->next = NULL;
2599 xfree (it);
2600 }
2601 else
2602 {
2603 prev_next_p = &it->next;
2604 }
2605
2606 it = *prev_next_p;
2607 }
2608 }
2609
2610 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2611 }
2612
2613 static void
2614 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2615 {
2616 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2617 nullify_last_target_wait_ptid ();
2618 }
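/* Sketch for illustration (assumed wiring, normally done from
   _initialize_infrun; the helper name is hypothetical): the two observers
   above are attached once at startup so infrun hears about stop requests
   and thread exits.  */
#if 0
static void
example_attach_stop_and_exit_observers (void)
{
  observer_attach_thread_stop_requested (infrun_thread_stop_requested);
  observer_attach_thread_exit (infrun_thread_thread_exit);
}
#endif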
2619
2620 /* Callback for iterate_over_threads. */
2621
2622 static int
2623 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2624 {
2625 if (is_exited (info->ptid))
2626 return 0;
2627
2628 delete_step_resume_breakpoint (info);
2629 delete_exception_resume_breakpoint (info);
2630 return 0;
2631 }
2632
2633 /* In all-stop, delete the step resume breakpoint of any thread that
2634 had one. In non-stop, delete the step resume breakpoint of the
2635 thread that just stopped. */
2636
2637 static void
2638 delete_step_thread_step_resume_breakpoint (void)
2639 {
2640 if (!target_has_execution
2641 || ptid_equal (inferior_ptid, null_ptid))
2642 /* If the inferior has exited, we have already deleted the step
2643 resume breakpoints out of GDB's lists. */
2644 return;
2645
2646 if (non_stop)
2647 {
2648 /* If in non-stop mode, only delete the step-resume or
2649 longjmp-resume breakpoint of the thread that just stopped
2650 stepping. */
2651 struct thread_info *tp = inferior_thread ();
2652
2653 delete_step_resume_breakpoint (tp);
2654 delete_exception_resume_breakpoint (tp);
2655 }
2656 else
2657 /* In all-stop mode, delete all step-resume and longjmp-resume
2658 breakpoints of any thread that had them. */
2659 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2660 }
2661
2662 /* A cleanup wrapper. */
2663
2664 static void
2665 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2666 {
2667 delete_step_thread_step_resume_breakpoint ();
2668 }
2669
2670 /* Pretty print the results of target_wait, for debugging purposes. */
2671
2672 static void
2673 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2674 const struct target_waitstatus *ws)
2675 {
2676 char *status_string = target_waitstatus_to_string (ws);
2677 struct ui_file *tmp_stream = mem_fileopen ();
2678 char *text;
2679
2680 /* The text is split over several lines because it was getting too long.
2681 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2682 output as a unit; we want only one timestamp printed if debug_timestamp
2683 is set. */
2684
2685 fprintf_unfiltered (tmp_stream,
2686 "infrun: target_wait (%d", ptid_get_pid (waiton_ptid));
2687 if (ptid_get_pid (waiton_ptid) != -1)
2688 fprintf_unfiltered (tmp_stream,
2689 " [%s]", target_pid_to_str (waiton_ptid));
2690 fprintf_unfiltered (tmp_stream, ", status) =\n");
2691 fprintf_unfiltered (tmp_stream,
2692 "infrun: %d [%s],\n",
2693 ptid_get_pid (result_ptid),
2694 target_pid_to_str (result_ptid));
2695 fprintf_unfiltered (tmp_stream,
2696 "infrun: %s\n",
2697 status_string);
2698
2699 text = ui_file_xstrdup (tmp_stream, NULL);
2700
2701 /* This uses %s in part to handle %'s in the text, but also to avoid
2702 a gcc error: the format attribute requires a string literal. */
2703 fprintf_unfiltered (gdb_stdlog, "%s", text);
2704
2705 xfree (status_string);
2706 xfree (text);
2707 ui_file_delete (tmp_stream);
2708 }
2709
2710 /* Prepare and stabilize the inferior for detaching it. E.g.,
2711 detaching while a thread is displaced stepping is a recipe for
2712 crashing it, as nothing would readjust the PC out of the scratch
2713 pad. */
2714
2715 void
2716 prepare_for_detach (void)
2717 {
2718 struct inferior *inf = current_inferior ();
2719 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2720 struct cleanup *old_chain_1;
2721 struct displaced_step_inferior_state *displaced;
2722
2723 displaced = get_displaced_stepping_state (inf->pid);
2724
2725 /* Is any thread of this process displaced stepping? If not,
2726 there's nothing else to do. */
2727 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2728 return;
2729
2730 if (debug_infrun)
2731 fprintf_unfiltered (gdb_stdlog,
2732 "displaced-stepping in-process while detaching");
2733
2734 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2735 inf->detaching = 1;
2736
2737 while (!ptid_equal (displaced->step_ptid, null_ptid))
2738 {
2739 struct cleanup *old_chain_2;
2740 struct execution_control_state ecss;
2741 struct execution_control_state *ecs;
2742
2743 ecs = &ecss;
2744 memset (ecs, 0, sizeof (*ecs));
2745
2746 overlay_cache_invalid = 1;
2747 /* Flush target cache before starting to handle each event.
2748 Target was running and cache could be stale. This is just a
2749 heuristic. Running threads may modify target memory, but we
2750 don't get any event. */
2751 target_dcache_invalidate ();
2752
2753 if (deprecated_target_wait_hook)
2754 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2755 else
2756 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2757
2758 if (debug_infrun)
2759 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2760
2761 /* If an error happens while handling the event, propagate GDB's
2762 knowledge of the executing state to the frontend/user running
2763 state. */
2764 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
2765 &minus_one_ptid);
2766
2767 /* Now figure out what to do with the result. */
2768 handle_inferior_event (ecs);
2769
2770 /* No error, don't finish the state yet. */
2771 discard_cleanups (old_chain_2);
2772
2773 /* Breakpoints and watchpoints are not installed on the target
2774 at this point, and signals are passed directly to the
2775 inferior, so this must mean the process is gone. */
2776 if (!ecs->wait_some_more)
2777 {
2778 discard_cleanups (old_chain_1);
2779 error (_("Program exited while detaching"));
2780 }
2781 }
2782
2783 discard_cleanups (old_chain_1);
2784 }
2785
2786 /* Wait for control to return from inferior to debugger.
2787
2788 If inferior gets a signal, we may decide to start it up again
2789 instead of returning. That is why there is a loop in this function.
2790 When this function actually returns it means the inferior
2791 should be left stopped and GDB should read more commands. */
2792
2793 void
2794 wait_for_inferior (void)
2795 {
2796 struct cleanup *old_cleanups;
2797
2798 if (debug_infrun)
2799 fprintf_unfiltered
2800 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
2801
2802 old_cleanups =
2803 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2804
2805 while (1)
2806 {
2807 struct execution_control_state ecss;
2808 struct execution_control_state *ecs = &ecss;
2809 struct cleanup *old_chain;
2810
2811 memset (ecs, 0, sizeof (*ecs));
2812
2813 overlay_cache_invalid = 1;
2814
2815 /* Flush target cache before starting to handle each event.
2816 Target was running and cache could be stale. This is just a
2817 heuristic. Running threads may modify target memory, but we
2818 don't get any event. */
2819 target_dcache_invalidate ();
2820
2821 if (deprecated_target_wait_hook)
2822 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2823 else
2824 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2825
2826 if (debug_infrun)
2827 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2828
2829 /* If an error happens while handling the event, propagate GDB's
2830 knowledge of the executing state to the frontend/user running
2831 state. */
2832 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2833
2834 /* Now figure out what to do with the result. */
2835 handle_inferior_event (ecs);
2836
2837 /* No error, don't finish the state yet. */
2838 discard_cleanups (old_chain);
2839
2840 if (!ecs->wait_some_more)
2841 break;
2842 }
2843
2844 do_cleanups (old_cleanups);
2845 }
2846
2847 /* Asynchronous version of wait_for_inferior. It is called by the
2848 event loop whenever a change of state is detected on the file
2849 descriptor corresponding to the target. It can be called more than
2850 once to complete a single execution command. In such cases we need
2851 to keep the state in a global variable ECSS. If it is the last time
2852 that this function is called for a single execution command, then
2853 report to the user that the inferior has stopped, and do the
2854 necessary cleanups. */
2855
2856 void
2857 fetch_inferior_event (void *client_data)
2858 {
2859 struct execution_control_state ecss;
2860 struct execution_control_state *ecs = &ecss;
2861 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2862 struct cleanup *ts_old_chain;
2863 int was_sync = sync_execution;
2864 int cmd_done = 0;
2865
2866 memset (ecs, 0, sizeof (*ecs));
2867
2868 /* We're handling a live event, so make sure we're doing live
2869 debugging. If we're looking at traceframes while the target is
2870 running, we're going to need to get back to that mode after
2871 handling the event. */
2872 if (non_stop)
2873 {
2874 make_cleanup_restore_current_traceframe ();
2875 set_current_traceframe (-1);
2876 }
2877
2878 if (non_stop)
2879 /* In non-stop mode, the user/frontend should not notice a thread
2880 switch due to internal events. Make sure we reverse to the
2881 user selected thread and frame after handling the event and
2882 running any breakpoint commands. */
2883 make_cleanup_restore_current_thread ();
2884
2885 overlay_cache_invalid = 1;
2886 /* Flush target cache before starting to handle each event. Target
2887 was running and cache could be stale. This is just a heuristic.
2888 Running threads may modify target memory, but we don't get any
2889 event. */
2890 target_dcache_invalidate ();
2891
2892 make_cleanup_restore_integer (&execution_direction);
2893 execution_direction = target_execution_direction ();
2894
2895 if (deprecated_target_wait_hook)
2896 ecs->ptid =
2897 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2898 else
2899 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2900
2901 if (debug_infrun)
2902 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2903
2904 /* If an error happens while handling the event, propagate GDB's
2905 knowledge of the executing state to the frontend/user running
2906 state. */
2907 if (!non_stop)
2908 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2909 else
2910 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2911
2912 /* This cleanup runs before the make_cleanup_restore_current_thread above,
2913 so it still applies to the thread which has thrown the exception. */
2914 make_bpstat_clear_actions_cleanup ();
2915
2916 /* Now figure out what to do with the result. */
2917 handle_inferior_event (ecs);
2918
2919 if (!ecs->wait_some_more)
2920 {
2921 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2922
2923 delete_step_thread_step_resume_breakpoint ();
2924
2925 /* We may not find an inferior if this was a process exit. */
2926 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
2927 normal_stop ();
2928
2929 if (target_has_execution
2930 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
2931 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2932 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2933 && ecs->event_thread->step_multi
2934 && ecs->event_thread->control.stop_step)
2935 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2936 else
2937 {
2938 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2939 cmd_done = 1;
2940 }
2941 }
2942
2943 /* No error, don't finish the thread states yet. */
2944 discard_cleanups (ts_old_chain);
2945
2946 /* Revert thread and frame. */
2947 do_cleanups (old_chain);
2948
2949 /* If the inferior was in sync execution mode, and now isn't,
2950 restore the prompt (a synchronous execution command has finished,
2951 and we're ready for input). */
2952 if (interpreter_async && was_sync && !sync_execution)
2953 observer_notify_sync_execution_done ();
2954
2955 if (cmd_done
2956 && !was_sync
2957 && exec_done_display_p
2958 && (ptid_equal (inferior_ptid, null_ptid)
2959 || !is_running (inferior_ptid)))
2960 printf_unfiltered (_("completed.\n"));
2961 }
2962
2963 /* Record the frame and location we're currently stepping through. */
2964 void
2965 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2966 {
2967 struct thread_info *tp = inferior_thread ();
2968
2969 tp->control.step_frame_id = get_frame_id (frame);
2970 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
2971
2972 tp->current_symtab = sal.symtab;
2973 tp->current_line = sal.line;
2974 }
2975
2976 /* Clear context switchable stepping state. */
2977
2978 void
2979 init_thread_stepping_state (struct thread_info *tss)
2980 {
2981 tss->stepping_over_breakpoint = 0;
2982 tss->step_after_step_resume_breakpoint = 0;
2983 }
2984
2985 /* Set the cached copy of the last ptid/waitstatus. */
2986
2987 static void
2988 set_last_target_status (ptid_t ptid, struct target_waitstatus status)
2989 {
2990 target_last_wait_ptid = ptid;
2991 target_last_waitstatus = status;
2992 }
2993
2994 /* Return the cached copy of the last pid/waitstatus returned by
2995 target_wait()/deprecated_target_wait_hook(). The data is actually
2996 cached by handle_inferior_event(), which gets called immediately
2997 after target_wait()/deprecated_target_wait_hook(). */
2998
2999 void
3000 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
3001 {
3002 *ptidp = target_last_wait_ptid;
3003 *status = target_last_waitstatus;
3004 }
3005
3006 void
3007 nullify_last_target_wait_ptid (void)
3008 {
3009 target_last_wait_ptid = minus_one_ptid;
3010 }
3011
3012 /* Switch thread contexts. */
3013
3014 static void
3015 context_switch (ptid_t ptid)
3016 {
3017 if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
3018 {
3019 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
3020 target_pid_to_str (inferior_ptid));
3021 fprintf_unfiltered (gdb_stdlog, "to %s\n",
3022 target_pid_to_str (ptid));
3023 }
3024
3025 switch_to_thread (ptid);
3026 }
3027
3028 static void
3029 adjust_pc_after_break (struct execution_control_state *ecs)
3030 {
3031 struct regcache *regcache;
3032 struct gdbarch *gdbarch;
3033 struct address_space *aspace;
3034 CORE_ADDR breakpoint_pc, decr_pc;
3035
3036 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
3037 we aren't, just return.
3038
3039 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
3040 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
3041 implemented by software breakpoints should be handled through the normal
3042 breakpoint layer.
3043
3044 NOTE drow/2004-01-31: On some targets, breakpoints may generate
3045 different signals (SIGILL or SIGEMT for instance), but it is less
3046 clear where the PC is pointing afterwards. It may not match
3047 gdbarch_decr_pc_after_break. I don't know any specific target that
3048 generates these signals at breakpoints (the code has been in GDB since at
3049 least 1992) so I can not guess how to handle them here.
3050
3051 In earlier versions of GDB, a target with
3052 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
3053 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
3054 target with both of these set in GDB history, and it seems unlikely to be
3055 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
3056
3057 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
3058 return;
3059
3060 if (ecs->ws.value.sig != GDB_SIGNAL_TRAP)
3061 return;
3062
3063 /* In reverse execution, when a breakpoint is hit, the instruction
3064 under it has already been de-executed. The reported PC always
3065 points at the breakpoint address, so adjusting it further would
3066 be wrong. E.g., consider this case on a decr_pc_after_break == 1
3067 architecture:
3068
3069 B1 0x08000000 : INSN1
3070 B2 0x08000001 : INSN2
3071 0x08000002 : INSN3
3072 PC -> 0x08000003 : INSN4
3073
3074 Say you're stopped at 0x08000003 as above. Reverse continuing
3075 from that point should hit B2 as below. Reading the PC when the
3076 SIGTRAP is reported should read 0x08000001 and INSN2 should have
3077 been de-executed already.
3078
3079 B1 0x08000000 : INSN1
3080 B2 PC -> 0x08000001 : INSN2
3081 0x08000002 : INSN3
3082 0x08000003 : INSN4
3083
3084 We can't apply the same logic as for forward execution, because
3085 we would wrongly adjust the PC to 0x08000000, since there's a
3086 breakpoint at PC - 1. We'd then report a hit on B1, although
3087 INSN1 hadn't been de-executed yet. Doing nothing is the correct
3088 behaviour. */
3089 if (execution_direction == EXEC_REVERSE)
3090 return;
3091
3092 /* If this target does not decrement the PC after breakpoints, then
3093 we have nothing to do. */
3094 regcache = get_thread_regcache (ecs->ptid);
3095 gdbarch = get_regcache_arch (regcache);
3096
3097 decr_pc = target_decr_pc_after_break (gdbarch);
3098 if (decr_pc == 0)
3099 return;
3100
3101 aspace = get_regcache_aspace (regcache);
3102
3103 /* Find the location where (if we've hit a breakpoint) the
3104 breakpoint would be. */
3105 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
3106
3107 /* Check whether there actually is a software breakpoint inserted at
3108 that location.
3109
3110 If in non-stop mode, a race condition is possible where we've
3111 removed a breakpoint, but stop events for that breakpoint were
3112 already queued and arrive later. To suppress those spurious
3113 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
3114 and retire them after a number of stop events are reported. */
3115 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
3116 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
3117 {
3118 struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);
3119
3120 if (record_full_is_used ())
3121 record_full_gdb_operation_disable_set ();
3122
3123 /* When using hardware single-step, a SIGTRAP is reported for both
3124 a completed single-step and a software breakpoint. Need to
3125 differentiate between the two, as the latter needs adjusting
3126 but the former does not.
3127
3128 The SIGTRAP can be due to a completed hardware single-step only if
3129 - we didn't insert software single-step breakpoints
3130 - the thread to be examined is still the current thread
3131 - this thread is currently being stepped
3132
3133 If any of these events did not occur, we must have stopped due
3134 to hitting a software breakpoint, and have to back up to the
3135 breakpoint address.
3136
3137 As a special case, we could have hardware single-stepped a
3138 software breakpoint. In this case (prev_pc == breakpoint_pc),
3139 we also need to back up to the breakpoint address. */
3140
3141 if (singlestep_breakpoints_inserted_p
3142 || !ptid_equal (ecs->ptid, inferior_ptid)
3143 || !currently_stepping (ecs->event_thread)
3144 || ecs->event_thread->prev_pc == breakpoint_pc)
3145 regcache_write_pc (regcache, breakpoint_pc);
3146
3147 do_cleanups (old_cleanups);
3148 }
3149 }
3150
3151 static void
3152 init_infwait_state (void)
3153 {
3154 waiton_ptid = pid_to_ptid (-1);
3155 infwait_state = infwait_normal_state;
3156 }
3157
3158 static int
3159 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3160 {
3161 for (frame = get_prev_frame (frame);
3162 frame != NULL;
3163 frame = get_prev_frame (frame))
3164 {
3165 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3166 return 1;
3167 if (get_frame_type (frame) != INLINE_FRAME)
3168 break;
3169 }
3170
3171 return 0;
3172 }
3173
3174 /* Auxiliary function that handles syscall entry/return events.
3175 It returns 1 if the inferior should keep going (and GDB
3176 should ignore the event), or 0 if the event deserves to be
3177 processed. */
3178
3179 static int
3180 handle_syscall_event (struct execution_control_state *ecs)
3181 {
3182 struct regcache *regcache;
3183 int syscall_number;
3184
3185 if (!ptid_equal (ecs->ptid, inferior_ptid))
3186 context_switch (ecs->ptid);
3187
3188 regcache = get_thread_regcache (ecs->ptid);
3189 syscall_number = ecs->ws.value.syscall_number;
3190 stop_pc = regcache_read_pc (regcache);
3191
3192 if (catch_syscall_enabled () > 0
3193 && catching_syscall_number (syscall_number) > 0)
3194 {
3195 if (debug_infrun)
3196 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
3197 syscall_number);
3198
3199 ecs->event_thread->control.stop_bpstat
3200 = bpstat_stop_status (get_regcache_aspace (regcache),
3201 stop_pc, ecs->ptid, &ecs->ws);
3202
3203 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3204 {
3205 /* Catchpoint hit. */
3206 return 0;
3207 }
3208 }
3209
3210 /* If no catchpoint triggered for this, then keep going. */
3211 keep_going (ecs);
3212 return 1;
3213 }
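/* Illustration of the user-visible side (an assumption about typical use,
   not code from this file): a syscall catchpoint is what makes
   catch_syscall_enabled () return nonzero above, e.g.:

     (gdb) catch syscall write
     (gdb) continue

   When the caught syscall is reported, bpstat_causes_stop returns true
   and handle_syscall_event lets the normal stop path run instead of
   calling keep_going.  */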
3214
3215 /* Lazily fill in the execution_control_state's stop_func_* fields. */
3216
3217 static void
3218 fill_in_stop_func (struct gdbarch *gdbarch,
3219 struct execution_control_state *ecs)
3220 {
3221 if (!ecs->stop_func_filled_in)
3222 {
3223 /* Don't care about return value; stop_func_start and stop_func_name
3224 will both be 0 if it doesn't work. */
3225 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3226 &ecs->stop_func_start, &ecs->stop_func_end);
3227 ecs->stop_func_start
3228 += gdbarch_deprecated_function_start_offset (gdbarch);
3229
3230 if (gdbarch_skip_entrypoint_p (gdbarch))
3231 ecs->stop_func_start = gdbarch_skip_entrypoint (gdbarch,
3232 ecs->stop_func_start);
3233
3234 ecs->stop_func_filled_in = 1;
3235 }
3236 }
3237
3238
3239 /* Return the STOP_SOON field of the inferior pointed at by PTID. */
3240
3241 static enum stop_kind
3242 get_inferior_stop_soon (ptid_t ptid)
3243 {
3244 struct inferior *inf = find_inferior_pid (ptid_get_pid (ptid));
3245
3246 gdb_assert (inf != NULL);
3247 return inf->control.stop_soon;
3248 }
3249
3250 /* Given an execution control state that has been freshly filled in by
3251 an event from the inferior, figure out what it means and take
3252 appropriate action.
3253
3254 The alternatives are:
3255
3256 1) stop_stepping and return; to really stop and return to the
3257 debugger.
3258
3259 2) keep_going and return; to wait for the next event (set
3260 ecs->event_thread->stepping_over_breakpoint to 1 to single step
3261 once). */
3262
3263 static void
3264 handle_inferior_event (struct execution_control_state *ecs)
3265 {
3266 enum stop_kind stop_soon;
3267
3268 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
3269 {
3270 /* We had an event in the inferior, but we are not interested in
3271 handling it at this level. The lower layers have already
3272 done what needs to be done, if anything.
3273
3274 One of the possible circumstances for this is when the
3275 inferior produces output for the console. The inferior has
3276 not stopped, and we are ignoring the event. Another possible
3277 circumstance is any event which the lower level knows will be
3278 reported multiple times without an intervening resume. */
3279 if (debug_infrun)
3280 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
3281 prepare_to_wait (ecs);
3282 return;
3283 }
3284
3285 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
3286 && target_can_async_p () && !sync_execution)
3287 {
3288 /* There were no unwaited-for children left in the target, but,
3289 we're not synchronously waiting for events either. Just
3290 ignore. Otherwise, if we were running a synchronous
3291 execution command, we need to cancel it and give the user
3292 back the terminal. */
3293 if (debug_infrun)
3294 fprintf_unfiltered (gdb_stdlog,
3295 "infrun: TARGET_WAITKIND_NO_RESUMED (ignoring)\n");
3296 prepare_to_wait (ecs);
3297 return;
3298 }
3299
3300 /* Cache the last pid/waitstatus. */
3301 set_last_target_status (ecs->ptid, ecs->ws);
3302
3303 /* Always clear state belonging to the previous time we stopped. */
3304 stop_stack_dummy = STOP_NONE;
3305
3306 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
3307 {
3308 /* No unwaited-for children left. IOW, all resumed children
3309 have exited. */
3310 if (debug_infrun)
3311 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_RESUMED\n");
3312
3313 stop_print_frame = 0;
3314 stop_stepping (ecs);
3315 return;
3316 }
3317
3318 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3319 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
3320 {
3321 ecs->event_thread = find_thread_ptid (ecs->ptid);
3322 /* If it's a new thread, add it to the thread database. */
3323 if (ecs->event_thread == NULL)
3324 ecs->event_thread = add_thread (ecs->ptid);
3325
3326 /* Disable range stepping. If the next step request could use a
3327 range, it will end up re-enabled then. */
3328 ecs->event_thread->control.may_range_step = 0;
3329 }
3330
3331 /* Dependent on valid ECS->EVENT_THREAD. */
3332 adjust_pc_after_break (ecs);
3333
3334 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3335 reinit_frame_cache ();
3336
3337 breakpoint_retire_moribund ();
3338
3339 /* First, distinguish signals caused by the debugger from signals
3340 that have to do with the program's own actions. Note that
3341 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3342 on the operating system version. Here we detect when a SIGILL or
3343 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3344 something similar for SIGSEGV, since a SIGSEGV will be generated
3345 when we're trying to execute a breakpoint instruction on a
3346 non-executable stack. This happens for call dummy breakpoints
3347 for architectures like SPARC that place call dummies on the
3348 stack. */
3349 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3350 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
3351 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
3352 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
3353 {
3354 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3355
3356 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3357 regcache_read_pc (regcache)))
3358 {
3359 if (debug_infrun)
3360 fprintf_unfiltered (gdb_stdlog,
3361 "infrun: Treating signal as SIGTRAP\n");
3362 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
3363 }
3364 }
3365
3366 /* Mark the non-executing threads accordingly. In all-stop, all
3367 threads of all processes are stopped when we get any event
3368 reported. In non-stop mode, only the event thread stops. If
3369 we're handling a process exit in non-stop mode, there's nothing
3370 to do, as threads of the dead process are gone, and threads of
3371 any other process were left running. */
3372 if (!non_stop)
3373 set_executing (minus_one_ptid, 0);
3374 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3375 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3376 set_executing (ecs->ptid, 0);
3377
3378 switch (infwait_state)
3379 {
3380 case infwait_normal_state:
3381 if (debug_infrun)
3382 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
3383 break;
3384
3385 case infwait_step_watch_state:
3386 if (debug_infrun)
3387 fprintf_unfiltered (gdb_stdlog,
3388 "infrun: infwait_step_watch_state\n");
3389
3390 ecs->stepped_after_stopped_by_watchpoint = 1;
3391 break;
3392
3393 case infwait_nonstep_watch_state:
3394 if (debug_infrun)
3395 fprintf_unfiltered (gdb_stdlog,
3396 "infrun: infwait_nonstep_watch_state\n");
3397 insert_breakpoints ();
3398
3399 /* FIXME-maybe: is this cleaner than setting a flag? Does it
3400 handle things like signals arriving and other things happening
3401 in combination correctly? */
3402 ecs->stepped_after_stopped_by_watchpoint = 1;
3403 break;
3404
3405 default:
3406 internal_error (__FILE__, __LINE__, _("bad switch"));
3407 }
3408
3409 infwait_state = infwait_normal_state;
3410 waiton_ptid = pid_to_ptid (-1);
3411
3412 switch (ecs->ws.kind)
3413 {
3414 case TARGET_WAITKIND_LOADED:
3415 if (debug_infrun)
3416 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3417 if (!ptid_equal (ecs->ptid, inferior_ptid))
3418 context_switch (ecs->ptid);
3419 /* Ignore gracefully during startup of the inferior, as it might
3420 be the shell which has just loaded some objects; otherwise,
3421 add the symbols for the newly loaded objects. Also ignore at
3422 the beginning of an attach or remote session; we will query
3423 the full list of libraries once the connection is
3424 established. */
3425
3426 stop_soon = get_inferior_stop_soon (ecs->ptid);
3427 if (stop_soon == NO_STOP_QUIETLY)
3428 {
3429 struct regcache *regcache;
3430
3431 regcache = get_thread_regcache (ecs->ptid);
3432
3433 handle_solib_event ();
3434
3435 ecs->event_thread->control.stop_bpstat
3436 = bpstat_stop_status (get_regcache_aspace (regcache),
3437 stop_pc, ecs->ptid, &ecs->ws);
3438
3439 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3440 {
3441 /* A catchpoint triggered. */
3442 process_event_stop_test (ecs);
3443 return;
3444 }
3445
3446 /* If requested, stop when the dynamic linker notifies
3447 gdb of events. This allows the user to get control
3448 and place breakpoints in initializer routines for
3449 dynamically loaded objects (among other things). */
3450 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3451 if (stop_on_solib_events)
3452 {
3453 /* Make sure we print "Stopped due to solib-event" in
3454 normal_stop. */
3455 stop_print_frame = 1;
3456
3457 stop_stepping (ecs);
3458 return;
3459 }
3460 }
3461
3462 /* If we are skipping through a shell, or through shared library
3463 loading that we aren't interested in, resume the program. If
3464 we're running the program normally, also resume. */
3465 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3466 {
3467 /* Loading of shared libraries might have changed breakpoint
3468 addresses. Make sure new breakpoints are inserted. */
3469 if (stop_soon == NO_STOP_QUIETLY
3470 && !breakpoints_always_inserted_mode ())
3471 insert_breakpoints ();
3472 resume (0, GDB_SIGNAL_0);
3473 prepare_to_wait (ecs);
3474 return;
3475 }
3476
3477 /* But stop if we're attaching or setting up a remote
3478 connection. */
3479 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3480 || stop_soon == STOP_QUIETLY_REMOTE)
3481 {
3482 if (debug_infrun)
3483 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3484 stop_stepping (ecs);
3485 return;
3486 }
3487
3488 internal_error (__FILE__, __LINE__,
3489 _("unhandled stop_soon: %d"), (int) stop_soon);
3490
3491 case TARGET_WAITKIND_SPURIOUS:
3492 if (debug_infrun)
3493 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3494 if (!ptid_equal (ecs->ptid, inferior_ptid))
3495 context_switch (ecs->ptid);
3496 resume (0, GDB_SIGNAL_0);
3497 prepare_to_wait (ecs);
3498 return;
3499
3500 case TARGET_WAITKIND_EXITED:
3501 case TARGET_WAITKIND_SIGNALLED:
3502 if (debug_infrun)
3503 {
3504 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3505 fprintf_unfiltered (gdb_stdlog,
3506 "infrun: TARGET_WAITKIND_EXITED\n");
3507 else
3508 fprintf_unfiltered (gdb_stdlog,
3509 "infrun: TARGET_WAITKIND_SIGNALLED\n");
3510 }
3511
3512 inferior_ptid = ecs->ptid;
3513 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3514 set_current_program_space (current_inferior ()->pspace);
3515 handle_vfork_child_exec_or_exit (0);
3516 target_terminal_ours (); /* Must do this before mourn anyway. */
3517
3518 /* Clear any previous state of convenience variables. */
3519 clear_exit_convenience_vars ();
3520
3521 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3522 {
3523 /* Record the exit code in the convenience variable $_exitcode, so
3524 that the user can inspect this again later. */
3525 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3526 (LONGEST) ecs->ws.value.integer);
3527
3528 /* Also record this in the inferior itself. */
3529 current_inferior ()->has_exit_code = 1;
3530 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
3531
3532 /* Support the --return-child-result option. */
3533 return_child_result_value = ecs->ws.value.integer;
3534
3535 observer_notify_exited (ecs->ws.value.integer);
3536 }
3537 else
3538 {
3539 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3540 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3541
3542 if (gdbarch_gdb_signal_to_target_p (gdbarch))
3543 {
3544 /* Set the value of the internal variable $_exitsignal,
3545 which holds the signal uncaught by the inferior. */
3546 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
3547 gdbarch_gdb_signal_to_target (gdbarch,
3548 ecs->ws.value.sig));
3549 }
3550 else
3551 {
3552 /* We don't have access to the target's method used for
3553 converting between signal numbers (GDB's internal
3554 representation <-> target's representation).
3555 Therefore, we cannot do a good job at displaying this
3556 information to the user. It's better to just warn
3557 her about it (if infrun debugging is enabled), and
3558 give up. */
3559 if (debug_infrun)
3560 fprintf_filtered (gdb_stdlog, _("\
3561 Cannot fill $_exitsignal with the correct signal number.\n"));
3562 }
3563
3564 observer_notify_signal_exited (ecs->ws.value.sig);
3565 }
3566
3567 gdb_flush (gdb_stdout);
3568 target_mourn_inferior ();
3569 singlestep_breakpoints_inserted_p = 0;
3570 cancel_single_step_breakpoints ();
3571 stop_print_frame = 0;
3572 stop_stepping (ecs);
3573 return;
3574
3575 /* The following are the only cases in which we keep going;
3576 the above cases all end by returning. */
3577 case TARGET_WAITKIND_FORKED:
3578 case TARGET_WAITKIND_VFORKED:
3579 if (debug_infrun)
3580 {
3581 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3582 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3583 else
3584 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORKED\n");
3585 }
3586
3587 /* Check whether the inferior is displaced stepping. */
3588 {
3589 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3590 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3591 struct displaced_step_inferior_state *displaced
3592 = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));
3593
3594 /* If displaced stepping is in use, and thread ecs->ptid is
3595 the one that is displaced stepping, fix things up now. */
3596 if (displaced && ptid_equal (displaced->step_ptid, ecs->ptid))
3597 {
3598 struct inferior *parent_inf
3599 = find_inferior_pid (ptid_get_pid (ecs->ptid));
3600 struct regcache *child_regcache;
3601 CORE_ADDR parent_pc;
3602
3603 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
3604 indicating that the displaced stepping of syscall instruction
3605 has been done. Perform cleanup for parent process here. Note
3606 that this operation also cleans up the child process for vfork,
3607 because their pages are shared. */
3608 displaced_step_fixup (ecs->ptid, GDB_SIGNAL_TRAP);
3609
3610 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3611 {
3612 /* Restore scratch pad for child process. */
3613 displaced_step_restore (displaced, ecs->ws.value.related_pid);
3614 }
3615
3616 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
3617 the child's PC is also within the scratchpad. Set the child's PC
3618 to the parent's PC value, which has already been fixed up.
3619 FIXME: we use the parent's aspace here, although we're touching
3620 the child, because the child hasn't been added to the inferior
3621 list yet at this point. */
3622
3623 child_regcache
3624 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
3625 gdbarch,
3626 parent_inf->aspace);
3627 /* Read PC value of parent process. */
3628 parent_pc = regcache_read_pc (regcache);
3629
3630 if (debug_displaced)
3631 fprintf_unfiltered (gdb_stdlog,
3632 "displaced: write child pc from %s to %s\n",
3633 paddress (gdbarch,
3634 regcache_read_pc (child_regcache)),
3635 paddress (gdbarch, parent_pc));
3636
3637 regcache_write_pc (child_regcache, parent_pc);
3638 }
3639 }
3640
3641 if (!ptid_equal (ecs->ptid, inferior_ptid))
3642 context_switch (ecs->ptid);
3643
3644 /* Immediately detach breakpoints from the child before there's
3645 any chance of letting the user delete breakpoints from the
3646 breakpoint lists. If we don't do this early, it's easy to
3647 leave leftover traps in the child, viz: "break foo; catch
3648 fork; c; <fork>; del; c; <child calls foo>". We only follow
3649 the fork on the last `continue', and by that time the
3650 breakpoint at "foo" is long gone from the breakpoint table.
3651 If we vforked, then we don't need to unpatch here, since both
3652 parent and child are sharing the same memory pages; we'll
3653 need to unpatch at follow/detach time instead to be certain
3654 that new breakpoints added between catchpoint hit time and
3655 vfork follow are detached. */
3656 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3657 {
3658 /* This won't actually modify the breakpoint list, but will
3659 physically remove the breakpoints from the child. */
3660 detach_breakpoints (ecs->ws.value.related_pid);
3661 }
3662
3663 if (singlestep_breakpoints_inserted_p)
3664 {
3665 /* Pull the single step breakpoints out of the target. */
3666 remove_single_step_breakpoints ();
3667 singlestep_breakpoints_inserted_p = 0;
3668 }
3669
3670 /* In case the event is caught by a catchpoint, remember that
3671 the event is to be followed at the next resume of the thread,
3672 and not immediately. */
3673 ecs->event_thread->pending_follow = ecs->ws;
3674
3675 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3676
3677 ecs->event_thread->control.stop_bpstat
3678 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3679 stop_pc, ecs->ptid, &ecs->ws);
3680
3681 /* If no catchpoint triggered for this, then keep going. Note
3682 that we're interested in knowing whether the bpstat actually causes a
3683 stop, not just if it may explain the signal. Software
3684 watchpoints, for example, always appear in the bpstat. */
3685 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3686 {
3687 ptid_t parent;
3688 ptid_t child;
3689 int should_resume;
3690 int follow_child
3691 = (follow_fork_mode_string == follow_fork_mode_child);
3692
3693 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3694
3695 should_resume = follow_fork ();
3696
3697 parent = ecs->ptid;
3698 child = ecs->ws.value.related_pid;
3699
3700 /* In non-stop mode, also resume the other branch. */
3701 if (non_stop && !detach_fork)
3702 {
3703 if (follow_child)
3704 switch_to_thread (parent);
3705 else
3706 switch_to_thread (child);
3707
3708 ecs->event_thread = inferior_thread ();
3709 ecs->ptid = inferior_ptid;
3710 keep_going (ecs);
3711 }
3712
3713 if (follow_child)
3714 switch_to_thread (child);
3715 else
3716 switch_to_thread (parent);
3717
3718 ecs->event_thread = inferior_thread ();
3719 ecs->ptid = inferior_ptid;
3720
3721 if (should_resume)
3722 keep_going (ecs);
3723 else
3724 stop_stepping (ecs);
3725 return;
3726 }
3727 process_event_stop_test (ecs);
3728 return;
3729
3730 case TARGET_WAITKIND_VFORK_DONE:
3731 /* Done with the shared memory region. Re-insert breakpoints in
3732 the parent, and keep going. */
3733
3734 if (debug_infrun)
3735 fprintf_unfiltered (gdb_stdlog,
3736 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3737
3738 if (!ptid_equal (ecs->ptid, inferior_ptid))
3739 context_switch (ecs->ptid);
3740
3741 current_inferior ()->waiting_for_vfork_done = 0;
3742 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3743 /* This also takes care of reinserting breakpoints in the
3744 previously locked inferior. */
3745 keep_going (ecs);
3746 return;
3747
3748 case TARGET_WAITKIND_EXECD:
3749 if (debug_infrun)
3750 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3751
3752 if (!ptid_equal (ecs->ptid, inferior_ptid))
3753 context_switch (ecs->ptid);
3754
3755 singlestep_breakpoints_inserted_p = 0;
3756 cancel_single_step_breakpoints ();
3757
3758 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3759
3760 /* Do whatever is necessary to the parent branch of the vfork. */
3761 handle_vfork_child_exec_or_exit (1);
3762
3763 /* This causes the eventpoints and symbol table to be reset.
3764 Must do this now, before trying to determine whether to
3765 stop. */
3766 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3767
3768 ecs->event_thread->control.stop_bpstat
3769 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3770 stop_pc, ecs->ptid, &ecs->ws);
3771
3772 /* Note that this may be referenced from inside
3773 bpstat_stop_status above, through inferior_has_execd. */
3774 xfree (ecs->ws.value.execd_pathname);
3775 ecs->ws.value.execd_pathname = NULL;
3776
3777 /* If no catchpoint triggered for this, then keep going. */
3778 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3779 {
3780 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3781 keep_going (ecs);
3782 return;
3783 }
3784 process_event_stop_test (ecs);
3785 return;
3786
3787 /* Be careful not to try to gather much state about a thread
3788 that's in a syscall. It's frequently a losing proposition. */
3789 case TARGET_WAITKIND_SYSCALL_ENTRY:
3790 if (debug_infrun)
3791 fprintf_unfiltered (gdb_stdlog,
3792 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3793 /* Get the current syscall number. */
3794 if (handle_syscall_event (ecs) == 0)
3795 process_event_stop_test (ecs);
3796 return;
3797
3798 /* Before examining the threads further, step this thread to
3799 get it entirely out of the syscall. (We get notice of the
3800 event when the thread is just on the verge of exiting a
3801 syscall. Stepping one instruction seems to get it back
3802 into user code.) */
3803 case TARGET_WAITKIND_SYSCALL_RETURN:
3804 if (debug_infrun)
3805 fprintf_unfiltered (gdb_stdlog,
3806 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3807 if (handle_syscall_event (ecs) == 0)
3808 process_event_stop_test (ecs);
3809 return;
3810
3811 case TARGET_WAITKIND_STOPPED:
3812 if (debug_infrun)
3813 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3814 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
3815 handle_signal_stop (ecs);
3816 return;
3817
3818 case TARGET_WAITKIND_NO_HISTORY:
3819 if (debug_infrun)
3820 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
3821 /* Reverse execution: target ran out of history info. */
3822
3823 /* Pull the single step breakpoints out of the target. */
3824 if (singlestep_breakpoints_inserted_p)
3825 {
3826 if (!ptid_equal (ecs->ptid, inferior_ptid))
3827 context_switch (ecs->ptid);
3828 remove_single_step_breakpoints ();
3829 singlestep_breakpoints_inserted_p = 0;
3830 }
3831 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3832 observer_notify_no_history ();
3833 stop_stepping (ecs);
3834 return;
3835 }
3836 }
3837
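/* Illustrative sketch (hypothetical, not compiled in): handle_inferior_event
   is driven by a fetch-and-dispatch loop of roughly this shape.  The real
   drivers are wait_for_inferior and fetch_inferior_event; this only shows
   how target_wait, the execution_control_state and this function relate.  */
#if 0
static void
example_event_loop (void)
{
  struct execution_control_state ecs;

  do
    {
      memset (&ecs, 0, sizeof (ecs));
      ecs.ptid = target_wait (waiton_ptid, &ecs.ws, 0);
      handle_inferior_event (&ecs);
    }
  while (ecs.wait_some_more);
}
#endif
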
3838 /* Come here when the program has stopped with a signal. */
3839
3840 static void
3841 handle_signal_stop (struct execution_control_state *ecs)
3842 {
3843 struct frame_info *frame;
3844 struct gdbarch *gdbarch;
3845 int stopped_by_watchpoint;
3846 enum stop_kind stop_soon;
3847 int random_signal;
3848
3849 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
3850
3851 /* Do we need to clean up the state of a thread that has
3852 completed a displaced single-step? (Doing so usually affects
3853 the PC, so do it here, before we set stop_pc.) */
3854 displaced_step_fixup (ecs->ptid,
3855 ecs->event_thread->suspend.stop_signal);
3856
3857 /* If we either finished a single-step or hit a breakpoint, but
3858 the user wanted this thread to be stopped, pretend we got a
3859 SIG0 (generic unsignaled stop). */
3860 if (ecs->event_thread->stop_requested
3861 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
3862 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3863
3864 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3865
3866 if (debug_infrun)
3867 {
3868 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3869 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3870 struct cleanup *old_chain = save_inferior_ptid ();
3871
3872 inferior_ptid = ecs->ptid;
3873
3874 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3875 paddress (gdbarch, stop_pc));
3876 if (target_stopped_by_watchpoint ())
3877 {
3878 CORE_ADDR addr;
3879
3880 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3881
3882 if (target_stopped_data_address (&current_target, &addr))
3883 fprintf_unfiltered (gdb_stdlog,
3884 "infrun: stopped data address = %s\n",
3885 paddress (gdbarch, addr));
3886 else
3887 fprintf_unfiltered (gdb_stdlog,
3888 "infrun: (no data address available)\n");
3889 }
3890
3891 do_cleanups (old_chain);
3892 }
3893
3894 /* This originates from start_remote(), start_inferior() and
3895 shared library hook functions. */
3896 stop_soon = get_inferior_stop_soon (ecs->ptid);
3897 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
3898 {
3899 if (!ptid_equal (ecs->ptid, inferior_ptid))
3900 context_switch (ecs->ptid);
3901 if (debug_infrun)
3902 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3903 stop_print_frame = 1;
3904 stop_stepping (ecs);
3905 return;
3906 }
3907
3908 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
3909 && stop_after_trap)
3910 {
3911 if (!ptid_equal (ecs->ptid, inferior_ptid))
3912 context_switch (ecs->ptid);
3913 if (debug_infrun)
3914 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
3915 stop_print_frame = 0;
3916 stop_stepping (ecs);
3917 return;
3918 }
3919
3920 /* This originates from attach_command(). We need to overwrite
3921 the stop_signal here, because some kernels don't ignore a
3922 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
3923 See more comments in inferior.h. On the other hand, if we
3924 get a non-SIGSTOP, report it to the user - assume the backend
3925 will handle the SIGSTOP if it should show up later.
3926
3927 Also consider that the attach is complete when we see a
3928 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
3929 target extended-remote report it instead of a SIGSTOP
3930 (e.g. gdbserver). We already rely on SIGTRAP being our
3931 signal, so this is no exception.
3932
3933 Also consider that the attach is complete when we see a
3934 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
3935 the target to stop all threads of the inferior, in case the
3936 low level attach operation doesn't stop them implicitly. If
3937 they weren't stopped implicitly, then the stub will report a
3938 GDB_SIGNAL_0, meaning: stopped for no particular reason
3939 other than GDB's request. */
3940 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3941 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
3942 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
3943 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
3944 {
3945 stop_print_frame = 1;
3946 stop_stepping (ecs);
3947 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3948 return;
3949 }
3950
3951 /* See if something interesting happened to the non-current thread. If
3952 so, then switch to that thread. */
3953 if (!ptid_equal (ecs->ptid, inferior_ptid))
3954 {
3955 if (debug_infrun)
3956 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3957
3958 context_switch (ecs->ptid);
3959
3960 if (deprecated_context_hook)
3961 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3962 }
3963
3964 /* At this point, get hold of the now-current thread's frame. */
3965 frame = get_current_frame ();
3966 gdbarch = get_frame_arch (frame);
3967
3968 /* Pull the single step breakpoints out of the target. */
3969 if (singlestep_breakpoints_inserted_p)
3970 {
3971 /* However, before doing so, if this single-step breakpoint was
3972 actually for another thread, set this thread up for moving
3973 past it. */
3974 if (!ptid_equal (ecs->ptid, singlestep_ptid)
3975 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
3976 {
3977 struct regcache *regcache;
3978 struct address_space *aspace;
3979 CORE_ADDR pc;
3980
3981 regcache = get_thread_regcache (ecs->ptid);
3982 aspace = get_regcache_aspace (regcache);
3983 pc = regcache_read_pc (regcache);
3984 if (single_step_breakpoint_inserted_here_p (aspace, pc))
3985 {
3986 if (debug_infrun)
3987 {
3988 fprintf_unfiltered (gdb_stdlog,
3989 "infrun: [%s] hit step over single-step"
3990 " breakpoint of [%s]\n",
3991 target_pid_to_str (ecs->ptid),
3992 target_pid_to_str (singlestep_ptid));
3993 }
3994 ecs->hit_singlestep_breakpoint = 1;
3995 }
3996 }
3997
3998 remove_single_step_breakpoints ();
3999 singlestep_breakpoints_inserted_p = 0;
4000 }
4001
4002 if (ecs->stepped_after_stopped_by_watchpoint)
4003 stopped_by_watchpoint = 0;
4004 else
4005 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
4006
4007 /* If necessary, step over this watchpoint. We'll be back to display
4008 it in a moment. */
4009 if (stopped_by_watchpoint
4010 && (target_have_steppable_watchpoint
4011 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
4012 {
4013 /* At this point, we are stopped at an instruction which has
4014 attempted to write to a piece of memory under control of
4015 a watchpoint. The instruction hasn't actually executed
4016 yet. If we were to evaluate the watchpoint expression
4017 now, we would get the old value, and therefore no change
4018 would seem to have occurred.
4019
4020 In order to make watchpoints work `right', we really need
4021 to complete the memory write, and then evaluate the
4022 watchpoint expression. We do this by single-stepping the
4023 target.
4024
4025 It may not be necessary to disable the watchpoint to step over
4026 it. For example, the PA can (with some kernel cooperation)
4027 single step over a watchpoint without disabling the watchpoint.
4028
4029 It is far more common to need to disable a watchpoint to step
4030 the inferior over it. If we have non-steppable watchpoints,
4031 we must disable the current watchpoint; it's simplest to
4032 disable all watchpoints and breakpoints. */
4033 int hw_step = 1;
4034
4035 if (!target_have_steppable_watchpoint)
4036 {
4037 remove_breakpoints ();
4038 /* See the comment in resume about why we need to stop bypassing signals
4039 while breakpoints have been removed. */
4040 target_pass_signals (0, NULL);
4041 }
4042 /* Single step */
4043 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
4044 target_resume (ecs->ptid, hw_step, GDB_SIGNAL_0);
4045 waiton_ptid = ecs->ptid;
4046 if (target_have_steppable_watchpoint)
4047 infwait_state = infwait_step_watch_state;
4048 else
4049 infwait_state = infwait_nonstep_watch_state;
4050 prepare_to_wait (ecs);
4051 return;
4052 }
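/* Hypothetical timeline for the non-steppable case above: the target
   reports a stop before the watched write completes; we remove
   breakpoints, single-step once, and record infwait_nonstep_watch_state.
   The next event re-enters handle_inferior_event, which sets
   ecs->stepped_after_stopped_by_watchpoint, so the code above skips the
   step-over and lets bpstat_stop_status evaluate the watchpoint against
   the now-completed write.  */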
4053
4054 ecs->event_thread->stepping_over_breakpoint = 0;
4055 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4056 ecs->event_thread->control.stop_step = 0;
4057 stop_print_frame = 1;
4058 stopped_by_random_signal = 0;
4059
4060 /* Hide inlined functions starting here, unless we just performed stepi or
4061 nexti. After stepi and nexti, always show the innermost frame (not any
4062 inline function call sites). */
4063 if (ecs->event_thread->control.step_range_end != 1)
4064 {
4065 struct address_space *aspace =
4066 get_regcache_aspace (get_thread_regcache (ecs->ptid));
4067
4068 /* skip_inline_frames is expensive, so we avoid it if we can
4069 determine that the address is one where functions cannot have
4070 been inlined. This improves performance with inferiors that
4071 load a lot of shared libraries, because the solib event
4072 breakpoint is defined as the address of a function (i.e. not
4073 inline). Note that we have to check the previous PC as well
4074 as the current one to catch cases when we have just
4075 single-stepped off a breakpoint prior to reinstating it.
4076 Note that we're assuming that the code we single-step to is
4077 not inline, but that's not definitive: there's nothing
4078 preventing the event breakpoint function from containing
4079 inlined code, and the single-step ending up there. If the
4080 user had set a breakpoint on that inlined code, the missing
4081 skip_inline_frames call would break things. Fortunately
4082 that's an extremely unlikely scenario. */
4083 if (!pc_at_non_inline_function (aspace, stop_pc, &ecs->ws)
4084 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4085 && ecs->event_thread->control.trap_expected
4086 && pc_at_non_inline_function (aspace,
4087 ecs->event_thread->prev_pc,
4088 &ecs->ws)))
4089 {
4090 skip_inline_frames (ecs->ptid);
4091
4092 /* Re-fetch current thread's frame in case that invalidated
4093 the frame cache. */
4094 frame = get_current_frame ();
4095 gdbarch = get_frame_arch (frame);
4096 }
4097 }
4098
4099 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4100 && ecs->event_thread->control.trap_expected
4101 && gdbarch_single_step_through_delay_p (gdbarch)
4102 && currently_stepping (ecs->event_thread))
4103 {
4104 /* We're trying to step off a breakpoint. Turns out that we're
4105 also on an instruction that needs to be stepped multiple
4106 times before it has been fully executed. E.g., architectures
4107 with a delay slot. It needs to be stepped twice, once for
4108 the instruction and once for the delay slot. */
4109 int step_through_delay
4110 = gdbarch_single_step_through_delay (gdbarch, frame);
4111
4112 if (debug_infrun && step_through_delay)
4113 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
4114 if (ecs->event_thread->control.step_range_end == 0
4115 && step_through_delay)
4116 {
4117 /* The user issued a continue when stopped at a breakpoint.
4118 Set up for another trap and get out of here. */
4119 ecs->event_thread->stepping_over_breakpoint = 1;
4120 keep_going (ecs);
4121 return;
4122 }
4123 else if (step_through_delay)
4124 {
4125 /* The user issued a step when stopped at a breakpoint.
4126 Maybe we should stop, maybe we should not - the delay
4127 slot *might* correspond to a line of source. In any
4128 case, don't decide that here, just set
4129 ecs->stepping_over_breakpoint, making sure we
4130 single-step again before breakpoints are re-inserted. */
4131 ecs->event_thread->stepping_over_breakpoint = 1;
4132 }
4133 }
4134
4135 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
4136 handles this event. */
4137 ecs->event_thread->control.stop_bpstat
4138 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4139 stop_pc, ecs->ptid, &ecs->ws);
4140
4141 /* Set this again, in case a breakpoint condition called a
4142 function. */
4143 stop_print_frame = 1;
4144
4145 /* This is where we handle "moribund" watchpoints. Unlike
4146 software breakpoint traps, hardware watchpoint traps are
4147 always distinguishable from random traps. If no high-level
4148 watchpoint is associated with the reported stop data address
4149 anymore, then the bpstat does not explain the signal ---
4150 simply make sure to ignore it if `stopped_by_watchpoint' is
4151 set. */
4152
4153 if (debug_infrun
4154 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4155 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4156 GDB_SIGNAL_TRAP)
4157 && stopped_by_watchpoint)
4158 fprintf_unfiltered (gdb_stdlog,
4159 "infrun: no user watchpoint explains "
4160 "watchpoint SIGTRAP, ignoring\n");
4161
4162 /* NOTE: cagney/2003-03-29: These checks for a random signal
4163 at one stage in the past included checks for an inferior
4164 function call's call dummy's return breakpoint. The original
4165 comment, that went with the test, read:
4166
4167 ``End of a stack dummy. Some systems (e.g. Sony news) give
4168 another signal besides SIGTRAP, so check here as well as
4169 above.''
4170
4171 If someone ever tries to get call dummies on a
4172 non-executable stack to work (where the target would stop
4173 with something like a SIGSEGV), then those tests might need
4174 to be re-instated. Given, however, that the tests were only
4175 enabled when momentary breakpoints were not being used, I
4176 suspect that it won't be the case.
4177
4178 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4179 be necessary for call dummies on a non-executable stack on
4180 SPARC. */
4181
4182 /* See if the breakpoints module can explain the signal. */
4183 random_signal
4184 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4185 ecs->event_thread->suspend.stop_signal);
4186
4187 /* If not, perhaps stepping/nexting can. */
4188 if (random_signal)
4189 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4190 && currently_stepping (ecs->event_thread));
4191
4192 /* Perhaps the thread hit a single-step breakpoint of _another_
4193 thread. Single-step breakpoints are transparent to the
4194 breakpoints module. */
4195 if (random_signal)
4196 random_signal = !ecs->hit_singlestep_breakpoint;
4197
4198 /* No? Perhaps we got a moribund watchpoint. */
4199 if (random_signal)
4200 random_signal = !stopped_by_watchpoint;
4201
4202 /* For the program's own signals, act according to
4203 the signal handling tables. */
4204
4205 if (random_signal)
4206 {
4207 /* Signal not for debugging purposes. */
4208 int printed = 0;
4209 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
4210 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
4211
4212 if (debug_infrun)
4213 fprintf_unfiltered (gdb_stdlog, "infrun: random signal (%s)\n",
4214 gdb_signal_to_symbol_string (stop_signal));
4215
4216 stopped_by_random_signal = 1;
4217
4218 if (signal_print[ecs->event_thread->suspend.stop_signal])
4219 {
4220 /* The signal table tells us to print about this signal. */
4221 printed = 1;
4222 target_terminal_ours_for_output ();
4223 observer_notify_signal_received (ecs->event_thread->suspend.stop_signal);
4224 }
4225 /* Always stop on signals if we're either just gaining control
4226 of the program, or the user explicitly requested this thread
4227 to remain stopped. */
4228 if (stop_soon != NO_STOP_QUIETLY
4229 || ecs->event_thread->stop_requested
4230 || (!inf->detaching
4231 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4232 {
4233 stop_stepping (ecs);
4234 return;
4235 }
4236 /* If not going to stop, give terminal back
4237 if we took it away. */
4238 else if (printed)
4239 target_terminal_inferior ();
4240
4241 /* Clear the signal if it should not be passed. */
4242 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4243 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4244
4245 if (ecs->event_thread->prev_pc == stop_pc
4246 && ecs->event_thread->control.trap_expected
4247 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4248 {
4249 /* We were just starting a new sequence, attempting to
4250 single-step off of a breakpoint and expecting a SIGTRAP.
4251 Instead this signal arrives. This signal will take us out
4252 of the stepping range so GDB needs to remember to, when
4253 the signal handler returns, resume stepping off that
4254 breakpoint. */
4255 /* To simplify things, "continue" is forced to use the same
4256 code paths as single-step - set a breakpoint at the
4257 signal return address and then, once hit, step off that
4258 breakpoint. */
4259 if (debug_infrun)
4260 fprintf_unfiltered (gdb_stdlog,
4261 "infrun: signal arrived while stepping over "
4262 "breakpoint\n");
4263
4264 insert_hp_step_resume_breakpoint_at_frame (frame);
4265 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4266 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4267 ecs->event_thread->control.trap_expected = 0;
4268
4269 /* If we were nexting/stepping some other thread, switch to
4270 it, so that we don't continue it, losing control. */
4271 if (!switch_back_to_stepped_thread (ecs))
4272 keep_going (ecs);
4273 return;
4274 }
4275
4276 if (ecs->event_thread->control.step_range_end != 0
4277 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
4278 && pc_in_thread_step_range (stop_pc, ecs->event_thread)
4279 && frame_id_eq (get_stack_frame_id (frame),
4280 ecs->event_thread->control.step_stack_frame_id)
4281 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4282 {
4283 /* The inferior is about to take a signal that will take it
4284 out of the single step range. Set a breakpoint at the
4285 current PC (which is presumably where the signal handler
4286 will eventually return) and then allow the inferior to
4287 run free.
4288
4289 Note that this is only needed for a signal delivered
4290 while in the single-step range. Nested signals aren't a
4291 problem as they eventually all return. */
4292 if (debug_infrun)
4293 fprintf_unfiltered (gdb_stdlog,
4294 "infrun: signal may take us out of "
4295 "single-step range\n");
4296
4297 insert_hp_step_resume_breakpoint_at_frame (frame);
4298 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4299 ecs->event_thread->control.trap_expected = 0;
4300 keep_going (ecs);
4301 return;
4302 }
4303
4304 /* Note: step_resume_breakpoint may be non-NULL. This occurs
4305 when either there's a nested signal, or when there's a
4306 pending signal enabled just as the signal handler returns
4307 (leaving the inferior at the step-resume-breakpoint without
4308 actually executing it). Either way continue until the
4309 breakpoint is really hit. */
4310
4311 if (!switch_back_to_stepped_thread (ecs))
4312 {
4313 if (debug_infrun)
4314 fprintf_unfiltered (gdb_stdlog,
4315 "infrun: random signal, keep going\n");
4316
4317 keep_going (ecs);
4318 }
4319 return;
4320 }
4321
4322 process_event_stop_test (ecs);
4323 }
4324
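/* Illustrative sketch (hypothetical, not compiled in): the "random
   signal" path above is governed by the per-signal tables maintained by
   the "handle" command.  Roughly, and ignoring the attach/detach and
   stepping special cases handled above:  */
#if 0
static void
example_random_signal_policy (struct execution_control_state *ecs)
{
  enum gdb_signal sig = ecs->event_thread->suspend.stop_signal;

  if (signal_print[sig])
    {
      target_terminal_ours_for_output ();
      observer_notify_signal_received (sig);	/* Announce the signal.  */
    }

  if (signal_stop_state (sig))
    stop_stepping (ecs);		/* Return control to the user.  */
  else
    {
      if (signal_program[sig] == 0)
	ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;	/* Don't pass it.  */
      keep_going (ecs);			/* Resume, possibly delivering SIG.  */
    }
}
#endif
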
4325 /* Come here when we've got some debug event / signal we can explain
4326 (IOW, not a random signal), and test whether it should cause a
4327 stop, or whether we should resume the inferior (transparently).
4328 E.g., could be a breakpoint whose condition evaluates false; we
4329 could be still stepping within the line; etc. */
4330
4331 static void
4332 process_event_stop_test (struct execution_control_state *ecs)
4333 {
4334 struct symtab_and_line stop_pc_sal;
4335 struct frame_info *frame;
4336 struct gdbarch *gdbarch;
4337 CORE_ADDR jmp_buf_pc;
4338 struct bpstat_what what;
4339
4340 /* Handle cases caused by hitting a breakpoint. */
4341
4342 frame = get_current_frame ();
4343 gdbarch = get_frame_arch (frame);
4344
4345 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4346
4347 if (what.call_dummy)
4348 {
4349 stop_stack_dummy = what.call_dummy;
4350 }
4351
4352 /* If we hit an internal event that triggers symbol changes, the
4353 current frame will be invalidated within bpstat_what (e.g., if we
4354 hit an internal solib event). Re-fetch it. */
4355 frame = get_current_frame ();
4356 gdbarch = get_frame_arch (frame);
4357
4358 switch (what.main_action)
4359 {
4360 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4361 /* If we hit the breakpoint at longjmp while stepping, we
4362 install a momentary breakpoint at the target of the
4363 jmp_buf. */
4364
4365 if (debug_infrun)
4366 fprintf_unfiltered (gdb_stdlog,
4367 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4368
4369 ecs->event_thread->stepping_over_breakpoint = 1;
4370
4371 if (what.is_longjmp)
4372 {
4373 struct value *arg_value;
4374
4375 /* If we set the longjmp breakpoint via a SystemTap probe,
4376 then use it to extract the arguments. The destination PC
4377 is the third argument to the probe. */
4378 arg_value = probe_safe_evaluate_at_pc (frame, 2);
4379 if (arg_value)
4380 jmp_buf_pc = value_as_address (arg_value);
4381 else if (!gdbarch_get_longjmp_target_p (gdbarch)
4382 || !gdbarch_get_longjmp_target (gdbarch,
4383 frame, &jmp_buf_pc))
4384 {
4385 if (debug_infrun)
4386 fprintf_unfiltered (gdb_stdlog,
4387 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4388 "(!gdbarch_get_longjmp_target)\n");
4389 keep_going (ecs);
4390 return;
4391 }
4392
4393 /* Insert a breakpoint at resume address. */
4394 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4395 }
4396 else
4397 check_exception_resume (ecs, frame);
4398 keep_going (ecs);
4399 return;
4400
4401 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4402 {
4403 struct frame_info *init_frame;
4404
4405 /* There are several cases to consider.
4406
4407 1. The initiating frame no longer exists. In this case we
4408 must stop, because the exception or longjmp has gone too
4409 far.
4410
4411 2. The initiating frame exists, and is the same as the
4412 current frame. We stop, because the exception or longjmp
4413 has been caught.
4414
4415 3. The initiating frame exists and is different from the
4416 current frame. This means the exception or longjmp has
4417 been caught beneath the initiating frame, so keep going.
4418
4419 4. The longjmp breakpoint has been placed just to protect
4420 against stale dummy frames and the user is not interested in
4421 stopping around longjmps. */
4422
4423 if (debug_infrun)
4424 fprintf_unfiltered (gdb_stdlog,
4425 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4426
4427 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4428 != NULL);
4429 delete_exception_resume_breakpoint (ecs->event_thread);
4430
4431 if (what.is_longjmp)
4432 {
4433 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread->num);
4434
4435 if (!frame_id_p (ecs->event_thread->initiating_frame))
4436 {
4437 /* Case 4. */
4438 keep_going (ecs);
4439 return;
4440 }
4441 }
4442
4443 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
4444
4445 if (init_frame)
4446 {
4447 struct frame_id current_id
4448 = get_frame_id (get_current_frame ());
4449 if (frame_id_eq (current_id,
4450 ecs->event_thread->initiating_frame))
4451 {
4452 /* Case 2. Fall through. */
4453 }
4454 else
4455 {
4456 /* Case 3. */
4457 keep_going (ecs);
4458 return;
4459 }
4460 }
4461
4462 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
4463 exists. */
4464 delete_step_resume_breakpoint (ecs->event_thread);
4465
4466 ecs->event_thread->control.stop_step = 1;
4467 end_stepping_range ();
4468 stop_stepping (ecs);
4469 }
4470 return;
4471
4472 case BPSTAT_WHAT_SINGLE:
4473 if (debug_infrun)
4474 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4475 ecs->event_thread->stepping_over_breakpoint = 1;
4476 /* Still need to check other stuff, at least the case where we
4477 are stepping and step out of the right range. */
4478 break;
4479
4480 case BPSTAT_WHAT_STEP_RESUME:
4481 if (debug_infrun)
4482 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4483
4484 delete_step_resume_breakpoint (ecs->event_thread);
4485 if (ecs->event_thread->control.proceed_to_finish
4486 && execution_direction == EXEC_REVERSE)
4487 {
4488 struct thread_info *tp = ecs->event_thread;
4489
4490 /* We are finishing a function in reverse, and just hit the
4491 step-resume breakpoint at the start address of the
4492 function, and we're almost there -- just need to back up
4493 by one more single-step, which should take us back to the
4494 function call. */
4495 tp->control.step_range_start = tp->control.step_range_end = 1;
4496 keep_going (ecs);
4497 return;
4498 }
4499 fill_in_stop_func (gdbarch, ecs);
4500 if (stop_pc == ecs->stop_func_start
4501 && execution_direction == EXEC_REVERSE)
4502 {
4503 /* We are stepping over a function call in reverse, and just
4504 hit the step-resume breakpoint at the start address of
4505 the function. Go back to single-stepping, which should
4506 take us back to the function call. */
4507 ecs->event_thread->stepping_over_breakpoint = 1;
4508 keep_going (ecs);
4509 return;
4510 }
4511 break;
4512
4513 case BPSTAT_WHAT_STOP_NOISY:
4514 if (debug_infrun)
4515 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4516 stop_print_frame = 1;
4517
4518 /* Assume the thread stopped for a breakpoint. We'll still check
4519 whether a/the breakpoint is there when the thread is next
4520 resumed. */
4521 ecs->event_thread->stepping_over_breakpoint = 1;
4522
4523 stop_stepping (ecs);
4524 return;
4525
4526 case BPSTAT_WHAT_STOP_SILENT:
4527 if (debug_infrun)
4528 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4529 stop_print_frame = 0;
4530
4531 /* Assume the thread stopped for a breakpoint. We'll still check
4532 whether a/the breakpoint is there when the thread is next
4533 resumed. */
4534 ecs->event_thread->stepping_over_breakpoint = 1;
4535 stop_stepping (ecs);
4536 return;
4537
4538 case BPSTAT_WHAT_HP_STEP_RESUME:
4539 if (debug_infrun)
4540 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4541
4542 delete_step_resume_breakpoint (ecs->event_thread);
4543 if (ecs->event_thread->step_after_step_resume_breakpoint)
4544 {
4545 /* Back when the step-resume breakpoint was inserted, we
4546 were trying to single-step off a breakpoint. Go back to
4547 doing that. */
4548 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4549 ecs->event_thread->stepping_over_breakpoint = 1;
4550 keep_going (ecs);
4551 return;
4552 }
4553 break;
4554
4555 case BPSTAT_WHAT_KEEP_CHECKING:
4556 break;
4557 }
4558
4559 /* We come here if we hit a breakpoint but should not stop for it.
4560 Possibly we also were stepping and should stop for that. So fall
4561 through and test for stepping. But, if not stepping, do not
4562 stop. */
4563
4564 /* In all-stop mode, if we're currently stepping but have stopped in
4565 some other thread, we need to switch back to the stepped thread. */
4566 if (switch_back_to_stepped_thread (ecs))
4567 return;
4568
4569 if (ecs->event_thread->control.step_resume_breakpoint)
4570 {
4571 if (debug_infrun)
4572 fprintf_unfiltered (gdb_stdlog,
4573 "infrun: step-resume breakpoint is inserted\n");
4574
4575 /* Having a step-resume breakpoint overrides anything
4576 else having to do with stepping commands until
4577 that breakpoint is reached. */
4578 keep_going (ecs);
4579 return;
4580 }
4581
4582 if (ecs->event_thread->control.step_range_end == 0)
4583 {
4584 if (debug_infrun)
4585 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4586 /* Likewise if we aren't even stepping. */
4587 keep_going (ecs);
4588 return;
4589 }
4590
4591 /* Re-fetch current thread's frame in case the code above caused
4592 the frame cache to be re-initialized, making our FRAME variable
4593 a dangling pointer. */
4594 frame = get_current_frame ();
4595 gdbarch = get_frame_arch (frame);
4596 fill_in_stop_func (gdbarch, ecs);
4597
4598 /* If stepping through a line, keep going if still within it.
4599
4600 Note that step_range_end is the address of the first instruction
4601 beyond the step range, and NOT the address of the last instruction
4602 within it!
4603
4604 Note also that during reverse execution, we may be stepping
4605 through a function epilogue and therefore must detect when
4606 the current-frame changes in the middle of a line. */
4607
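/* For example (hypothetical addresses): if the current source line
   occupies [0x1000, 0x1010), then step_range_start == 0x1000 and
   step_range_end == 0x1010; a stop at pc == 0x1010 is already outside
   the range, so pc_in_thread_step_range reports false there.  */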
4608 if (pc_in_thread_step_range (stop_pc, ecs->event_thread)
4609 && (execution_direction != EXEC_REVERSE
4610 || frame_id_eq (get_frame_id (frame),
4611 ecs->event_thread->control.step_frame_id)))
4612 {
4613 if (debug_infrun)
4614 fprintf_unfiltered
4615 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4616 paddress (gdbarch, ecs->event_thread->control.step_range_start),
4617 paddress (gdbarch, ecs->event_thread->control.step_range_end));
4618
4619 /* Tentatively re-enable range stepping; `resume' disables it if
4620 necessary (e.g., if we're stepping over a breakpoint or we
4621 have software watchpoints). */
4622 ecs->event_thread->control.may_range_step = 1;
4623
4624 /* When stepping backward, stop at beginning of line range
4625 (unless it's the function entry point, in which case
4626 keep going back to the call point). */
4627 if (stop_pc == ecs->event_thread->control.step_range_start
4628 && stop_pc != ecs->stop_func_start
4629 && execution_direction == EXEC_REVERSE)
4630 {
4631 ecs->event_thread->control.stop_step = 1;
4632 end_stepping_range ();
4633 stop_stepping (ecs);
4634 }
4635 else
4636 keep_going (ecs);
4637
4638 return;
4639 }
4640
4641 /* We stepped out of the stepping range. */
4642
4643 /* If we are stepping at the source level and entered the runtime
4644 loader dynamic symbol resolution code...
4645
4646 EXEC_FORWARD: we keep on single stepping until we exit the run
4647 time loader code and reach the callee's address.
4648
4649 EXEC_REVERSE: we've already executed the callee (backward), and
4650 the runtime loader code is handled just like any other
4651 undebuggable function call. Now we need only keep stepping
4652 backward through the trampoline code, and that's handled further
4653 down, so there is nothing for us to do here. */
4654
4655 if (execution_direction != EXEC_REVERSE
4656 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4657 && in_solib_dynsym_resolve_code (stop_pc))
4658 {
4659 CORE_ADDR pc_after_resolver =
4660 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4661
4662 if (debug_infrun)
4663 fprintf_unfiltered (gdb_stdlog,
4664 "infrun: stepped into dynsym resolve code\n");
4665
4666 if (pc_after_resolver)
4667 {
4668 /* Set up a step-resume breakpoint at the address
4669 indicated by SKIP_SOLIB_RESOLVER. */
4670 struct symtab_and_line sr_sal;
4671
4672 init_sal (&sr_sal);
4673 sr_sal.pc = pc_after_resolver;
4674 sr_sal.pspace = get_frame_program_space (frame);
4675
4676 insert_step_resume_breakpoint_at_sal (gdbarch,
4677 sr_sal, null_frame_id);
4678 }
4679
4680 keep_going (ecs);
4681 return;
4682 }
4683
4684 if (ecs->event_thread->control.step_range_end != 1
4685 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4686 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4687 && get_frame_type (frame) == SIGTRAMP_FRAME)
4688 {
4689 if (debug_infrun)
4690 fprintf_unfiltered (gdb_stdlog,
4691 "infrun: stepped into signal trampoline\n");
4692 /* The inferior, while doing a "step" or "next", has ended up in
4693 a signal trampoline (either by a signal being delivered or by
4694 the signal handler returning). Just single-step until the
4695 inferior leaves the trampoline (either by calling the handler
4696 or returning). */
4697 keep_going (ecs);
4698 return;
4699 }
4700
4701 /* If we're in the return path from a shared library trampoline,
4702 we want to proceed through the trampoline when stepping. */
4703 /* macro/2012-04-25: This needs to come before the subroutine
4704 call check below as on some targets return trampolines look
4705 like subroutine calls (MIPS16 return thunks). */
4706 if (gdbarch_in_solib_return_trampoline (gdbarch,
4707 stop_pc, ecs->stop_func_name)
4708 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4709 {
4710 /* Determine where this trampoline returns. */
4711 CORE_ADDR real_stop_pc;
4712
4713 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4714
4715 if (debug_infrun)
4716 fprintf_unfiltered (gdb_stdlog,
4717 "infrun: stepped into solib return tramp\n");
4718
4719 /* Only proceed through if we know where it's going. */
4720 if (real_stop_pc)
4721 {
4722 /* And put the step-breakpoint there and go until there. */
4723 struct symtab_and_line sr_sal;
4724
4725 init_sal (&sr_sal); /* initialize to zeroes */
4726 sr_sal.pc = real_stop_pc;
4727 sr_sal.section = find_pc_overlay (sr_sal.pc);
4728 sr_sal.pspace = get_frame_program_space (frame);
4729
4730 /* Do not specify what the fp should be when we stop since
4731 on some machines the prologue is where the new fp value
4732 is established. */
4733 insert_step_resume_breakpoint_at_sal (gdbarch,
4734 sr_sal, null_frame_id);
4735
4736 /* Restart without fiddling with the step ranges or
4737 other state. */
4738 keep_going (ecs);
4739 return;
4740 }
4741 }
4742
4743 /* Check for subroutine calls. The check for the current frame
4744 equalling the step ID is not necessary - the check of the
4745 previous frame's ID is sufficient - but it is a common case and
4746 cheaper than checking the previous frame's ID.
4747
4748 NOTE: frame_id_eq will never report two invalid frame IDs as
4749 being equal, so to get into this block, both the current and
4750 previous frame must have valid frame IDs. */
4751 /* The outer_frame_id check is a heuristic to detect stepping
4752 through startup code. If we step over an instruction which
4753 sets the stack pointer from an invalid value to a valid value,
4754 we may detect that as a subroutine call from the mythical
4755 "outermost" function. This could be fixed by marking
4756 outermost frames as !stack_p,code_p,special_p. Then the
4757 initial outermost frame, before sp was valid, would
4758 have code_addr == &_start. See the comment in frame_id_eq
4759 for more. */
4760 if (!frame_id_eq (get_stack_frame_id (frame),
4761 ecs->event_thread->control.step_stack_frame_id)
4762 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4763 ecs->event_thread->control.step_stack_frame_id)
4764 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
4765 outer_frame_id)
4766 || step_start_function != find_pc_function (stop_pc))))
4767 {
4768 CORE_ADDR real_stop_pc;
4769
4770 if (debug_infrun)
4771 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4772
4773 if ((ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
4774 || ((ecs->event_thread->control.step_range_end == 1)
4775 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4776 ecs->stop_func_start)))
4777 {
4778 /* I presume that step_over_calls is only 0 when we're
4779 supposed to be stepping at the assembly language level
4780 ("stepi"). Just stop. */
4781 /* Also, maybe we just did a "nexti" inside a prologue, so we
4782 thought it was a subroutine call but it was not. Stop as
4783 well. FENN */
4784 /* And this works the same backward as frontward. MVS */
4785 ecs->event_thread->control.stop_step = 1;
4786 end_stepping_range ();
4787 stop_stepping (ecs);
4788 return;
4789 }
4790
4791 /* Reverse stepping through solib trampolines. */
4792
4793 if (execution_direction == EXEC_REVERSE
4794 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
4795 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4796 || (ecs->stop_func_start == 0
4797 && in_solib_dynsym_resolve_code (stop_pc))))
4798 {
4799 /* Any solib trampoline code can be handled in reverse
4800 by simply continuing to single-step. We have already
4801 executed the solib function (backwards), and a few
4802 steps will take us back through the trampoline to the
4803 caller. */
4804 keep_going (ecs);
4805 return;
4806 }
4807
4808 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4809 {
4810 /* We're doing a "next".
4811
4812 Normal (forward) execution: set a breakpoint at the
4813 callee's return address (the address at which the caller
4814 will resume).
4815
4816 Reverse (backward) execution. set the step-resume
4817 breakpoint at the start of the function that we just
4818 stepped into (backwards), and continue to there. When we
4819 get there, we'll need to single-step back to the caller. */
4820
4821 if (execution_direction == EXEC_REVERSE)
4822 {
4823 /* If we're already at the start of the function, we've either
4824 just stepped backward into a single instruction function,
4825 or stepped back out of a signal handler to the first instruction
4826 of the function. Just keep going, which will single-step back
4827 to the caller. */
4828 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
4829 {
4830 struct symtab_and_line sr_sal;
4831
4832 /* Normal function call return (static or dynamic). */
4833 init_sal (&sr_sal);
4834 sr_sal.pc = ecs->stop_func_start;
4835 sr_sal.pspace = get_frame_program_space (frame);
4836 insert_step_resume_breakpoint_at_sal (gdbarch,
4837 sr_sal, null_frame_id);
4838 }
4839 }
4840 else
4841 insert_step_resume_breakpoint_at_caller (frame);
4842
4843 keep_going (ecs);
4844 return;
4845 }
4846
4847 /* If we are in a function call trampoline (a stub between the
4848 calling routine and the real function), locate the real
4849 function. That's what tells us (a) whether we want to step
4850 into it at all, and (b) what prologue we want to run to the
4851 end of, if we do step into it. */
4852 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4853 if (real_stop_pc == 0)
4854 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4855 if (real_stop_pc != 0)
4856 ecs->stop_func_start = real_stop_pc;
4857
4858 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4859 {
4860 struct symtab_and_line sr_sal;
4861
4862 init_sal (&sr_sal);
4863 sr_sal.pc = ecs->stop_func_start;
4864 sr_sal.pspace = get_frame_program_space (frame);
4865
4866 insert_step_resume_breakpoint_at_sal (gdbarch,
4867 sr_sal, null_frame_id);
4868 keep_going (ecs);
4869 return;
4870 }
4871
4872 /* If we have line number information for the function we are
4873 thinking of stepping into and the function isn't on the skip
4874 list, step into it.
4875
4876 If there are several symtabs at that PC (e.g. with include
4877 files), we just want to know whether *any* of them have line
4878 numbers. find_pc_line handles this. */
4879 {
4880 struct symtab_and_line tmp_sal;
4881
4882 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4883 if (tmp_sal.line != 0
4884 && !function_name_is_marked_for_skip (ecs->stop_func_name,
4885 &tmp_sal))
4886 {
4887 if (execution_direction == EXEC_REVERSE)
4888 handle_step_into_function_backward (gdbarch, ecs);
4889 else
4890 handle_step_into_function (gdbarch, ecs);
4891 return;
4892 }
4893 }
4894
4895 /* If we have no line number and the step-stop-if-no-debug is
4896 set, we stop the step so that the user has a chance to switch
4897 to assembly mode. */
4898 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4899 && step_stop_if_no_debug)
4900 {
4901 ecs->event_thread->control.stop_step = 1;
4902 end_stepping_range ();
4903 stop_stepping (ecs);
4904 return;
4905 }
4906
4907 if (execution_direction == EXEC_REVERSE)
4908 {
4909 /* If we're already at the start of the function, we've either just
4910 stepped backward into a single instruction function without line
4911 number info, or stepped back out of a signal handler to the first
4912 instruction of the function without line number info. Just keep
4913 going, which will single-step back to the caller. */
4914 if (ecs->stop_func_start != stop_pc)
4915 {
4916 /* Set a breakpoint at callee's start address.
4917 From there we can step once and be back in the caller. */
4918 struct symtab_and_line sr_sal;
4919
4920 init_sal (&sr_sal);
4921 sr_sal.pc = ecs->stop_func_start;
4922 sr_sal.pspace = get_frame_program_space (frame);
4923 insert_step_resume_breakpoint_at_sal (gdbarch,
4924 sr_sal, null_frame_id);
4925 }
4926 }
4927 else
4928 /* Set a breakpoint at callee's return address (the address
4929 at which the caller will resume). */
4930 insert_step_resume_breakpoint_at_caller (frame);
4931
4932 keep_going (ecs);
4933 return;
4934 }
4935
4936 /* Reverse stepping through solib trampolines. */
4937
4938 if (execution_direction == EXEC_REVERSE
4939 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4940 {
4941 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4942 || (ecs->stop_func_start == 0
4943 && in_solib_dynsym_resolve_code (stop_pc)))
4944 {
4945 /* Any solib trampoline code can be handled in reverse
4946 by simply continuing to single-step. We have already
4947 executed the solib function (backwards), and a few
4948 steps will take us back through the trampoline to the
4949 caller. */
4950 keep_going (ecs);
4951 return;
4952 }
4953 else if (in_solib_dynsym_resolve_code (stop_pc))
4954 {
4955 /* Stepped backward into the solib dynsym resolver.
4956 Set a breakpoint at its start and continue, then
4957 one more step will take us out. */
4958 struct symtab_and_line sr_sal;
4959
4960 init_sal (&sr_sal);
4961 sr_sal.pc = ecs->stop_func_start;
4962 sr_sal.pspace = get_frame_program_space (frame);
4963 insert_step_resume_breakpoint_at_sal (gdbarch,
4964 sr_sal, null_frame_id);
4965 keep_going (ecs);
4966 return;
4967 }
4968 }
4969
4970 stop_pc_sal = find_pc_line (stop_pc, 0);
4971
4972 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4973 the trampoline processing logic, however, there are some trampolines
4974 that have no names, so we should do trampoline handling first. */
4975 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4976 && ecs->stop_func_name == NULL
4977 && stop_pc_sal.line == 0)
4978 {
4979 if (debug_infrun)
4980 fprintf_unfiltered (gdb_stdlog,
4981 "infrun: stepped into undebuggable function\n");
4982
4983 /* The inferior just stepped into, or returned to, an
4984 undebuggable function (where there is no debugging information
4985 and no line number corresponding to the address where the
4986 inferior stopped). Since we want to skip this kind of code,
4987 we keep going until the inferior returns from this
4988 function - unless the user has asked us not to (via
4989 set step-mode) or we no longer know how to get back
4990 to the call site. */
4991 if (step_stop_if_no_debug
4992 || !frame_id_p (frame_unwind_caller_id (frame)))
4993 {
4994 /* If we have no line number and the step-stop-if-no-debug
4995 is set, we stop the step so that the user has a chance to
4996 switch to assembly mode. */
4997 ecs->event_thread->control.stop_step = 1;
4998 end_stepping_range ();
4999 stop_stepping (ecs);
5000 return;
5001 }
5002 else
5003 {
5004 /* Set a breakpoint at callee's return address (the address
5005 at which the caller will resume). */
5006 insert_step_resume_breakpoint_at_caller (frame);
5007 keep_going (ecs);
5008 return;
5009 }
5010 }
5011
5012 if (ecs->event_thread->control.step_range_end == 1)
5013 {
5014 /* It is stepi or nexti. We always want to stop stepping after
5015 one instruction. */
5016 if (debug_infrun)
5017 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
5018 ecs->event_thread->control.stop_step = 1;
5019 end_stepping_range ();
5020 stop_stepping (ecs);
5021 return;
5022 }
5023
5024 if (stop_pc_sal.line == 0)
5025 {
5026 /* We have no line number information. That means to stop
5027 stepping (does this always happen right after one instruction,
5028 when we do "s" in a function with no line numbers,
5029 or can this happen as a result of a return or longjmp?). */
5030 if (debug_infrun)
5031 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
5032 ecs->event_thread->control.stop_step = 1;
5033 end_stepping_range ();
5034 stop_stepping (ecs);
5035 return;
5036 }
5037
5038 /* Look for "calls" to inlined functions, part one. If the inline
5039 frame machinery detected some skipped call sites, we have entered
5040 a new inline function. */
5041
5042 if (frame_id_eq (get_frame_id (get_current_frame ()),
5043 ecs->event_thread->control.step_frame_id)
5044 && inline_skipped_frames (ecs->ptid))
5045 {
5046 struct symtab_and_line call_sal;
5047
5048 if (debug_infrun)
5049 fprintf_unfiltered (gdb_stdlog,
5050 "infrun: stepped into inlined function\n");
5051
5052 find_frame_sal (get_current_frame (), &call_sal);
5053
5054 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
5055 {
5056 /* For "step", we're going to stop. But if the call site
5057 for this inlined function is on the same source line as
5058 we were previously stepping, go down into the function
5059 first. Otherwise stop at the call site. */
5060
5061 if (call_sal.line == ecs->event_thread->current_line
5062 && call_sal.symtab == ecs->event_thread->current_symtab)
5063 step_into_inline_frame (ecs->ptid);
5064
5065 ecs->event_thread->control.stop_step = 1;
5066 end_stepping_range ();
5067 stop_stepping (ecs);
5068 return;
5069 }
5070 else
5071 {
5072 /* For "next", we should stop at the call site if it is on a
5073 different source line. Otherwise continue through the
5074 inlined function. */
5075 if (call_sal.line == ecs->event_thread->current_line
5076 && call_sal.symtab == ecs->event_thread->current_symtab)
5077 keep_going (ecs);
5078 else
5079 {
5080 ecs->event_thread->control.stop_step = 1;
5081 end_stepping_range ();
5082 stop_stepping (ecs);
5083 }
5084 return;
5085 }
5086 }
5087
5088 /* Look for "calls" to inlined functions, part two. If we are still
5089 in the same real function we were stepping through, but we have
5090 to go further up to find the exact frame ID, we are stepping
5091 through a more inlined call beyond its call site. */
5092
5093 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5094 && !frame_id_eq (get_frame_id (get_current_frame ()),
5095 ecs->event_thread->control.step_frame_id)
5096 && stepped_in_from (get_current_frame (),
5097 ecs->event_thread->control.step_frame_id))
5098 {
5099 if (debug_infrun)
5100 fprintf_unfiltered (gdb_stdlog,
5101 "infrun: stepping through inlined function\n");
5102
5103 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5104 keep_going (ecs);
5105 else
5106 {
5107 ecs->event_thread->control.stop_step = 1;
5108 end_stepping_range ();
5109 stop_stepping (ecs);
5110 }
5111 return;
5112 }
5113
5114 if ((stop_pc == stop_pc_sal.pc)
5115 && (ecs->event_thread->current_line != stop_pc_sal.line
5116 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5117 {
5118 /* We are at the start of a different line. So stop. Note that
5119 we don't stop if we step into the middle of a different line.
5120 That is said to make things like for (;;) statements work
5121 better. */
5122 if (debug_infrun)
5123 fprintf_unfiltered (gdb_stdlog,
5124 "infrun: stepped to a different line\n");
5125 ecs->event_thread->control.stop_step = 1;
5126 end_stepping_range ();
5127 stop_stepping (ecs);
5128 return;
5129 }
5130
5131 /* We aren't done stepping.
5132
5133 Optimize by setting the stepping range to the line.
5134 (We might not be in the original line, but if we entered a
5135 new line in mid-statement, we continue stepping. This makes
5136 things like for(;;) statements work better.) */
5137
5138 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5139 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5140 ecs->event_thread->control.may_range_step = 1;
5141 set_step_info (frame, stop_pc_sal);
5142
5143 if (debug_infrun)
5144 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5145 keep_going (ecs);
5146 }
5147
5148 /* In all-stop mode, if we're currently stepping but have stopped in
5149 some other thread, we may need to switch back to the stepped
5150 thread. Returns true if we set the inferior running, false if we left
5151 it stopped (and the event needs further processing). */
5152
5153 static int
5154 switch_back_to_stepped_thread (struct execution_control_state *ecs)
5155 {
5156 if (!non_stop)
5157 {
5158 struct thread_info *tp;
5159 struct thread_info *stepping_thread;
5160 struct thread_info *step_over;
5161
5162 /* If any thread is blocked on some internal breakpoint, and we
5163 simply need to step over that breakpoint to get it going
5164 again, do that first. */
5165
5166 /* However, if we see an event for the stepping thread, then we
5167 know all other threads have been moved past their breakpoints
5168 already. Let the caller check whether the step is finished,
5169 etc., before deciding to move it past a breakpoint. */
5170 if (ecs->event_thread->control.step_range_end != 0)
5171 return 0;
5172
5173 /* Check if the current thread is blocked on an incomplete
5174 step-over, interrupted by a random signal. */
5175 if (ecs->event_thread->control.trap_expected
5176 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5177 {
5178 if (debug_infrun)
5179 {
5180 fprintf_unfiltered (gdb_stdlog,
5181 "infrun: need to finish step-over of [%s]\n",
5182 target_pid_to_str (ecs->event_thread->ptid));
5183 }
5184 keep_going (ecs);
5185 return 1;
5186 }
5187
5188 /* Check if the current thread is blocked by a single-step
5189 breakpoint of another thread. */
5190 if (ecs->hit_singlestep_breakpoint)
5191 {
5192 if (debug_infrun)
5193 {
5194 fprintf_unfiltered (gdb_stdlog,
5195 "infrun: need to step [%s] over single-step "
5196 "breakpoint\n",
5197 target_pid_to_str (ecs->ptid));
5198 }
5199 keep_going (ecs);
5200 return 1;
5201 }
5202
5203 /* Otherwise, we no longer expect a trap in the current thread.
5204 Clear the trap_expected flag before switching back -- this is
5205 what keep_going does as well, if we call it. */
5206 ecs->event_thread->control.trap_expected = 0;
5207
5208 /* If scheduler locking applies even if not stepping, there's no
5209 need to walk over threads. Above we've checked whether the
5210 current thread is stepping. If some other thread not the
5211 event thread is stepping, then it must be that scheduler
5212 locking is not in effect. */
5213 if (schedlock_applies (0))
5214 return 0;
5215
5216 /* Look for the stepping/nexting thread, and check if any other
5217 thread other than the stepping thread needs to start a
5218 step-over. Do all step-overs before actually proceeding with
5219 step/next/etc. */
5220 stepping_thread = NULL;
5221 step_over = NULL;
5222 ALL_THREADS (tp)
5223 {
5224 /* Ignore threads of processes we're not resuming. */
5225 if (!sched_multi
5226 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
5227 continue;
5228
5229 /* When stepping over a breakpoint, we lock all threads
5230 except the one that needs to move past the breakpoint.
5231 If a non-event thread has this set, the "incomplete
5232 step-over" check above should have caught it earlier. */
5233 gdb_assert (!tp->control.trap_expected);
5234
5235 /* Did we find the stepping thread? */
5236 if (tp->control.step_range_end)
5237 {
5238 /* Yep. There should only be one, though. */
5239 gdb_assert (stepping_thread == NULL);
5240
5241 /* The event thread is handled at the top, before we
5242 enter this loop. */
5243 gdb_assert (tp != ecs->event_thread);
5244
5245 /* If some thread other than the event thread is
5246 stepping, then scheduler locking can't be in effect,
5247 otherwise we wouldn't have resumed the current event
5248 thread in the first place. */
5249 gdb_assert (!schedlock_applies (1));
5250
5251 stepping_thread = tp;
5252 }
5253 else if (thread_still_needs_step_over (tp))
5254 {
5255 step_over = tp;
5256
5257 /* At the top we've returned early if the event thread
5258 is stepping. If some other thread not the event
5259 thread is stepping, then scheduler locking can't be
5260 in effect, and we can resume this thread. No need to
5261 keep looking for the stepping thread then. */
5262 break;
5263 }
5264 }
5265
5266 if (step_over != NULL)
5267 {
5268 tp = step_over;
5269 if (debug_infrun)
5270 {
5271 fprintf_unfiltered (gdb_stdlog,
5272 "infrun: need to step-over [%s]\n",
5273 target_pid_to_str (tp->ptid));
5274 }
5275
5276 /* Only the stepping thread should have this set. */
5277 gdb_assert (tp->control.step_range_end == 0);
5278
5279 ecs->ptid = tp->ptid;
5280 ecs->event_thread = tp;
5281 switch_to_thread (ecs->ptid);
5282 keep_going (ecs);
5283 return 1;
5284 }
5285
5286 if (stepping_thread != NULL)
5287 {
5288 struct frame_info *frame;
5289 struct gdbarch *gdbarch;
5290
5291 tp = stepping_thread;
5292
5293 /* If the stepping thread exited, then don't try to switch
5294 back and resume it, which could fail in several different
5295 ways depending on the target. Instead, just keep going.
5296
5297 We can find a stepping dead thread in the thread list in
5298 two cases:
5299
5300 - The target supports thread exit events, and when the
5301 target tries to delete the thread from the thread list,
5302 inferior_ptid pointed at the exiting thread. In such a
5303 case, calling delete_thread does not really remove the
5304 thread from the list; instead, the thread is left listed,
5305 with 'exited' state.
5306
5307 - The target's debug interface does not support thread
5308 exit events, and so we have no idea whatsoever if the
5309 previously stepping thread is still alive. For that
5310 reason, we need to synchronously query the target
5311 now. */
5312 if (is_exited (tp->ptid)
5313 || !target_thread_alive (tp->ptid))
5314 {
5315 if (debug_infrun)
5316 fprintf_unfiltered (gdb_stdlog,
5317 "infrun: not switching back to "
5318 "stepped thread, it has vanished\n");
5319
5320 delete_thread (tp->ptid);
5321 keep_going (ecs);
5322 return 1;
5323 }
5324
5325 if (debug_infrun)
5326 fprintf_unfiltered (gdb_stdlog,
5327 "infrun: switching back to stepped thread\n");
5328
5329 ecs->event_thread = tp;
5330 ecs->ptid = tp->ptid;
5331 context_switch (ecs->ptid);
5332
5333 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
5334 frame = get_current_frame ();
5335 gdbarch = get_frame_arch (frame);
5336
5337 /* If the PC of the thread we were trying to single-step has
5338 changed, then that thread has trapped or been signaled,
5339 but the event has not been reported to GDB yet. Re-poll
5340 the target looking for this particular thread's event
5341 (i.e. temporarily enable schedlock) by:
5342
5343 - setting a break at the current PC
5344 - resuming that particular thread, only (by setting
5345 trap expected)
5346
5347 This prevents us continuously moving the single-step
5348 breakpoint forward, one instruction at a time,
5349 overstepping. */
5350
5351 if (gdbarch_software_single_step_p (gdbarch)
5352 && stop_pc != tp->prev_pc)
5353 {
5354 if (debug_infrun)
5355 fprintf_unfiltered (gdb_stdlog,
5356 "infrun: expected thread advanced also\n");
5357
5358 insert_single_step_breakpoint (get_frame_arch (frame),
5359 get_frame_address_space (frame),
5360 stop_pc);
5361 singlestep_breakpoints_inserted_p = 1;
5362 ecs->event_thread->control.trap_expected = 1;
5363 singlestep_ptid = inferior_ptid;
5364 singlestep_pc = stop_pc;
5365
5366 resume (0, GDB_SIGNAL_0);
5367 prepare_to_wait (ecs);
5368 }
5369 else
5370 {
5371 if (debug_infrun)
5372 fprintf_unfiltered (gdb_stdlog,
5373 "infrun: expected thread still "
5374 "hasn't advanced\n");
5375 keep_going (ecs);
5376 }
5377
5378 return 1;
5379 }
5380 }
5381 return 0;
5382 }
5383
5384 /* Is thread TP in the middle of single-stepping? */
5385
5386 static int
5387 currently_stepping (struct thread_info *tp)
5388 {
5389 return ((tp->control.step_range_end
5390 && tp->control.step_resume_breakpoint == NULL)
5391 || tp->control.trap_expected
5392 || bpstat_should_step ());
5393 }
5394
5395 /* Inferior has stepped into a subroutine call with source code that
5396 we should not step over. Do step to the first line of code in
5397 it. */
5398
5399 static void
5400 handle_step_into_function (struct gdbarch *gdbarch,
5401 struct execution_control_state *ecs)
5402 {
5403 struct symtab *s;
5404 struct symtab_and_line stop_func_sal, sr_sal;
5405
5406 fill_in_stop_func (gdbarch, ecs);
5407
5408 s = find_pc_symtab (stop_pc);
5409 if (s && s->language != language_asm)
5410 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5411 ecs->stop_func_start);
5412
5413 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
5414 /* Use the step_resume_break to step until the end of the prologue,
5415 even if that involves jumps (as it seems to on the vax under
5416 4.2). */
5417 /* If the prologue ends in the middle of a source line, continue to
5418 the end of that source line (if it is still within the function).
5419 Otherwise, just go to end of prologue. */
5420 if (stop_func_sal.end
5421 && stop_func_sal.pc != ecs->stop_func_start
5422 && stop_func_sal.end < ecs->stop_func_end)
5423 ecs->stop_func_start = stop_func_sal.end;
5424
5425 /* Architectures which require breakpoint adjustment might not be able
5426 to place a breakpoint at the computed address. If so, the test
5427 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
5428 ecs->stop_func_start to an address at which a breakpoint may be
5429 legitimately placed.
5430
5431 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
5432 made, GDB will enter an infinite loop when stepping through
5433 optimized code consisting of VLIW instructions which contain
5434 subinstructions corresponding to different source lines. On
5435 FR-V, it's not permitted to place a breakpoint on any but the
5436 first subinstruction of a VLIW instruction. When a breakpoint is
5437 set, GDB will adjust the breakpoint address to the beginning of
5438 the VLIW instruction. Thus, we need to make the corresponding
5439 adjustment here when computing the stop address. */
5440
5441 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
5442 {
5443 ecs->stop_func_start
5444 = gdbarch_adjust_breakpoint_address (gdbarch,
5445 ecs->stop_func_start);
5446 }
5447
5448 if (ecs->stop_func_start == stop_pc)
5449 {
5450 /* We are already there: stop now. */
5451 ecs->event_thread->control.stop_step = 1;
5452 end_stepping_range ();
5453 stop_stepping (ecs);
5454 return;
5455 }
5456 else
5457 {
5458 /* Put the step-breakpoint there and go until there. */
5459 init_sal (&sr_sal); /* initialize to zeroes */
5460 sr_sal.pc = ecs->stop_func_start;
5461 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
5462 sr_sal.pspace = get_frame_program_space (get_current_frame ());
5463
5464 /* Do not specify what the fp should be when we stop since on
5465 some machines the prologue is where the new fp value is
5466 established. */
5467 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
5468
5469 /* And make sure stepping stops right away then. */
5470 ecs->event_thread->control.step_range_end
5471 = ecs->event_thread->control.step_range_start;
5472 }
5473 keep_going (ecs);
5474 }
5475
5476 /* Inferior has stepped backward into a subroutine call with source
5477 code that we should not step over. Do step to the beginning of the
5478 last line of code in it. */
5479
5480 static void
5481 handle_step_into_function_backward (struct gdbarch *gdbarch,
5482 struct execution_control_state *ecs)
5483 {
5484 struct symtab *s;
5485 struct symtab_and_line stop_func_sal;
5486
5487 fill_in_stop_func (gdbarch, ecs);
5488
5489 s = find_pc_symtab (stop_pc);
5490 if (s && s->language != language_asm)
5491 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5492 ecs->stop_func_start);
5493
5494 stop_func_sal = find_pc_line (stop_pc, 0);
5495
5496 /* OK, we're just going to keep stepping here. */
5497 if (stop_func_sal.pc == stop_pc)
5498 {
5499 /* We're there already. Just stop stepping now. */
5500 ecs->event_thread->control.stop_step = 1;
5501 end_stepping_range ();
5502 stop_stepping (ecs);
5503 }
5504 else
5505 {
5506 /* Else just reset the step range and keep going.
5507 No step-resume breakpoint; they don't work for
5508 epilogues, which can have multiple entry paths. */
5509 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5510 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5511 keep_going (ecs);
5512 }
5513 return;
5514 }
5515
5516 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5517 This is used both to skip over functions and to skip over code. */
5518
5519 static void
5520 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5521 struct symtab_and_line sr_sal,
5522 struct frame_id sr_id,
5523 enum bptype sr_type)
5524 {
5525 /* There should never be more than one step-resume or longjmp-resume
5526 breakpoint per thread, so we should never be setting a new
5527 step_resume_breakpoint when one is already active. */
5528 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5529 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5530
5531 if (debug_infrun)
5532 fprintf_unfiltered (gdb_stdlog,
5533 "infrun: inserting step-resume breakpoint at %s\n",
5534 paddress (gdbarch, sr_sal.pc));
5535
5536 inferior_thread ()->control.step_resume_breakpoint
5537 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5538 }
5539
5540 void
5541 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5542 struct symtab_and_line sr_sal,
5543 struct frame_id sr_id)
5544 {
5545 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5546 sr_sal, sr_id,
5547 bp_step_resume);
5548 }
5549
5550 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5551 This is used to skip a potential signal handler.
5552
5553 This is called with the interrupted function's frame. The signal
5554 handler, when it returns, will resume the interrupted function at
5555 RETURN_FRAME.pc. */
5556
5557 static void
5558 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5559 {
5560 struct symtab_and_line sr_sal;
5561 struct gdbarch *gdbarch;
5562
5563 gdb_assert (return_frame != NULL);
5564 init_sal (&sr_sal); /* initialize to zeros */
5565
5566 gdbarch = get_frame_arch (return_frame);
5567 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5568 sr_sal.section = find_pc_overlay (sr_sal.pc);
5569 sr_sal.pspace = get_frame_program_space (return_frame);
5570
5571 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
5572 get_stack_frame_id (return_frame),
5573 bp_hp_step_resume);
5574 }
5575
5576 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
5577 is used to skip a function after stepping into it (for "next" or if
5578 the called function has no debugging information).
5579
5580 The current function has almost always been reached by single
5581 stepping a call or return instruction. NEXT_FRAME belongs to the
5582 current function, and the breakpoint will be set at the caller's
5583 resume address.
5584
5585 This is a separate function rather than reusing
5586 insert_hp_step_resume_breakpoint_at_frame in order to avoid
5587 get_prev_frame, which may stop prematurely (see the implementation
5588 of frame_unwind_caller_id for an example). */
5589
5590 static void
5591 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5592 {
5593 struct symtab_and_line sr_sal;
5594 struct gdbarch *gdbarch;
5595
5596 /* We shouldn't have gotten here if we don't know where the call site
5597 is. */
5598 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5599
5600 init_sal (&sr_sal); /* initialize to zeros */
5601
5602 gdbarch = frame_unwind_caller_arch (next_frame);
5603 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5604 frame_unwind_caller_pc (next_frame));
5605 sr_sal.section = find_pc_overlay (sr_sal.pc);
5606 sr_sal.pspace = frame_unwind_program_space (next_frame);
5607
5608 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5609 frame_unwind_caller_id (next_frame));
5610 }
5611
5612 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5613 new breakpoint at the target of a jmp_buf. The handling of
5614 longjmp-resume uses the same mechanisms used for handling
5615 "step-resume" breakpoints. */
5616
5617 static void
5618 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5619 {
5620 /* There should never be more than one longjmp-resume breakpoint per
5621 thread, so we should never be setting a new
5622 longjmp_resume_breakpoint when one is already active. */
5623 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
5624
5625 if (debug_infrun)
5626 fprintf_unfiltered (gdb_stdlog,
5627 "infrun: inserting longjmp-resume breakpoint at %s\n",
5628 paddress (gdbarch, pc));
5629
5630 inferior_thread ()->control.exception_resume_breakpoint =
5631 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5632 }
5633
5634 /* Insert an exception resume breakpoint. TP is the thread throwing
5635 the exception. The block B is the block of the unwinder debug hook
5636 function. FRAME is the frame corresponding to the call to this
5637 function. SYM is the symbol of the function argument holding the
5638 target PC of the exception. */
5639
5640 static void
5641 insert_exception_resume_breakpoint (struct thread_info *tp,
5642 struct block *b,
5643 struct frame_info *frame,
5644 struct symbol *sym)
5645 {
5646 volatile struct gdb_exception e;
5647
5648 /* We want to ignore errors here. */
5649 TRY_CATCH (e, RETURN_MASK_ERROR)
5650 {
5651 struct symbol *vsym;
5652 struct value *value;
5653 CORE_ADDR handler;
5654 struct breakpoint *bp;
5655
5656 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
5657 value = read_var_value (vsym, frame);
5658 /* If the value was optimized out, revert to the old behavior. */
5659 if (! value_optimized_out (value))
5660 {
5661 handler = value_as_address (value);
5662
5663 if (debug_infrun)
5664 fprintf_unfiltered (gdb_stdlog,
5665 "infrun: exception resume at %lx\n",
5666 (unsigned long) handler);
5667
5668 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5669 handler, bp_exception_resume);
5670
5671 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
5672 frame = NULL;
5673
5674 bp->thread = tp->num;
5675 inferior_thread ()->control.exception_resume_breakpoint = bp;
5676 }
5677 }
5678 }
5679
5680 /* A helper for check_exception_resume that sets an
5681 exception-breakpoint based on a SystemTap probe. */
5682
5683 static void
5684 insert_exception_resume_from_probe (struct thread_info *tp,
5685 const struct bound_probe *probe,
5686 struct frame_info *frame)
5687 {
5688 struct value *arg_value;
5689 CORE_ADDR handler;
5690 struct breakpoint *bp;
5691
5692 arg_value = probe_safe_evaluate_at_pc (frame, 1);
5693 if (!arg_value)
5694 return;
5695
5696 handler = value_as_address (arg_value);
5697
5698 if (debug_infrun)
5699 fprintf_unfiltered (gdb_stdlog,
5700 "infrun: exception resume at %s\n",
5701 paddress (get_objfile_arch (probe->objfile),
5702 handler));
5703
5704 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5705 handler, bp_exception_resume);
5706 bp->thread = tp->num;
5707 inferior_thread ()->control.exception_resume_breakpoint = bp;
5708 }
5709
5710 /* This is called when an exception has been intercepted. Check to
5711 see whether the exception's destination is of interest, and if so,
5712 set an exception resume breakpoint there. */
5713
5714 static void
5715 check_exception_resume (struct execution_control_state *ecs,
5716 struct frame_info *frame)
5717 {
5718 volatile struct gdb_exception e;
5719 struct bound_probe probe;
5720 struct symbol *func;
5721
5722 /* First see if this exception unwinding breakpoint was set via a
5723 SystemTap probe point. If so, the probe has two arguments: the
5724 CFA and the HANDLER. We ignore the CFA, extract the handler, and
5725 set a breakpoint there. */
5726 probe = find_probe_by_pc (get_frame_pc (frame));
5727 if (probe.probe)
5728 {
5729 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
5730 return;
5731 }
5732
5733 func = get_frame_function (frame);
5734 if (!func)
5735 return;
5736
5737 TRY_CATCH (e, RETURN_MASK_ERROR)
5738 {
5739 struct block *b;
5740 struct block_iterator iter;
5741 struct symbol *sym;
5742 int argno = 0;
5743
5744 /* The exception breakpoint is a thread-specific breakpoint on
5745 the unwinder's debug hook, declared as:
5746
5747 void _Unwind_DebugHook (void *cfa, void *handler);
5748
5749 The CFA argument indicates the frame to which control is
5750 about to be transferred. HANDLER is the destination PC.
5751
5752 We ignore the CFA and set a temporary breakpoint at HANDLER.
5753 This is not extremely efficient but it avoids issues in gdb
5754 with computing the DWARF CFA, and it also works even in weird
5755 cases such as throwing an exception from inside a signal
5756 handler. */
5757
5758 b = SYMBOL_BLOCK_VALUE (func);
5759 ALL_BLOCK_SYMBOLS (b, iter, sym)
5760 {
5761 if (!SYMBOL_IS_ARGUMENT (sym))
5762 continue;
5763
5764 if (argno == 0)
5765 ++argno;
5766 else
5767 {
5768 insert_exception_resume_breakpoint (ecs->event_thread,
5769 b, frame, sym);
5770 break;
5771 }
5772 }
5773 }
5774 }
5775
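/* Stop the stepping state machine: discard any pending step-over
state and tell the event loop not to wait for further events from
the inferior. */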
5776 static void
5777 stop_stepping (struct execution_control_state *ecs)
5778 {
5779 if (debug_infrun)
5780 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5781
5782 clear_step_over_info ();
5783
5784 /* Let callers know we don't want to wait for the inferior anymore. */
5785 ecs->wait_some_more = 0;
5786 }
5787
5788 /* Called when we should continue running the inferior, because the
5789 current event doesn't cause a user visible stop. This does the
5790 resuming part; waiting for the next event is done elsewhere. */
5791
5792 static void
5793 keep_going (struct execution_control_state *ecs)
5794 {
5795 /* Make sure normal_stop is called if we get a QUIT handled before
5796 reaching resume. */
5797 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5798
5799 /* Save the pc before execution, to compare with pc after stop. */
5800 ecs->event_thread->prev_pc
5801 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5802
5803 if (ecs->event_thread->control.trap_expected
5804 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5805 {
5806 /* We haven't yet gotten our trap, and either: intercepted a
5807 non-signal event (e.g., a fork); or took a signal which we
5808 are supposed to pass through to the inferior. Simply
5809 continue. */
5810 discard_cleanups (old_cleanups);
5811 resume (currently_stepping (ecs->event_thread),
5812 ecs->event_thread->suspend.stop_signal);
5813 }
5814 else
5815 {
5816 volatile struct gdb_exception e;
5817 struct regcache *regcache = get_current_regcache ();
5818
5819 /* Either the trap was not expected, but we are continuing
5820 anyway (if we got a signal, the user asked it be passed to
5821 the child)
5822 -- or --
5823 We got our expected trap, but decided we should resume from
5824 it.
5825
5826 We're going to run this baby now!
5827
5828 Note that insert_breakpoints won't try to re-insert
5829 already inserted breakpoints. Therefore, we don't
5830 care if breakpoints were already inserted, or not. */
5831
5832 /* If we need to step over a breakpoint, and we're not using
5833 displaced stepping to do so, insert all breakpoints
5834 (watchpoints, etc.) but the one we're stepping over, step one
5835 instruction, and then re-insert the breakpoint when that step
5836 is finished. */
5837 if ((ecs->hit_singlestep_breakpoint
5838 || thread_still_needs_step_over (ecs->event_thread))
5839 && !use_displaced_stepping (get_regcache_arch (regcache)))
5840 {
5841 set_step_over_info (get_regcache_aspace (regcache),
5842 regcache_read_pc (regcache));
5843 }
5844 else
5845 clear_step_over_info ();
5846
5847 /* Stop stepping if inserting breakpoints fails. */
5848 TRY_CATCH (e, RETURN_MASK_ERROR)
5849 {
5850 insert_breakpoints ();
5851 }
5852 if (e.reason < 0)
5853 {
5854 exception_print (gdb_stderr, e);
5855 stop_stepping (ecs);
5856 return;
5857 }
5858
5859 ecs->event_thread->control.trap_expected
5860 = (ecs->event_thread->stepping_over_breakpoint
5861 || ecs->hit_singlestep_breakpoint);
5862
5863 /* Do not deliver GDB_SIGNAL_TRAP (except when the user
5864 explicitly specifies that such a signal should be delivered
5865 to the target program). Typically, that would occur when a
5866 user is debugging a target monitor on a simulator: the target
5867 monitor sets a breakpoint; the simulator encounters this
5868 breakpoint and halts the simulation handing control to GDB;
5869 GDB, noting that the stop address doesn't map to any known
5870 breakpoint, returns control back to the simulator; the
5871 simulator then delivers the hardware equivalent of a
5872 GDB_SIGNAL_TRAP to the program being debugged. */
5873 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5874 && !signal_program[ecs->event_thread->suspend.stop_signal])
5875 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5876
5877 discard_cleanups (old_cleanups);
5878 resume (currently_stepping (ecs->event_thread),
5879 ecs->event_thread->suspend.stop_signal);
5880 }
5881
5882 prepare_to_wait (ecs);
5883 }
5884
5885 /* This function normally comes after a resume, before
5886 handle_inferior_event exits. It takes care of any last bits of
5887 housekeeping, and sets the all-important wait_some_more flag. */
5888
5889 static void
5890 prepare_to_wait (struct execution_control_state *ecs)
5891 {
5892 if (debug_infrun)
5893 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5894
5895 /* This is the old end of the while loop. Let everybody know we
5896 want to wait for the inferior some more and get called again
5897 soon. */
5898 ecs->wait_some_more = 1;
5899 }
5900
5901 /* We are done with the step range of a step/next/si/ni command.
5902 Called once for each n of a "step n" operation. Notify observers
5903 if not in the middle of doing a "step N" operation for N > 1. */
5904
5905 static void
5906 end_stepping_range (void)
5907 {
5908 if (inferior_thread ()->step_multi
5909 && inferior_thread ()->control.stop_step)
5910 return;
5911
5912 observer_notify_end_stepping_range ();
5913 }
5914
5915 /* Several print_*_reason functions to print why the inferior has stopped.
5916 We always print something when the inferior exits, or receives a signal.
5917 The rest of the cases are dealt with later on in normal_stop and
5918 print_it_typical. Ideally there should be a call to one of these
5919 print_*_reason functions from handle_inferior_event each time
5920 stop_stepping is called.
5921
5922 Note that we don't call these directly, instead we delegate that to
5923 the interpreters, through observers. Interpreters then call these
5924 with whatever uiout is right. */
5925
5926 void
5927 print_end_stepping_range_reason (struct ui_out *uiout)
5928 {
5929 /* For CLI-like interpreters, print nothing. */
5930
5931 if (ui_out_is_mi_like_p (uiout))
5932 {
5933 ui_out_field_string (uiout, "reason",
5934 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5935 }
5936 }
5937
5938 void
5939 print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
5940 {
5941 annotate_signalled ();
5942 if (ui_out_is_mi_like_p (uiout))
5943 ui_out_field_string
5944 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5945 ui_out_text (uiout, "\nProgram terminated with signal ");
5946 annotate_signal_name ();
5947 ui_out_field_string (uiout, "signal-name",
5948 gdb_signal_to_name (siggnal));
5949 annotate_signal_name_end ();
5950 ui_out_text (uiout, ", ");
5951 annotate_signal_string ();
5952 ui_out_field_string (uiout, "signal-meaning",
5953 gdb_signal_to_string (siggnal));
5954 annotate_signal_string_end ();
5955 ui_out_text (uiout, ".\n");
5956 ui_out_text (uiout, "The program no longer exists.\n");
5957 }
5958
5959 void
5960 print_exited_reason (struct ui_out *uiout, int exitstatus)
5961 {
5962 struct inferior *inf = current_inferior ();
5963 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
5964
5965 annotate_exited (exitstatus);
5966 if (exitstatus)
5967 {
5968 if (ui_out_is_mi_like_p (uiout))
5969 ui_out_field_string (uiout, "reason",
5970 async_reason_lookup (EXEC_ASYNC_EXITED));
5971 ui_out_text (uiout, "[Inferior ");
5972 ui_out_text (uiout, plongest (inf->num));
5973 ui_out_text (uiout, " (");
5974 ui_out_text (uiout, pidstr);
5975 ui_out_text (uiout, ") exited with code ");
5976 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
5977 ui_out_text (uiout, "]\n");
5978 }
5979 else
5980 {
5981 if (ui_out_is_mi_like_p (uiout))
5982 ui_out_field_string
5983 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5984 ui_out_text (uiout, "[Inferior ");
5985 ui_out_text (uiout, plongest (inf->num));
5986 ui_out_text (uiout, " (");
5987 ui_out_text (uiout, pidstr);
5988 ui_out_text (uiout, ") exited normally]\n");
5989 }
5990 }
5991
5992 void
5993 print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
5994 {
5995 annotate_signal ();
5996
5997 if (siggnal == GDB_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5998 {
5999 struct thread_info *t = inferior_thread ();
6000
6001 ui_out_text (uiout, "\n[");
6002 ui_out_field_string (uiout, "thread-name",
6003 target_pid_to_str (t->ptid));
6004 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
6005 ui_out_text (uiout, " stopped");
6006 }
6007 else
6008 {
6009 ui_out_text (uiout, "\nProgram received signal ");
6010 annotate_signal_name ();
6011 if (ui_out_is_mi_like_p (uiout))
6012 ui_out_field_string
6013 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
6014 ui_out_field_string (uiout, "signal-name",
6015 gdb_signal_to_name (siggnal));
6016 annotate_signal_name_end ();
6017 ui_out_text (uiout, ", ");
6018 annotate_signal_string ();
6019 ui_out_field_string (uiout, "signal-meaning",
6020 gdb_signal_to_string (siggnal));
6021 annotate_signal_string_end ();
6022 }
6023 ui_out_text (uiout, ".\n");
6024 }
6025
6026 void
6027 print_no_history_reason (struct ui_out *uiout)
6028 {
6029 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
6030 }
6031
6032 /* Print current location without a level number, if we have changed
6033 functions or hit a breakpoint. Print source line if we have one.
6034 bpstat_print contains the logic deciding in detail what to print,
6035 based on the event(s) that just occurred. */
6036
6037 void
6038 print_stop_event (struct target_waitstatus *ws)
6039 {
6040 int bpstat_ret;
6041 int source_flag;
6042 int do_frame_printing = 1;
6043 struct thread_info *tp = inferior_thread ();
6044
6045 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
6046 switch (bpstat_ret)
6047 {
6048 case PRINT_UNKNOWN:
6049 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
6050 should) carry around the function and does (or should) use
6051 that when doing a frame comparison. */
6052 if (tp->control.stop_step
6053 && frame_id_eq (tp->control.step_frame_id,
6054 get_frame_id (get_current_frame ()))
6055 && step_start_function == find_pc_function (stop_pc))
6056 {
6057 /* Finished step, just print source line. */
6058 source_flag = SRC_LINE;
6059 }
6060 else
6061 {
6062 /* Print location and source line. */
6063 source_flag = SRC_AND_LOC;
6064 }
6065 break;
6066 case PRINT_SRC_AND_LOC:
6067 /* Print location and source line. */
6068 source_flag = SRC_AND_LOC;
6069 break;
6070 case PRINT_SRC_ONLY:
6071 source_flag = SRC_LINE;
6072 break;
6073 case PRINT_NOTHING:
6074 /* Something bogus. */
6075 source_flag = SRC_LINE;
6076 do_frame_printing = 0;
6077 break;
6078 default:
6079 internal_error (__FILE__, __LINE__, _("Unknown value."));
6080 }
6081
6082 /* The behavior of this routine with respect to the source
6083 flag is:
6084 SRC_LINE: Print only source line
6085 LOCATION: Print only location
6086 SRC_AND_LOC: Print location and source line. */
6087 if (do_frame_printing)
6088 print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
6089
6090 /* Display the auto-display expressions. */
6091 do_displays ();
6092 }
6093
6094 /* Here to return control to GDB when the inferior stops for real.
6095 Print appropriate messages, remove breakpoints, give terminal our modes.
6096
6097 STOP_PRINT_FRAME nonzero means print the executing frame
6098 (pc, function, args, file, line number and line text).
6099 BREAKPOINTS_FAILED nonzero means stop was due to error
6100 attempting to insert breakpoints. */
6101
6102 void
6103 normal_stop (void)
6104 {
6105 struct target_waitstatus last;
6106 ptid_t last_ptid;
6107 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
6108
6109 get_last_target_status (&last_ptid, &last);
6110
6111 /* If an exception is thrown from this point on, make sure to
6112 propagate GDB's knowledge of the executing state to the
6113 frontend/user running state. A QUIT is an easy exception to see
6114 here, so do this before any filtered output. */
6115 if (!non_stop)
6116 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
6117 else if (last.kind != TARGET_WAITKIND_SIGNALLED
6118 && last.kind != TARGET_WAITKIND_EXITED
6119 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6120 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
6121
6122 /* As with the notification of thread events, we want to delay
6123 notifying the user that we've switched thread context until
6124 the inferior actually stops.
6125
6126 There's no point in saying anything if the inferior has exited.
6127 Note that SIGNALLED here means "exited with a signal", not
6128 "received a signal".
6129
6130 Also skip saying anything in non-stop mode. In that mode, as we
6131 don't want GDB to switch threads behind the user's back, to avoid
6132 races where the user is typing a command to apply to thread x,
6133 but GDB switches to thread y before the user finishes entering
6134 the command, fetch_inferior_event installs a cleanup to restore
6135 the current thread back to the thread the user had selected right
6136 after this event is handled, so we're not really switching, only
6137 informing of a stop. */
6138 if (!non_stop
6139 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
6140 && target_has_execution
6141 && last.kind != TARGET_WAITKIND_SIGNALLED
6142 && last.kind != TARGET_WAITKIND_EXITED
6143 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6144 {
6145 target_terminal_ours_for_output ();
6146 printf_filtered (_("[Switching to %s]\n"),
6147 target_pid_to_str (inferior_ptid));
6148 annotate_thread_changed ();
6149 previous_inferior_ptid = inferior_ptid;
6150 }
6151
6152 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
6153 {
6154 gdb_assert (sync_execution || !target_can_async_p ());
6155
6156 target_terminal_ours_for_output ();
6157 printf_filtered (_("No unwaited-for children left.\n"));
6158 }
6159
6160 if (!breakpoints_always_inserted_mode () && target_has_execution)
6161 {
6162 if (remove_breakpoints ())
6163 {
6164 target_terminal_ours_for_output ();
6165 printf_filtered (_("Cannot remove breakpoints because "
6166 "program is no longer writable.\nFurther "
6167 "execution is probably impossible.\n"));
6168 }
6169 }
6170
6171 /* If an auto-display called a function and that got a signal,
6172 delete that auto-display to avoid an infinite recursion. */
6173
6174 if (stopped_by_random_signal)
6175 disable_current_display ();
6176
6177 /* Don't print a message if in the middle of doing a "step n"
6178 operation for n > 1. */
6179 if (target_has_execution
6180 && last.kind != TARGET_WAITKIND_SIGNALLED
6181 && last.kind != TARGET_WAITKIND_EXITED
6182 && inferior_thread ()->step_multi
6183 && inferior_thread ()->control.stop_step)
6184 goto done;
6185
6186 target_terminal_ours ();
6187 async_enable_stdin ();
6188
6189 /* Set the current source location. This will also happen if we
6190 display the frame below, but the current SAL will be incorrect
6191 during a user hook-stop function. */
6192 if (has_stack_frames () && !stop_stack_dummy)
6193 set_current_sal_from_frame (get_current_frame ());
6194
6195 /* Let the user/frontend see the threads as stopped, but do nothing
6196 if the thread was running an infcall. We may be e.g., evaluating
6197 a breakpoint condition. In that case, the thread had state
6198 THREAD_RUNNING before the infcall, and shall remain set to
6199 running, all without informing the user/frontend about state
6200 transition changes. If this is actually a call command, then the
6201 thread was originally already stopped, so there's no state to
6202 finish either. */
6203 if (target_has_execution && inferior_thread ()->control.in_infcall)
6204 discard_cleanups (old_chain);
6205 else
6206 do_cleanups (old_chain);
6207
6208 /* Look up the hook_stop and run it (CLI internally handles problem
6209 of stop_command's pre-hook not existing). */
6210 if (stop_command)
6211 catch_errors (hook_stop_stub, stop_command,
6212 "Error while running hook_stop:\n", RETURN_MASK_ALL);
6213
6214 if (!has_stack_frames ())
6215 goto done;
6216
6217 if (last.kind == TARGET_WAITKIND_SIGNALLED
6218 || last.kind == TARGET_WAITKIND_EXITED)
6219 goto done;
6220
6221 /* Select innermost stack frame - i.e., current frame is frame 0,
6222 and current location is based on that.
6223 Don't do this on return from a stack dummy routine,
6224 or if the program has exited. */
6225
6226 if (!stop_stack_dummy)
6227 {
6228 select_frame (get_current_frame ());
6229
6230 /* If --batch-silent is enabled then there's no need to print the current
6231 source location, and trying to do so risks causing an error message about
6232 missing source files. */
6233 if (stop_print_frame && !batch_silent)
6234 print_stop_event (&last);
6235 }
6236
6237 /* Save the function value return registers, if we care.
6238 We might be about to restore their previous contents. */
6239 if (inferior_thread ()->control.proceed_to_finish
6240 && execution_direction != EXEC_REVERSE)
6241 {
6242 /* This should not be necessary. */
6243 if (stop_registers)
6244 regcache_xfree (stop_registers);
6245
6246 /* NB: The copy goes through to the target picking up the value of
6247 all the registers. */
6248 stop_registers = regcache_dup (get_current_regcache ());
6249 }
6250
6251 if (stop_stack_dummy == STOP_STACK_DUMMY)
6252 {
6253 /* Pop the empty frame that contains the stack dummy.
6254 This also restores inferior state prior to the call
6255 (struct infcall_suspend_state). */
6256 struct frame_info *frame = get_current_frame ();
6257
6258 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
6259 frame_pop (frame);
6260 /* frame_pop() calls reinit_frame_cache as the last thing it
6261 does which means there's currently no selected frame. We
6262 don't need to re-establish a selected frame if the dummy call
6263 returns normally, that will be done by
6264 restore_infcall_control_state. However, we do have to handle
6265 the case where the dummy call is returning after being
6266 stopped (e.g. the dummy call previously hit a breakpoint).
6267 We can't know which case we have so just always re-establish
6268 a selected frame here. */
6269 select_frame (get_current_frame ());
6270 }
6271
6272 done:
6273 annotate_stopped ();
6274
6275 /* Suppress the stop observer if we're in the middle of:
6276
6277 - a step n (n > 1), as there are still more steps to be done.
6278
6279 - a "finish" command, as the observer will be called in
6280 finish_command_continuation, so it can include the inferior
6281 function's return value.
6282
6283 - calling an inferior function, as we pretend the inferior didn't
6284 run at all. The return value of the call is handled by the
6285 expression evaluator, through call_function_by_hand. */
6286
6287 if (!target_has_execution
6288 || last.kind == TARGET_WAITKIND_SIGNALLED
6289 || last.kind == TARGET_WAITKIND_EXITED
6290 || last.kind == TARGET_WAITKIND_NO_RESUMED
6291 || (!(inferior_thread ()->step_multi
6292 && inferior_thread ()->control.stop_step)
6293 && !(inferior_thread ()->control.stop_bpstat
6294 && inferior_thread ()->control.proceed_to_finish)
6295 && !inferior_thread ()->control.in_infcall))
6296 {
6297 if (!ptid_equal (inferior_ptid, null_ptid))
6298 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
6299 stop_print_frame);
6300 else
6301 observer_notify_normal_stop (NULL, stop_print_frame);
6302 }
6303
6304 if (target_has_execution)
6305 {
6306 if (last.kind != TARGET_WAITKIND_SIGNALLED
6307 && last.kind != TARGET_WAITKIND_EXITED)
6308 /* Delete the breakpoint we stopped at, if it wants to be deleted.
6309 Delete any breakpoint that is to be deleted at the next stop. */
6310 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
6311 }
6312
6313 /* Try to get rid of automatically added inferiors that are no
6314 longer needed. Keeping those around slows down things linearly.
6315 Note that this never removes the current inferior. */
6316 prune_inferiors ();
6317 }
6318
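/* Helper passed to catch_errors by normal_stop: CMD is the "stop"
command element, and running its pre-hook executes any user-defined
hook-stop commands. */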
6319 static int
6320 hook_stop_stub (void *cmd)
6321 {
6322 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
6323 return (0);
6324 }
6325 \f
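/* Simple accessors for the per-signal tables consulted below: whether
GDB stops on, prints, or passes a given signal to the program (see
the "handle" command further down). */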
6326 int
6327 signal_stop_state (int signo)
6328 {
6329 return signal_stop[signo];
6330 }
6331
6332 int
6333 signal_print_state (int signo)
6334 {
6335 return signal_print[signo];
6336 }
6337
6338 int
6339 signal_pass_state (int signo)
6340 {
6341 return signal_program[signo];
6342 }
6343
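/* Recompute the cached signal_pass entry for SIGNO, or for every
signal if SIGNO is -1.  A signal can be passed straight to the
inferior without reporting to GDB only when we neither stop nor
print on it, the user wants it delivered, and no catchpoint is
watching it. */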
6344 static void
6345 signal_cache_update (int signo)
6346 {
6347 if (signo == -1)
6348 {
6349 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
6350 signal_cache_update (signo);
6351
6352 return;
6353 }
6354
6355 signal_pass[signo] = (signal_stop[signo] == 0
6356 && signal_print[signo] == 0
6357 && signal_program[signo] == 1
6358 && signal_catch[signo] == 0);
6359 }
6360
6361 int
6362 signal_stop_update (int signo, int state)
6363 {
6364 int ret = signal_stop[signo];
6365
6366 signal_stop[signo] = state;
6367 signal_cache_update (signo);
6368 return ret;
6369 }
6370
6371 int
6372 signal_print_update (int signo, int state)
6373 {
6374 int ret = signal_print[signo];
6375
6376 signal_print[signo] = state;
6377 signal_cache_update (signo);
6378 return ret;
6379 }
6380
6381 int
6382 signal_pass_update (int signo, int state)
6383 {
6384 int ret = signal_program[signo];
6385
6386 signal_program[signo] = state;
6387 signal_cache_update (signo);
6388 return ret;
6389 }
6390
6391 /* Update the global 'signal_catch' from INFO and notify the
6392 target. */
6393
6394 void
6395 signal_catch_update (const unsigned int *info)
6396 {
6397 int i;
6398
6399 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
6400 signal_catch[i] = info[i] > 0;
6401 signal_cache_update (-1);
6402 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6403 }
6404
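/* Print the column headings of the signal disposition table (shown,
for example, when "handle" is used from a terminal; see below). */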
6405 static void
6406 sig_print_header (void)
6407 {
6408 printf_filtered (_("Signal Stop\tPrint\tPass "
6409 "to program\tDescription\n"));
6410 }
6411
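/* Print one row of that table for OURSIG: its name, the current
stop/print/pass settings, and a short description. */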
6412 static void
6413 sig_print_info (enum gdb_signal oursig)
6414 {
6415 const char *name = gdb_signal_to_name (oursig);
6416 int name_padding = 13 - strlen (name);
6417
6418 if (name_padding <= 0)
6419 name_padding = 0;
6420
6421 printf_filtered ("%s", name);
6422 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6423 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6424 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6425 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6426 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
6427 }
6428
6429 /* Specify how various signals in the inferior should be handled. */
6430
6431 static void
6432 handle_command (char *args, int from_tty)
6433 {
6434 char **argv;
6435 int digits, wordlen;
6436 int sigfirst, signum, siglast;
6437 enum gdb_signal oursig;
6438 int allsigs;
6439 int nsigs;
6440 unsigned char *sigs;
6441 struct cleanup *old_chain;
6442
6443 if (args == NULL)
6444 {
6445 error_no_arg (_("signal to handle"));
6446 }
6447
6448 /* Allocate and zero an array of flags for which signals to handle. */
6449
6450 nsigs = (int) GDB_SIGNAL_LAST;
6451 sigs = (unsigned char *) alloca (nsigs);
6452 memset (sigs, 0, nsigs);
6453
6454 /* Break the command line up into args. */
6455
6456 argv = gdb_buildargv (args);
6457 old_chain = make_cleanup_freeargv (argv);
6458
6459 /* Walk through the args, looking for signal oursigs, signal names, and
6460 actions. Signal numbers and signal names may be interspersed with
6461 actions, with the actions being performed for all signals cumulatively
6462 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
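/* For example, "handle SIGUSR1 nostop noprint pass" lets SIGUSR1 be
delivered to the program silently, while "handle 14-15 stop print"
makes GDB stop and announce signals 14 through 15 (interpreted in
GDB's own numbering; see the numeric-signal note below). */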
6463
6464 while (*argv != NULL)
6465 {
6466 wordlen = strlen (*argv);
6467 for (digits = 0; isdigit ((*argv)[digits]); digits++)
6468 {;
6469 }
6470 allsigs = 0;
6471 sigfirst = siglast = -1;
6472
6473 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
6474 {
6475 /* Apply action to all signals except those used by the
6476 debugger. Silently skip those. */
6477 allsigs = 1;
6478 sigfirst = 0;
6479 siglast = nsigs - 1;
6480 }
6481 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
6482 {
6483 SET_SIGS (nsigs, sigs, signal_stop);
6484 SET_SIGS (nsigs, sigs, signal_print);
6485 }
6486 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
6487 {
6488 UNSET_SIGS (nsigs, sigs, signal_program);
6489 }
6490 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
6491 {
6492 SET_SIGS (nsigs, sigs, signal_print);
6493 }
6494 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
6495 {
6496 SET_SIGS (nsigs, sigs, signal_program);
6497 }
6498 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
6499 {
6500 UNSET_SIGS (nsigs, sigs, signal_stop);
6501 }
6502 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
6503 {
6504 SET_SIGS (nsigs, sigs, signal_program);
6505 }
6506 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
6507 {
6508 UNSET_SIGS (nsigs, sigs, signal_print);
6509 UNSET_SIGS (nsigs, sigs, signal_stop);
6510 }
6511 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
6512 {
6513 UNSET_SIGS (nsigs, sigs, signal_program);
6514 }
6515 else if (digits > 0)
6516 {
6517 /* It is numeric. The numeric signal refers to our own
6518 internal signal numbering from target.h, not to host/target
6519 signal numbers. This is a feature; users really should be
6520 using symbolic names anyway, and the common ones like
6521 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
6522
6523 sigfirst = siglast = (int)
6524 gdb_signal_from_command (atoi (*argv));
6525 if ((*argv)[digits] == '-')
6526 {
6527 siglast = (int)
6528 gdb_signal_from_command (atoi ((*argv) + digits + 1));
6529 }
6530 if (sigfirst > siglast)
6531 {
6532 /* Bet he didn't figure we'd think of this case... */
6533 signum = sigfirst;
6534 sigfirst = siglast;
6535 siglast = signum;
6536 }
6537 }
6538 else
6539 {
6540 oursig = gdb_signal_from_name (*argv);
6541 if (oursig != GDB_SIGNAL_UNKNOWN)
6542 {
6543 sigfirst = siglast = (int) oursig;
6544 }
6545 else
6546 {
6547 /* Not a number and not a recognized flag word => complain. */
6548 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
6549 }
6550 }
6551
6552 /* If any signal numbers or symbol names were found, set flags for
6553 which signals to apply actions to. */
6554
6555 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
6556 {
6557 switch ((enum gdb_signal) signum)
6558 {
6559 case GDB_SIGNAL_TRAP:
6560 case GDB_SIGNAL_INT:
6561 if (!allsigs && !sigs[signum])
6562 {
6563 if (query (_("%s is used by the debugger.\n\
6564 Are you sure you want to change it? "),
6565 gdb_signal_to_name ((enum gdb_signal) signum)))
6566 {
6567 sigs[signum] = 1;
6568 }
6569 else
6570 {
6571 printf_unfiltered (_("Not confirmed, unchanged.\n"));
6572 gdb_flush (gdb_stdout);
6573 }
6574 }
6575 break;
6576 case GDB_SIGNAL_0:
6577 case GDB_SIGNAL_DEFAULT:
6578 case GDB_SIGNAL_UNKNOWN:
6579 /* Make sure that "all" doesn't print these. */
6580 break;
6581 default:
6582 sigs[signum] = 1;
6583 break;
6584 }
6585 }
6586
6587 argv++;
6588 }
6589
6590 for (signum = 0; signum < nsigs; signum++)
6591 if (sigs[signum])
6592 {
6593 signal_cache_update (-1);
6594 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6595 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
6596
6597 if (from_tty)
6598 {
6599 /* Show the results. */
6600 sig_print_header ();
6601 for (; signum < nsigs; signum++)
6602 if (sigs[signum])
6603 sig_print_info (signum);
6604 }
6605
6606 break;
6607 }
6608
6609 do_cleanups (old_chain);
6610 }
6611
6612 /* Complete the "handle" command. */
6613
6614 static VEC (char_ptr) *
6615 handle_completer (struct cmd_list_element *ignore,
6616 const char *text, const char *word)
6617 {
6618 VEC (char_ptr) *vec_signals, *vec_keywords, *return_val;
6619 static const char * const keywords[] =
6620 {
6621 "all",
6622 "stop",
6623 "ignore",
6624 "print",
6625 "pass",
6626 "nostop",
6627 "noignore",
6628 "noprint",
6629 "nopass",
6630 NULL,
6631 };
6632
6633 vec_signals = signal_completer (ignore, text, word);
6634 vec_keywords = complete_on_enum (keywords, word, word);
6635
6636 return_val = VEC_merge (char_ptr, vec_signals, vec_keywords);
6637 VEC_free (char_ptr, vec_signals);
6638 VEC_free (char_ptr, vec_keywords);
6639 return return_val;
6640 }
6641
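/* Implement the xdb "z" command.  Translate the xdb-style "SIGNAL FLAG"
arguments into an equivalent "handle" command string and dispatch it.  */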
6642 static void
6643 xdb_handle_command (char *args, int from_tty)
6644 {
6645 char **argv;
6646 struct cleanup *old_chain;
6647
6648 if (args == NULL)
6649 error_no_arg (_("xdb command"));
6650
6651 /* Break the command line up into args. */
6652
6653 argv = gdb_buildargv (args);
6654 old_chain = make_cleanup_freeargv (argv);
6655 if (argv[1] != (char *) NULL)
6656 {
6657 char *argBuf;
6658 int bufLen;
6659
6660 bufLen = strlen (argv[0]) + 20;
6661 argBuf = (char *) xmalloc (bufLen);
6662 if (argBuf)
6663 {
6664 int validFlag = 1;
6665 enum gdb_signal oursig;
6666
6667 oursig = gdb_signal_from_name (argv[0]);
6668 memset (argBuf, 0, bufLen);
6669 if (strcmp (argv[1], "Q") == 0)
6670 sprintf (argBuf, "%s %s", argv[0], "noprint");
6671 else
6672 {
6673 if (strcmp (argv[1], "s") == 0)
6674 {
6675 if (!signal_stop[oursig])
6676 sprintf (argBuf, "%s %s", argv[0], "stop");
6677 else
6678 sprintf (argBuf, "%s %s", argv[0], "nostop");
6679 }
6680 else if (strcmp (argv[1], "i") == 0)
6681 {
6682 if (!signal_program[oursig])
6683 sprintf (argBuf, "%s %s", argv[0], "pass");
6684 else
6685 sprintf (argBuf, "%s %s", argv[0], "nopass");
6686 }
6687 else if (strcmp (argv[1], "r") == 0)
6688 {
6689 if (!signal_print[oursig])
6690 sprintf (argBuf, "%s %s", argv[0], "print");
6691 else
6692 sprintf (argBuf, "%s %s", argv[0], "noprint");
6693 }
6694 else
6695 validFlag = 0;
6696 }
6697 if (validFlag)
6698 handle_command (argBuf, from_tty);
6699 else
6700 printf_filtered (_("Invalid signal handling flag.\n"));
6701 if (argBuf)
6702 xfree (argBuf);
6703 }
6704 }
6705 do_cleanups (old_chain);
6706 }
6707
6708 enum gdb_signal
6709 gdb_signal_from_command (int num)
6710 {
6711 if (num >= 1 && num <= 15)
6712 return (enum gdb_signal) num;
6713 error (_("Only signals 1-15 are valid as numeric signals.\n\
6714 Use \"info signals\" for a list of symbolic signals."));
6715 }
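/* For this 1-15 range the GDB_SIGNAL_* numbering is intended to match the
traditional Unix values, so, for example, "handle 2" acts on
GDB_SIGNAL_INT and "handle 14" on GDB_SIGNAL_ALRM.  */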
6716
6717 /* Print current contents of the tables set by the handle command.
6718 It is possible we should just be printing signals actually used
6719 by the current target (but for things to work right when switching
6720 targets, all signals should be in the signal tables). */
6721
6722 static void
6723 signals_info (char *signum_exp, int from_tty)
6724 {
6725 enum gdb_signal oursig;
6726
6727 sig_print_header ();
6728
6729 if (signum_exp)
6730 {
6731 /* First see if this is a symbol name. */
6732 oursig = gdb_signal_from_name (signum_exp);
6733 if (oursig == GDB_SIGNAL_UNKNOWN)
6734 {
6735 /* No, try numeric. */
6736 oursig =
6737 gdb_signal_from_command (parse_and_eval_long (signum_exp));
6738 }
6739 sig_print_info (oursig);
6740 return;
6741 }
6742
6743 printf_filtered ("\n");
6744 /* These ugly casts brought to you by the native VAX compiler. */
6745 for (oursig = GDB_SIGNAL_FIRST;
6746 (int) oursig < (int) GDB_SIGNAL_LAST;
6747 oursig = (enum gdb_signal) ((int) oursig + 1))
6748 {
6749 QUIT;
6750
6751 if (oursig != GDB_SIGNAL_UNKNOWN
6752 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
6753 sig_print_info (oursig);
6754 }
6755
6756 printf_filtered (_("\nUse the \"handle\" command "
6757 "to change these tables.\n"));
6758 }
6759
6760 /* Check if it makes sense to read $_siginfo from the current thread
6761 at this point. If not, throw an error. */
6762
6763 static void
6764 validate_siginfo_access (void)
6765 {
6766 /* No current inferior, no siginfo. */
6767 if (ptid_equal (inferior_ptid, null_ptid))
6768 error (_("No thread selected."));
6769
6770 /* Don't try to read from a dead thread. */
6771 if (is_exited (inferior_ptid))
6772 error (_("The current thread has terminated."));
6773
6774 /* ... or from a spinning thread. */
6775 if (is_running (inferior_ptid))
6776 error (_("Selected thread is running."));
6777 }
6778
6779 /* The $_siginfo convenience variable is a bit special. We don't know
6780 for sure the type of the value until we actually have a chance to
6781 fetch the data. The type can change depending on gdbarch, so it is
6782 also dependent on which thread you have selected. This is handled by:
6783
6784 1. making $_siginfo be an internalvar that creates a new value on
6785 access.
6786
6787 2. making the value of $_siginfo be an lval_computed value. */
6788
6789 /* This function implements the lval_computed support for reading a
6790 $_siginfo value. */
6791
6792 static void
6793 siginfo_value_read (struct value *v)
6794 {
6795 LONGEST transferred;
6796
6797 validate_siginfo_access ();
6798
6799 transferred =
6800 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
6801 NULL,
6802 value_contents_all_raw (v),
6803 value_offset (v),
6804 TYPE_LENGTH (value_type (v)));
6805
6806 if (transferred != TYPE_LENGTH (value_type (v)))
6807 error (_("Unable to read siginfo"));
6808 }
6809
6810 /* This function implements the lval_computed support for writing a
6811 $_siginfo value. */
6812
6813 static void
6814 siginfo_value_write (struct value *v, struct value *fromval)
6815 {
6816 LONGEST transferred;
6817
6818 validate_siginfo_access ();
6819
6820 transferred = target_write (&current_target,
6821 TARGET_OBJECT_SIGNAL_INFO,
6822 NULL,
6823 value_contents_all_raw (fromval),
6824 value_offset (v),
6825 TYPE_LENGTH (value_type (fromval)));
6826
6827 if (transferred != TYPE_LENGTH (value_type (fromval)))
6828 error (_("Unable to write siginfo"));
6829 }
6830
6831 static const struct lval_funcs siginfo_value_funcs =
6832 {
6833 siginfo_value_read,
6834 siginfo_value_write
6835 };
6836
6837 /* Return a new value with the correct type for the siginfo object of
6838 the current thread using architecture GDBARCH. Return a void value
6839 if there's no object available. */
6840
6841 static struct value *
6842 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
6843 void *ignore)
6844 {
6845 if (target_has_stack
6846 && !ptid_equal (inferior_ptid, null_ptid)
6847 && gdbarch_get_siginfo_type_p (gdbarch))
6848 {
6849 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6850
6851 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
6852 }
6853
6854 return allocate_value (builtin_type (gdbarch)->builtin_void);
6855 }
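/* Illustrative use from the CLI, assuming the current target actually
provides TARGET_OBJECT_SIGNAL_INFO:

     (gdb) print $_siginfo.si_signo

Reading a member goes through siginfo_value_read above; assigning to
one goes through siginfo_value_write.  */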
6856
6857 \f
6858 /* infcall_suspend_state contains state about the program itself like its
6859 registers and any signal it received when it last stopped.
6860 This state must be restored regardless of how the inferior function call
6861 ends (either successfully, or after it hits a breakpoint or signal)
6862 if the program is to properly continue where it left off. */
6863
6864 struct infcall_suspend_state
6865 {
6866 struct thread_suspend_state thread_suspend;
6867 #if 0 /* Currently unused and empty structures are not valid C. */
6868 struct inferior_suspend_state inferior_suspend;
6869 #endif
6870
6871 /* Other fields: */
6872 CORE_ADDR stop_pc;
6873 struct regcache *registers;
6874
6875 /* The gdbarch defining the layout of SIGINFO_DATA, or NULL if no siginfo was saved. */
6876 struct gdbarch *siginfo_gdbarch;
6877
6878 /* The saved siginfo; its layout is defined by SIGINFO_GDBARCH and its
6879 length is TYPE_LENGTH (gdbarch_get_siginfo_type ()). The content is
6880 only meaningful for that gdbarch. */
6881 gdb_byte *siginfo_data;
6882 };
6883
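/* Save the program state -- stop signal, siginfo (when the target and
gdbarch provide it), stop PC and registers -- so it can be restored
after an inferior function call.  */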
6884 struct infcall_suspend_state *
6885 save_infcall_suspend_state (void)
6886 {
6887 struct infcall_suspend_state *inf_state;
6888 struct thread_info *tp = inferior_thread ();
6889 #if 0
6890 struct inferior *inf = current_inferior ();
6891 #endif
6892 struct regcache *regcache = get_current_regcache ();
6893 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6894 gdb_byte *siginfo_data = NULL;
6895
6896 if (gdbarch_get_siginfo_type_p (gdbarch))
6897 {
6898 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6899 size_t len = TYPE_LENGTH (type);
6900 struct cleanup *back_to;
6901
6902 siginfo_data = xmalloc (len);
6903 back_to = make_cleanup (xfree, siginfo_data);
6904
6905 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6906 siginfo_data, 0, len) == len)
6907 discard_cleanups (back_to);
6908 else
6909 {
6910 /* Errors ignored. */
6911 do_cleanups (back_to);
6912 siginfo_data = NULL;
6913 }
6914 }
6915
6916 inf_state = XCNEW (struct infcall_suspend_state);
6917
6918 if (siginfo_data)
6919 {
6920 inf_state->siginfo_gdbarch = gdbarch;
6921 inf_state->siginfo_data = siginfo_data;
6922 }
6923
6924 inf_state->thread_suspend = tp->suspend;
6925 #if 0 /* Currently unused and empty structures are not valid C. */
6926 inf_state->inferior_suspend = inf->suspend;
6927 #endif
6928
6929 /* run_inferior_call will not use the signal due to its `proceed' call with
6930 GDB_SIGNAL_0 anyway. */
6931 tp->suspend.stop_signal = GDB_SIGNAL_0;
6932
6933 inf_state->stop_pc = stop_pc;
6934
6935 inf_state->registers = regcache_dup (regcache);
6936
6937 return inf_state;
6938 }
6939
6940 /* Restore the program state saved in INF_STATE. */
6941
6942 void
6943 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6944 {
6945 struct thread_info *tp = inferior_thread ();
6946 #if 0
6947 struct inferior *inf = current_inferior ();
6948 #endif
6949 struct regcache *regcache = get_current_regcache ();
6950 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6951
6952 tp->suspend = inf_state->thread_suspend;
6953 #if 0 /* Currently unused and empty structures are not valid C. */
6954 inf->suspend = inf_state->inferior_suspend;
6955 #endif
6956
6957 stop_pc = inf_state->stop_pc;
6958
6959 if (inf_state->siginfo_gdbarch == gdbarch)
6960 {
6961 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6962
6963 /* Errors ignored. */
6964 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6965 inf_state->siginfo_data, 0, TYPE_LENGTH (type));
6966 }
6967
6968 /* The inferior can be gone if the user types "print exit(0)"
6969 (and perhaps other times). */
6970 if (target_has_execution)
6971 /* NB: The register write goes through to the target. */
6972 regcache_cpy (regcache, inf_state->registers);
6973
6974 discard_infcall_suspend_state (inf_state);
6975 }
6976
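/* Cleanup routine wrapping restore_infcall_suspend_state.  */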
6977 static void
6978 do_restore_infcall_suspend_state_cleanup (void *state)
6979 {
6980 restore_infcall_suspend_state (state);
6981 }
6982
6983 struct cleanup *
6984 make_cleanup_restore_infcall_suspend_state
6985 (struct infcall_suspend_state *inf_state)
6986 {
6987 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
6988 }
6989
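/* Release INF_STATE without restoring it to the inferior.  */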
6990 void
6991 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6992 {
6993 regcache_xfree (inf_state->registers);
6994 xfree (inf_state->siginfo_data);
6995 xfree (inf_state);
6996 }
6997
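/* Return the register cache saved in INF_STATE.  */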
6998 struct regcache *
6999 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
7000 {
7001 return inf_state->registers;
7002 }
7003
7004 /* infcall_control_state contains state regarding gdb's control of the
7005 inferior itself like stepping control. It also contains session state like
7006 the user's currently selected frame. */
7007
7008 struct infcall_control_state
7009 {
7010 struct thread_control_state thread_control;
7011 struct inferior_control_state inferior_control;
7012
7013 /* Other fields: */
7014 enum stop_stack_kind stop_stack_dummy;
7015 int stopped_by_random_signal;
7016 int stop_after_trap;
7017
7018 /* ID of the selected frame when the inferior function call was made. */
7019 struct frame_id selected_frame_id;
7020 };
7021
7022 /* Save all of the information associated with the inferior<==>gdb
7023 connection. */
7024
7025 struct infcall_control_state *
7026 save_infcall_control_state (void)
7027 {
7028 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
7029 struct thread_info *tp = inferior_thread ();
7030 struct inferior *inf = current_inferior ();
7031
7032 inf_status->thread_control = tp->control;
7033 inf_status->inferior_control = inf->control;
7034
7035 tp->control.step_resume_breakpoint = NULL;
7036 tp->control.exception_resume_breakpoint = NULL;
7037
7038 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
7039 chain. If caller's caller is walking the chain, they'll be happier if we
7040 hand them back the original chain when restore_infcall_control_state is
7041 called. */
7042 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
7043
7044 /* Other fields: */
7045 inf_status->stop_stack_dummy = stop_stack_dummy;
7046 inf_status->stopped_by_random_signal = stopped_by_random_signal;
7047 inf_status->stop_after_trap = stop_after_trap;
7048
7049 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
7050
7051 return inf_status;
7052 }
7053
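/* Helper for catch_errors.  Re-select the frame identified by ARGS (a
struct frame_id *).  Returns 1 on success, 0 if the frame can no
longer be found.  */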
7054 static int
7055 restore_selected_frame (void *args)
7056 {
7057 struct frame_id *fid = (struct frame_id *) args;
7058 struct frame_info *frame;
7059
7060 frame = frame_find_by_id (*fid);
7061
7062 /* If the frame can no longer be found, the previously selected frame
7063 is gone; warn and let the caller fall back to another frame. */
7064 if (frame == NULL)
7065 {
7066 warning (_("Unable to restore previously selected frame."));
7067 return 0;
7068 }
7069
7070 select_frame (frame);
7071
7072 return (1);
7073 }
7074
7075 /* Restore inferior session state to INF_STATUS. */
7076
7077 void
7078 restore_infcall_control_state (struct infcall_control_state *inf_status)
7079 {
7080 struct thread_info *tp = inferior_thread ();
7081 struct inferior *inf = current_inferior ();
7082
7083 if (tp->control.step_resume_breakpoint)
7084 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
7085
7086 if (tp->control.exception_resume_breakpoint)
7087 tp->control.exception_resume_breakpoint->disposition
7088 = disp_del_at_next_stop;
7089
7090 /* Handle the bpstat_copy of the chain. */
7091 bpstat_clear (&tp->control.stop_bpstat);
7092
7093 tp->control = inf_status->thread_control;
7094 inf->control = inf_status->inferior_control;
7095
7096 /* Other fields: */
7097 stop_stack_dummy = inf_status->stop_stack_dummy;
7098 stopped_by_random_signal = inf_status->stopped_by_random_signal;
7099 stop_after_trap = inf_status->stop_after_trap;
7100
7101 if (target_has_stack)
7102 {
7103 /* The point of catch_errors is that if the stack is clobbered,
7104 walking the stack might encounter a garbage pointer and
7105 error() trying to dereference it. */
7106 if (catch_errors
7107 (restore_selected_frame, &inf_status->selected_frame_id,
7108 "Unable to restore previously selected frame:\n",
7109 RETURN_MASK_ERROR) == 0)
7110 /* Error in restoring the selected frame. Select the innermost
7111 frame. */
7112 select_frame (get_current_frame ());
7113 }
7114
7115 xfree (inf_status);
7116 }
7117
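/* Cleanup routine wrapping restore_infcall_control_state.  */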
7118 static void
7119 do_restore_infcall_control_state_cleanup (void *sts)
7120 {
7121 restore_infcall_control_state (sts);
7122 }
7123
7124 struct cleanup *
7125 make_cleanup_restore_infcall_control_state
7126 (struct infcall_control_state *inf_status)
7127 {
7128 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
7129 }
7130
7131 void
7132 discard_infcall_control_state (struct infcall_control_state *inf_status)
7133 {
7134 if (inf_status->thread_control.step_resume_breakpoint)
7135 inf_status->thread_control.step_resume_breakpoint->disposition
7136 = disp_del_at_next_stop;
7137
7138 if (inf_status->thread_control.exception_resume_breakpoint)
7139 inf_status->thread_control.exception_resume_breakpoint->disposition
7140 = disp_del_at_next_stop;
7141
7142 /* See save_infcall_control_state for info on stop_bpstat. */
7143 bpstat_clear (&inf_status->thread_control.stop_bpstat);
7144
7145 xfree (inf_status);
7146 }
7147 \f
7148 /* restore_inferior_ptid() will be used by the cleanup machinery
7149 to restore the inferior_ptid value saved in a call to
7150 save_inferior_ptid(). */
7151
7152 static void
7153 restore_inferior_ptid (void *arg)
7154 {
7155 ptid_t *saved_ptid_ptr = arg;
7156
7157 inferior_ptid = *saved_ptid_ptr;
7158 xfree (arg);
7159 }
7160
7161 /* Save the value of inferior_ptid so that it may be restored by a
7162 later call to do_cleanups(). Returns the struct cleanup pointer
7163 needed for later doing the cleanup. */
7164
7165 struct cleanup *
7166 save_inferior_ptid (void)
7167 {
7168 ptid_t *saved_ptid_ptr;
7169
7170 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
7171 *saved_ptid_ptr = inferior_ptid;
7172 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
7173 }
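/* Illustrative usage, with OTHER_PTID standing in for some thread the
caller needs to switch to temporarily; do_cleanups runs
restore_inferior_ptid, which puts the saved value back:

     struct cleanup *old_chain = save_inferior_ptid ();

     inferior_ptid = other_ptid;
     ...
     do_cleanups (old_chain);  */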
7174
7175 /* See inferior.h. */
7176
7177 void
7178 clear_exit_convenience_vars (void)
7179 {
7180 clear_internalvar (lookup_internalvar ("_exitsignal"));
7181 clear_internalvar (lookup_internalvar ("_exitcode"));
7182 }
7183 \f
7184
7185 /* User interface for reverse debugging:
7186 Set exec-direction / show exec-direction commands
7187 (setting the direction errors out unless the target can execute in reverse). */
7188
7189 int execution_direction = EXEC_FORWARD;
7190 static const char exec_forward[] = "forward";
7191 static const char exec_reverse[] = "reverse";
7192 static const char *exec_direction = exec_forward;
7193 static const char *const exec_direction_names[] = {
7194 exec_forward,
7195 exec_reverse,
7196 NULL
7197 };
7198
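/* The "set exec-direction" callback.  Rejects the new value unless the
target is able to execute in reverse.  */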
7199 static void
7200 set_exec_direction_func (char *args, int from_tty,
7201 struct cmd_list_element *cmd)
7202 {
7203 if (target_can_execute_reverse)
7204 {
7205 if (!strcmp (exec_direction, exec_forward))
7206 execution_direction = EXEC_FORWARD;
7207 else if (!strcmp (exec_direction, exec_reverse))
7208 execution_direction = EXEC_REVERSE;
7209 }
7210 else
7211 {
7212 exec_direction = exec_forward;
7213 error (_("Target does not support this operation."));
7214 }
7215 }
7216
7217 static void
7218 show_exec_direction_func (struct ui_file *out, int from_tty,
7219 struct cmd_list_element *cmd, const char *value)
7220 {
7221 switch (execution_direction) {
7222 case EXEC_FORWARD:
7223 fprintf_filtered (out, _("Forward.\n"));
7224 break;
7225 case EXEC_REVERSE:
7226 fprintf_filtered (out, _("Reverse.\n"));
7227 break;
7228 default:
7229 internal_error (__FILE__, __LINE__,
7230 _("bogus execution_direction value: %d"),
7231 (int) execution_direction);
7232 }
7233 }
7234
7235 static void
7236 show_schedule_multiple (struct ui_file *file, int from_tty,
7237 struct cmd_list_element *c, const char *value)
7238 {
7239 fprintf_filtered (file, _("Resuming the execution of threads "
7240 "of all processes is %s.\n"), value);
7241 }
7242
7243 /* Implementation of `siginfo' variable. */
7244
7245 static const struct internalvar_funcs siginfo_funcs =
7246 {
7247 siginfo_make_value,
7248 NULL,
7249 NULL
7250 };
7251
7252 void
7253 _initialize_infrun (void)
7254 {
7255 int i;
7256 int numsigs;
7257 struct cmd_list_element *c;
7258
7259 add_info ("signals", signals_info, _("\
7260 What debugger does when program gets various signals.\n\
7261 Specify a signal as argument to print info on that signal only."));
7262 add_info_alias ("handle", "signals", 0);
7263
7264 c = add_com ("handle", class_run, handle_command, _("\
7265 Specify how to handle signals.\n\
7266 Usage: handle SIGNAL [ACTIONS]\n\
7267 Args are signals and actions to apply to those signals.\n\
7268 If no actions are specified, the current settings for the specified signals\n\
7269 will be displayed instead.\n\
7270 \n\
7271 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7272 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7273 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7274 The special arg \"all\" is recognized to mean all signals except those\n\
7275 used by the debugger, typically SIGTRAP and SIGINT.\n\
7276 \n\
7277 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
7278 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
7279 Stop means reenter debugger if this signal happens (implies print).\n\
7280 Print means print a message if this signal happens.\n\
7281 Pass means let program see this signal; otherwise program doesn't know.\n\
7282 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7283 Pass and Stop may be combined.\n\
7284 \n\
7285 Multiple signals may be specified. Signal numbers and signal names\n\
7286 may be interspersed with actions, with the actions being performed for\n\
7287 all signals cumulatively specified."));
7288 set_cmd_completer (c, handle_completer);
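/* Illustrative CLI usage of the command registered above:

     (gdb) handle SIGUSR1 nostop noprint pass
     (gdb) handle SIGALRM SIGCHLD stop print

The first line silently passes SIGUSR1 straight to the program; the
second makes GDB stop and announce SIGALRM and SIGCHLD.  */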
7289
7290 if (xdb_commands)
7291 {
7292 add_com ("lz", class_info, signals_info, _("\
7293 What debugger does when program gets various signals.\n\
7294 Specify a signal as argument to print info on that signal only."));
7295 add_com ("z", class_run, xdb_handle_command, _("\
7296 Specify how to handle a signal.\n\
7297 Args are signals and actions to apply to those signals.\n\
7298 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7299 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7300 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7301 The special arg \"all\" is recognized to mean all signals except those\n\
7302 used by the debugger, typically SIGTRAP and SIGINT.\n\
7303 Recognized actions include \"s\" (toggles between stop and nostop),\n\
7304 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
7305 nopass), \"Q\" (noprint)\n\
7306 Stop means reenter debugger if this signal happens (implies print).\n\
7307 Print means print a message if this signal happens.\n\
7308 Pass means let program see this signal; otherwise program doesn't know.\n\
7309 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7310 Pass and Stop may be combined."));
7311 }
7312
7313 if (!dbx_commands)
7314 stop_command = add_cmd ("stop", class_obscure,
7315 not_just_help_class_command, _("\
7316 There is no `stop' command, but you can set a hook on `stop'.\n\
7317 This allows you to set a list of commands to be run each time execution\n\
7318 of the program stops."), &cmdlist);
7319
7320 add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
7321 Set inferior debugging."), _("\
7322 Show inferior debugging."), _("\
7323 When non-zero, inferior specific debugging is enabled."),
7324 NULL,
7325 show_debug_infrun,
7326 &setdebuglist, &showdebuglist);
7327
7328 add_setshow_boolean_cmd ("displaced", class_maintenance,
7329 &debug_displaced, _("\
7330 Set displaced stepping debugging."), _("\
7331 Show displaced stepping debugging."), _("\
7332 When non-zero, displaced stepping specific debugging is enabled."),
7333 NULL,
7334 show_debug_displaced,
7335 &setdebuglist, &showdebuglist);
7336
7337 add_setshow_boolean_cmd ("non-stop", no_class,
7338 &non_stop_1, _("\
7339 Set whether gdb controls the inferior in non-stop mode."), _("\
7340 Show whether gdb controls the inferior in non-stop mode."), _("\
7341 When debugging a multi-threaded program and this setting is\n\
7342 off (the default, also called all-stop mode), when one thread stops\n\
7343 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
7344 all other threads in the program while you interact with the thread of\n\
7345 interest. When you continue or step a thread, you can allow the other\n\
7346 threads to run, or have them remain stopped, but while you inspect any\n\
7347 thread's state, all threads stop.\n\
7348 \n\
7349 In non-stop mode, when one thread stops, other threads can continue\n\
7350 to run freely. You'll be able to step each thread independently,\n\
7351 leave it stopped or free to run as needed."),
7352 set_non_stop,
7353 show_non_stop,
7354 &setlist,
7355 &showlist);
7356
7357 numsigs = (int) GDB_SIGNAL_LAST;
7358 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
7359 signal_print = (unsigned char *)
7360 xmalloc (sizeof (signal_print[0]) * numsigs);
7361 signal_program = (unsigned char *)
7362 xmalloc (sizeof (signal_program[0]) * numsigs);
7363 signal_catch = (unsigned char *)
7364 xmalloc (sizeof (signal_catch[0]) * numsigs);
7365 signal_pass = (unsigned char *)
7366 xmalloc (sizeof (signal_pass[0]) * numsigs);
7367 for (i = 0; i < numsigs; i++)
7368 {
7369 signal_stop[i] = 1;
7370 signal_print[i] = 1;
7371 signal_program[i] = 1;
7372 signal_catch[i] = 0;
7373 }
7374
7375 /* Signals caused by debugger's own actions
7376 should not be given to the program afterwards. */
7377 signal_program[GDB_SIGNAL_TRAP] = 0;
7378 signal_program[GDB_SIGNAL_INT] = 0;
7379
7380 /* Signals that are not errors should not normally enter the debugger. */
7381 signal_stop[GDB_SIGNAL_ALRM] = 0;
7382 signal_print[GDB_SIGNAL_ALRM] = 0;
7383 signal_stop[GDB_SIGNAL_VTALRM] = 0;
7384 signal_print[GDB_SIGNAL_VTALRM] = 0;
7385 signal_stop[GDB_SIGNAL_PROF] = 0;
7386 signal_print[GDB_SIGNAL_PROF] = 0;
7387 signal_stop[GDB_SIGNAL_CHLD] = 0;
7388 signal_print[GDB_SIGNAL_CHLD] = 0;
7389 signal_stop[GDB_SIGNAL_IO] = 0;
7390 signal_print[GDB_SIGNAL_IO] = 0;
7391 signal_stop[GDB_SIGNAL_POLL] = 0;
7392 signal_print[GDB_SIGNAL_POLL] = 0;
7393 signal_stop[GDB_SIGNAL_URG] = 0;
7394 signal_print[GDB_SIGNAL_URG] = 0;
7395 signal_stop[GDB_SIGNAL_WINCH] = 0;
7396 signal_print[GDB_SIGNAL_WINCH] = 0;
7397 signal_stop[GDB_SIGNAL_PRIO] = 0;
7398 signal_print[GDB_SIGNAL_PRIO] = 0;
7399
7400 /* These signals are used internally by user-level thread
7401 implementations. (See signal(5) on Solaris.) Like the above
7402 signals, a healthy program receives and handles them as part of
7403 its normal operation. */
7404 signal_stop[GDB_SIGNAL_LWP] = 0;
7405 signal_print[GDB_SIGNAL_LWP] = 0;
7406 signal_stop[GDB_SIGNAL_WAITING] = 0;
7407 signal_print[GDB_SIGNAL_WAITING] = 0;
7408 signal_stop[GDB_SIGNAL_CANCEL] = 0;
7409 signal_print[GDB_SIGNAL_CANCEL] = 0;
7410
7411 /* Update cached state. */
7412 signal_cache_update (-1);
7413
7414 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
7415 &stop_on_solib_events, _("\
7416 Set stopping for shared library events."), _("\
7417 Show stopping for shared library events."), _("\
7418 If nonzero, gdb will give control to the user when the dynamic linker\n\
7419 notifies gdb of shared library events. The most common event of interest\n\
7420 to the user would be loading/unloading of a new library."),
7421 set_stop_on_solib_events,
7422 show_stop_on_solib_events,
7423 &setlist, &showlist);
7424
7425 add_setshow_enum_cmd ("follow-fork-mode", class_run,
7426 follow_fork_mode_kind_names,
7427 &follow_fork_mode_string, _("\
7428 Set debugger response to a program call of fork or vfork."), _("\
7429 Show debugger response to a program call of fork or vfork."), _("\
7430 A fork or vfork creates a new process. follow-fork-mode can be:\n\
7431 parent - the original process is debugged after a fork\n\
7432 child - the new process is debugged after a fork\n\
7433 The unfollowed process will continue to run.\n\
7434 By default, the debugger will follow the parent process."),
7435 NULL,
7436 show_follow_fork_mode_string,
7437 &setlist, &showlist);
7438
7439 add_setshow_enum_cmd ("follow-exec-mode", class_run,
7440 follow_exec_mode_names,
7441 &follow_exec_mode_string, _("\
7442 Set debugger response to a program call of exec."), _("\
7443 Show debugger response to a program call of exec."), _("\
7444 An exec call replaces the program image of a process.\n\
7445 \n\
7446 follow-exec-mode can be:\n\
7447 \n\
7448 new - the debugger creates a new inferior and rebinds the process\n\
7449 to this new inferior. The program the process was running before\n\
7450 the exec call can be restarted afterwards by restarting the original\n\
7451 inferior.\n\
7452 \n\
7453 same - the debugger keeps the process bound to the same inferior.\n\
7454 The new executable image replaces the previous executable loaded in\n\
7455 the inferior. Restarting the inferior after the exec call restarts\n\
7456 the executable the process was running after the exec call.\n\
7457 \n\
7458 By default, the debugger will use the same inferior."),
7459 NULL,
7460 show_follow_exec_mode_string,
7461 &setlist, &showlist);
7462
7463 add_setshow_enum_cmd ("scheduler-locking", class_run,
7464 scheduler_enums, &scheduler_mode, _("\
7465 Set mode for locking scheduler during execution."), _("\
7466 Show mode for locking scheduler during execution."), _("\
7467 off == no locking (threads may preempt at any time)\n\
7468 on == full locking (no thread except the current thread may run)\n\
7469 step == scheduler locked during every single-step operation.\n\
7470 In this mode, no other thread may run during a step command.\n\
7471 Other threads may run while stepping over a function call ('next')."),
7472 set_schedlock_func, /* traps on target vector */
7473 show_scheduler_mode,
7474 &setlist, &showlist);
7475
7476 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
7477 Set mode for resuming threads of all processes."), _("\
7478 Show mode for resuming threads of all processes."), _("\
7479 When on, execution commands (such as 'continue' or 'next') resume all\n\
7480 threads of all processes. When off (which is the default), execution\n\
7481 commands only resume the threads of the current process. The set of\n\
7482 threads that are resumed is further refined by the scheduler-locking\n\
7483 mode (see help set scheduler-locking)."),
7484 NULL,
7485 show_schedule_multiple,
7486 &setlist, &showlist);
7487
7488 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
7489 Set mode of the step operation."), _("\
7490 Show mode of the step operation."), _("\
7491 When set, doing a step over a function without debug line information\n\
7492 will stop at the first instruction of that function. Otherwise, the\n\
7493 function is skipped and the step command stops at a different source line."),
7494 NULL,
7495 show_step_stop_if_no_debug,
7496 &setlist, &showlist);
7497
7498 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
7499 &can_use_displaced_stepping, _("\
7500 Set debugger's willingness to use displaced stepping."), _("\
7501 Show debugger's willingness to use displaced stepping."), _("\
7502 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
7503 supported by the target architecture. If off, gdb will not use displaced\n\
7504 stepping to step over breakpoints, even if such is supported by the target\n\
7505 architecture. If auto (which is the default), gdb will use displaced stepping\n\
7506 if the target architecture supports it and non-stop mode is active, but will not\n\
7507 use it in all-stop mode (see help set non-stop)."),
7508 NULL,
7509 show_can_use_displaced_stepping,
7510 &setlist, &showlist);
7511
7512 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
7513 &exec_direction, _("Set direction of execution.\n\
7514 Options are 'forward' or 'reverse'."),
7515 _("Show direction of execution (forward/reverse)."),
7516 _("Tells gdb whether to execute forward or backward."),
7517 set_exec_direction_func, show_exec_direction_func,
7518 &setlist, &showlist);
7519
7520 /* Set/show detach-on-fork: user-settable mode. */
7521
7522 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
7523 Set whether gdb will detach the child of a fork."), _("\
7524 Show whether gdb will detach the child of a fork."), _("\
7525 Tells gdb whether to detach the child of a fork."),
7526 NULL, NULL, &setlist, &showlist);
7527
7528 /* Set/show disable address space randomization mode. */
7529
7530 add_setshow_boolean_cmd ("disable-randomization", class_support,
7531 &disable_randomization, _("\
7532 Set disabling of debuggee's virtual address space randomization."), _("\
7533 Show disabling of debuggee's virtual address space randomization."), _("\
7534 When this mode is on (which is the default), randomization of the virtual\n\
7535 address space is disabled. Standalone programs run with the randomization\n\
7536 enabled by default on some platforms."),
7537 &set_disable_randomization,
7538 &show_disable_randomization,
7539 &setlist, &showlist);
7540
7541 /* ptid initializations */
7542 inferior_ptid = null_ptid;
7543 target_last_wait_ptid = minus_one_ptid;
7544
7545 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
7546 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
7547 observer_attach_thread_exit (infrun_thread_thread_exit);
7548 observer_attach_inferior_exit (infrun_inferior_exit);
7549
7550 /* Explicitly create without lookup, since that tries to create a
7551 value with a void typed value, and when we get here, gdbarch
7552 isn't initialized yet. At this point, we're quite sure there
7553 isn't another convenience variable of the same name. */
7554 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);
7555
7556 add_setshow_boolean_cmd ("observer", no_class,
7557 &observer_mode_1, _("\
7558 Set whether gdb controls the inferior in observer mode."), _("\
7559 Show whether gdb controls the inferior in observer mode."), _("\
7560 In observer mode, GDB can get data from the inferior, but not\n\
7561 affect its execution. Registers and memory may not be changed,\n\
7562 breakpoints may not be set, and the program cannot be interrupted\n\
7563 or signalled."),
7564 set_observer_mode,
7565 show_observer_mode,
7566 &setlist,
7567 &showlist);
7568 }